/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Memory functions.
 *
 * Copyright (c) 2000-2004 Gunnar Ritter, Freiburg i. Br., Germany.
 * Copyright (c) 2012 - 2016 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif
#ifdef HAVE_MEMORY_DEBUG
CTA(sizeof(char) == sizeof(ui8_t));

# define _HOPE_SIZE (2 * 8 * sizeof(char))
# define _HOPE_SET(C) \
do {\
   union a_mem_ptr __xl, __xu;\
   struct a_mem_chunk *__xc;\
   __xl.p_p = (C).p_p;\
   __xc = __xl.p_c - 1;\
   __xu.p_p = __xc;\
   (C).p_cp += 8;\
   __xl.p_ui8p[0]=0xDE; __xl.p_ui8p[1]=0xAA;\
   __xl.p_ui8p[2]=0x55; __xl.p_ui8p[3]=0xAD;\
   __xl.p_ui8p[4]=0xBE; __xl.p_ui8p[5]=0x55;\
   __xl.p_ui8p[6]=0xAA; __xl.p_ui8p[7]=0xEF;\
   __xu.p_ui8p += __xc->mc_size - 8;\
   __xu.p_ui8p[0]=0xDE; __xu.p_ui8p[1]=0xAA;\
   __xu.p_ui8p[2]=0x55; __xu.p_ui8p[3]=0xAD;\
   __xu.p_ui8p[4]=0xBE; __xu.p_ui8p[5]=0x55;\
   __xu.p_ui8p[6]=0xAA; __xu.p_ui8p[7]=0xEF;\
} while (0)
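
/* (Illustrative sketch, not part of the original code.  A chunk handed out
 * by the debug smalloc()/srealloc()/scalloc() below is laid out as
 *
 *    [struct a_mem_chunk][lower canary, 8][user bytes ...][upper canary, 8]
 *                                         ^ pointer returned to the caller
 *
 * mc_size spans the entire region, so _HOPE_SET()/_HOPE_GET() find the
 * upper canary at (ui8_t*)chunk + mc_size - 8.  The eight canary bytes are
 * 0xDE,0xAD,0xBE,0xEF at offsets 0/3/4/7, interleaved with the alternating
 * bit patterns 0xAA/0x55, which makes byte-wise verification cheap) */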
# define _HOPE_GET_TRACE(C,BAD) \
do {\
   (C).p_cp += 8;\
   _HOPE_GET(C, BAD);\
   (C).p_cp += 8;\
} while (0)
# define _HOPE_GET(C,BAD) \
do {\
   union a_mem_ptr __xl, __xu;\
   struct a_mem_chunk *__xc;\
   ui32_t __i;\
   __xl.p_p = (C).p_p;\
   __xl.p_cp -= 8;\
   (C).p_cp = __xl.p_cp;\
   __xc = __xl.p_c - 1;\
   (BAD) = FAL0;\
   __i = 0;\
   if (__xl.p_ui8p[0] != 0xDE) __i |= 1<<0;\
   if (__xl.p_ui8p[1] != 0xAA) __i |= 1<<1;\
   if (__xl.p_ui8p[2] != 0x55) __i |= 1<<2;\
   if (__xl.p_ui8p[3] != 0xAD) __i |= 1<<3;\
   if (__xl.p_ui8p[4] != 0xBE) __i |= 1<<4;\
   if (__xl.p_ui8p[5] != 0x55) __i |= 1<<5;\
   if (__xl.p_ui8p[6] != 0xAA) __i |= 1<<6;\
   if (__xl.p_ui8p[7] != 0xEF) __i |= 1<<7;\
   if (__i != 0) {\
      (BAD) = TRU1;\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         __xl.p_p, __i, mdbg_file, mdbg_line);\
   }\
   __xu.p_p = __xc;\
   __xu.p_ui8p += __xc->mc_size - 8;\
   __i = 0;\
   if (__xu.p_ui8p[0] != 0xDE) __i |= 1<<0;\
   if (__xu.p_ui8p[1] != 0xAA) __i |= 1<<1;\
   if (__xu.p_ui8p[2] != 0x55) __i |= 1<<2;\
   if (__xu.p_ui8p[3] != 0xAD) __i |= 1<<3;\
   if (__xu.p_ui8p[4] != 0xBE) __i |= 1<<4;\
   if (__xu.p_ui8p[5] != 0x55) __i |= 1<<5;\
   if (__xu.p_ui8p[6] != 0xAA) __i |= 1<<6;\
   if (__xu.p_ui8p[7] != 0xEF) __i |= 1<<7;\
   if (__i != 0) {\
      (BAD) = TRU1;\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         __xl.p_p, __i, mdbg_file, mdbg_line);\
   }\
   if (BAD)\
      n_alert(" ..canary last seen: %s, line %" PRIu16 "",\
         __xc->mc_file, __xc->mc_line);\
} while (0)
#endif /* HAVE_MEMORY_DEBUG */
#ifdef HAVE_MEMORY_DEBUG
struct a_mem_chunk{
   struct a_mem_chunk *mc_prev;
   struct a_mem_chunk *mc_next;
   char const *mc_file;
   ui16_t mc_line;
   ui8_t mc_isfree;
   ui8_t __dummy[1];
   ui32_t mc_size;
};

union a_mem_ptr{
   void *p_p;
   struct a_mem_chunk *p_c;
   char *p_cp;
   ui8_t *p_ui8p;
};
#endif /* HAVE_MEMORY_DEBUG */

/*
 * String dope -- this is a temporary left over
 */

/* In debug mode the "string dope" allocations are enwrapped in canaries, just
 * as we do with our normal memory allocator */
#ifdef HAVE_MEMORY_DEBUG
# define _SHOPE_SIZE (2u * 8 * sizeof(char) + sizeof(struct schunk))

CTA(sizeof(char) == sizeof(ui8_t));

struct schunk {
   char const *file;
   ui32_t line;
   ui16_t usr_size;
   ui16_t full_size;
};

union sptr {
   void *p;
   struct schunk *c;
   char *cp;
   ui8_t *ui8p;
};
#endif /* HAVE_MEMORY_DEBUG */
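
/* (Illustrative sketch, not part of the original code.  With memory debug
 * enabled a single string dope allocation inside a buffer looks like
 *
 *    [struct schunk][lower canary, 8][usr_size bytes][upper canary, 8][pad]
 *                                    ^ pointer returned by salloc()
 *
 * where full_size is the complete SALIGN-rounded span; _salloc_bcheck()
 * steps from chunk to chunk via full_size and verifies both canaries) */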

union __align__ {
   char *cp;
   size_t sz;
   ul_i ul;
};
#define SALIGN (sizeof(union __align__) - 1)

CTA(ISPOW2(SALIGN + 1));
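
/* (Worked example, not part of the original code: salloc() rounds each
 * request up to the alignment via size += SALIGN; size &= ~SALIGN.
 * Assuming an 8 byte union __align__, SALIGN is 7 and a request of
 * 13 bytes becomes (13 + 7) & ~7 = 16 bytes) */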

struct b_base {
   struct buffer *_next;
   char *_bot;    /* For spreserve() */
   char *_relax;  /* If !NULL, used by srelax() instead of ._bot */
   char *_max;    /* Max usable byte */
   char *_caster; /* NULL if full */
};

/* Single instance builtin buffer. Room for anything, most of the time */
struct b_bltin {
   struct b_base b_base;
   char b_buf[SBUFFER_BUILTIN - sizeof(struct b_base)];
};
#define SBLTIN_SIZE SIZEOF_FIELD(struct b_bltin, b_buf)

/* Dynamically allocated buffers to overcome shortage, always released again
 * once the command loop ticks */
struct b_dyn {
   struct b_base b_base;
   char b_buf[SBUFFER_SIZE - sizeof(struct b_base)];
};
#define SDYN_SIZE SIZEOF_FIELD(struct b_dyn, b_buf)

/* The multiplexer of the several real b_* */
struct buffer {
   struct b_base b;
   char b_buf[VFIELD_SIZE(SALIGN + 1)];
};

/* Requests that exceed SDYN_SIZE-1 and thus cannot be handled by string dope
 * are always served by the normal memory allocator (which panics if memory
 * cannot be served). Note that such an allocation has not yet occurred; it is
 * only included as a security fallback bypass */
struct hugebuf {
   struct hugebuf *hb_next;
   char hb_buf[VFIELD_SIZE(SALIGN + 1)];
};

#ifdef HAVE_MEMORY_DEBUG
static size_t a_mem_aall, a_mem_acur, a_mem_amax,
   a_mem_mall, a_mem_mcur, a_mem_mmax;

static struct a_mem_chunk *a_mem_list, *a_mem_free;
#endif

/*
 * String dope -- this is a temporary left over
 */

static struct b_bltin _builtin_buf;
static struct buffer *_buf_head, *_buf_list, *_buf_server, *_buf_relax;
static size_t _relax_recur_no;
static struct hugebuf *_huge_list;
#ifdef HAVE_MEMORY_DEBUG
static size_t _all_cnt, _all_cycnt, _all_cycnt_max,
   _all_size, _all_cysize, _all_cysize_max, _all_min,
   _all_max, _all_wast,
   _all_bufcnt, _all_cybufcnt, _all_cybufcnt_max,
   _all_resetreqs, _all_resets;
#endif

/* sreset() / srelax() release a buffer, check the canaries of all chunks */
#ifdef HAVE_MEMORY_DEBUG
static void _salloc_bcheck(struct buffer *b);
#endif

#ifdef HAVE_MEMORY_DEBUG
static void
_salloc_bcheck(struct buffer *b)
{
   union sptr pmax, pp;
   /*NYD2_ENTER;*/

   pmax.cp = (b->b._caster == NULL) ? b->b._max : b->b._caster;
   pp.cp = b->b._bot;

   while (pp.cp < pmax.cp) {
      struct schunk *c;
      union sptr x;
      void *ux;
      ui8_t i;

      c = pp.c;
      pp.cp += c->full_size;
      x.p = c + 1;
      ux = x.cp + 8;

      i = 0;
      if (x.ui8p[0] != 0xDE) i |= 1<<0;
      if (x.ui8p[1] != 0xAA) i |= 1<<1;
      if (x.ui8p[2] != 0x55) i |= 1<<2;
      if (x.ui8p[3] != 0xAD) i |= 1<<3;
      if (x.ui8p[4] != 0xBE) i |= 1<<4;
      if (x.ui8p[5] != 0x55) i |= 1<<5;
      if (x.ui8p[6] != 0xAA) i |= 1<<6;
      if (x.ui8p[7] != 0xEF) i |= 1<<7;
      if (i != 0)
         n_alert("sdope %p: corrupt lower canary: 0x%02X, size %u: %s, line %u",
            ux, i, c->usr_size, c->file, c->line);
      x.cp += 8 + c->usr_size;

      i = 0;
      if (x.ui8p[0] != 0xDE) i |= 1<<0;
      if (x.ui8p[1] != 0xAA) i |= 1<<1;
      if (x.ui8p[2] != 0x55) i |= 1<<2;
      if (x.ui8p[3] != 0xAD) i |= 1<<3;
      if (x.ui8p[4] != 0xBE) i |= 1<<4;
      if (x.ui8p[5] != 0x55) i |= 1<<5;
      if (x.ui8p[6] != 0xAA) i |= 1<<6;
      if (x.ui8p[7] != 0xEF) i |= 1<<7;
      if (i != 0)
         n_alert("sdope %p: corrupt upper canary: 0x%02X, size %u: %s, line %u",
            ux, i, c->usr_size, c->file, c->line);
   }
   /*NYD2_LEAVE;*/
}
#endif /* HAVE_MEMORY_DEBUG */

#ifndef HAVE_MEMORY_DEBUG
FL void *
smalloc(size_t s SMALLOC_DEBUG_ARGS)
{
   void *rv;
   NYD2_ENTER;

   if (s == 0)
      s = 1;
   if ((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
srealloc(void *v, size_t s SMALLOC_DEBUG_ARGS)
{
   void *rv;
   NYD2_ENTER;

   if (s == 0)
      s = 1;
   if (v == NULL)
      rv = smalloc(s);
   else if ((rv = realloc(v, s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
scalloc(size_t nmemb, size_t size SMALLOC_DEBUG_ARGS)
{
   void *rv;
   NYD2_ENTER;

   if (size == 0)
      size = 1;
   if ((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}
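
/* (Usage note, not part of the original file: because the wrappers above
 * panic instead of returning NULL, call sites may use the result without a
 * NULL check, e.g.
 *
 *    char *cp = smalloc(len +1);
 *    memcpy(cp, src, len);
 *    cp[len] = '\0';
 * ) */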

#else /* !HAVE_MEMORY_DEBUG */
FL void *
(smalloc)(size_t s SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   NYD2_ENTER;

   if (s == 0)
      s = 1;
   if (s > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("smalloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   s += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (malloc)(s)) == NULL)
      n_panic(_("no memory"));
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)s;

   a_mem_list = p.p_c++;
   _HOPE_SET(p);

   ++a_mem_aall;
   ++a_mem_acur;
   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mall += s;
   a_mem_mcur += s;
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
   NYD2_LEAVE;
   return p.p_p;
}

FL void *
(srealloc)(void *v, size_t s SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if ((p.p_p = v) == NULL) {
      p.p_p = (smalloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   _HOPE_GET(p, isbad);
   --p.p_c;
   if (p.p_c->mc_isfree) {
      n_err("srealloc(): region freed! At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

   if (p.p_c == a_mem_list)
      a_mem_list = p.p_c->mc_next;
   else
      p.p_c->mc_prev->mc_next = p.p_c->mc_next;
   if (p.p_c->mc_next != NULL)
      p.p_c->mc_next->mc_prev = p.p_c->mc_prev;

   --a_mem_acur;
   a_mem_mcur -= p.p_c->mc_size;
jforce:
   if (s == 0)
      s = 1;
   if (s > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("srealloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   s += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)s;
   a_mem_list = p.p_c++;
   _HOPE_SET(p);

   ++a_mem_aall;
   ++a_mem_acur;
   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mall += s;
   a_mem_mcur += s;
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_p;
}

FL void *
(scalloc)(size_t nmemb, size_t size SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   NYD2_ENTER;

   if (size == 0)
      size = 1;
   if (nmemb == 0)
      nmemb = 1;
   if (size > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("scalloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if ((UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE) / nmemb < size)
      n_panic("scalloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_p, 0, size);
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)size;
   a_mem_list = p.p_c++;
   _HOPE_SET(p);

   ++a_mem_aall;
   ++a_mem_acur;
   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mall += size;
   a_mem_mcur += size;
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
   NYD2_LEAVE;
   return p.p_p;
}

FL void
(sfree)(void *v SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if ((p.p_p = v) == NULL) {
      n_err("sfree(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   _HOPE_GET(p, isbad);
   --p.p_c;
   if (p.p_c->mc_isfree) {
      n_err("sfree(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if (p.p_c == a_mem_list)
      a_mem_list = p.p_c->mc_next;
   else
      p.p_c->mc_prev->mc_next = p.p_c->mc_next;
   if (p.p_c->mc_next != NULL)
      p.p_c->mc_next->mc_prev = p.p_c->mc_prev;
   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(v, 0377, p.p_c->mc_size - sizeof(struct a_mem_chunk) - _HOPE_SIZE);

   --a_mem_acur;
   a_mem_mcur -= p.p_c->mc_size;

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      p.p_c->mc_next = a_mem_free;
      a_mem_free = p.p_c;
   } else
      (free)(p.p_c);
jleave:
   NYD2_LEAVE;
}

FL void
n_memreset(void)
{
   union a_mem_ptr p;
   size_t c = 0, s = 0;
   NYD_ENTER;

   n_memcheck();

   for (p.p_c = a_mem_free; p.p_c != NULL;) {
      void *vp = p.p_c;
      ++c;
      s += p.p_c->mc_size;
      p.p_c = p.p_c->mc_next;
      (free)(vp);
   }
   a_mem_free = NULL;

   if (options & (OPT_DEBUG | OPT_MEMDEBUG))
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
   NYD_LEAVE;
}

FL int
c_memtrace(void *v)
{
   /* For _HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   FILE *fp;
   union a_mem_ptr p, xp;
   bool_t isbad;
   size_t lines;
   NYD_ENTER;

   v = (void*)0x1;
   if ((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL) {
      n_perr("tmpfile", 0);
      goto jleave;
   }

   fprintf(fp, "Memory statistics:\n"
      " Count cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_mem_acur, a_mem_amax, a_mem_aall, a_mem_mcur, a_mem_mmax, a_mem_mall);

   fprintf(fp, "Currently allocated memory chunks:\n");
   for (lines = 0, p.p_c = a_mem_list; p.p_c != NULL;
         ++lines, p.p_c = p.p_c->mc_next) {
      xp = p;
      ++xp.p_c;
      _HOPE_GET_TRACE(xp, isbad);
      fprintf(fp, "%s%p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
         (isbad ? "! CANARY ERROR: " : ""), xp.p_p,
         (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)), p.p_c->mc_file,
         p.p_c->mc_line);
   }

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      fprintf(fp, "sfree()d memory chunks awaiting free():\n");
      for (p.p_c = a_mem_free; p.p_c != NULL; ++lines, p.p_c = p.p_c->mc_next) {
         xp = p;
         ++xp.p_c;
         _HOPE_GET_TRACE(xp, isbad);
         fprintf(fp, "%s%p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
            (isbad ? "! CANARY ERROR: " : ""), xp.p_p,
            (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
            p.p_c->mc_file, p.p_c->mc_line);
      }
   }

# if defined HAVE_OPENSSL && defined HAVE_OPENSSL_MEMHOOKS
   fprintf(fp, "OpenSSL leak report:\n");
   CRYPTO_mem_leaks_fp(fp);
# endif

   page_or_print(fp, lines);
   Fclose(fp);
   v = NULL;
jleave:
   NYD_LEAVE;
   return (v != NULL);
}

FL bool_t
n__memcheck(char const *mdbg_file, int mdbg_line)
{
   union a_mem_ptr p, xp;
   bool_t anybad = FAL0, isbad;
   size_t lines;
   NYD_ENTER;

   for (lines = 0, p.p_c = a_mem_list; p.p_c != NULL;
         ++lines, p.p_c = p.p_c->mc_next) {
      xp = p;
      ++xp.p_c;
      _HOPE_GET_TRACE(xp, isbad);
      if (isbad) {
         anybad = TRU1;
         n_err(
            "! CANARY ERROR: %p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
            xp.p_p, (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
            p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      for (p.p_c = a_mem_free; p.p_c != NULL; ++lines, p.p_c = p.p_c->mc_next) {
         xp = p;
         ++xp.p_c;
         _HOPE_GET_TRACE(xp, isbad);
         if (isbad) {
            anybad = TRU1;
            n_err(
               "! CANARY ERROR: %p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
               xp.p_p, (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
               p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }
   NYD_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void *
(salloc)(size_t size SALLOC_DEBUG_ARGS)
{
#ifdef HAVE_MEMORY_DEBUG
   size_t orig_size = size;
#endif
   union {struct buffer *b; struct hugebuf *hb; char *cp;} u;
   char *x, *y, *z;
   NYD2_ENTER;

   if (size == 0)
      ++size;
   size += SALIGN;
   size &= ~SALIGN;

#ifdef HAVE_MEMORY_DEBUG
   ++_all_cnt;
   ++_all_cycnt;
   _all_cycnt_max = MAX(_all_cycnt_max, _all_cycnt);
   _all_size += size;
   _all_cysize += size;
   _all_cysize_max = MAX(_all_cysize_max, _all_cysize);
   _all_min = (_all_max == 0) ? size : MIN(_all_min, size);
   _all_max = MAX(_all_max, size);
   _all_wast += size - orig_size;

   size += _SHOPE_SIZE;

   if (size >= SDYN_SIZE - 1)
      n_alert("salloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif

   /* Huge allocations are special */
   if (UNLIKELY(size >= SDYN_SIZE - 1))
      goto jhuge;

   /* Search for a buffer with enough free space to serve request */
   if ((u.b = _buf_server) != NULL)
      goto jumpin;
jredo:
   for (u.b = _buf_head; u.b != NULL; u.b = u.b->b._next) {
jumpin:
      x = u.b->b._caster;
      if (x == NULL) {
         if (u.b == _buf_server) {
            if (u.b == _buf_head && (u.b = _buf_head->b._next) != NULL) {
               _buf_server = u.b;
               goto jumpin;
            }
            _buf_server = NULL;
            goto jredo;
         }
         continue;
      }
      y = x + size;
      z = u.b->b._max;
      if (PTRCMP(y, <=, z)) {
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * non-NULL casters. However, because of _salloc_bcheck(), we may not
          * set ._caster to NULL because then it would check all chunks up to
          * ._max, which surely doesn't work; speed is no issue with DEBUG */
         u.b->b._caster = NDBG( PTRCMP(y + 42 + 16, >=, z) ? NULL : ) y;
         u.cp = x;
         goto jleave;
      }
   }

   /* Need a new buffer */
   if (_buf_head == NULL) {
      struct b_bltin *b = &_builtin_buf;
      b->b_base._max = b->b_buf + SBLTIN_SIZE - 1;
      _buf_head = (struct buffer*)b;
      u.b = _buf_head;
   } else {
#ifdef HAVE_MEMORY_DEBUG
      ++_all_bufcnt;
      ++_all_cybufcnt;
      _all_cybufcnt_max = MAX(_all_cybufcnt_max, _all_cybufcnt);
#endif
      u.b = smalloc(sizeof(struct b_dyn));
      u.b->b._max = u.b->b_buf + SDYN_SIZE - 1;
   }
   if (_buf_list != NULL)
      _buf_list->b._next = u.b;
   _buf_server = _buf_list = u.b;
   u.b->b._next = NULL;
   u.b->b._caster = (u.b->b._bot = u.b->b_buf) + size;
   u.b->b._relax = NULL;
   u.cp = u.b->b._bot;

jleave:
   /* Encapsulate user chunk in debug canaries */
#ifdef HAVE_MEMORY_DEBUG
   {
      union sptr xl, xu;
      struct schunk *xc;

      xl.p = u.cp;
      xc = xl.c;
      xc->file = mdbg_file;
      xc->line = mdbg_line;
      xc->usr_size = (ui16_t)orig_size;
      xc->full_size = (ui16_t)size;
      xl.p = xc + 1;
      xl.ui8p[0]=0xDE; xl.ui8p[1]=0xAA; xl.ui8p[2]=0x55; xl.ui8p[3]=0xAD;
      xl.ui8p[4]=0xBE; xl.ui8p[5]=0x55; xl.ui8p[6]=0xAA; xl.ui8p[7]=0xEF;
      u.cp = xl.cp + 8;
      xu.p = u.cp;
      xu.cp += orig_size;
      xu.ui8p[0]=0xDE; xu.ui8p[1]=0xAA; xu.ui8p[2]=0x55; xu.ui8p[3]=0xAD;
      xu.ui8p[4]=0xBE; xu.ui8p[5]=0x55; xu.ui8p[6]=0xAA; xu.ui8p[7]=0xEF;
   }
#endif
   NYD2_LEAVE;
   return u.cp;

jhuge:
   u.hb = smalloc(sizeof(*u.hb) - VFIELD_SIZEOF(struct hugebuf, hb_buf) +
         size +1);
   u.hb->hb_next = _huge_list;
   _huge_list = u.hb;
   u.cp = u.hb->hb_buf;
   goto jleave;
}
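
/* (Illustrative sketch, not part of the original code: ignoring the debug
 * canaries, the fast path of salloc() above is a plain bump ("caster")
 * allocator, i.e., roughly
 *
 *    if (buf->_caster != NULL && buf->_caster + size <= buf->_max) {
 *       void *rv = buf->_caster;
 *       buf->_caster += size;
 *       return rv;
 *    }
 *
 * and sreset()/srelax() reclaim everything at once by rewinding ._caster
 * rather than by freeing individual chunks) */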

FL void *
(csalloc)(size_t nmemb, size_t size SALLOC_DEBUG_ARGS)
{
   void *vp;
   NYD2_ENTER;

   size *= nmemb;
   vp = (salloc)(size SALLOC_DEBUG_ARGSCALL);
   memset(vp, 0, size);
   NYD2_LEAVE;
   return vp;
}

FL void
sreset(bool_t only_if_relaxed)
{
   struct buffer *blh, *bh;
   NYD_ENTER;

#ifdef HAVE_MEMORY_DEBUG
   ++_all_resetreqs;
#endif
   if (noreset) {
      /* Reset relaxation after any jump is a MUST */
      if (_relax_recur_no > 0)
         srelax_rele();
      goto jleave;
   }
   if (only_if_relaxed && _relax_recur_no == 0)
      goto jleave;

#ifdef HAVE_MEMORY_DEBUG
   _all_cycnt = _all_cysize = 0;
   _all_cybufcnt = (_buf_head != NULL && _buf_head->b._next != NULL);
   ++_all_resets;
#endif

   /* Reset relaxation after jump */
   if (_relax_recur_no > 0) {
      srelax_rele();
      assert(_relax_recur_no == 0);
   }

   blh = NULL;
   if ((bh = _buf_head) != NULL) {
      do {
         struct buffer *x = bh;
         bh = x->b._next;
#ifdef HAVE_MEMORY_DEBUG
         _salloc_bcheck(x);
#endif

         /* Give away all buffers that are not covered by sreset().
          * _buf_head is builtin and thus cannot be free()d */
         if (blh != NULL && x->b._bot == x->b_buf) {
            blh->b._next = bh;
            free(x);
         } else {
            blh = x;
            x->b._caster = x->b._bot;
            x->b._relax = NULL;
            DBG( memset(x->b._caster, 0377,
               PTR2SIZE(x->b._max - x->b._caster)); )
         }
      } while (bh != NULL);

      _buf_server = _buf_head;
      _buf_list = blh;
      _buf_relax = NULL;
   }

   while (_huge_list != NULL) {
      struct hugebuf *hb = _huge_list;
      _huge_list = hb->hb_next;
      free(hb);
   }

   n_memreset();
jleave:
   NYD_LEAVE;
}

FL void
srelax_hold(void)
{
   struct buffer *b;
   NYD_ENTER;

   if (_relax_recur_no++ == 0) {
      for (b = _buf_head; b != NULL; b = b->b._next)
         b->b._relax = b->b._caster;
      _buf_relax = _buf_server;
   }
   NYD_LEAVE;
}

FL void
srelax_rele(void)
{
   struct buffer *b;
   NYD_ENTER;

   assert(_relax_recur_no > 0);

   if (--_relax_recur_no == 0) {
      for (b = _buf_head; b != NULL; b = b->b._next) {
#ifdef HAVE_MEMORY_DEBUG
         _salloc_bcheck(b);
#endif
         b->b._caster = (b->b._relax != NULL) ? b->b._relax : b->b._bot;
         b->b._relax = NULL;
      }

      _buf_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_rele(): recursion >0!\n");
#endif
   NYD_LEAVE;
}

FL void
srelax(void)
{
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system. We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct buffer *b;
   NYD_ENTER;

   assert(_relax_recur_no > 0);

   if (_relax_recur_no == 1) {
      for (b = _buf_head; b != NULL; b = b->b._next) {
#ifdef HAVE_MEMORY_DEBUG
         _salloc_bcheck(b);
#endif
         b->b._caster = (b->b._relax != NULL) ? b->b._relax : b->b._bot;
         DBG( memset(b->b._caster, 0377, PTR2SIZE(b->b._max - b->b._caster)); )
      }
   }
   NYD_LEAVE;
}
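
/* (Usage sketch, not part of the original file: the intended calling pattern
 * for the relaxation interface is
 *
 *    srelax_hold();
 *    for (each message in the mailbox) {
 *       ...work with salloc()ed memory...
 *       srelax();      // rewind the casters to the hold point
 *    }
 *    srelax_rele();
 *
 * so that per-message salloc() garbage is reclaimed on every iteration
 * without an intervening sreset()) */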

FL void
spreserve(void)
{
   struct buffer *b;
   NYD_ENTER;

   for (b = _buf_head; b != NULL; b = b->b._next)
      b->b._bot = b->b._caster;
   NYD_LEAVE;
}
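
/* (Note, not part of the original file: since sreset() rewinds each buffer
 * only down to ._bot, lifting ._bot up to the current ._caster means that
 * everything allocated before the spreserve() call survives the following
 * sreset() cycles) */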

#ifdef HAVE_MEMORY_DEBUG
FL int
c_sstats(void *v)
{
   size_t excess;
   NYD_ENTER;
   UNUSED(v);

   excess = (_all_cybufcnt_max * SDYN_SIZE) + SBLTIN_SIZE;
   excess = (excess >= _all_cysize_max) ? 0 : _all_cysize_max - excess;

   printf("String usage statistics (cycle means one sreset() cycle):\n"
      "  Buffer allocs ever/max a time : %" PRIuZ "/%" PRIuZ "\n"
      "  .. size of the builtin/dynamic: %" PRIuZ "/%" PRIuZ "\n"
      "  Overall alloc count/bytes     : %" PRIuZ "/%" PRIuZ "\n"
      "  .. bytes min/max/align wastage: %" PRIuZ "/%" PRIuZ "/%" PRIuZ "\n"
      "  sreset() cycles               : %" PRIuZ " (%" PRIuZ " performed)\n"
      "  Cycle max.: alloc count/bytes : %" PRIuZ "/%" PRIuZ "+%" PRIuZ "\n",
      _all_bufcnt, _all_cybufcnt_max,
      SBLTIN_SIZE, SDYN_SIZE,
      _all_cnt, _all_size,
      _all_min, _all_max, _all_wast,
      _all_resetreqs, _all_resets,
      _all_cycnt_max, _all_cysize_max, excess);
   NYD_LEAVE;
   return 0;
}
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
# undef _HOPE_SIZE
# undef _HOPE_SET
# undef _HOPE_GET_TRACE
# undef _HOPE_GET
#endif

/* s-it-mode */