/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Memory functions.
 *
 * Copyright (c) 2000-2004 Gunnar Ritter, Freiburg i. Br., Germany.
 * Copyright (c) 2012 - 2016 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif

#ifdef HAVE_MEMORY_DEBUG
CTA(sizeof(char) == sizeof(ui8_t));

# define _HOPE_SIZE     (2 * 8 * sizeof(char))
# define _HOPE_SET(C) \
do {\
   union a_mem_ptr __xl, __xu;\
   struct a_mem_chunk *__xc;\
   __xl.p_p = (C).p_p;\
   __xc = __xl.p_c - 1;\
   __xu.p_p = __xc;\
   (C).p_cp += 8;\
   __xl.p_ui8p[0]=0xDE; __xl.p_ui8p[1]=0xAA;\
   __xl.p_ui8p[2]=0x55; __xl.p_ui8p[3]=0xAD;\
   __xl.p_ui8p[4]=0xBE; __xl.p_ui8p[5]=0x55;\
   __xl.p_ui8p[6]=0xAA; __xl.p_ui8p[7]=0xEF;\
   __xu.p_ui8p += __xc->mc_size - 8;\
   __xu.p_ui8p[0]=0xDE; __xu.p_ui8p[1]=0xAA;\
   __xu.p_ui8p[2]=0x55; __xu.p_ui8p[3]=0xAD;\
   __xu.p_ui8p[4]=0xBE; __xu.p_ui8p[5]=0x55;\
   __xu.p_ui8p[6]=0xAA; __xu.p_ui8p[7]=0xEF;\
} while (0)
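
/* Editor's note: _HOPE_SET() brackets each user region like this (the
 * pointer handed back to the caller is advanced past the lower canary):
 *
 *   [struct a_mem_chunk][8 canary bytes][user data][8 canary bytes]
 *                        DE AA 55 AD ..            .. BE 55 AA EF
 *
 * mc_size covers the whole region, so the upper canary starts at offset
 * mc_size - 8 from the chunk header. */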

# define _HOPE_GET_TRACE(C,BAD) \
do {\
   (C).p_cp += 8;\
   _HOPE_GET(C, BAD);\
   (C).p_cp += 8;\
} while(0)

# define _HOPE_GET(C,BAD) \
do {\
   union a_mem_ptr __xl, __xu;\
   struct a_mem_chunk *__xc;\
   ui32_t __i;\
   __xl.p_p = (C).p_p;\
   __xl.p_cp -= 8;\
   (C).p_cp = __xl.p_cp;\
   __xc = __xl.p_c - 1;\
   (BAD) = FAL0;\
   __i = 0;\
   if (__xl.p_ui8p[0] != 0xDE) __i |= 1<<0;\
   if (__xl.p_ui8p[1] != 0xAA) __i |= 1<<1;\
   if (__xl.p_ui8p[2] != 0x55) __i |= 1<<2;\
   if (__xl.p_ui8p[3] != 0xAD) __i |= 1<<3;\
   if (__xl.p_ui8p[4] != 0xBE) __i |= 1<<4;\
   if (__xl.p_ui8p[5] != 0x55) __i |= 1<<5;\
   if (__xl.p_ui8p[6] != 0xAA) __i |= 1<<6;\
   if (__xl.p_ui8p[7] != 0xEF) __i |= 1<<7;\
   if (__i != 0) {\
      (BAD) = TRU1;\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         __xl.p_p, __i, mdbg_file, mdbg_line);\
   }\
   __xu.p_p = __xc;\
   __xu.p_ui8p += __xc->mc_size - 8;\
   __i = 0;\
   if (__xu.p_ui8p[0] != 0xDE) __i |= 1<<0;\
   if (__xu.p_ui8p[1] != 0xAA) __i |= 1<<1;\
   if (__xu.p_ui8p[2] != 0x55) __i |= 1<<2;\
   if (__xu.p_ui8p[3] != 0xAD) __i |= 1<<3;\
   if (__xu.p_ui8p[4] != 0xBE) __i |= 1<<4;\
   if (__xu.p_ui8p[5] != 0x55) __i |= 1<<5;\
   if (__xu.p_ui8p[6] != 0xAA) __i |= 1<<6;\
   if (__xu.p_ui8p[7] != 0xEF) __i |= 1<<7;\
   if (__i != 0) {\
      (BAD) = TRU1;\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         __xl.p_p, __i, mdbg_file, mdbg_line);\
   }\
   if (BAD)\
      n_alert("   ..canary last seen: %s, line %" PRIu16 "",\
         __xc->mc_file, __xc->mc_line);\
} while (0)
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
struct a_mem_chunk{
   struct a_mem_chunk   *mc_prev;
   struct a_mem_chunk   *mc_next;
   char const           *mc_file;
   ui16_t               mc_line;
   ui8_t                mc_isfree;
   ui8_t                __dummy[1];
   ui32_t               mc_size;
};

union a_mem_ptr{
   void                 *p_p;
   struct a_mem_chunk   *p_c;
   char                 *p_cp;
   ui8_t                *p_ui8p;
};
#endif /* HAVE_MEMORY_DEBUG */
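
/* Editor's note: live chunks are kept on the doubly-linked list headed by
 * a_mem_list (declared below); with OPT_DEBUG/OPT_MEMDEBUG set, sfree()d
 * chunks migrate onto a_mem_free instead of being free()d at once, so
 * double-frees and writes into released regions remain diagnosable until
 * n_memreset() really gives them back. */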

/*
 * String dope -- this is a temporary left over
 */

/* In debug mode the "string dope" allocations are enwrapped in canaries, just
 * as we do with our normal memory allocator */
#ifdef HAVE_MEMORY_DEBUG
# define _SHOPE_SIZE    (2u * 8 * sizeof(char) + sizeof(struct schunk))

CTA(sizeof(char) == sizeof(ui8_t));

struct schunk {
   char const     *file;
   ui32_t         line;
   ui16_t         usr_size;
   ui16_t         full_size;
};

union sptr {
   void           *p;
   struct schunk  *c;
   char           *cp;
   ui8_t          *ui8p;
};
#endif /* HAVE_MEMORY_DEBUG */

union __align__ {
   char           *cp;
   size_t         sz;
   ul_i           ul;
};
#define SALIGN       (sizeof(union __align__) - 1)

CTA(ISPOW2(SALIGN + 1));
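
/* Editor's note, a worked example of the rounding salloc() performs below:
 * assuming the members of union __align__ make it 8 bytes wide, SALIGN is 7,
 * and (size + SALIGN) & ~SALIGN rounds up to the next multiple of 8, e.g.
 * 13 -> (13+7) & ~7 = 16, while 16 -> 16.  The CTA(ISPOW2(SALIGN + 1))
 * assertion is what makes this mask trick valid. */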

struct b_base {
   struct buffer  *_next;
   char           *_bot;      /* For spreserve() */
   char           *_relax;    /* If !NULL, used by srelax() instead of ._bot */
   char           *_max;      /* Max usable byte */
   char           *_caster;   /* NULL if full */
};

/* Single instance builtin buffer.  Room for anything, most of the time */
struct b_bltin {
   struct b_base  b_base;
   char           b_buf[SBUFFER_BUILTIN - sizeof(struct b_base)];
};
#define SBLTIN_SIZE     SIZEOF_FIELD(struct b_bltin, b_buf)

/* Dynamically allocated buffers to overcome shortage, always released again
 * once the command loop ticks */
struct b_dyn {
   struct b_base  b_base;
   char           b_buf[SBUFFER_SIZE - sizeof(struct b_base)];
};
#define SDYN_SIZE       SIZEOF_FIELD(struct b_dyn, b_buf)

/* The multiplexer of the several real b_* */
struct buffer {
   struct b_base  b;
   char           b_buf[VFIELD_SIZE(SALIGN + 1)];
};

/* Requests that exceed SDYN_SIZE-1 and thus cannot be handled by string dope
 * are always served by the normal memory allocator (which panics if memory
 * cannot be served).  Note such an allocation has not yet occurred, it is only
 * included as a security fallback bypass */
struct hugebuf {
   struct hugebuf *hb_next;
   char           hb_buf[VFIELD_SIZE(SALIGN + 1)];
};
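
/* Editor's note, a rough sketch of the arena these structures implement:
 *
 *    _buf_head -> [_builtin_buf] -> [b_dyn] -> ... -> NULL  (tail: _buf_list)
 *                  ._bot .. ._caster in use, ._caster .. ._max available
 *
 * salloc() bumps ._caster; requests of SDYN_SIZE-1 bytes or more bypass the
 * chain entirely and are linked into _huge_list instead. */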

#ifdef HAVE_MEMORY_DEBUG
static size_t a_mem_aall, a_mem_acur, a_mem_amax,
   a_mem_mall, a_mem_mcur, a_mem_mmax;

static struct a_mem_chunk *a_mem_list, *a_mem_free;
#endif

/*
 * String dope -- this is a temporary left over
 */

static struct b_bltin _builtin_buf;
static struct buffer *_buf_head, *_buf_list, *_buf_server, *_buf_relax;
static size_t _relax_recur_no;
static struct hugebuf *_huge_list;
#ifdef HAVE_MEMORY_DEBUG
static size_t _all_cnt, _all_cycnt, _all_cycnt_max,
   _all_size, _all_cysize, _all_cysize_max, _all_min,
   _all_max, _all_wast,
   _all_bufcnt, _all_cybufcnt, _all_cybufcnt_max,
   _all_resetreqs, _all_resets;
#endif

/* sreset() / srelax() release a buffer, check the canaries of all chunks */
#ifdef HAVE_MEMORY_DEBUG
static void _salloc_bcheck(struct buffer *b);
#endif

#ifdef HAVE_MEMORY_DEBUG
static void
_salloc_bcheck(struct buffer *b)
{
   union sptr pmax, pp;
   /*NYD2_ENTER;*/

   pmax.cp = (b->b._caster == NULL) ? b->b._max : b->b._caster;
   pp.cp = b->b._bot;

   while (pp.cp < pmax.cp) {
      struct schunk *c;
      union sptr x;
      void *ux;
      ui8_t i;

      c = pp.c;
      pp.cp += c->full_size;
      x.p = c + 1;
      ux = x.cp + 8;

      i = 0;
      if (x.ui8p[0] != 0xDE) i |= 1<<0;
      if (x.ui8p[1] != 0xAA) i |= 1<<1;
      if (x.ui8p[2] != 0x55) i |= 1<<2;
      if (x.ui8p[3] != 0xAD) i |= 1<<3;
      if (x.ui8p[4] != 0xBE) i |= 1<<4;
      if (x.ui8p[5] != 0x55) i |= 1<<5;
      if (x.ui8p[6] != 0xAA) i |= 1<<6;
      if (x.ui8p[7] != 0xEF) i |= 1<<7;
      if (i != 0)
         n_alert("sdope %p: corrupt lower canary: 0x%02X, size %u: %s, line %u",
            ux, i, c->usr_size, c->file, c->line);
      x.cp += 8 + c->usr_size;

      i = 0;
      if (x.ui8p[0] != 0xDE) i |= 1<<0;
      if (x.ui8p[1] != 0xAA) i |= 1<<1;
      if (x.ui8p[2] != 0x55) i |= 1<<2;
      if (x.ui8p[3] != 0xAD) i |= 1<<3;
      if (x.ui8p[4] != 0xBE) i |= 1<<4;
      if (x.ui8p[5] != 0x55) i |= 1<<5;
      if (x.ui8p[6] != 0xAA) i |= 1<<6;
      if (x.ui8p[7] != 0xEF) i |= 1<<7;
      if (i != 0)
         n_alert("sdope %p: corrupt upper canary: 0x%02X, size %u: %s, line %u",
            ux, i, c->usr_size, c->file, c->line);
   }
   /*NYD2_LEAVE;*/
}
#endif /* HAVE_MEMORY_DEBUG */

#ifndef HAVE_MEMORY_DEBUG
FL void *
smalloc(size_t s SMALLOC_DEBUG_ARGS)
{
   void *rv;
   NYD2_ENTER;

   if (s == 0)
      s = 1;
   if ((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}
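
/* Editor's sketch (not part of the original source): since these wrappers
 * n_panic() instead of returning NULL, callers need no failure check:
 *
 *    char *buf = smalloc(len);     // never NULL; panics on exhaustion
 *    memcpy(buf, src, len);
 *    ...
 *    free(buf);                    // plain free() pairs with this build
 */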

FL void *
srealloc(void *v, size_t s SMALLOC_DEBUG_ARGS)
{
   void *rv;
   NYD2_ENTER;

   if (s == 0)
      s = 1;
   if (v == NULL)
      rv = smalloc(s);
   else if ((rv = realloc(v, s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
scalloc(size_t nmemb, size_t size SMALLOC_DEBUG_ARGS)
{
   void *rv;
   NYD2_ENTER;

   if (size == 0)
      size = 1;
   if ((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

#else /* !HAVE_MEMORY_DEBUG */
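
/* Editor's note: the debug variants below parenthesize the function name,
 * (smalloc)(...), which suppresses expansion of a same-named function-like
 * macro at the call site; presumably (an assumption, the macro lives in a
 * header) something along the lines of
 *    #define smalloc(SZ)  (smalloc)(SZ, __FILE__, __LINE__)
 * so that SMALLOC_DEBUG_ARGS receives the caller's file and line. */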

FL void *
(smalloc)(size_t s SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   NYD2_ENTER;

   if (s == 0)
      s = 1;
   if (s > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("smalloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   s += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (malloc)(s)) == NULL)
      n_panic(_("no memory"));
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)s;

   a_mem_list = p.p_c++;
   _HOPE_SET(p);

   ++a_mem_aall;
   ++a_mem_acur;
   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mall += s;
   a_mem_mcur += s;
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
   NYD2_LEAVE;
   return p.p_p;
}

FL void *
(srealloc)(void *v, size_t s SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if ((p.p_p = v) == NULL) {
      p.p_p = (smalloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   _HOPE_GET(p, isbad);
   --p.p_c;
   if (p.p_c->mc_isfree) {
      n_err("srealloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

   if (p.p_c == a_mem_list)
      a_mem_list = p.p_c->mc_next;
   else
      p.p_c->mc_prev->mc_next = p.p_c->mc_next;
   if (p.p_c->mc_next != NULL)
      p.p_c->mc_next->mc_prev = p.p_c->mc_prev;

   --a_mem_acur;
   a_mem_mcur -= p.p_c->mc_size;
jforce:
   if (s == 0)
      s = 1;
   if (s > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("srealloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   s += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)s;
   a_mem_list = p.p_c++;
   _HOPE_SET(p);

   ++a_mem_aall;
   ++a_mem_acur;
   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mall += s;
   a_mem_mcur += s;
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_p;
}

FL void *
(scalloc)(size_t nmemb, size_t size SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   NYD2_ENTER;

   if (size == 0)
      size = 1;
   if (nmemb == 0)
      nmemb = 1;
   if (size > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("scalloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if ((UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE) / nmemb < size)
      n_panic("scalloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_p, 0, size);
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)size;
   a_mem_list = p.p_c++;
   _HOPE_SET(p);

   ++a_mem_aall;
   ++a_mem_acur;
   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mall += size;
   a_mem_mcur += size;
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
   NYD2_LEAVE;
   return p.p_p;
}
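
/* Editor's note on the guards above: with overhead
 * OVH = sizeof(struct a_mem_chunk) + _HOPE_SIZE and limit L = UI32_MAX - OVH,
 * the integer-division test "L / nmemb < size" holds exactly when
 * nmemb * size > L, so the multiplication can neither wrap nor exceed the
 * ui32_t mc_size field once OVH is added. */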

FL void
(sfree)(void *v SMALLOC_DEBUG_ARGS)
{
   union a_mem_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if ((p.p_p = v) == NULL) {
      n_err("sfree(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   _HOPE_GET(p, isbad);
   --p.p_c;
   if (p.p_c->mc_isfree) {
      n_err("sfree(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if (p.p_c == a_mem_list)
      a_mem_list = p.p_c->mc_next;
   else
      p.p_c->mc_prev->mc_next = p.p_c->mc_next;
   if (p.p_c->mc_next != NULL)
      p.p_c->mc_next->mc_prev = p.p_c->mc_prev;
   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(v, 0377, p.p_c->mc_size - sizeof(struct a_mem_chunk) - _HOPE_SIZE);

   --a_mem_acur;
   a_mem_mcur -= p.p_c->mc_size;

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      p.p_c->mc_next = a_mem_free;
      a_mem_free = p.p_c;
   } else
      (free)(p.p_c);
jleave:
   NYD2_LEAVE;
}
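
/* Editor's sketch of the debug free path (illustration only):
 *
 *    char *cp = smalloc(32);
 *    sfree(cp);        // canaries verified, user bytes trashed with 0xFF
 *    sfree(cp);        // "double-free avoided": with OPT_MEMDEBUG the
 *                      // chunk header is still queued and readable
 *
 * Without OPT_DEBUG/OPT_MEMDEBUG the chunk really is free()d at once, and
 * a second call would inspect released memory. */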

FL void
n_memreset(void)
{
   union a_mem_ptr p;
   size_t c = 0, s = 0;
   NYD_ENTER;

   n_memcheck();

   for (p.p_c = a_mem_free; p.p_c != NULL;) {
      void *vp = p.p_c;
      ++c;
      s += p.p_c->mc_size;
      p.p_c = p.p_c->mc_next;
      (free)(vp);
   }
   a_mem_free = NULL;

   if (options & (OPT_DEBUG | OPT_MEMDEBUG))
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
   NYD_LEAVE;
}

FL int
c_memtrace(void *v)
{
   /* For _HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   FILE *fp;
   union a_mem_ptr p, xp;
   bool_t isbad;
   size_t lines;
   NYD_ENTER;

   v = (void*)0x1;
   if ((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL) {
      n_perr("tmpfile", 0);
      goto jleave;
   }

   fprintf(fp, "Memory statistics:\n"
      "  Count cur/peak/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Bytes cur/peak/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_mem_acur, a_mem_amax, a_mem_aall, a_mem_mcur, a_mem_mmax, a_mem_mall);

   fprintf(fp, "Currently allocated memory chunks:\n");
   for (lines = 0, p.p_c = a_mem_list; p.p_c != NULL;
         ++lines, p.p_c = p.p_c->mc_next) {
      xp = p;
      ++xp.p_c;
      _HOPE_GET_TRACE(xp, isbad);
      fprintf(fp, "%s%p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
         (isbad ? "! CANARY ERROR: " : ""), xp.p_p,
         (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)), p.p_c->mc_file,
         p.p_c->mc_line);
   }

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      fprintf(fp, "sfree()d memory chunks awaiting free():\n");
      for (p.p_c = a_mem_free; p.p_c != NULL; ++lines, p.p_c = p.p_c->mc_next) {
         xp = p;
         ++xp.p_c;
         _HOPE_GET_TRACE(xp, isbad);
         fprintf(fp, "%s%p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
            (isbad ? "! CANARY ERROR: " : ""), xp.p_p,
            (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
            p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   v = NULL;
jleave:
   NYD_LEAVE;
   return (v != NULL);
}

FL bool_t
n__memcheck(char const *mdbg_file, int mdbg_line)
{
   union a_mem_ptr p, xp;
   bool_t anybad = FAL0, isbad;
   size_t lines;
   NYD_ENTER;

   for (lines = 0, p.p_c = a_mem_list; p.p_c != NULL;
         ++lines, p.p_c = p.p_c->mc_next) {
      xp = p;
      ++xp.p_c;
      _HOPE_GET_TRACE(xp, isbad);
      if (isbad) {
         anybad = TRU1;
         n_err(
            "! CANARY ERROR: %p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
            xp.p_p, (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
            p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      for (p.p_c = a_mem_free; p.p_c != NULL; ++lines, p.p_c = p.p_c->mc_next) {
         xp = p;
         ++xp.p_c;
         _HOPE_GET_TRACE(xp, isbad);
         if (isbad) {
            anybad = TRU1;
            n_err(
               "! CANARY ERROR: %p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
               xp.p_p, (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
               p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }
   NYD_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void *
(salloc)(size_t size SALLOC_DEBUG_ARGS)
{
#ifdef HAVE_MEMORY_DEBUG
   size_t orig_size = size;
#endif
   union {struct buffer *b; struct hugebuf *hb; char *cp;} u;
   char *x, *y, *z;
   NYD2_ENTER;

   if (size == 0)
      ++size;
   size += SALIGN;
   size &= ~SALIGN;

#ifdef HAVE_MEMORY_DEBUG
   ++_all_cnt;
   ++_all_cycnt;
   _all_cycnt_max = MAX(_all_cycnt_max, _all_cycnt);
   _all_size += size;
   _all_cysize += size;
   _all_cysize_max = MAX(_all_cysize_max, _all_cysize);
   _all_min = (_all_max == 0) ? size : MIN(_all_min, size);
   _all_max = MAX(_all_max, size);
   _all_wast += size - orig_size;

   size += _SHOPE_SIZE;

   if (size >= SDYN_SIZE - 1)
      n_alert("salloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif

   /* Huge allocations are special */
   if (UNLIKELY(size >= SDYN_SIZE - 1))
      goto jhuge;

   /* Search for a buffer with enough free space to serve request */
   if ((u.b = _buf_server) != NULL)
      goto jumpin;
jredo:
   for (u.b = _buf_head; u.b != NULL; u.b = u.b->b._next) {
jumpin:
      x = u.b->b._caster;
      if (x == NULL) {
         if (u.b == _buf_server) {
            if (u.b == _buf_head && (u.b = _buf_head->b._next) != NULL) {
               _buf_server = u.b;
               goto jumpin;
            }
            _buf_server = NULL;
            goto jredo;
         }
         continue;
      }
      y = x + size;
      z = u.b->b._max;
      if (PTRCMP(y, <=, z)) {
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * non-NULL casters.  However, because of _salloc_bcheck(), we may not
          * set ._caster to NULL because then it would check all chunks up to
          * ._max, which surely doesn't work; speed is no issue with DEBUG */
         u.b->b._caster = NDBG( PTRCMP(y + 42 + 16, >=, z) ? NULL : ) y;
         u.cp = x;
         goto jleave;
      }
   }

   /* Need a new buffer */
   if (_buf_head == NULL) {
      struct b_bltin *b = &_builtin_buf;
      b->b_base._max = b->b_buf + SBLTIN_SIZE - 1;
      _buf_head = (struct buffer*)b;
      u.b = _buf_head;
   } else {
#ifdef HAVE_MEMORY_DEBUG
      ++_all_bufcnt;
      ++_all_cybufcnt;
      _all_cybufcnt_max = MAX(_all_cybufcnt_max, _all_cybufcnt);
#endif
      u.b = smalloc(sizeof(struct b_dyn));
      u.b->b._max = u.b->b_buf + SDYN_SIZE - 1;
   }

   if (_buf_list != NULL)
      _buf_list->b._next = u.b;
   _buf_server = _buf_list = u.b;
   u.b->b._next = NULL;
   u.b->b._caster = (u.b->b._bot = u.b->b_buf) + size;
   u.b->b._relax = NULL;
   u.cp = u.b->b._bot;

jleave:
   /* Encapsulate user chunk in debug canaries */
#ifdef HAVE_MEMORY_DEBUG
   {
      union sptr xl, xu;
      struct schunk *xc;

      xl.p = u.cp;
      xc = xl.c;
      xc->file = mdbg_file;
      xc->line = mdbg_line;
      xc->usr_size = (ui16_t)orig_size;
      xc->full_size = (ui16_t)size;
      xl.p = xc + 1;
      xl.ui8p[0]=0xDE; xl.ui8p[1]=0xAA; xl.ui8p[2]=0x55; xl.ui8p[3]=0xAD;
      xl.ui8p[4]=0xBE; xl.ui8p[5]=0x55; xl.ui8p[6]=0xAA; xl.ui8p[7]=0xEF;
      u.cp = xl.cp + 8;
      xu.p = u.cp;
      xu.cp += orig_size;
      xu.ui8p[0]=0xDE; xu.ui8p[1]=0xAA; xu.ui8p[2]=0x55; xu.ui8p[3]=0xAD;
      xu.ui8p[4]=0xBE; xu.ui8p[5]=0x55; xu.ui8p[6]=0xAA; xu.ui8p[7]=0xEF;
   }
#endif
   NYD2_LEAVE;
   return u.cp;

jhuge:
   u.hb = smalloc(sizeof(*u.hb) - VFIELD_SIZEOF(struct hugebuf, hb_buf) +
         size +1);
   u.hb->hb_next = _huge_list;
   _huge_list = u.hb;
   u.cp = u.hb->hb_buf;
   goto jleave;
}
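
/* Editor's sketch (an illustration, not original source): salloc() memory
 * is never released individually; per the comments above it lives until the
 * command loop performs the next sreset().  Typical use:
 *
 *    char *cp = salloc(len +1);
 *    memcpy(cp, src, len);
 *    cp[len] = '\0';
 *    return cp;        // callers must not free(); valid until sreset()
 */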

FL void *
(csalloc)(size_t nmemb, size_t size SALLOC_DEBUG_ARGS)
{
   void *vp;
   NYD2_ENTER;

   size *= nmemb;
   vp = (salloc)(size SALLOC_DEBUG_ARGSCALL);
   memset(vp, 0, size);
   NYD2_LEAVE;
   return vp;
}

FL void
sreset(bool_t only_if_relaxed)
{
   struct buffer *blh, *bh;
   NYD_ENTER;

#ifdef HAVE_MEMORY_DEBUG
   ++_all_resetreqs;
#endif
   if (noreset) {
      /* Reset relaxation after any jump is a MUST */
      if (_relax_recur_no > 0)
         srelax_rele();
      goto jleave;
   }
   if (only_if_relaxed && _relax_recur_no == 0)
      goto jleave;

#ifdef HAVE_MEMORY_DEBUG
   _all_cycnt = _all_cysize = 0;
   _all_cybufcnt = (_buf_head != NULL && _buf_head->b._next != NULL);
   ++_all_resets;
#endif

   /* Reset relaxation after jump */
   if (_relax_recur_no > 0) {
      srelax_rele();
      assert(_relax_recur_no == 0);
   }

   blh = NULL;
   if ((bh = _buf_head) != NULL) {
      do {
         struct buffer *x = bh;
         bh = x->b._next;
#ifdef HAVE_MEMORY_DEBUG
         _salloc_bcheck(x);
#endif

         /* Give away all buffers that are not covered by sreset().
          * _buf_head is builtin and thus cannot be free()d */
         if (blh != NULL && x->b._bot == x->b_buf) {
            blh->b._next = bh;
            free(x);
         } else {
            blh = x;
            x->b._caster = x->b._bot;
            x->b._relax = NULL;
            DBG( memset(x->b._caster, 0377,
               PTR2SIZE(x->b._max - x->b._caster)); )
         }
      } while (bh != NULL);

      _buf_server = _buf_head;
      _buf_list = blh;
      _buf_relax = NULL;
   }

   while (_huge_list != NULL) {
      struct hugebuf *hb = _huge_list;
      _huge_list = hb->hb_next;
      free(hb);
   }

   n_memreset();
jleave:
   NYD_LEAVE;
}

FL void
srelax_hold(void)
{
   struct buffer *b;
   NYD_ENTER;

   if (_relax_recur_no++ == 0) {
      for (b = _buf_head; b != NULL; b = b->b._next)
         b->b._relax = b->b._caster;
      _buf_relax = _buf_server;
   }
   NYD_LEAVE;
}

FL void
srelax_rele(void)
{
   struct buffer *b;
   NYD_ENTER;

   assert(_relax_recur_no > 0);

   if (--_relax_recur_no == 0) {
      for (b = _buf_head; b != NULL; b = b->b._next) {
#ifdef HAVE_MEMORY_DEBUG
         _salloc_bcheck(b);
#endif
         b->b._caster = (b->b._relax != NULL) ? b->b._relax : b->b._bot;
         b->b._relax = NULL;
      }

      _buf_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_rele(): recursion >0!\n");
#endif
   NYD_LEAVE;
}

FL void
srelax(void)
{
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct buffer *b;
   NYD_ENTER;

   assert(_relax_recur_no > 0);

   if (_relax_recur_no == 1) {
      for (b = _buf_head; b != NULL; b = b->b._next) {
#ifdef HAVE_MEMORY_DEBUG
         _salloc_bcheck(b);
#endif
         b->b._caster = (b->b._relax != NULL) ? b->b._relax : b->b._bot;
         DBG( memset(b->b._caster, 0377, PTR2SIZE(b->b._max - b->b._caster)); )
      }
   }
   NYD_LEAVE;
}
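
/* Editor's sketch of the intended relaxation cycle, derived from the comment
 * above (names are illustrative):
 *
 *    srelax_hold();                   // remember ._caster in ._relax
 *    for (mp = first; mp != NULL; mp = next_message(mp)) {
 *       ...salloc() temporaries while processing *mp...
 *       srelax();                     // drop them: casters back to ._relax
 *    }
 *    srelax_rele();                   // restore normal operation
 */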

FL void
spreserve(void)
{
   struct buffer *b;
   NYD_ENTER;

   for (b = _buf_head; b != NULL; b = b->b._next)
      b->b._bot = b->b._caster;
   NYD_LEAVE;
}

#ifdef HAVE_MEMORY_DEBUG
FL int
c_sstats(void *v)
{
   size_t excess;
   NYD_ENTER;
   UNUSED(v);

   excess = (_all_cybufcnt_max * SDYN_SIZE) + SBLTIN_SIZE;
   excess = (excess >= _all_cysize_max) ? 0 : _all_cysize_max - excess;

   printf("String usage statistics (cycle means one sreset() cycle):\n"
      "  Buffer allocs ever/max a time : %" PRIuZ "/%" PRIuZ "\n"
      "  .. size of the builtin/dynamic: %" PRIuZ "/%" PRIuZ "\n"
      "  Overall alloc count/bytes     : %" PRIuZ "/%" PRIuZ "\n"
      "  .. bytes min/max/align wastage: %" PRIuZ "/%" PRIuZ "/%" PRIuZ "\n"
      "  sreset() cycles               : %" PRIuZ " (%" PRIuZ " performed)\n"
      "  Cycle max.: alloc count/bytes : %" PRIuZ "/%" PRIuZ "+%" PRIuZ "\n",
      _all_bufcnt, _all_cybufcnt_max,
      SBLTIN_SIZE, SDYN_SIZE,
      _all_cnt, _all_size,
      _all_min, _all_max, _all_wast,
      _all_resetreqs, _all_resets,
      _all_cycnt_max, _all_cysize_max, excess);
   NYD_LEAVE;
   return 0;
}
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
# undef _HOPE_SIZE
# undef _HOPE_SET
# undef _HOPE_GET_TRACE
# undef _HOPE_GET
#endif

/* s-it-mode */