/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2017 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif
/* Our (main)loops _autorec_push() arenas for their lifetime, the
 * n_memory_reset() that happens on loop ticks reclaims their memory and,
 * #ifdef HAVE_MEMORY_DEBUG, also performs debug checks on the former.
 * There is one global anonymous autorec arena which is used during the
 * startup phase and for the interactive n_commands() instance -- this special
 * arena is autorec_fixate()d from within main.c to not waste space, i.e.,
 * remaining arena memory is reused and subject to normal _reset() reclaiming.
 * This was true of the historical code with its globally shared single
 * string dope implementation, too.
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c); it is
 * a steadily growing pool (but srelax_hold()..[:srelax():]..srelax_rele() can
 * be used to reduce pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one which
 * requires lofi_free()ing pointers (otherwise growing until n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory. It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */
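/* A minimal usage sketch (illustrative only, not part of the build; the loop
 * bounds are assumptions).  A command loop provides the arena storage that
 * loop ticks reclaim, and an iteration over many messages brackets its body
 * with the relaxation calls so that arena space is reused per message:
 *
 *    char arena[n_MEMORY_AUTOREC_TYPE_SIZEOF];
 *
 *    n_memory_autorec_push(arena);
 *    srelax_hold();
 *    for(i = 0; i < msgCount; ++i){
 *       char *cp = n_autorec_alloc(NULL, 128);
 *       ...use cp, no explicit free necessary...
 *       srelax();
 *    }
 *    srelax_rele();
 *    n_memory_autorec_pop(arena);
 */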
/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");
/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)
#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bits");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
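/* With HAVE_MEMORY_DEBUG an allocation is thus framed as
 *    [bookkeeping chunk][8 byte lower canary][user data][8 byte upper canary]
 * and a_MEMORY_HOPE_SIZE is the per-allocation canary overhead */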
/* We use address-induced canary values; the inspiration (he did not invent
 * it) and the primes are from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)
# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (C).p_cp += 8;\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   __xp.p_ui8p += 8 + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)
# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   (C).p_cp += 8;\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   (C).p_cp += 8;\
}while(0)
# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   __xp.p_cp -= 8;\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp + 8, __i, mdbg_file, mdbg_line);\
   }\
   __xp.p_ui8p += 8 + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp + 8, __i, mdbg_file, mdbg_line);\
   }\
   if(BAD)\
      n_alert(" ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */
#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui32_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* The heap memory free() may become delayed to detect double frees.
 * It is primitive, but ok: speed and memory usage don't matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */
struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};
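/* Note: since chunk sizes are a_MEMORY_LOFI_ROUNDUP()ed to (at least) pointer
 * alignment the low bit of .malc_last is otherwise always 0, which is why it
 * can serve as the "lives on the normal heap" tag that a_memory_lofi_free()
 * inspects */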
union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};
struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_buffer *mac_top; /* Alloc stack */
   struct a_memory_ars_buffer *mac_full; /* Alloc stack, cpl. filled */
   size_t mac_recur; /* srelax_hold() recursion */
   struct a_memory_ars_huge *mac_huge; /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi; /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_AUTOREC_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "Our command loops do not provide enough memory for auto-reclaimed storage");
struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot; /* For _autorec_fixate().  Only used for the global _ctx */
   char *mab_relax; /* If !NULL, used by srelax() instead of .mab_bot */
   char *mab_caster; /* Point of casting memory, NULL if full */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
      < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif
/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};
/* */
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
      a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
      a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
      a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
      a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
      a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* The anonymous global topmost auto-reclaimed storage instance, and the
 * current top of the stack for recursions, `source's etc. */
static struct a_memory_ars_ctx a_memory_ars_global;
static struct a_memory_ars_ctx *a_memory_ars_top;

/* */
SINLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);
SINLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last - that we'll keep until _autorec_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}
static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long as we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL)
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}
FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   n_memory_check();

   /* First of all reset auto-reclaimed storage, so that heap which is freed
    * during this can be handled in a second step */
   /* TODO v15 active recursion can only happen after a jump */
   if(macp->mac_recur > 0){
      macp->mac_recur = 1;
      srelax_rele();
   }
   a_memory_ars_reset(macp);

   /* Now we are ready to deal with the heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);
   }
   a_memory_heap_free = NULL;

   if(options & (OPT_DEBUG | OPT_MEMDEBUG))
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}
#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;
   NYD2_ENTER;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;
   NYD2_ENTER;

   if(vp == NULL)
      rv = n_alloc(s);
   else{
      if(s == 0)
         s = 1;
      if((rv = realloc(vp, s)) == NULL)
         n_panic(_("no memory"));
   }
   NYD2_LEAVE;
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;
   NYD2_ENTER;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void
(n_free)(void *vp){
   NYD2_ENTER;
   (free)(vp);
   NYD2_LEAVE;
}
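/* Example (a sketch for illustration only): the wrappers panic instead of
 * returning NULL, so call sites need no error checking:
 *
 *    char *cp = n_alloc(len +1);
 *    memcpy(cp, src, len +1);
 *    ...
 *    n_free(cp);
 */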
#else /* !HAVE_MEMORY_DEBUG */
FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}
FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}
FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}
FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(options & (OPT_DEBUG | OPT_MEMDEBUG)){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */
FL void
n_memory_autorec_fixate(void){
   struct a_memory_ars_buffer *mabp;
   NYD_ENTER;

   for(mabp = a_memory_ars_global.mac_top; mabp != NULL; mabp = mabp->mab_last)
      mabp->mab_bot = mabp->mab_caster;
   for(mabp = a_memory_ars_global.mac_full; mabp != NULL; mabp = mabp->mab_last)
      mabp->mab_bot = mabp->mab_caster;
   NYD_LEAVE;
}
FL void
n_memory_autorec_push(void *vp){
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   macp = vp;
   memset(macp, 0, sizeof *macp);
   macp->mac_outer = a_memory_ars_top;
   a_memory_ars_top = macp;
   NYD_LEAVE;
}
FL void
n_memory_autorec_pop(void *vp){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if((macp = vp) == NULL)
      macp = &a_memory_ars_global;
   else{
      /* XXX May not be ARS top upon jump */
      while(a_memory_ars_top != macp){
         DBG( n_err("ARS pop %p to reach freed context\n", a_memory_ars_top); )
         n_memory_autorec_pop(a_memory_ars_top);
      }
      a_memory_ars_top = macp->mac_outer;
   }

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   for(mabp = macp->mac_top; mabp != NULL;){
      vp = mabp;
      mabp = mabp->mab_last;
      free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if(macp->mac_lofi != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      free(macp->mac_lofi);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }

   memset(macp, 0, sizeof *macp);
   NYD_LEAVE;
}
FL void *
n_memory_autorec_current(void){
   return (a_memory_ars_top != NULL ? a_memory_ars_top : &a_memory_ars_global);
}
FL void *
(n_autorec_alloc)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve the request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters.  Reown buffers supposed to be "full" to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Thus indicates allocation after srelax_hold() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}
FL void *
(n_autorec_calloc)(void *vp, size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   void *rv;
   NYD2_ENTER;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   NYD2_LEAVE;
   return rv;
}
FL void
srelax_hold(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_hold(): recursion >0\n");
#endif
   NYD2_LEAVE;
}
FL void
srelax_rele(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      srelax();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_rele(): recursion >0\n");
#endif
   NYD2_LEAVE;
}
FL void
srelax(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);
   n_memory_check();

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}
FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}
FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   --p.p_alc;

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   ++p.p_alc;
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}
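/* A minimal usage sketch (illustrative only): LOFI storage replaces
 * alloca(3), so allocations must be released in last-out-first-in order:
 *
 *    void *vp1 = n_lofi_alloc(size1);
 *    void *vp2 = n_lofi_alloc(size2);
 *    ...
 *    n_lofi_free(vp2);
 *    n_lofi_free(vp1);
 */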
#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD2_ENTER;

   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      " Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, " Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      " Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      " Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 6;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if(options & (OPT_DEBUG | OPT_MEMDEBUG)){
      fprintf(fp, "Heap buffers lingering for free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD2_LEAVE;
   return (vp != NULL);
}
FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD2_ENTER;

   anybad = FAL0;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if(options & (OPT_DEBUG | OPT_MEMDEBUG)){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD2_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */
/* s-it-mode */