/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2017 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif

/*
 * Our (main)loops _autorec_push() arenas for their lifetime, the
 * n_memory_reset() that happens on loop ticks reclaims their memory, and
 * performs debug checks also on the former #ifdef HAVE_MEMORY_DEBUG.
 * There is one global anonymous autorec arena which is used during the
 * startup phase and for the interactive n_commands() instance -- this special
 * arena is autorec_fixate()d from within main.c to not waste space, i.e.,
 * remaining arena memory is reused and subject to normal _reset() reclaiming.
 * That was so in historical code with the globally shared single string dope
 * implementation, too.
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c); it is
 * a steadily growing pool (but srelax_hold()..[:srelax():]..srelax_rele() can
 * be used to reduce pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one which
 * requires lofi_free()ing pointers (otherwise growing until n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory.  It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */
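
/* An illustrative lifecycle sketch of the interfaces defined below
 * (hypothetical caller code; only names this file itself provides are used):
 *
 *    char ctx[n_MEMORY_AUTOREC_TYPE_SIZEOF];
 *    n_memory_autorec_push(ctx);       // Open a new auto-reclaimed scope
 *    vp = n_autorec_alloc(NULL, 123);  // Reclaimed at _reset()/_pop() time
 *    lp = n_lofi_alloc(42);            // Pseudo alloca(3)
 *    n_lofi_free(lp);                  //  ..but must be freed, LIFO-wise
 *    n_memory_autorec_pop(ctx);        // Also reclaims vp
 */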

/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX
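/* E.g., assuming (hypothetically) an n_MEMORY_AUTOREC_SIZE of 4096 bytes this
 * works out as 4096/2 + 4096/4 = 3072 bytes; larger requests bypass the
 * arenas entirely (see the "huge" paths below) */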

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)

#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bit");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))

/* We use address-induced canary values, inspiration (but he did not invent
 * them) and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)
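
/* I.e., each canary byte derives from the product of the byte's own address
 * and the prime-derived constant above: the lower canary takes the topmost
 * byte of the product, the upper one the highest non-zero byte (0xAA as the
 * fallback), so every guard byte differs per location */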

# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (C).p_cp += 8;\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   __xp.p_ui8p += 8 + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)

# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   (C).p_cp += 8;\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   (C).p_cp += 8;\
}while(0)

# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   __xp.p_cp -= 8;\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp + 8, __i, mdbg_file, mdbg_line);\
   }\
   __xp.p_ui8p += 8 + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp + 8, __i, mdbg_file, mdbg_line);\
   }\
   if(BAD)\
      n_alert("   ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui32_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* The heap memory free() may become delayed to detect double frees.
 * It is primitive, but ok: speed and memory usage don't matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */

struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};
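
/* Since a_MEMORY_LOFI_ROUNDUP() guarantees at least pointer alignment the
 * low bit of .malc_last is otherwise always clear; n_lofi_alloc() (below)
 * uses it to tag chunks that were served directly from the heap:
 *    chunk->malc_last = (struct a_memory_ars_lofi_chunk*)
 *          ((uintptr_t)chunk->malc_last | 0x1);
 */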

union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};

struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, cpl. filled */
   size_t mac_recur;                      /* srelax_hold() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_AUTOREC_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "Our command loops do not provide enough memory for auto-reclaimed storage");

struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate().  Only used for the global _ctx */
   char *mab_relax;  /* If !NULL, used by srelax() instead of .mab_bot */
   char *mab_caster; /* Point of casting memory, NULL if full */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
      < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif

/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

/* */
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
      a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
      a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
      a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
      a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
      a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* The anonymous global topmost auto-reclaimed storage instance, and the
 * current top of the stack for recursions, `source's etc */
static struct a_memory_ars_ctx a_memory_ars_global;
static struct a_memory_ars_ctx *a_memory_ars_top;

/* */
SINLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);

SINLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last - that we'll keep until _autorec_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}

static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long as we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL)
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}

FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* First of all reset auto-reclaimed storage so that heap freed during this
    * can be handled in a second step */
   /* TODO v15 active recursion can only happen after a jump */
   if(macp->mac_recur > 0){
      macp->mac_recur = 1;
      srelax_rele();
   }
   a_memory_ars_reset(macp);

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}

#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;
   NYD2_ENTER;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;
   NYD2_ENTER;

   if(vp == NULL)
      rv = n_alloc(s);
   else{
      if(s == 0)
         s = 1;
      if((rv = realloc(vp, s)) == NULL)
         n_panic(_("no memory"));
   }
   NYD2_LEAVE;
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;
   NYD2_ENTER;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void
(n_free)(void *vp){
   NYD2_ENTER;
   (free)(vp);
   NYD2_LEAVE;
}
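
/* Since these wrappers n_panic() instead of returning NULL the result may be
 * used unchecked; a (hypothetical) sketch, len/src being placeholders:
 *    char *cp = n_alloc(len +1);   // Never NULL
 *    memcpy(cp, src, len +1);
 *    n_free(cp);
 */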

#else /* !HAVE_MEMORY_DEBUG */
FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}
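
/* The debug-enabled chunk layout this establishes, the returned user pointer
 * pointing just behind the lower canary:
 *    [struct a_memory_heap_chunk][8 byte lower canary][user data]
 *       [8 byte upper canary]
 */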

FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void
n_memory_autorec_fixate(void){
   struct a_memory_ars_buffer *mabp;
   NYD_ENTER;

   for(mabp = a_memory_ars_global.mac_top; mabp != NULL; mabp = mabp->mab_last)
      mabp->mab_bot = mabp->mab_caster;
   for(mabp = a_memory_ars_global.mac_full; mabp != NULL; mabp = mabp->mab_last)
      mabp->mab_bot = mabp->mab_caster;
   NYD_LEAVE;
}

FL void
n_memory_autorec_push(void *vp){
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   macp = vp;
   memset(macp, 0, sizeof *macp);
   macp->mac_outer = a_memory_ars_top;
   a_memory_ars_top = macp;
   NYD_LEAVE;
}

FL void
n_memory_autorec_pop(void *vp){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = vp) == NULL)
      macp = &a_memory_ars_global;
   else{
      /* XXX May not be ARS top upon jump */
      while(a_memory_ars_top != macp){
         DBG( n_err("ARS pop %p to reach freed context\n", a_memory_ars_top); )
         n_memory_autorec_pop(a_memory_ars_top);
      }
      a_memory_ars_top = macp->mac_outer;
   }

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   for(mabp = macp->mac_top; mabp != NULL;){
      vp = mabp;
      mabp = mabp->mab_last;
      free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if(macp->mac_lofi != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      free(macp->mac_lofi);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }

   memset(macp, 0, sizeof *macp);
   NYD_LEAVE;
}
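
/* Note the while() loop above means that popping an outer context first
 * unwinds any inner contexts a longjmp(3) may have skipped over */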

FL void *
n_memory_autorec_current(void){
   return (a_memory_ars_top != NULL ? a_memory_ars_top : &a_memory_ars_global);
}

FL void *
(n_autorec_alloc)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters.  Reown buffers supposed to be "full" to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Thus indicates allocation after srelax_hold() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}
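
/* A (hypothetical) caller sketch; auto-reclaimed storage needs no free, it
 * vanishes with n_memory_reset()/_autorec_pop():
 *    struct str *sp = n_autorec_alloc(NULL, sizeof *sp);
 *    sp->s = n_autorec_alloc(NULL, 128);
 */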

FL void *
(n_autorec_calloc)(void *vp, size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   void *rv;
   NYD2_ENTER;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   NYD2_LEAVE;
   return rv;
}

FL void
srelax_hold(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_hold(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
srelax_rele(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      srelax();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_rele(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
srelax(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);
   n_memory_check();

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}
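
/* A (hypothetical) iteration sketch in the spirit of the comment above;
 * first_message/next_message/handle are placeholders, not real names:
 *    srelax_hold();
 *    for(mp = first_message; mp != NULL; mp = next_message(mp)){
 *       handle(mp);   // May n_autorec_alloc() plenty
 *       srelax();     // Reset casters back to the _hold() snapshot
 *    }
 *    srelax_rele();
 */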

FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   --p.p_alc;

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   ++p.p_alc;
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}
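
/* The "alloca top" check above enforces the LIFO discipline; a (hypothetical)
 * sketch of correct usage:
 *    void *a = n_lofi_alloc(10), *b = n_lofi_alloc(20);
 *    n_lofi_free(b);   // The most recent allocation must go first
 *    n_lofi_free(a);
 */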

#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD2_ENTER;

   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      " Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, " Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      " Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      " Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 6;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      fprintf(fp, "Heap buffers lingering for free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD2_LEAVE;
   return (vp != NULL);
}

FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD2_ENTER;

   anybad = FAL0;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD2_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */

/* s-it-mode */