Drop global "image", _outof() -> a_sendout_file_a_pipe()..
[s-mailx.git] / memory.c
blob91656126ff19ba6df04ea2c09e9cd2e786f9f22b
1 /*@ S-nail - a mail user agent derived from Berkeley Mail.
2 *@ Heap memory and automatically reclaimed storage.
3 *@ TODO Back the _flux_ heap.
4 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
6 * Copyright (c) 2012 - 2017 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #undef n_FILE
21 #define n_FILE memory
23 #ifndef HAVE_AMALGAMATION
24 # include "nail.h"
25 #endif
28 * Our (main)loops _autorec_push() arenas for their lifetime, the
29 * n_memory_reset() that happens on loop ticks reclaims their memory, and
30 * performs debug checks also on the former #ifdef HAVE_MEMORY_DEBUG.
31 * There is one global anonymous autorec arena which is used during the
32 * startup phase and for the interactive n_commands() instance -- this special
33 * arena is autorec_fixate()d from within main.c to not waste space, i.e.,
34 * remaining arena memory is reused and subject to normal _reset() reclaiming.
35 * That was so in historical code with the globally shared single string dope
36 * implementation, too.
38 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
39 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c), it is
40 * a steadily growing pool (but srelax_hold()..[:srelax():]..srelax_rele() can
41 * be used to reduce pressure) until n_memory_reset() time.
43 * LastOutFirstIn memory is meant as an alloca(3) replacement but which requires
44 * lofi_free()ing pointers (otherwise growing until n_memory_reset()).
46 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
47 * TODO reused) at any time, just like normal heap memory. It is notational in
48 * TODO that it clearly states that the allocation will go away after a loop
49 * TODO tick, and also we can use some buffer caches.
/* Maximum allocation (directly) handled by A-R-Storage; anything larger is
 * taken through the "huge" bypass list (see struct a_memory_ars_huge) */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory. Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)
#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bit");

/* Canary ("hope") region: 8 guard bytes before and 8 after the user chunk */
# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
# define a_MEMORY_HOPE_INC(P) (P) += 8
# define a_MEMORY_HOPE_DEC(P) (P) -= 8

/* We use address-induced canary values, inspiration (but he didn't invent)
 * and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c.
 * _LOWER yields the top byte of the hashed address; _UPPER the topmost
 * non-zero byte (0xAA if all were zero) */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)

/* Stamp both canaries around chunk (C) (which has header type T) and advance
 * (C).p_cp past the lower canary to the user pointer */
# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   /* _INC expands to "(P) += 8", so this skips lower canary + user data */\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)

# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   a_MEMORY_HOPE_INC((C).p_cp);\
}while(0)

/* Verify both canaries of chunk (C); (BAD) is set TRU1 on corruption and
 * a bitmask of which of the 8 bytes mismatched is reported via n_alert() */
# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   a_MEMORY_HOPE_DEC(__xp.p_cp);\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   if(BAD)\
      n_alert("  ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */
195 #ifdef HAVE_MEMORY_DEBUG
#ifdef HAVE_MEMORY_DEBUG
/* Bookkeeping header placed in front of every tracked allocation */
struct a_memory_chunk{
   char const *mc_file;    /* Allocation site: file */
   ui32_t mc_line;         /* Allocation site: line */
   ui8_t mc_isfree;        /* Already free()d (double-free detection) */
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;    /* Size as requested by the caller */
   ui32_t mc_size;         /* Real size including header and canaries */
};

/* The heap memory free() may become delayed to detect double frees.
 * It is primitive, but ok: speed and memory usage don't matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */
/* Header of a LOFI ("last-out first-in", pseudo alloca) allocation */
struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};

/* Convenience view of one pointer under all chunk/byte interpretations */
union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};
232 struct a_memory_ars_ctx{
233 struct a_memory_ars_ctx *mac_outer;
234 struct a_memory_ars_buffer *mac_top; /* Alloc stack */
235 struct a_memory_ars_buffer *mac_full; /* Alloc stack, cpl. filled */
236 size_t mac_recur; /* srelax_hold() recursion */
237 struct a_memory_ars_huge *mac_huge; /* Huge allocation bypass list */
238 struct a_memory_ars_lofi *mac_lofi; /* Pseudo alloca */
239 struct a_memory_ars_lofi_chunk *mac_lofi_top;
241 n_CTA(n_MEMORY_AUTOREC_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
242 "Our command loops do not provide enough memory for auto-reclaimed storage");
244 struct a_memory_ars_buffer{
245 struct a_memory_ars_buffer *mab_last;
246 char *mab_bot; /* For _autorec_fixate(). Only used for the global _ctx */
247 char *mab_relax; /* If !NULL, used by srelax() instead of .mab_bot */
248 char *mab_caster; /* Point of casting memory, NULL if full */
249 char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
251 n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
252 "Resulting structure size is not the expected one");
253 #ifdef HAVE_DEBUG
254 n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
255 < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
256 "Memory layout of auto-reclaimed storage does not work out that way");
257 #endif
/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served). This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

/* One LOFI (pseudo alloca) arena buffer */
struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;    /* Current allocation point */
   char *mal_max;       /* End of usable buffer space */
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};
/* Statistics counters (allocations/bytes: all-time, current, maximum),
 * maintained only #ifdef HAVE_MEMORY_DEBUG */
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
      a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
      a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
      a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
      a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
      a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* The anonymous global topmost auto-reclaimed storage instance, and the
 * current top of the stack for recursions, `source's etc */
static struct a_memory_ars_ctx a_memory_ars_global;
static struct a_memory_ars_ctx *a_memory_ars_top;

/* Free a LOFI chunk; it must be the current top of the LOFI stack */
SINLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);
SINLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   /* Pop and release the LOFI chunk vp, which must be macp's LOFI stack top.
    * vp points at the struct a_memory_ars_lofi_chunk header, not user data */
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      /* Low tag bit set: this chunk got its own heap arena (see
       * n_lofi_alloc()); unlink and free that whole arena */
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last - that we'll keep until _autorec_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}
static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   /* Reclaim all auto-reclaimed memory of macp: move .mac_full buffers back
    * to .mac_top, free buffers not pinned by autorec_fixate(), free all huge
    * allocations, then drain the LOFI stack */
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         /* Keep the buffer but rewind its caster to the fixated bottom */
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         /* Trash reclaimed space to catch use-after-reset */
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too. XXX Must be last as long we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL)
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}
FL void
n_memory_reset(void){
   /* Loop-tick reclaim: reset the current ARS context, then (debug only)
    * actually release the heap chunks whose free() had been delayed */
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* First of all reset auto-reclaimed storage so that heap freed during this
    * can be handled in a second step */
   /* TODO v15 active recursion can only happen after a jump */
   if(macp->mac_recur > 0){
      macp->mac_recur = 1;
      srelax_rele();
   }
   a_memory_ars_reset(macp);

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);   /* Parenthesized: bypass any free() macro */
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}
448 #ifndef HAVE_MEMORY_DEBUG
449 FL void *
450 n_alloc(size_t s){
451 void *rv;
452 NYD2_ENTER;
454 if(s == 0)
455 s = 1;
456 if((rv = malloc(s)) == NULL)
457 n_panic(_("no memory"));
458 NYD2_LEAVE;
459 return rv;
462 FL void *
463 n_realloc(void *vp, size_t s){
464 void *rv;
465 NYD2_ENTER;
467 if(vp == NULL)
468 rv = n_alloc(s);
469 else{
470 if(s == 0)
471 s = 1;
472 if((rv = realloc(vp, s)) == NULL)
473 n_panic(_("no memory"));
475 NYD2_LEAVE;
476 return rv;
479 FL void *
480 n_calloc(size_t nmemb, size_t size){
481 void *rv;
482 NYD2_ENTER;
484 if(size == 0)
485 size = 1;
486 if((rv = calloc(nmemb, size)) == NULL)
487 n_panic(_("no memory"));
488 NYD2_LEAVE;
489 return rv;
FL void
(n_free)(void *vp){ /* Parenthesized: n_free/free may also exist as macros */
   NYD2_ENTER;
   (free)(vp);
   NYD2_LEAVE;
}
#else /* !HAVE_MEMORY_DEBUG */
FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   /* Debug allocator: prepend a tracked a_memory_heap_chunk header, surround
    * the user region with canaries, link into a_memory_heap_list and keep
    * statistics.  Returns the user pointer (past header + lower canary) */
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line; /* XXX truncates; mc_line is ui32_t */
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}
FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
   /* Debug realloc: verify canaries, unlink the old chunk, grow/shrink via
    * realloc(3), then relink and restamp canaries.  NULL input and realloc
    * of an already-freed region degrade to a fresh (n_alloc)() */
   union a_memory_ptr p;
   ui32_t user_s;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

   /* Unlink from the live list before realloc(3) may move the chunk */
   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if (p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line; /* XXX truncates; mc_line is ui32_t */
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}
FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   /* Debug calloc: overflow-checked nmemb*size, zeroed, tracked and
    * canary-protected like (n_alloc)() */
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line; /* XXX truncates; mc_line is ui32_t */
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}
FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   /* Debug free: verify canaries, detect double-free, unlink, trash the user
    * region, and either free immediately or (with -d / *memdebug*) park the
    * chunk on a_memory_heap_free so later double frees are detectable */
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      /* Delay the real free(): n_memory_reset() drains this list */
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */
699 FL void
700 n_memory_autorec_fixate(void){
701 struct a_memory_ars_buffer *mabp;
702 NYD_ENTER;
704 for(mabp = a_memory_ars_global.mac_top; mabp != NULL; mabp = mabp->mab_last)
705 mabp->mab_bot = mabp->mab_caster;
706 for(mabp = a_memory_ars_global.mac_full; mabp != NULL; mabp = mabp->mab_last)
707 mabp->mab_bot = mabp->mab_caster;
708 NYD_LEAVE;
711 FL void
712 n_memory_autorec_push(void *vp){
713 struct a_memory_ars_ctx *macp;
714 NYD_ENTER;
716 macp = vp;
717 memset(macp, 0, sizeof *macp);
718 macp->mac_outer = a_memory_ars_top;
719 a_memory_ars_top = macp;
720 NYD_LEAVE;
FL void
n_memory_autorec_pop(void *vp){
   /* Leave the context vp (NULL: the global one): recursively pop any
    * contexts stacked above it (possible after a jump), reclaim all its
    * memory and release its buffers */
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = vp) == NULL)
      macp = &a_memory_ars_global;
   else{
      /* XXX May not be ARS top upon jump */
      while(a_memory_ars_top != macp){
         DBG( n_err("ARS pop %p to reach freed context\n", a_memory_ars_top); )
         n_memory_autorec_pop(a_memory_ars_top);
      }
      a_memory_ars_top = macp->mac_outer;
   }

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   for(mabp = macp->mac_top; mabp != NULL;){
      vp = mabp;
      mabp = mabp->mab_last;
      free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if(macp->mac_lofi != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      free(macp->mac_lofi);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }

   memset(macp, 0, sizeof *macp);
   NYD_LEAVE;
}
765 FL void *
766 n_memory_autorec_current(void){
767 return (a_memory_ars_top != NULL ? a_memory_ars_top : &a_memory_ars_global);
FL void *
(n_autorec_alloc)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
   /* Allocate auto-reclaimed memory in context vp (NULL: current/global).
    * Served by casting off an arena buffer; requests over a_MEMORY_ARS_MAX
    * bypass the arenas via the huge list */
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters.  Reown buffers supposed to be "full" to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Thus indicates allocation after srelax_hold() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line; /* XXX truncates; mc_line is ui32_t */
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}
868 FL void *
869 (n_autorec_calloc)(void *vp, size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
870 void *rv;
871 NYD2_ENTER;
873 size *= nmemb; /* XXX overflow, but only used for struct inits */
874 rv = (n_autorec_alloc)(vp, size n_MEMORY_DEBUG_ARGSCALL);
875 memset(rv, 0, size);
876 NYD2_LEAVE;
877 return rv;
880 FL void
881 srelax_hold(void){
882 struct a_memory_ars_ctx *macp;
883 NYD2_ENTER;
885 if((macp = a_memory_ars_top) == NULL)
886 macp = &a_memory_ars_global;
888 if(macp->mac_recur++ == 0){
889 struct a_memory_ars_buffer *mabp;
891 for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
892 mabp->mab_relax = mabp->mab_caster;
893 for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
894 mabp->mab_relax = mabp->mab_caster;
896 #ifdef HAVE_DEVEL
897 else
898 n_err("srelax_hold(): recursion >0\n");
899 #endif
900 NYD2_LEAVE;
FL void
srelax_rele(void){
   /* Leave a relaxation scope: on the outermost release perform one final
    * srelax() (with recursion pinned to 1 so it actually acts), then clear
    * all relaxation marks */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      srelax();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_rele(): recursion >0\n");
#endif
   NYD2_LEAVE;
}
FL void
srelax(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);
   n_memory_check();

   /* Only act at the outermost srelax_hold() level */
   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      /* Roll every caster back to its relaxation mark (or fixated bottom) */
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}
FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
   /* LOFI ("last-out first-in") pseudo-alloca allocation in the current ARS
    * context.  Oversized requests get a private heap arena, marked by
    * setting bit 1 of the chunk's .malc_last backpointer */
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      /* Current arena has room: just bump the caster */
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      /* Tag the backpointer so a_memory_lofi_free() frees the whole arena */
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line; /* XXX truncates; mc_line is ui32_t */
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}
FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
   /* Release the LOFI allocation vp; in debug mode verify canaries, detect
    * double-free, and enforce the strict LIFO ordering */
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   --p.p_alc;

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   ++p.p_alc;
#endif /* HAVE_MEMORY_DEBUG */

   /* Step back from the user pointer to the chunk header */
   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}
1112 FL void *
1113 n_lofi_snap_create(void){ /* TODO avoid temporary alloc */
1114 void *rv;
1115 NYD2_ENTER;
1117 rv = n_lofi_alloc(1);
1118 NYD2_LEAVE;
1119 return rv;
1122 FL void
1123 n_lofi_snap_unroll(void *cookie){ /* TODO optimise */
1124 union a_memory_ptr p;
1125 struct a_memory_ars_ctx *macp;
1126 NYD2_ENTER;
1128 n_memory_check();
1130 if((macp = a_memory_ars_top) == NULL)
1131 macp = &a_memory_ars_global;
1133 for(;;){
1134 p.p_alc = macp->mac_lofi_top;
1135 a_memory_lofi_free(macp, p.p_vp);
1136 ++p.p_alc;
1137 #ifdef HAVE_DEBUG
1138 a_MEMORY_HOPE_INC(p.p_ui8p);
1139 #endif
1140 if(p.p_vp == cookie)
1141 break;
1143 NYD2_LEAVE;
1146 #ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* The `memtrace' command: dump a trace of all live allocations -- LOFI,
    * auto-reclaimed (open and full buffers) and heap -- into a temporary
    * file and page it.  Returns 0 on success (non-zero on failure) */
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD2_ENTER;

   /* Preset failure; cleared just before the successful return path */
   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   /* LOFI (alloca-like) statistics */
   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      " Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   /* Walk all arena contexts, innermost outward, dumping LOFI buffers and
    * the chunk stack of each */
   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, " Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         /* Bit 0x1 of mal_last tags oversized ("huge") buffers; mask it
          * off to obtain the real link pointer */
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         /* malc_last also carries the heap-chunk tag in bit 0x1 */
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed storage statistics */
   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      " Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   /* Walk the arena contexts again for the ARS buffers proper */
   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      /* Buffers with remaining capacity: iterate the chunks by stepping
       * mc_size bytes at a time up to the cast(er) position */
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }

      /* Buffers already filled up completely */
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }
   }

   /* Plain heap allocations */
   fprintf(fp,
      "\nHeap memory buffers:\n"
      " Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 6;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   /* Freed heap chunks are kept on a linger list only in (mem)debug mode */
   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      fprintf(fp, "Heap buffers lingering for free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD2_LEAVE;
   return (vp != NULL);
}
FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   /* Debug-only canary sweep over all live allocations: LOFI chunks,
    * auto-reclaimed buffers (open and full) and the heap lists.  Returns
    * whether any corrupted canary was found; with *memdebug* set it panics
    * instead.  mdbg_file/mdbg_line are consumed by the HOPE trace macros.
    * NOTE(review): only the innermost ARS context is swept here, the
    * mac_outer chain is not walked -- TODO confirm this is intentional */
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD2_ENTER;

   anybad = FAL0;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      /* Bit 0x1 of malc_last tags direct heap chunks; mask it off to get
       * the real link pointer */
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   /* Chunks are laid out back-to-back; advance by each chunk's mc_size up
    * to the buffer's cast(er) position */
   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap*/

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Freed chunks linger (and thus can still be checked) only in
    * (mem)debug mode */
   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD2_LEAVE;
   return anybad;
}
1400 #endif /* HAVE_MEMORY_DEBUG */
1402 /* s-it-mode */