/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2017 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif

/*
 * We use per-execution context memory arenas, to be found in
 * n_go_data->gdc_mempool; if that is NULL, it is set to ->gdc__mempool_buf.
 * n_memory_reset(), which happens on loop ticks, reclaims their memory, and
 * #ifdef HAVE_MEMORY_DEBUG it also performs debug checks on the former.
 * The arena that is used already during program startup is special in that
 * _pool_fixate() will set "a lower bound" in order not to reclaim memory that
 * must be kept vivid during the lifetime of the program.
 * That was so in historical code with the globally shared single string dope
 * implementation, too.  (And it still seems easier than bypassing to normal
 * heap memory before _fixate() is called, today.)
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c); it is
 * a steadily growing pool (but _relax_hold()..[:_relax_unroll():]..relax_gut()
 * can be used to reduce pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one which
 * requires lofi_free()ing of pointers (otherwise it grows until
 * n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory.  It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */
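
/* What follows is a rough, hedged usage sketch of the lifecycle described
 * above.  It assumes the usual public nail.h wrappers (in particular that
 * n_autorec_alloc() maps to n_autorec_alloc_from_pool()); it is meant as an
 * illustration only, not as part of the build */
#if 0
static void
a_memory_example_lifecycle(void){
   char *cp;

   /* During startup: what is allocated before n_memory_pool_fixate() is
    * called survives all later n_memory_reset()s */
   cp = n_autorec_alloc(100);
   n_memory_pool_fixate();

   /* Within a loop tick: allocate freely, no explicit free is required.. */
   cp = n_autorec_alloc(42);
   n_UNUSED(cp);

   /* ..since the loop tick machinery reclaims everything in one sweep */
   n_memory_reset();
}
#endif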

/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)

#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bit");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
# define a_MEMORY_HOPE_INC(P) (P) += 8
# define a_MEMORY_HOPE_DEC(P) (P) -= 8

   /* We use address-induced canary values, inspiration (but he didn't invent)
    * and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)
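
/* Derivation sketch for the canaries above: each guard byte is a function of
 * its own address, so a canary that is copied elsewhere no longer verifies.
 * A standalone (non-macro) rendering of a_MEMORY_HOPE_LOWER(), for
 * illustration only */
# if 0
static ui8_t
a_memory_example_lower_canary(void const *vp){
   ui64_t h;

   h = (uintptr_t)vp;
   h *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;
   return (ui8_t)(h >> 56);
}
# endif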

# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)

# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   a_MEMORY_HOPE_INC((C).p_cp);\
}while(0)

# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   a_MEMORY_HOPE_DEC(__xp.p_cp);\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   if(BAD)\
      n_alert("   ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui32_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* n_free()ing of heap memory may be delayed in order to detect double frees.
 * It is primitive, but ok: speed and memory usage don't matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */

struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Lowest bit set: heap alloc */
};

union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};

struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, cpl. filled */
   size_t mac_recur;                      /* _relax_create() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_POOL_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "struct n_go_data_ctx.gdc_mempool is not large enough for memory pool");

struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate(): keep startup memory lingering */
   char *mab_relax;  /* If !NULL, used by _relax_unroll() instead of .mab_bot */
   char *mab_caster; /* Point of casting off memory */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_MEMORY_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
   < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif

/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

/* */
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
   a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
   a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
   a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
   a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
   a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* */
SINLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);

SINLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last - that we'll keep until _pool_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}

static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         n_free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      n_free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long as we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL && (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)))
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}

FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) != NULL){
      /* First of all reset auto-reclaimed storage so that heap freed during
       * this can be handled in a second step */
      /* TODO v15 active recursion can only happen after a jump */
      if(macp->mac_recur > 0){
         macp->mac_recur = 1;
         n_autorec_relax_gut();
      }
      a_memory_ars_reset(macp);
   }

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}

FL void
n_memory_pool_fixate(void){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if((macp = n_go_data->gdc_mempool) != NULL){
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
   }
   NYD_LEAVE;
}

FL void
n_memory_pool_push(void *vp){
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if(n_go_data->gdc_mempool == NULL)
      n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   memset(macp = vp, 0, sizeof *macp);
   macp->mac_outer = n_go_data->gdc_mempool;
   n_go_data->gdc_mempool = macp;
   NYD_LEAVE;
}

FL void
n_memory_pool_pop(void *vp){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = vp) == NULL){
      macp = n_go_data->gdc_mempool;
      assert(macp != NULL);
   }else{
      /* XXX May not be ARS top upon jump */
      while(n_go_data->gdc_mempool != macp){
         DBG( n_err("ARS pop %p to reach freed context\n",
            n_go_data->gdc_mempool); )
         n_memory_pool_pop(n_go_data->gdc_mempool);
      }
   }
   n_go_data->gdc_mempool = macp->mac_outer;

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   mabp = macp->mac_top;
   macp->mac_top = NULL;
   while(mabp != NULL){
      vp = mabp;
      mabp = mabp->mab_last;
      n_free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if((vp = macp->mac_lofi) != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      macp->mac_lofi = NULL;
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
      n_free(vp);
   }
   NYD_LEAVE;
}
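
/* Pairing sketch for the two functions above (illustrative only): a caller
 * provides storage of at least n_MEMORY_POOL_TYPE_SIZEOF bytes (heap memory
 * here, sidestepping alignment questions), pushes it to gain a fresh
 * innermost pool, and pops it (or NULL for the current top) once done */
#if 0
static void
a_memory_example_pool(void){
   void *vp;

   vp = n_alloc(n_MEMORY_POOL_TYPE_SIZEOF);
   n_memory_pool_push(vp);
   /* ...auto-reclaimed and LOFI allocations now go to the new pool... */
   n_memory_pool_pop(vp);
   n_free(vp);
}
#endif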

#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;
   NYD2_ENTER;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;
   NYD2_ENTER;

   if(vp == NULL)
      rv = n_alloc(s);
   else{
      if(s == 0)
         s = 1;
      if((rv = realloc(vp, s)) == NULL)
         n_panic(_("no memory"));
   }
   NYD2_LEAVE;
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;
   NYD2_ENTER;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void
(n_free)(void *vp){
   NYD2_ENTER;
   (free)(vp);
   NYD2_LEAVE;
}

#else /* !HAVE_MEMORY_DEBUG */
FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed! At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void *
(n_autorec_alloc_from_pool)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters; buffers which thus count as "full" are reowned to
          * .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Indicates allocation after _relax_create() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}

FL void *
(n_autorec_calloc_from_pool)(void *vp, size_t nmemb, size_t size
      n_MEMORY_DEBUG_ARGS){
   void *rv;
   NYD2_ENTER;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc_from_pool)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   NYD2_LEAVE;
   return rv;
}

FL void
n_autorec_relax_create(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }
#ifdef HAVE_DEVEL
   else
      n_err("n_autorec_relax_create(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
n_autorec_relax_gut(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      n_autorec_relax_unroll();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("n_autorec_relax_gut(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
n_autorec_relax_unroll(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);
   n_memory_check();

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}
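
/* Relaxation usage sketch (illustrative only): while iterating over many
 * messages, the casters can be reset in between items so that the pool need
 * not grow with the size of the mailbox; _create() and _gut() must balance */
#if 0
static void
a_memory_example_relax(int msgcount){
   int i;

   n_autorec_relax_create();
   for(i = 0; i < msgcount; ++i){
      /* ...per-message auto-reclaimed allocations here... */
      n_autorec_relax_unroll(); /* Logically gives them back right away */
   }
   n_autorec_relax_gut();
}
#endif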

FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   --p.p_alc;

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   ++p.p_alc;
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}
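
/* LOFI usage sketch (illustrative only): allocations must be released in
 * strict last-out, first-in order, as with alloca(3); n_lofi_alloc() and
 * n_lofi_free() are used just as elsewhere in this file */
#if 0
static void
a_memory_example_lofi(void){
   char *one, *two;

   one = n_lofi_alloc(16);
   two = n_lofi_alloc(32);
   n_lofi_free(two); /* LIFO: the younger allocation must go first */
   n_lofi_free(one);
}
#endif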

FL void *
n_lofi_snap_create(void){ /* TODO avoid temporary alloc */
   void *rv;
   NYD2_ENTER;

   rv = n_lofi_alloc(1);
   NYD2_LEAVE;
   return rv;
}

FL void
n_lofi_snap_unroll(void *cookie){ /* TODO optimise */
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   for(;;){
      p.p_alc = macp->mac_lofi_top;
      a_memory_lofi_free(macp, p.p_vp);
      ++p.p_alc;
#ifdef HAVE_MEMORY_DEBUG
      a_MEMORY_HOPE_INC(p.p_ui8p);
#endif
      if(p.p_vp == cookie)
         break;
   }
   NYD2_LEAVE;
}
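
/* Snapshot usage sketch (illustrative only): a snapshot captures the current
 * LOFI top, and unrolling it releases everything allocated since then,
 * cookie included, in one go */
#if 0
static void
a_memory_example_lofi_snap(void){
   void *snap;

   snap = n_lofi_snap_create();
   (void)n_lofi_alloc(8);
   (void)n_lofi_alloc(24);
   n_lofi_snap_unroll(snap);
}
#endif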

#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD2_ENTER;

   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      " Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, " Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      " Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   for(macp = n_go_data->gdc_mempool; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      " Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 6;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      fprintf(fp, "Heap buffers lingering for n_free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD2_LEAVE;
   return (vp != NULL);
}

FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD2_ENTER;

   anybad = FAL0;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD2_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */

/* s-it-mode */