/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2018 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif

/*
 * We use per-execution context memory arenas, to be found in
 * n_go_data->gdc_mempool; if NULL, set to ->gdc__mempool_buf.
 * n_memory_reset() that happens on loop ticks reclaims their memory, and
 * performs debug checks also on the former #ifdef HAVE_MEMORY_DEBUG.
 * The arena that is used already during program startup is special in that
 * _pool_fixate() will set "a lower bound" in order not to reclaim memory that
 * must be kept vivid during the lifetime of the program.
 * That was so in historical code with the globally shared single string dope
 * implementation, too.  (And it still seems easier than bypassing to normal
 * heap memory before _fixate() is called, today.)
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c), it is
 * a steadily growing pool (but _relax_hold()..[:_relax_unroll():]..relax_gut()
 * can be used to reduce pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one that
 * requires lofi_free()ing pointers (otherwise growing until n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory.  It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */

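/* Illustrative sketch of how the pools are typically driven (a hypothetical
 * caller; n_autorec_alloc() is assumed to be the convenience wrapper around
 * n_autorec_alloc_from_pool()):
 *
 *    void *snap = n_lofi_snap_create();
 *    char *lbuf = n_lofi_alloc(128);     // LIFO pseudo alloca(3)
 *    char *abuf = n_autorec_alloc(512);  // lives until n_memory_reset()
 *    n_lofi_free(lbuf);                  // LIFO order is mandatory
 *    n_lofi_snap_unroll(snap);
 *    // ...the go.c loop tick machinery later calls n_memory_reset()
 */
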
/* If defined (and HAVE_MEMORY_DEBUG), realloc acts like alloc+free, which can
 * help to catch very bogus double-free attempts */
#define a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE /* TODO runtime opt <> C++ cache */

/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)

#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bit");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
# define a_MEMORY_HOPE_INC(P) (P) += 8
# define a_MEMORY_HOPE_DEC(P) (P) -= 8

/* We use address-induced canary values, inspiration (but he didn't invent)
 * and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)

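/* Both canaries hash the guard byte's own address: multiplication with the
 * 64-bit constant 0x9E37FFFFFFFC0000 spreads the address bits upward,
 * _LOWER keeps the topmost byte of the product, _UPPER the highest non-zero
 * byte below it (0xAA if none).  Every guard byte thus depends on where it
 * lives, so a block that is copied elsewhere no longer verifies */
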
# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)

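/* The resulting layout of a debug chunk is
 *    [header (type named by T)][8 lower canary bytes][user data][8 upper]
 * with the user pointer placed right behind the lower canary;
 * a_MEMORY_HOPE_SET() writes both fences, a_MEMORY_HOPE_GET() verifies them
 * byte-wise and reports a bitmask of mismatching positions via n_alert() */
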
# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   a_MEMORY_HOPE_INC((C).p_cp);\
}while(0)

# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   a_MEMORY_HOPE_DEC(__xp.p_cp);\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   if(BAD)\
      n_alert("   ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui32_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* Heap memory handed to n_free() may be kept around (its release delayed)
 * in order to detect double frees.
 * It is primitive, but ok: speed and memory usage don't matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */

struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};

union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};

struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, cpl. filled */
   size_t mac_recur;                      /* _relax_create() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_POOL_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "struct n_go_data_ctx.gdc_mempool is not large enough for memory pool");

struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate(): keep startup memory lingering */
   char *mab_relax;  /* If !NULL, used by _relax_unroll() instead of .mab_bot */
   char *mab_caster; /* Point of casting off memory */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_MEMORY_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
   < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif

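/* Within one arena buffer the pointers are ordered
 *    mab_buf <= mab_bot <= mab_relax (if set) <= mab_caster,
 * i.e., mab_bot fences off startup memory fixated via n_memory_pool_fixate(),
 * mab_relax optionally fences a relaxation scope, and mab_caster is the bump
 * pointer from which new allocations are cast off */
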
/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

/* */
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
   a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
   a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
   a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
   a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
   a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* */
n_INLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);

n_INLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last - that we'll keep until _pool_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}

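/* Since all LOFI chunks are at least pointer-aligned, bit 0 of .malc_last is
 * otherwise always zero; n_lofi_alloc() reclaims it as the "own heap buffer"
 * flag, e.g.
 *    p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
 *          ((uintptr_t)p.p_alc->malc_last | 0x1);
 * and a_memory_lofi_free() above masks it off again with "& ~0x1" before
 * dereferencing */
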
static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         n_free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      n_free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL &&
         ((n_psonce & n_PSO_REPRODUCIBLE) ||
          (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))))
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}

FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) != NULL){
      /* First of all reset auto-reclaimed storage so that heap freed during
       * this can be handled in a second step */
      /* TODO v15 active recursion can only happen after a jump */
      if(macp->mac_recur > 0){
         macp->mac_recur = 1;
         n_autorec_relax_gut();
      }
      a_memory_ars_reset(macp);
   }

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}

FL void
n_memory_pool_fixate(void){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if((macp = n_go_data->gdc_mempool) != NULL){
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
   }
   NYD_LEAVE;
}

FL void
n_memory_pool_push(void *vp){
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if(n_go_data->gdc_mempool == NULL)
      n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   memset(macp = vp, 0, sizeof *macp);
   macp->mac_outer = n_go_data->gdc_mempool;
   n_go_data->gdc_mempool = macp;
   NYD_LEAVE;
}

FL void
n_memory_pool_pop(void *vp){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = vp) == NULL){
      macp = n_go_data->gdc_mempool;
      assert(macp != NULL);
   }else{
      /* XXX May not be ARS top upon jump */
      while(n_go_data->gdc_mempool != macp){
         DBG( n_err("ARS pop %p to reach freed context\n",
            n_go_data->gdc_mempool); )
         n_memory_pool_pop(n_go_data->gdc_mempool);
      }
   }
   n_go_data->gdc_mempool = macp->mac_outer;

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   mabp = macp->mac_top;
   macp->mac_top = NULL;
   while(mabp != NULL){
      vp = mabp;
      mabp = mabp->mab_last;
      n_free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if((vp = macp->mac_lofi) != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      macp->mac_lofi = NULL;
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
      n_free(vp);
   }
   NYD_LEAVE;
}

#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;
   NYD2_ENTER;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;
   NYD2_ENTER;

   if(vp == NULL)
      rv = n_alloc(s);
   else{
      if(s == 0)
         s = 1;
      if((rv = realloc(vp, s)) == NULL)
         n_panic(_("no memory"));
   }
   NYD2_LEAVE;
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;
   NYD2_ENTER;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void
(n_free)(void *vp){
   NYD2_ENTER;
   (free)(vp);
   NYD2_LEAVE;
}

#else /* !HAVE_MEMORY_DEBUG */
FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
# ifndef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
   ui32_t user_s;
# endif
   bool_t isbad;
   union a_memory_ptr p;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

# ifdef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
   /* C99 */{
      char *xp;

      xp = (n_alloc)(s, mdbg_file, mdbg_line);
      memcpy(xp, vp, n_MIN(s, p.p_c->mc_user_size));
      (n_free)(vp, mdbg_file, mdbg_line);
      p.p_vp = xp;
      goto jleave;
   }
# else

   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
# endif /* a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE */
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void *
(n_autorec_alloc_from_pool)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters.  Reown buffers supposed to be "full" to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Indicates allocation after _relax_create() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}

FL void *
(n_autorec_calloc_from_pool)(void *vp, size_t nmemb, size_t size
      n_MEMORY_DEBUG_ARGS){
   void *rv;
   NYD2_ENTER;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc_from_pool)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   NYD2_LEAVE;
   return rv;
}

FL void
n_autorec_relax_create(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }
#ifdef HAVE_DEVEL
   else
      n_err("n_autorec_relax_create(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
n_autorec_relax_gut(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      n_autorec_relax_unroll();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("n_autorec_relax_unroll(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
n_autorec_relax_unroll(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
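   /* A sketch of the intended driver pattern (a hypothetical caller):
    *    n_autorec_relax_create();
    *    for(each message){
    *       ...auto-reclaimed allocations...
    *       n_autorec_relax_unroll();  // rewind casters to the fence
    *    }
    *    n_autorec_relax_gut();
    * i.e., unroll() reuses the pool per iteration instead of letting it grow
    * for the whole mailbox */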
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);
   n_memory_check();

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}

FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   --p.p_alc;

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   ++p.p_alc;
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}

FL void *
n_lofi_snap_create(void){ /* TODO avoid temporary alloc */
   void *rv;
   NYD2_ENTER;

   rv = n_lofi_alloc(1);
   NYD2_LEAVE;
   return rv;
}

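/* A snapshot is thus nothing but a one-byte LOFI watermark (a hypothetical
 * caller shown; n_lofi_snap_unroll() releases the cookie itself, too):
 *    void *snap = n_lofi_snap_create();
 *    ...n_lofi_alloc()s without paired n_lofi_free()s...
 *    n_lofi_snap_unroll(snap); // pops everything up to and including snap
 */
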
FL void
n_lofi_snap_unroll(void *cookie){ /* TODO optimise */
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   for(;;){
      p.p_alc = macp->mac_lofi_top;
      a_memory_lofi_free(macp, p.p_vp);
      ++p.p_alc;
#ifdef HAVE_MEMORY_DEBUG
      a_MEMORY_HOPE_INC(p.p_ui8p);
#endif
      if(p.p_vp == cookie)
         break;
   }
   NYD2_LEAVE;
}

#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD2_ENTER;

   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      "  Buffer cur/peek/all:      %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Bytes cur/peek/all:       %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, "  Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, "    Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      "  Buffers cur/peek/all:          %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   for(macp = n_go_data->gdc_mempool; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, "  Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, "    Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, "    Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      "  Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Bytes cur/peek/all:      %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 6;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, "  %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      fprintf(fp, "Heap buffers lingering for n_free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, "  %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD2_LEAVE;
   return (vp != NULL);
}

FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD2_ENTER;

   anybad = FAL0;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD2_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */

/* s-it-mode */