/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2018 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 * SPDX-License-Identifier: ISC
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif

/*
 * We use per-execution context memory arenas, to be found in
 * n_go_data->gdc_mempool; if that is NULL it is set to ->gdc__mempool_buf.
 * n_memory_reset(), which happens on loop ticks, reclaims their memory, and
 * also performs debug checks on the former #ifdef HAVE_MEMORY_DEBUG.
 * The arena that is used already during program startup is special in that
 * _pool_fixate() will set "a lower bound" in order not to reclaim memory that
 * must be kept vivid during the lifetime of the program.
 * That was so in historical code with the globally shared single string dope
 * implementation, too.  (And it still seems easier than bypassing to normal
 * heap memory before _fixate() is called, today.)
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c); it is
 * a steadily growing pool (but _autorec_relax_create() .. [:_relax_unroll():]
 * .. _autorec_relax_gut() will reduce pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, except that it
 * requires lofi_free()ing pointers (otherwise growing until n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory.  It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */
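
/* An illustrative sketch of how the three flavours are used.  Not compiled;
 * n_autorec_alloc() and n_lofi_alloc() are assumed to be the convenience
 * wrappers around the _from_pool() workers / debug variants below, as
 * declared in nail.h */
#if 0
static void
a_memory_example(void){
   char *hp, *ap, *lp;

   hp = n_alloc(128);          /* Plain heap: must be n_free()d */
   ap = n_autorec_alloc(128);  /* ARS: reclaimed by n_memory_reset() */
   lp = n_lofi_alloc(128);     /* LOFI: n_lofi_free() in LIFO order */

   n_lofi_free(lp);
   n_free(hp);
   /* ap is left alone: the next loop tick reclaims it */
}
#endif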

/* If defined (and HAVE_MEMORY_DEBUG), realloc acts like alloc+free, which can
 * help to catch very bogus double-free attempts */
#define a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE /* TODO runtime opt <> C++ cache */

/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)

#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bits");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
# define a_MEMORY_HOPE_INC(P) (P) += 8
# define a_MEMORY_HOPE_DEC(P) (P) -= 8

/* We use address-induced canary values, inspiration (though not invention)
 * and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)
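
/* A minimal sketch of the chunk layout that the _HOPE_ macros maintain, and
 * of the per-byte canary derivation; a_example_* is hypothetical and not
 * compiled:
 *   [chunk header][8 lower canary bytes][user data][8 upper canary bytes] */
#if 0
static ui8_t
a_example_lower_canary_byte(void const *addr){
   ui64_t h;

   h = (uintptr_t)addr;
   h *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;
   return (ui8_t)(h >> 56); /* Equals what a_MEMORY_HOPE_LOWER() stores */
}
#endif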

# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)

# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   a_MEMORY_HOPE_INC((C).p_cp);\
}while(0)

# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   a_MEMORY_HOPE_DEC(__xp.p_cp);\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   if(BAD)\
      n_alert(" ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui32_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* Heap memory freed via n_free() may be kept in a delayed-release list to
 * detect double frees.  It is primitive, but ok: speed and memory usage do
 * not matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */

struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};
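
/* An illustrative sketch of the low-bit tagging of .malc_last (not compiled;
 * chunk and last are hypothetical): n_alloc() results are at least pointer
 * aligned, so the low bit is free to carry the "direct heap allocation" flag
 * that n_lofi_alloc() sets and a_memory_lofi_free() strips */
#if 0
chunk->malc_last = (struct a_memory_ars_lofi_chunk*)
      ((uintptr_t)last | 0x1);   /* Tag: this one lives on the heap */
last = (struct a_memory_ars_lofi_chunk*)
      ((uintptr_t)chunk->malc_last & ~(uintptr_t)0x1); /* Untag before use */
#endif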

union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};

struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_ctx *mac_outer_save;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, cpl. filled */
   size_t mac_recur;                      /* _relax_create() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_POOL_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "struct n_go_data_ctx.gdc_mempool is not large enough for memory pool");

struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate(): keep startup memory lingering */
   char *mab_relax;  /* If !NULL, used by _relax_unroll() instead of .mab_bot */
   char *mab_caster; /* Point of casting off memory */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_MEMORY_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
   < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif

/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

/* */
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
   a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
   a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
   a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
   a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
   a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* */
n_INLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);

n_INLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* Normal arena allocations are released only when the arena becomes
       * empty, except for the last arena, which we keep until _pool_pop()
       * or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}

static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         n_free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      n_free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long as we
    * jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL &&
         ((n_psonce & n_PSO_REPRODUCIBLE) ||
          (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))))
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}

FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) != NULL){
      /* First of all reset auto-reclaimed storage so that heap freed during
       * this can be handled in a second step */
      /* TODO v15 active recursion can only happen after a jump */
      if(macp->mac_recur > 0){
         macp->mac_recur = 1;
         n_autorec_relax_gut();
      }
      a_memory_ars_reset(macp);
   }

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}

FL void
n_memory_pool_fixate(void){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if((macp = n_go_data->gdc_mempool) != NULL){
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
   }
   NYD_LEAVE;
}

FL void
n_memory_pool_push(void *vp, bool_t init){
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   macp = vp;

   if(init){
      memset(macp, 0, sizeof *macp);
   }
   assert(macp->mac_outer_save == NULL);

   if(n_go_data->gdc_mempool == NULL)
      n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   macp->mac_outer_save = macp->mac_outer;
   macp->mac_outer = n_go_data->gdc_mempool;
   n_go_data->gdc_mempool = macp;
   NYD_LEAVE;
}

FL void
n_memory_pool_pop(void *vp, bool_t gut){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = vp) == NULL){
      assert(gut);
      macp = n_go_data->gdc_mempool;
      assert(macp != NULL);
   }else{
      /* XXX May not be ARS top upon jump */
      while(n_go_data->gdc_mempool != macp){
         assert(gut);
         DBG( n_err("ARS pop %p to reach freed context\n",
            n_go_data->gdc_mempool); )
         n_memory_pool_pop(n_go_data->gdc_mempool, gut);
      }
   }
   n_go_data->gdc_mempool = macp->mac_outer;
   macp->mac_outer = macp->mac_outer_save;
   macp->mac_outer_save = NULL;

   if(gut){
      a_memory_ars_reset(macp);
      assert(macp->mac_full == NULL);
      assert(macp->mac_huge == NULL);

      mabp = macp->mac_top;
      macp->mac_top = NULL;
      while(mabp != NULL){
         vp = mabp;
         mabp = mabp->mab_last;
         n_free(vp);
      }

      /* We (may) have kept one buffer for our pseudo alloca(3) */
      if((vp = macp->mac_lofi) != NULL){
         assert(macp->mac_lofi->mal_last == NULL);
         macp->mac_lofi = NULL;
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_lofi_bcur;
#endif
         n_free(vp);
      }
   }
   NYD_LEAVE;
}
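
/* A minimal usage sketch (not compiled): callers own the pool storage; the
 * n_MEMORY_POOL_TYPE_SIZEOF char buffer mirrors n_go_data_ctx.gdc__mempool_buf
 * (see the n_CTA above), alignment concerns left aside */
#if 0
   char pool[n_MEMORY_POOL_TYPE_SIZEOF];

   n_memory_pool_push(pool, TRU1); /* init=TRU1: zero the new context */
   /* ... auto-reclaimed and LOFI requests are now served from pool ... */
   n_memory_pool_pop(pool, TRU1);  /* gut=TRU1: release all its buffers */
#endif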

FL void *
n_memory_pool_top(void){
   void *rv;
   NYD2_IN;

   if((rv = n_go_data->gdc_mempool) == NULL)
      rv = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;
   NYD2_OU;
   return rv;
}

#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;
   NYD2_ENTER;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;
   NYD2_ENTER;

   if(vp == NULL)
      rv = n_alloc(s);
   else{
      if(s == 0)
         s = 1;
      if((rv = realloc(vp, s)) == NULL)
         n_panic(_("no memory"));
   }
   NYD2_LEAVE;
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;
   NYD2_ENTER;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void
(n_free)(void *vp){
   NYD2_ENTER;
   (free)(vp);
   NYD2_LEAVE;
}

#else /* !HAVE_MEMORY_DEBUG */
FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
# ifndef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
   ui32_t user_s;
# endif
   bool_t isbad;
   union a_memory_ptr p;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed! At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

# ifdef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
   /* C99 */{
      char *xp;

      xp = (n_alloc)(s, mdbg_file, mdbg_line);
      memcpy(xp, vp, n_MIN(s, p.p_c->mc_user_size));
      (n_free)(vp, mdbg_file, mdbg_line);
      p.p_vp = xp;
      goto jleave;
   }
# else

   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
# endif /* a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE */
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   --p.p_hc;

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void *
(n_autorec_alloc_from_pool)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually
          * allocated, and here about 40 bytes seems to be a good cut to
          * avoid non-usable casters.  Buffers supposed to be "full" are
          * moved over to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Indicates allocation after _relax_create() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}

FL void *
(n_autorec_calloc_from_pool)(void *vp, size_t nmemb, size_t size
      n_MEMORY_DEBUG_ARGS){
   void *rv;
   NYD2_ENTER;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc_from_pool)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   NYD2_LEAVE;
   return rv;
}

FL void
n_autorec_relax_create(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }
#if 0 && defined HAVE_DEVEL
   else
      n_err("n_autorec_relax_create(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
n_autorec_relax_gut(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      n_autorec_relax_unroll();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#if 0 && defined HAVE_DEVEL
   else
      n_err("n_autorec_relax_unroll(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
n_autorec_relax_unroll(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);
   n_memory_check();

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}
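
/* A minimal usage sketch (not compiled) of the relaxation cycle; the loop
 * body and msgCount are placeholders for e.g. an iteration over all messages
 * of a mailbox */
#if 0
   n_autorec_relax_create();
   for(i = 0; i < msgCount; ++i){
      /* ... temporaries via n_autorec_alloc() ... */
      n_autorec_relax_unroll(); /* Caster reset: that memory is reusable */
   }
   n_autorec_relax_gut();
#endif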

FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   --p.p_alc;

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   ++p.p_alc;
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}

FL void *
n_lofi_snap_create(void){ /* TODO avoid temporary alloc */
   void *rv;
   NYD2_ENTER;

   rv = n_lofi_alloc(1);
   NYD2_LEAVE;
   return rv;
}

FL void
n_lofi_snap_unroll(void *cookie){ /* TODO optimise */
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   for(;;){
      p.p_alc = macp->mac_lofi_top;
      a_memory_lofi_free(macp, p.p_vp);
      ++p.p_alc;
#ifdef HAVE_MEMORY_DEBUG
      a_MEMORY_HOPE_INC(p.p_ui8p);
#endif
      if(p.p_vp == cookie)
         break;
   }
   NYD2_LEAVE;
}
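
/* A minimal usage sketch (not compiled): a snapshot cookie releases, in one
 * go, all LOFI allocations made after its creation */
#if 0
   void *snap, *buf;

   snap = n_lofi_snap_create();
   buf = n_lofi_alloc(64);
   /* ... */
   n_lofi_snap_unroll(snap); /* Also releases buf */
#endif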

#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD2_ENTER;

   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      " Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, " Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      " Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   for(macp = n_go_data->gdc_mempool; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, " Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, " Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
         ++lines;
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      " Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 6;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      fprintf(fp, "Heap buffers lingering for n_free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, " %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD2_LEAVE;
   return (vp != NULL);
}

FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD2_ENTER;

   anybad = FAL0;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD2_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */

/* s-it-mode */