/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2018 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 * SPDX-License-Identifier: ISC
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif
/*
 * We use per-execution context memory arenas, to be found in
 * n_go_data->gdc_mempool; if that is NULL it is set to ->gdc__mempool_buf.
 * n_memory_reset(), which happens on loop ticks, reclaims their memory, and
 * also performs debug checks on the former #ifdef HAVE_MEMORY_DEBUG.
 * The arena that is used already during program startup is special in that
 * _pool_fixate() will set "a lower bound" in order not to reclaim memory that
 * must be kept vivid during the lifetime of the program.
 * That was also so in the historical code with its globally shared single
 * string dope implementation.  (And it still seems easier than bypassing to
 * normal heap memory before _fixate() is called, today.)
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c): it is
 * a steadily growing pool (though _autorec_relax_create() ..
 * [:_relax_unroll():] .. _autorec_relax_gut() will reduce pressure) until
 * n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one that
 * requires lofi_free()ing of pointers (otherwise it grows until
 * n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory.  It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */
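
/* An illustrative usage sketch (not part of this file; it assumes the public
 * n_autorec_alloc(), n_lofi_alloc() and n_lofi_free() wrappers of the workers
 * defined below) of the intended lifetimes within one tick of the main loop:
 *
 *    void one_loop_tick(void){
 *       char *ars_mem, *lofi_mem;
 *
 *       ars_mem = n_autorec_alloc(128); // reclaimed by n_memory_reset()
 *       lofi_mem = n_lofi_alloc(64);    // pseudo alloca(3): must be freed
 *                                       //    explicitly, in LIFO order
 *       // ... work with both ...
 *       n_lofi_free(lofi_mem);
 *       // ars_mem stays valid until the loop tick ends in n_memory_reset()
 *    }
 */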

/* If defined (and HAVE_MEMORY_DEBUG), realloc acts like alloc+free, which can
 * help to catch even very bogus double-free attempts */
#define a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE /* TODO runtime opt <> C++ cache */

/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)

#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bit");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
# define a_MEMORY_HOPE_INC(P) (P) += 8
# define a_MEMORY_HOPE_DEC(P) (P) -= 8

/* We use address-induced canary values; the inspiration (but not the
 * invention) and the primes are from maxv@netbsd.org,
 * src/sys/kern/subr_kmem.c */
80 # define a_MEMORY_HOPE_LOWER(S,P) \
82 ui64_t __h__ = (uintptr_t)(P);\
83 __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
88 # define a_MEMORY_HOPE_UPPER(S,P) \
91 ui64_t __x__, __h__ = (uintptr_t)(P);\
92 __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
93 for(__i__ = 56; __i__ != 0; __i__ -= 8)\
94 if((__x__ = (__h__ >> __i__)) != 0){\
102 # define a_MEMORY_HOPE_SET(T,C) \
104 union a_memory_ptr __xp;\
105 struct a_memory_chunk *__xc;\
106 __xp.p_vp = (C).p_vp;\
107 __xc = (struct a_memory_chunk*)(__xp.T - 1);\
108 a_MEMORY_HOPE_INC((C).p_cp);\
109 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
110 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
111 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
112 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
113 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
114 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
115 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
116 a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
117 a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
118 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
119 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
120 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
121 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
122 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
123 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
124 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
125 a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
128 # define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
130 a_MEMORY_HOPE_INC((C).p_cp);\
131 a_MEMORY_HOPE_GET(T, C, BAD);\
132 a_MEMORY_HOPE_INC((C).p_cp);\
135 # define a_MEMORY_HOPE_GET(T,C,BAD) \
137 union a_memory_ptr __xp;\
138 struct a_memory_chunk *__xc;\
141 __xp.p_vp = (C).p_vp;\
142 a_MEMORY_HOPE_DEC(__xp.p_cp);\
143 (C).p_cp = __xp.p_cp;\
144 __xc = (struct a_memory_chunk*)(__xp.T - 1);\
147 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
148 if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
149 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
150 if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
151 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
152 if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
153 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
154 if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
155 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
156 if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
157 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
158 if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
159 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
160 if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
161 a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
162 if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
165 a_MEMORY_HOPE_INC((C).p_cp);\
166 n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
167 (C).p_cp, __i, mdbg_file, mdbg_line);\
168 a_MEMORY_HOPE_DEC((C).p_cp);\
170 a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
172 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
173 if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
174 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
175 if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
176 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
177 if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
178 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
179 if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
180 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
181 if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
182 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
183 if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
184 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
185 if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
186 a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
187 if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
190 a_MEMORY_HOPE_INC((C).p_cp);\
191 n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
192 (C).p_cp, __i, mdbg_file, mdbg_line);\
193 a_MEMORY_HOPE_DEC((C).p_cp);\
196 n_alert(" ..canary last seen: %s, line %u",\
197 __xc->mc_file, __xc->mc_line);\
199 #endif /* HAVE_MEMORY_DEBUG */
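
/* A summarizing sketch of the canary ("hope") scheme above, assuming that
 * a_MEMORY_HOPE_LOWER() keeps the topmost byte of the multiplication: each of
 * the eight guard bytes placed before and after a chunk's user data is
 * derived from its own address, roughly
 *
 *    ui8_t canary_byte(void *addr){
 *       ui64_t h = (uintptr_t)addr;
 *       h *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;
 *       return (ui8_t)(h >> 56); // _UPPER() instead falls back to the
 *    }                           //    topmost non-zero byte (see above)
 *
 * a_MEMORY_HOPE_SET() writes both fences, a_MEMORY_HOPE_GET() recomputes and
 * compares them byte-wise and reports any mismatch as a corrupt canary. */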

#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui32_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[3];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* Heap memory that is n_free()d may be kept around for a while in order to
 * detect double frees.  This is primitive, but ok: speed and memory usage
 * do not matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */
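
/* A debug-mode heap allocation made by the code below is thus laid out as
 * (an illustration of n_alloc() and a_MEMORY_HOPE_SET(), not additional code)
 *
 *    [struct a_memory_heap_chunk][8 canary bytes][user data][8 canary bytes]
 *                                                ^
 *                                                pointer returned to the user
 *
 * and the chunk header records file, line and sizes so that double frees and
 * fence corruption can be reported together with their origin. */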

struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};

union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};

struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_ctx *mac_outer_save;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, cpl. filled */
   size_t mac_recur;                      /* _relax_create() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_POOL_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "struct n_go_data_ctx.gdc_mempool is not large enough for memory pool");

struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate(): keep startup memory lingering */
   char *mab_relax;  /* If !NULL, used by _relax_unroll() instead of .mab_bot */
   char *mab_caster; /* Point of casting off memory */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");

#ifdef HAVE_MEMORY_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
      < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif

/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
      a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
      a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
      a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
      a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
      a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

n_INLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);

n_INLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last one - that we keep until _pool_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
}
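
/* An aside on the ->malc_last tagging used above (illustrative only): LOFI
 * chunks which had to be served by the heap store the previous stack top with
 * bit 0x1 set, so a_memory_lofi_free() can distinguish both cases without an
 * extra field:
 *
 *    int is_heap = ((uintptr_t)chunk->malc_last & 0x1) != 0;
 *    struct a_memory_ars_lofi_chunk *prev = (struct a_memory_ars_lofi_chunk*)
 *          ((uintptr_t)chunk->malc_last & ~(uintptr_t)0x1);
 *
 * This is safe because chunk addresses are at least pointer aligned (see
 * a_MEMORY_LOFI_ROUNDUP()), so their least significant bit is always zero. */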

static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         n_free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      n_free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long as we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL &&
         ((n_psonce & n_PSO_REPRODUCIBLE) ||
          (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))))
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
}

FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) != NULL){
      /* First of all reset auto-reclaimed storage so that heap freed during
       * this can be handled in a second step */
      /* TODO v15 active recursion can only happen after a jump */
      if(macp->mac_recur > 0){
         macp->mac_recur = 1;
         n_autorec_relax_gut();
      }
      a_memory_ars_reset(macp);
   }

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      free(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
}

FL void
n_memory_pool_fixate(void){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) != NULL){
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
   }
}

FL void
n_memory_pool_push(void *vp, bool_t init){
   struct a_memory_ars_ctx *macp;

   macp = vp;
   if(init)
      memset(macp, 0, sizeof *macp);
   assert(macp->mac_outer_save == NULL);

   if(n_go_data->gdc_mempool == NULL)
      n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   macp->mac_outer_save = macp->mac_outer;
   macp->mac_outer = n_go_data->gdc_mempool;
   n_go_data->gdc_mempool = macp;
}

FL void
n_memory_pool_pop(void *vp, bool_t gut){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;

   if((macp = vp) == NULL){
      macp = n_go_data->gdc_mempool;
      assert(macp != NULL);
   }else{
      /* XXX May not be ARS top upon jump */
      while(n_go_data->gdc_mempool != macp){
         DBG( n_err("ARS pop %p to reach freed context\n",
            n_go_data->gdc_mempool); )
         n_memory_pool_pop(n_go_data->gdc_mempool, gut);
      }
   }
   n_go_data->gdc_mempool = macp->mac_outer;
   macp->mac_outer = macp->mac_outer_save;
   macp->mac_outer_save = NULL;

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   mabp = macp->mac_top;
   macp->mac_top = NULL;
   while(mabp != NULL){
      vp = mabp;
      mabp = mabp->mab_last;
      n_free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if((vp = macp->mac_lofi) != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      macp->mac_lofi = NULL;
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
      n_free(vp);
   }
}
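
/* (An illustrative pairing for the pool stack managed by the two functions
 * above, assuming a caller-provided buffer of n_MEMORY_POOL_TYPE_SIZEOF
 * bytes; the TRU1/FAL0 flags correspond to the init and gut parameters:
 *
 *    char ctx_buf[n_MEMORY_POOL_TYPE_SIZEOF];
 *
 *    n_memory_pool_push(ctx_buf, TRU1);
 *    // ... pool allocations now go to the new innermost context ...
 *    n_memory_pool_pop(ctx_buf, FAL0);
 * ) */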

FL void *
n_memory_pool_top(void){
   void *rv;

   if((rv = n_go_data->gdc_mempool) == NULL)
      rv = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;
   return rv;
}

#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;

   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;

   if((rv = realloc(vp, s)) == NULL)
      n_panic(_("no memory"));
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;

   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   return rv;
}
#else /* !HAVE_MEMORY_DEBUG */

FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   return p.p_vp;
}
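
/* (The parenthesized function name above works together with a caller-side
 * macro that is assumed to live in nail.h, along the lines of
 *
 *    #define n_alloc(SZ) (n_alloc)(SZ, __FILE__, __LINE__)
 *
 * so that n_MEMORY_DEBUG_ARGS receives the allocation origin, while plain
 * n_alloc() calls continue to work unchanged in non-debug builds.) */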
641 (n_realloc
)(void *vp
, size_t s n_MEMORY_DEBUG_ARGS
){
642 # ifndef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
646 union a_memory_ptr p
;
649 if((p
.p_vp
= vp
) == NULL
){
651 p
.p_vp
= (n_alloc
)(s
, mdbg_file
, mdbg_line
);
655 a_MEMORY_HOPE_GET(p_hc
, p
, isbad
);
658 if(p
.p_c
->mc_isfree
){
659 n_err("n_realloc(): region freed! At %s, line %d\n"
660 "\tLast seen: %s, line %" PRIu16
"\n",
661 mdbg_file
, mdbg_line
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
665 # ifdef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
669 xp
= (n_alloc
)(s
, mdbg_file
, mdbg_line
);
670 memcpy(xp
, vp
, n_MIN(s
, p
.p_c
->mc_user_size
));
671 (n_free
)(vp
, mdbg_file
, mdbg_line
);
677 if(p
.p_hc
== a_memory_heap_list
)
678 a_memory_heap_list
= p
.p_hc
->mhc_next
;
680 p
.p_hc
->mhc_prev
->mhc_next
= p
.p_hc
->mhc_next
;
681 if (p
.p_hc
->mhc_next
!= NULL
)
682 p
.p_hc
->mhc_next
->mhc_prev
= p
.p_hc
->mhc_prev
;
684 --a_memory_heap_acur
;
685 a_memory_heap_mcur
-= p
.p_c
->mc_user_size
;
687 if(s
> UI32_MAX
- sizeof(struct a_memory_heap_chunk
) - a_MEMORY_HOPE_SIZE
)
688 n_panic("n_realloc(): allocation too large: %s, line %d",
689 mdbg_file
, mdbg_line
);
690 if((user_s
= (ui32_t
)s
) == 0)
692 s
+= sizeof(struct a_memory_heap_chunk
) + a_MEMORY_HOPE_SIZE
;
694 if((p
.p_vp
= (realloc
)(p
.p_c
, s
)) == NULL
)
695 n_panic(_("no memory"));
696 p
.p_hc
->mhc_prev
= NULL
;
697 if((p
.p_hc
->mhc_next
= a_memory_heap_list
) != NULL
)
698 a_memory_heap_list
->mhc_prev
= p
.p_hc
;
700 p
.p_c
->mc_file
= mdbg_file
;
701 p
.p_c
->mc_line
= (ui16_t
)mdbg_line
;
702 p
.p_c
->mc_isfree
= FAL0
;
703 p
.p_c
->mc_user_size
= user_s
;
704 p
.p_c
->mc_size
= (ui32_t
)s
;
706 a_memory_heap_list
= p
.p_hc
++;
707 a_MEMORY_HOPE_SET(p_hc
, p
);
709 ++a_memory_heap_aall
;
710 ++a_memory_heap_acur
;
711 a_memory_heap_amax
= n_MAX(a_memory_heap_amax
, a_memory_heap_acur
);
712 a_memory_heap_mall
+= user_s
;
713 a_memory_heap_mcur
+= user_s
;
714 a_memory_heap_mmax
= n_MAX(a_memory_heap_mmax
, a_memory_heap_mcur
);
715 # endif /* a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE */
722 (n_calloc
)(size_t nmemb
, size_t size n_MEMORY_DEBUG_ARGS
){
723 union a_memory_ptr p
;
729 if(size
> UI32_MAX
- sizeof(struct a_memory_heap_chunk
) - a_MEMORY_HOPE_SIZE
)
730 n_panic("n_calloc(): allocation size too large: %s, line %d",
731 mdbg_file
, mdbg_line
);
732 if((user_s
= (ui32_t
)size
) == 0)
734 if((UI32_MAX
- sizeof(struct a_memory_heap_chunk
) - a_MEMORY_HOPE_SIZE
) /
736 n_panic("n_calloc(): allocation count too large: %s, line %d",
737 mdbg_file
, mdbg_line
);
740 size
+= sizeof(struct a_memory_heap_chunk
) + a_MEMORY_HOPE_SIZE
;
742 if((p
.p_vp
= (malloc
)(size
)) == NULL
)
743 n_panic(_("no memory"));
744 memset(p
.p_vp
, 0, size
);
746 p
.p_hc
->mhc_prev
= NULL
;
747 if((p
.p_hc
->mhc_next
= a_memory_heap_list
) != NULL
)
748 a_memory_heap_list
->mhc_prev
= p
.p_hc
;
750 p
.p_c
->mc_file
= mdbg_file
;
751 p
.p_c
->mc_line
= (ui16_t
)mdbg_line
;
752 p
.p_c
->mc_isfree
= FAL0
;
753 p
.p_c
->mc_user_size
= (user_s
> 0) ? user_s
*= nmemb
: 0;
754 p
.p_c
->mc_size
= (ui32_t
)size
;
756 a_memory_heap_list
= p
.p_hc
++;
757 a_MEMORY_HOPE_SET(p_hc
, p
);
759 ++a_memory_heap_aall
;
760 ++a_memory_heap_acur
;
761 a_memory_heap_amax
= n_MAX(a_memory_heap_amax
, a_memory_heap_acur
);
762 a_memory_heap_mall
+= user_s
;
763 a_memory_heap_mcur
+= user_s
;
764 a_memory_heap_mmax
= n_MAX(a_memory_heap_mmax
, a_memory_heap_mcur
);
770 (n_free
)(void *vp n_MEMORY_DEBUG_ARGS
){
771 union a_memory_ptr p
;
775 if((p
.p_vp
= vp
) == NULL
){
776 n_err("n_free(NULL) from %s, line %d\n", mdbg_file
, mdbg_line
);
780 a_MEMORY_HOPE_GET(p_hc
, p
, isbad
);
783 if(p
.p_c
->mc_isfree
){
784 n_err("n_free(): double-free avoided at %s, line %d\n"
785 "\tLast seen: %s, line %" PRIu16
"\n",
786 mdbg_file
, mdbg_line
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
790 if(p
.p_hc
== a_memory_heap_list
){
791 if((a_memory_heap_list
= p
.p_hc
->mhc_next
) != NULL
)
792 a_memory_heap_list
->mhc_prev
= NULL
;
794 p
.p_hc
->mhc_prev
->mhc_next
= p
.p_hc
->mhc_next
;
795 if(p
.p_hc
->mhc_next
!= NULL
)
796 p
.p_hc
->mhc_next
->mhc_prev
= p
.p_hc
->mhc_prev
;
798 p
.p_c
->mc_file
= mdbg_file
;
799 p
.p_c
->mc_line
= (ui16_t
)mdbg_line
;
800 p
.p_c
->mc_isfree
= TRU1
;
801 /* Trash contents (also see [21c05f8]) */
802 memset(vp
, 0377, p
.p_c
->mc_user_size
);
804 --a_memory_heap_acur
;
805 a_memory_heap_mcur
-= p
.p_c
->mc_user_size
;
807 if((n_psonce
& n_PSO_REPRODUCIBLE
) ||
808 (n_poption
& (n_PO_DEBUG
| n_PO_MEMDEBUG
))){
809 p
.p_hc
->mhc_next
= a_memory_heap_free
;
810 a_memory_heap_free
= p
.p_hc
;
816 #endif /* HAVE_MEMORY_DEBUG */
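
/* (A recap of the debug n_free() above: when debugging/memdebug checking is
 * active a freed chunk is not given back to the system at once but poisoned
 * with 0377 bytes and queued onto a_memory_heap_free; n_memory_reset() then
 * really free(3)s that queue, and a double free or late access in between is
 * caught via the mc_isfree flag respectively the canaries.) */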
819 (n_autorec_alloc_from_pool
)(void *vp
, size_t size n_MEMORY_DEBUG_ARGS
){
820 #ifdef HAVE_MEMORY_DEBUG
823 union a_memory_ptr p
;
825 struct a_memory_ars_buffer
*abp
;
826 struct a_memory_ars_huge
*ahp
;
828 struct a_memory_ars_ctx
*macp
;
831 if((macp
= vp
) == NULL
&& (macp
= n_go_data
->gdc_mempool
) == NULL
)
832 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
834 #ifdef HAVE_MEMORY_DEBUG
835 user_s
= (ui32_t
)size
;
839 #ifdef HAVE_MEMORY_DEBUG
840 size
+= sizeof(struct a_memory_chunk
) + a_MEMORY_HOPE_SIZE
;
842 size
= a_MEMORY_ARS_ROUNDUP(size
);
844 /* Huge allocations are special */
845 if(n_UNLIKELY(size
> a_MEMORY_ARS_MAX
)){
846 #ifdef HAVE_MEMORY_DEBUG
847 if(n_poption
& (n_PO_DEBUG
| n_PO_MEMDEBUG
))
848 n_alert("n_autorec_alloc() of %" PRIuZ
" bytes from %s, line %d",
849 size
, mdbg_file
, mdbg_line
);
854 /* Search for a buffer with enough free space to serve request */
855 for(m2
.abp
= NULL
, m
.abp
= macp
->mac_top
; m
.abp
!= NULL
;
856 m2
.abp
= m
.abp
, m
.abp
= m
.abp
->mab_last
){
857 if((p
.p_cp
= m
.abp
->mab_caster
) <=
858 &m
.abp
->mab_buf
[sizeof(m
.abp
->mab_buf
) - size
]){
         /* Alignment is one thing, but the other is what is usually
          * allocated; about 40 bytes seems to be a good cut-off to avoid
          * unusable caster remainders.  Buffers deemed that "full" are
          * reowned to .mac_full */
862 if(n_UNLIKELY((m
.abp
->mab_caster
= &p
.p_cp
[size
]) >=
863 &m
.abp
->mab_buf
[sizeof(m
.abp
->mab_buf
) - 42])){
865 macp
->mac_top
= m
.abp
->mab_last
;
867 m2
.abp
->mab_last
= m
.abp
->mab_last
;
868 m
.abp
->mab_last
= macp
->mac_full
;
869 macp
->mac_full
= m
.abp
;
875 /* Need a new buffer XXX "page" pool */
876 m
.abp
= n_alloc(sizeof *m
.abp
);
877 m
.abp
->mab_last
= macp
->mac_top
;
878 m
.abp
->mab_caster
= &(m
.abp
->mab_bot
= m
.abp
->mab_buf
)[size
];
879 m
.abp
->mab_relax
= NULL
; /* Indicates allocation after _relax_create() */
880 macp
->mac_top
= m
.abp
;
881 p
.p_cp
= m
.abp
->mab_bot
;
883 #ifdef HAVE_MEMORY_DEBUG
886 a_memory_ars_bmax
= n_MAX(a_memory_ars_bmax
, a_memory_ars_bcur
);
890 #ifdef HAVE_MEMORY_DEBUG
891 p
.p_c
->mc_file
= mdbg_file
;
892 p
.p_c
->mc_line
= (ui16_t
)mdbg_line
;
893 p
.p_c
->mc_user_size
= user_s
;
894 p
.p_c
->mc_size
= (ui32_t
)size
;
896 a_MEMORY_HOPE_SET(p_c
, p
);
899 a_memory_ars_mall
+= user_s
;
905 m
.ahp
= n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge
, mah_buf
) + size
);
906 m
.ahp
->mah_last
= macp
->mac_huge
;
907 macp
->mac_huge
= m
.ahp
;
908 p
.p_cp
= m
.ahp
->mah_buf
;
909 #ifdef HAVE_MEMORY_DEBUG
912 a_memory_ars_hmax
= n_MAX(a_memory_ars_hmax
, a_memory_ars_hcur
);
918 (n_autorec_calloc_from_pool
)(void *vp
, size_t nmemb
, size_t size
919 n_MEMORY_DEBUG_ARGS
){
923 size
*= nmemb
; /* XXX overflow, but only used for struct inits */
924 rv
= (n_autorec_alloc_from_pool
)(vp
, size n_MEMORY_DEBUG_ARGSCALL
);
931 n_autorec_relax_create(void){
932 struct a_memory_ars_ctx
*macp
;
935 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
936 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
938 if(macp
->mac_recur
++ == 0){
939 struct a_memory_ars_buffer
*mabp
;
941 for(mabp
= macp
->mac_top
; mabp
!= NULL
; mabp
= mabp
->mab_last
)
942 mabp
->mab_relax
= mabp
->mab_caster
;
943 for(mabp
= macp
->mac_full
; mabp
!= NULL
; mabp
= mabp
->mab_last
)
944 mabp
->mab_relax
= mabp
->mab_caster
;
946 #if 0 && defined HAVE_DEVEL
948 n_err("n_autorec_relax_create(): recursion >0\n");
954 n_autorec_relax_gut(void){
955 struct a_memory_ars_ctx
*macp
;
958 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
959 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
961 assert(macp
->mac_recur
> 0);
963 if(--macp
->mac_recur
== 0){
964 struct a_memory_ars_buffer
*mabp
;
967 n_autorec_relax_unroll();
970 for(mabp
= macp
->mac_top
; mabp
!= NULL
; mabp
= mabp
->mab_last
)
971 mabp
->mab_relax
= NULL
;
972 for(mabp
= macp
->mac_full
; mabp
!= NULL
; mabp
= mabp
->mab_last
)
973 mabp
->mab_relax
= NULL
;
975 #if 0 && defined HAVE_DEVEL
977 n_err("n_autorec_relax_unroll(): recursion >0\n");
983 n_autorec_relax_unroll(void){
984 /* The purpose of relaxation is only that it is possible to reset the
985 * casters, *not* to give back memory to the system. We are presumably in
986 * an iteration over all messages of a mailbox, and it'd be quite
987 * counterproductive to give the system allocator a chance to waste time */
988 struct a_memory_ars_ctx
*macp
;
991 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
992 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
994 assert(macp
->mac_recur
> 0);
997 if(macp
->mac_recur
== 1){
998 struct a_memory_ars_buffer
*mabp
, *x
, *y
;
1000 /* Buffers in the full list may become usable again! */
1001 for(x
= NULL
, mabp
= macp
->mac_full
; mabp
!= NULL
; mabp
= y
){
1004 if(mabp
->mab_relax
== NULL
||
1005 mabp
->mab_relax
< &mabp
->mab_buf
[sizeof(mabp
->mab_buf
) - 42]){
1010 mabp
->mab_last
= macp
->mac_top
;
1011 macp
->mac_top
= mabp
;
1016 for(mabp
= macp
->mac_top
; mabp
!= NULL
; mabp
= mabp
->mab_last
){
1017 mabp
->mab_caster
= (mabp
->mab_relax
!= NULL
)
1018 ? mabp
->mab_relax
: mabp
->mab_bot
;
1019 #ifdef HAVE_MEMORY_DEBUG
1020 memset(mabp
->mab_caster
, 0377,
1021 PTR2SIZE(&mabp
->mab_buf
[sizeof(mabp
->mab_buf
)] - mabp
->mab_caster
));
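
/* (Typical use of the relaxation trio above, e.g., while iterating over all
 * messages of a mailbox -- an illustration, not code of this file:
 *
 *    n_autorec_relax_create();
 *    for(i = 0; i < msgCount; ++i){
 *       // ... auto-reclaimed allocations for this message ...
 *       n_autorec_relax_unroll(); // rewind casters, keep the buffers
 *    }
 *    n_autorec_relax_gut();
 *
 * so that per-message scratch memory does not accumulate over the loop.) */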
1029 (n_lofi_alloc
)(size_t size n_MEMORY_DEBUG_ARGS
){
1030 #ifdef HAVE_MEMORY_DEBUG
1033 union a_memory_ptr p
;
1034 struct a_memory_ars_lofi
*malp
;
1036 struct a_memory_ars_ctx
*macp
;
1039 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
1040 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
1042 #ifdef HAVE_MEMORY_DEBUG
1043 user_s
= (ui32_t
)size
;
1047 size
+= sizeof(struct a_memory_ars_lofi_chunk
);
1048 #ifdef HAVE_MEMORY_DEBUG
1049 size
+= a_MEMORY_HOPE_SIZE
;
1051 size
= a_MEMORY_LOFI_ROUNDUP(size
);
1053 /* Huge allocations are special */
1054 if(n_UNLIKELY(isheap
= (size
> a_MEMORY_LOFI_MAX
))){
1055 #ifdef HAVE_MEMORY_DEBUG
1056 if(n_poption
& (n_PO_DEBUG
| n_PO_MEMDEBUG
))
1057 n_alert("n_lofi_alloc() of %" PRIuZ
" bytes from %s, line %d",
1058 size
, mdbg_file
, mdbg_line
);
1060 }else if((malp
= macp
->mac_lofi
) != NULL
&&
1061 ((p
.p_cp
= malp
->mal_caster
) <= &malp
->mal_max
[-size
])){
1062 malp
->mal_caster
= &p
.p_cp
[size
];
1066 /* Need a new buffer */
1070 i
= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi
, mal_buf
) + size
;
1071 i
= n_MAX(i
, n_MEMORY_AUTOREC_SIZE
);
1073 malp
->mal_last
= macp
->mac_lofi
;
1074 malp
->mal_caster
= &malp
->mal_buf
[size
];
1075 i
-= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi
, mal_buf
);
1076 malp
->mal_max
= &malp
->mal_buf
[i
];
1077 macp
->mac_lofi
= malp
;
1078 p
.p_cp
= malp
->mal_buf
;
1080 #ifdef HAVE_MEMORY_DEBUG
1081 ++a_memory_lofi_ball
;
1082 ++a_memory_lofi_bcur
;
1083 a_memory_lofi_bmax
= n_MAX(a_memory_lofi_bmax
, a_memory_lofi_bcur
);
1088 p
.p_alc
->malc_last
= macp
->mac_lofi_top
;
1089 macp
->mac_lofi_top
= p
.p_alc
;
1091 p
.p_alc
->malc_last
= (struct a_memory_ars_lofi_chunk
*)
1092 ((uintptr_t)p
.p_alc
->malc_last
| 0x1);
1094 #ifndef HAVE_MEMORY_DEBUG
1097 p
.p_c
->mc_file
= mdbg_file
;
1098 p
.p_c
->mc_line
= (ui16_t
)mdbg_line
;
1099 p
.p_c
->mc_isfree
= FAL0
;
1100 p
.p_c
->mc_user_size
= user_s
;
1101 p
.p_c
->mc_size
= (ui32_t
)size
;
1103 a_MEMORY_HOPE_SET(p_alc
, p
);
1105 ++a_memory_lofi_aall
;
1106 ++a_memory_lofi_acur
;
1107 a_memory_lofi_amax
= n_MAX(a_memory_lofi_amax
, a_memory_lofi_acur
);
1108 a_memory_lofi_mall
+= user_s
;
1109 a_memory_lofi_mcur
+= user_s
;
1110 a_memory_lofi_mmax
= n_MAX(a_memory_lofi_mmax
, a_memory_lofi_mcur
);
1117 (n_lofi_free
)(void *vp n_MEMORY_DEBUG_ARGS
){
1118 #ifdef HAVE_MEMORY_DEBUG
1121 union a_memory_ptr p
;
1122 struct a_memory_ars_ctx
*macp
;
1125 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
1126 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
1128 if((p
.p_vp
= vp
) == NULL
){
1129 #ifdef HAVE_MEMORY_DEBUG
1130 n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file
, mdbg_line
);
1135 #ifdef HAVE_MEMORY_DEBUG
1136 a_MEMORY_HOPE_GET(p_alc
, p
, isbad
);
1139 if(p
.p_c
->mc_isfree
){
1140 n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
1141 "\tLast seen: %s, line %" PRIu16
"\n",
1142 mdbg_file
, mdbg_line
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1145 p
.p_c
->mc_isfree
= TRU1
;
1146 memset(vp
, 0377, p
.p_c
->mc_user_size
);
1148 if(p
.p_alc
!= macp
->mac_lofi_top
){
1149 n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
1150 "\tLast seen: %s, line %" PRIu16
"\n",
1151 mdbg_file
, mdbg_line
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1156 #endif /* HAVE_MEMORY_DEBUG */
1158 a_memory_lofi_free(macp
, --p
.p_alc
);
1164 n_lofi_snap_create(void){ /* TODO avoid temporary alloc */
1168 rv
= n_lofi_alloc(1);
1174 n_lofi_snap_unroll(void *cookie
){ /* TODO optimise */
1175 union a_memory_ptr p
;
1176 struct a_memory_ars_ctx
*macp
;
1181 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
1182 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
1185 p
.p_alc
= macp
->mac_lofi_top
;
1186 a_memory_lofi_free(macp
, p
.p_vp
);
1188 #ifdef HAVE_MEMORY_DEBUG
1189 a_MEMORY_HOPE_INC(p
.p_ui8p
);
1191 if(p
.p_vp
== cookie
)
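
/* (Usage sketch for the snapshot interface above: n_lofi_snap_create()
 * records the current LOFI top by allocating a one byte cookie, and
 * n_lofi_snap_unroll() pops and frees every LOFI allocation made since,
 * the cookie included:
 *
 *    void *snap = n_lofi_snap_create();
 *    // ... any number of n_lofi_alloc()s ...
 *    n_lofi_snap_unroll(snap);
 * ) */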
1197 #ifdef HAVE_MEMORY_DEBUG
1199 c_memtrace(void *vp
){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
1203 struct a_memory_ars_buffer
*mabp
;
1204 struct a_memory_ars_lofi_chunk
*malcp
;
1205 struct a_memory_ars_lofi
*malp
;
1206 struct a_memory_ars_ctx
*macp
;
1208 union a_memory_ptr p
, xp
;
1214 if((fp
= Ftmp(NULL
, "memtr", OF_RDWR
| OF_UNLINK
| OF_REGISTER
)) == NULL
){
1215 n_perr("tmpfile", 0);
1221 "Last-Out-First-In (alloca) storage:\n"
1222 " Buffer cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n"
1223 " Allocations cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n"
1224 " Bytes cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n\n",
1225 a_memory_lofi_bcur
, a_memory_lofi_bmax
, a_memory_lofi_ball
,
1226 a_memory_lofi_acur
, a_memory_lofi_amax
, a_memory_lofi_aall
,
1227 a_memory_lofi_mcur
, a_memory_lofi_mmax
, a_memory_lofi_mall
);
1230 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
1231 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
1232 for(; macp
!= NULL
; macp
= macp
->mac_outer
){
1233 fprintf(fp
, " Evaluation stack context %p (outer: %p):\n",
1234 (void*)macp
, (void*)macp
->mac_outer
);
1237 for(malp
= macp
->mac_lofi
; malp
!= NULL
;){
1238 fprintf(fp
, " Buffer %p%s, %" PRIuZ
"/%" PRIuZ
" used/free:\n",
1239 (void*)malp
, ((uintptr_t)malp
->mal_last
& 0x1 ? " (huge)" : ""),
1240 PTR2SIZE(malp
->mal_caster
- &malp
->mal_buf
[0]),
1241 PTR2SIZE(malp
->mal_max
- malp
->mal_caster
));
1243 malp
= malp
->mal_last
;
1244 malp
= (struct a_memory_ars_lofi
*)((uintptr_t)malp
& ~1);
1247 for(malcp
= macp
->mac_lofi_top
; malcp
!= NULL
;){
1249 malcp
= (struct a_memory_ars_lofi_chunk
*)
1250 ((uintptr_t)malcp
->malc_last
& ~0x1);
1253 a_MEMORY_HOPE_GET_TRACE(p_alc
, xp
, isbad
);
1254 fprintf(fp
, " %s%p (%u bytes): %s, line %u\n",
1255 (isbad
? "! CANARY ERROR (LOFI): " : ""), xp
.p_vp
,
1256 p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1261 "\nAuto-reclaimed storage:\n"
1262 " Buffers cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n"
1263 " Huge allocations cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n"
1264 " Allocations all: %" PRIuZ
", Bytes all: %" PRIuZ
"\n\n",
1265 a_memory_ars_bcur
, a_memory_ars_bmax
, a_memory_ars_ball
,
1266 a_memory_ars_hcur
, a_memory_ars_hmax
, a_memory_ars_hall
,
1267 a_memory_ars_aall
, a_memory_ars_mall
);
1270 for(macp
= n_go_data
->gdc_mempool
; macp
!= NULL
; macp
= macp
->mac_outer
){
1271 fprintf(fp
, " Evaluation stack context %p (outer: %p):\n",
1272 (void*)macp
, (void*)macp
->mac_outer
);
1275 for(mabp
= macp
->mac_top
; mabp
!= NULL
; mabp
= mabp
->mab_last
){
1276 fprintf(fp
, " Buffer %p, %" PRIuZ
"/%" PRIuZ
" used/free:\n",
1278 PTR2SIZE(mabp
->mab_caster
- &mabp
->mab_buf
[0]),
1279 PTR2SIZE(&mabp
->mab_buf
[sizeof(mabp
->mab_buf
)] - mabp
->mab_caster
));
1282 for(p
.p_cp
= mabp
->mab_buf
; p
.p_cp
< mabp
->mab_caster
;
1283 ++lines
, p
.p_cp
+= p
.p_c
->mc_size
){
1286 a_MEMORY_HOPE_GET_TRACE(p_c
, xp
, isbad
);
1287 fprintf(fp
, " %s%p (%u bytes): %s, line %u\n",
1288 (isbad
? "! CANARY ERROR (ARS, top): " : ""), xp
.p_vp
,
1289 p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1294 for(mabp
= macp
->mac_full
; mabp
!= NULL
; mabp
= mabp
->mab_last
){
1295 fprintf(fp
, " Buffer %p, full:\n", (void*)mabp
);
1298 for(p
.p_cp
= mabp
->mab_buf
; p
.p_cp
< mabp
->mab_caster
;
1299 ++lines
, p
.p_cp
+= p
.p_c
->mc_size
){
1302 a_MEMORY_HOPE_GET_TRACE(p_c
, xp
, isbad
);
1303 fprintf(fp
, " %s%p (%u bytes): %s, line %u\n",
1304 (isbad
? "! CANARY ERROR (ARS, full): " : ""), xp
.p_vp
,
1305 p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1312 "\nHeap memory buffers:\n"
1313 " Allocation cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n"
1314 " Bytes cur/peek/all: %7" PRIuZ
"/%7" PRIuZ
"/%10" PRIuZ
"\n\n",
1315 a_memory_heap_acur
, a_memory_heap_amax
, a_memory_heap_aall
,
1316 a_memory_heap_mcur
, a_memory_heap_mmax
, a_memory_heap_mall
);
1319 for(p
.p_hc
= a_memory_heap_list
; p
.p_hc
!= NULL
;
1320 ++lines
, p
.p_hc
= p
.p_hc
->mhc_next
){
1323 a_MEMORY_HOPE_GET_TRACE(p_hc
, xp
, isbad
);
1324 fprintf(fp
, " %s%p (%u bytes): %s, line %u\n",
1325 (isbad
? "! CANARY ERROR (heap): " : ""), xp
.p_vp
,
1326 p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1329 if((n_psonce
& n_PSO_REPRODUCIBLE
) ||
1330 (n_poption
& (n_PO_DEBUG
| n_PO_MEMDEBUG
))){
1331 fprintf(fp
, "Heap buffers lingering for n_free():\n");
1334 for(p
.p_hc
= a_memory_heap_free
; p
.p_hc
!= NULL
;
1335 ++lines
, p
.p_hc
= p
.p_hc
->mhc_next
){
1338 a_MEMORY_HOPE_GET_TRACE(p_hc
, xp
, isbad
);
1339 fprintf(fp
, " %s%p (%u bytes): %s, line %u\n",
1340 (isbad
? "! CANARY ERROR (free): " : ""), xp
.p_vp
,
1341 p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1345 page_or_print(fp
, lines
);
1350 return (vp
!= NULL
);
1354 n__memory_check(char const *mdbg_file
, int mdbg_line
){
1355 union a_memory_ptr p
, xp
;
1356 struct a_memory_ars_buffer
*mabp
;
1357 struct a_memory_ars_lofi_chunk
*malcp
;
1358 struct a_memory_ars_ctx
*macp
;
1359 bool_t anybad
, isbad
;
1364 if((macp
= n_go_data
->gdc_mempool
) == NULL
)
1365 macp
= n_go_data
->gdc_mempool
= n_go_data
->gdc__mempool_buf
;
1369 for(malcp
= macp
->mac_lofi_top
; malcp
!= NULL
;){
1371 malcp
= (struct a_memory_ars_lofi_chunk
*)
1372 ((uintptr_t)malcp
->malc_last
& ~0x1);
1375 a_MEMORY_HOPE_GET_TRACE(p_alc
, xp
, isbad
);
1379 "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
1380 xp
.p_vp
, p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1384 /* Auto-reclaimed */
1386 for(mabp
= macp
->mac_top
; mabp
!= NULL
; mabp
= mabp
->mab_last
){
1387 for(p
.p_cp
= mabp
->mab_buf
; p
.p_cp
< mabp
->mab_caster
;
1388 p
.p_cp
+= p
.p_c
->mc_size
){
1391 a_MEMORY_HOPE_GET_TRACE(p_c
, xp
, isbad
);
1395 "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
1396 xp
.p_vp
, p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1401 for(mabp
= macp
->mac_full
; mabp
!= NULL
; mabp
= mabp
->mab_last
){
1402 for(p
.p_cp
= mabp
->mab_buf
; p
.p_cp
< mabp
->mab_caster
;
1403 p
.p_cp
+= p
.p_c
->mc_size
){
1406 a_MEMORY_HOPE_GET_TRACE(p_c
, xp
, isbad
);
1410 "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
1411 xp
.p_vp
, p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1418 for(p
.p_hc
= a_memory_heap_list
; p
.p_hc
!= NULL
; p
.p_hc
= p
.p_hc
->mhc_next
){
1421 a_MEMORY_HOPE_GET_TRACE(p_hc
, xp
, isbad
);
1425 "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
1426 xp
.p_vp
, p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1430 if((n_psonce
& n_PSO_REPRODUCIBLE
) ||
1431 (n_poption
& (n_PO_DEBUG
| n_PO_MEMDEBUG
))){
1432 for(p
.p_hc
= a_memory_heap_free
; p
.p_hc
!= NULL
;
1433 p
.p_hc
= p
.p_hc
->mhc_next
){
1436 a_MEMORY_HOPE_GET_TRACE(p_hc
, xp
, isbad
);
1440 "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
1441 xp
.p_vp
, p
.p_c
->mc_user_size
, p
.p_c
->mc_file
, p
.p_c
->mc_line
);
1446 if(anybad
&& ok_blook(memdebug
))
1447 n_panic("Memory errors encountered");
1451 #endif /* HAVE_MEMORY_DEBUG */