/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2018 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif
/* We use per-execution context memory arenas, to be found in
 * n_go_data->gdc_mempool; if that is NULL it is set to ->gdc__mempool_buf.
 * n_memory_reset(), which happens on loop ticks, reclaims their memory and,
 * #ifdef HAVE_MEMORY_DEBUG, also performs debug checks.
 * The arena that is used already during program startup is special in that
 * _pool_fixate() will set "a lower bound" in order not to reclaim memory that
 * must be kept vivid during the lifetime of the program.
 * That was also so in the historical code with the globally shared single
 * string dope implementation.  (And it still seems easier than bypassing to
 * normal heap memory before _fixate() is called, today.)
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c): it is
 * a steadily growing pool (but _relax_create()..[_relax_unroll()..]..
 * _relax_gut() can be used to reduce pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one which
 * requires lofi_free()ing pointers (otherwise it grows until
 * n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed
 * TODO (and reused) at any time, just like normal heap memory.  It is
 * TODO notational in that it clearly states that the allocation will go away
 * TODO after a loop tick, and also we can use some buffer caches.
 */
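/* A rough, non-normative sketch of the three lifetimes described above, using
 * only interfaces defined in this file (sizes invented for the example):
 *
 *    vp = n_alloc(128);          // heap: lives until n_free(vp)
 *    cp = n_autorec_alloc(128);  // ARS: reclaimed at n_memory_reset() time
 *    lp = n_lofi_alloc(128);     // LOFI: must be n_lofi_free()d, LIFO order
 */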
/* If defined (and HAVE_MEMORY_DEBUG), realloc acts like alloc+free, which can
 * help to catch bogus double-free attempts */
#define a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE /* TODO runtime opt <> C++ cache */
/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");
/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)
#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bits");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))
# define a_MEMORY_HOPE_INC(P) (P) += 8
# define a_MEMORY_HOPE_DEC(P) (P) -= 8
/* We use address-induced canary values, with inspiration (though not the
 * invention) and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
}while(0)
# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)
# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   a_MEMORY_HOPE_INC((C).p_cp);\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   a_MEMORY_HOPE_INC((C).p_cp);\
}while(0)
# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
   __xp.p_vp = (C).p_vp;\
   a_MEMORY_HOPE_DEC(__xp.p_cp);\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
      if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
      if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
      if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
      if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
      if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
      if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
      if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
      if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
      if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
      if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
      if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
      if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
      if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
      if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
      if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
      if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) |= TRU1;\
      a_MEMORY_HOPE_INC((C).p_cp);\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp, __i, mdbg_file, mdbg_line);\
      a_MEMORY_HOPE_DEC((C).p_cp);\
   }\
   if(BAD)\
      n_alert("  ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */
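/* For orientation, the chunk layout the HOPE macros maintain (a sketch
 * derived from the macros above, not additional normative documentation):
 *
 *    [struct a_memory_chunk][8 lower canary bytes][user data][8 upper bytes]
 *                           ^                     ^
 *                           p before HOPE_INC     pointer handed to the user
 *
 * Each guard byte is derived from its own address via the multiplicative
 * hash, so a block that is moved or overwritten fails verification.  Also
 * note that "a_MEMORY_HOPE_INC(__xp.p_ui8p) + __xc->mc_user_size;" expands
 * to "(__xp.p_ui8p) += 8 + __xc->mc_user_size;": the += swallows the
 * following sum, advancing past the lower canary and the user area at once */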
#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui16_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[1];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* The heap memory n_free() may become delayed to detect double frees.
 * It is primitive, but ok: speed and memory usage don't matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */
struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};
union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};
struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, cpl. filled */
   size_t mac_recur;                      /* _relax_create() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_POOL_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "struct n_go_data_ctx.gdc_mempool is not large enough for memory pool");
struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate(): keep startup memory lingering */
   char *mab_relax;  /* If !NULL, used by _relax_unroll() instead of .mab_bot */
   char *mab_caster; /* Point of casting off memory */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_MEMORY_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
      < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif
/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This can be
 * seen as a security fallback bypass only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};
struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};
#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
      a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
      a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
      a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
      a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
      a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif
n_INLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);
n_INLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last - that we'll keep until _pool_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            n_free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
}
static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         n_free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      n_free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL &&
         ((n_psonce & n_PSO_REPRODUCIBLE) ||
          (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))))
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
}
FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;

   n_memory_check();

   if((macp = n_go_data->gdc_mempool) != NULL){
      /* First of all reset auto-reclaimed storage so that heap freed during
       * this can be handled in a second step */
      /* TODO v15 active recursion can only happen after a jump */
      if(macp->mac_recur > 0){
         macp->mac_recur = 1;
         n_autorec_relax_gut();
      }
      a_memory_ars_reset(macp);
   }

   /* Now we are ready to deal with heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      (free)(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
}
FL void
n_memory_pool_fixate(void){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) != NULL){
      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_bot = mabp->mab_caster;
   }
}
FL void
n_memory_pool_push(void *vp){
   struct a_memory_ars_ctx *macp;

   if(n_go_data->gdc_mempool == NULL)
      n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   memset(macp = vp, 0, sizeof *macp);
   macp->mac_outer = n_go_data->gdc_mempool;
   n_go_data->gdc_mempool = macp;
}
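/* Usage sketch (the caller shown is an assumption, not taken from this file):
 * a command that needs a private arena brackets its work in push/pop; the
 * n_CTA() above guarantees a plain byte buffer is large enough:
 *
 *    char pool[n_MEMORY_POOL_TYPE_SIZEOF];
 *
 *    n_memory_pool_push(pool);
 *    // ... n_autorec_alloc(), n_lofi_alloc()/n_lofi_free() ...
 *    n_memory_pool_pop(pool);
 */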
FL void
n_memory_pool_pop(void *vp){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;

   if((macp = vp) == NULL){
      macp = n_go_data->gdc_mempool;
      assert(macp != NULL);
   }else{
      /* XXX May not be ARS top upon jump */
      while(n_go_data->gdc_mempool != macp){
         DBG( n_err("ARS pop %p to reach freed context\n",
            n_go_data->gdc_mempool); )
         n_memory_pool_pop(n_go_data->gdc_mempool);
      }
   }
   n_go_data->gdc_mempool = macp->mac_outer;

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   mabp = macp->mac_top;
   macp->mac_top = NULL;
   while(mabp != NULL){
      vp = mabp;
      mabp = mabp->mab_last;
      n_free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if((vp = macp->mac_lofi) != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      macp->mac_lofi = NULL;
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
      n_free(vp);
   }
}
#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;

   if(s == 0)
      s = 1;
   if((rv = realloc(vp, s)) == NULL)
      n_panic(_("no memory"));
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   return rv;
}

FL void
n_free(void *vp){
   free(vp);
}
#else /* !HAVE_MEMORY_DEBUG */

FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   return p.p_vp;
}
FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
# ifndef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
   ui32_t user_s;
# endif
   bool_t isbad;
   union a_memory_ptr p;

   if((p.p_vp = vp) == NULL){
jforce:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   if(isbad)
      goto jforce;

   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforce;
   }

# ifdef a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE
   /* C99 */{
      void *xp;

      xp = (n_alloc)(s, mdbg_file, mdbg_line);
      memcpy(xp, vp, n_MIN(s, p.p_c->mc_user_size));
      (n_free)(vp, mdbg_file, mdbg_line);
      p.p_vp = xp;
   }
# else
   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
# endif /* a_MEMORY_REALLOC_IS_ALLOC_PLUS_FREE */
jleave:
   return p.p_vp;
}
FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;

   if(nmemb == 0)
      nmemb = 1;

   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   return p.p_vp;
}
FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);

   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      (free)(p.p_vp);
jleave:
   ;
}
#endif /* HAVE_MEMORY_DEBUG */
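/* Aside: the parentheses around the definitions above, e.g. (n_free), exist
 * to suppress function-like macro expansion, which suggests the public names
 * are macros that splice in the caller's location.  A plausible shape (an
 * assumption for illustration, the real definitions live in the headers):
 *
 *    #define n_alloc(S) (n_alloc)(S, __FILE__, __LINE__)
 *
 * which is what feeds the mdbg_file/mdbg_line arguments seen throughout */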
FL void *
(n_autorec_alloc_from_pool)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;

   if((macp = vp) == NULL && (macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      size = 1;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters.  Reown buffers supposed to be "full" to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Indicates allocation after _relax_create() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}
FL void *
(n_autorec_calloc_from_pool)(void *vp, size_t nmemb, size_t size
      n_MEMORY_DEBUG_ARGS){
   void *rv;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc_from_pool)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   return rv;
}
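/* Illustrative use (the buffer size is made up for the example): ARS scratch
 * memory is simply taken and never explicitly freed:
 *
 *    char *cp = n_autorec_alloc(128);
 *    // ... use cp; the next n_memory_reset() (loop tick) reclaims it ...
 */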
FL void
n_autorec_relax_create(void){
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }else
      n_err("n_autorec_relax_create(): recursion >0\n");
}
FL void
n_autorec_relax_gut(void){
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      n_autorec_relax_unroll();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }else
      n_err("n_autorec_relax_unroll(): recursion >0\n");
}
FL void
n_autorec_relax_unroll(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   assert(macp->mac_recur > 0);

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
}
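/* Sketch of the relax protocol the three functions above implement (the loop
 * body and the msg/m_next names are invented for the example):
 *
 *    n_autorec_relax_create();
 *    for(msg = first; msg != NULL; msg = msg->m_next){
 *       // ... n_autorec_alloc() scratch memory for this message ...
 *       n_autorec_relax_unroll();  // drop the scratch, keep the buffers
 *    }
 *    n_autorec_relax_gut();
 */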
FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      size = 1;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))
         n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
            size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(n_UNLIKELY(isheap))
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   return p.p_vp;
}
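/* Note the low bit of .malc_last as the "heap" tag set above: buffers for
 * oversized requests are dedicated heap allocations, and a_memory_lofi_free()
 * keys the immediate release of the whole buffer off that bit */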
FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);

   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   ;
}
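/* Usage sketch (names invented for the example): LOFI is an alloca(3)
 * replacement, so frees must happen in reverse allocation order, which is
 * exactly what the "not alloca top" diagnostic above enforces:
 *
 *    void *a = n_lofi_alloc(32), *b = n_lofi_alloc(64);
 *    n_lofi_free(b);
 *    n_lofi_free(a);
 */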
FL void *
n_lofi_snap_create(void){ /* TODO avoid temporary alloc */
   void *rv;

   rv = n_lofi_alloc(1);
   return rv;
}
FL void
n_lofi_snap_unroll(void *cookie){ /* TODO optimise */
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   for(;;){
      p.p_alc = macp->mac_lofi_top;
      a_memory_lofi_free(macp, p.p_vp);
      ++p.p_alc;
#ifdef HAVE_MEMORY_DEBUG
      a_MEMORY_HOPE_INC(p.p_ui8p);
#endif
      if(p.p_vp == cookie)
         break;
   }
}
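/* Usage sketch: a snap captures the current LOFI top, and unrolling releases
 * everything allocated after it, the cookie allocation included:
 *
 *    void *snap = n_lofi_snap_create();
 *    // ... arbitrarily many n_lofi_alloc()s ...
 *    n_lofi_snap_unroll(snap);
 */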
#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;

   vp = (void*)0x1;
   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      " Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 5;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, "  Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, "   Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         ++lines;
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      " Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 5;

   for(macp = n_go_data->gdc_mempool; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, "  Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, "   Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, "   Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      " Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 4;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, "  %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      fprintf(fp, "Heap buffers lingering for n_free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, "  %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   return (vp != NULL);
}
FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;

   anybad = FAL0;

   if((macp = n_go_data->gdc_mempool) == NULL)
      macp = n_go_data->gdc_mempool = n_go_data->gdc__mempool_buf;

   /* Alloca */
   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */
   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */
   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if((n_psonce & n_PSO_REPRODUCIBLE) ||
         (n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG))){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */