/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *@ Heap memory and automatically reclaimed storage.
 *@ TODO Back the _flux_ heap.
 *@ TODO Add cache for "the youngest" two or three n_MEMORY_AUTOREC_SIZE arenas
 *
 * Copyright (c) 2012 - 2017 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 */
/*
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#undef n_FILE
#define n_FILE memory

#ifndef HAVE_AMALGAMATION
# include "nail.h"
#endif

/*
 * Our (main)loops _autorec_push() arenas for their lifetime, the
 * n_memory_reset() that happens on loop ticks reclaims their memory, and,
 * #ifdef HAVE_MEMORY_DEBUG, also performs debug checks on the former.
 * There is one global anonymous autorec arena which is used during the
 * startup phase and for the interactive n_commands() instance -- this special
 * arena is autorec_fixate()d from within main.c to not waste space, i.e.,
 * remaining arena memory is reused and subject to normal _reset() reclaiming.
 * That was also so in the historical code with its globally shared single
 * string dope implementation.
 *
 * AutoReclaimedStorage memory is the follow-up to the historical "stringdope"
 * allocator from 1979 (see [timeline:a7342d9]:src/Mail/strings.c): it is
 * a steadily growing pool (but srelax_hold()..[:srelax():]..srelax_rele() can
 * be used to reduce the pressure) until n_memory_reset() time.
 *
 * LastOutFirstIn memory is meant as an alloca(3) replacement, but one that
 * requires lofi_free()ing pointers (otherwise it grows until
 * n_memory_reset()).
 *
 * TODO Flux heap memory is like LOFI except that any pointer can be freed (and
 * TODO reused) at any time, just like normal heap memory.  It is notational in
 * TODO that it clearly states that the allocation will go away after a loop
 * TODO tick, and also we can use some buffer caches.
 */
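
/* A minimal usage sketch of the two user-visible pools described above, as
 * seen from a hypothetical command loop; the names are the interfaces that
 * this file defines further below:
 *
 *    union {struct a_memory_ars_ctx c;
 *       char b[n_MEMORY_AUTOREC_TYPE_SIZEOF];} a;
 *
 *    n_memory_autorec_push(&a);            // new arena for this loop
 *    for(;;){                              // the loop proper (sketched)
 *       char *cp = n_autorec_alloc(NULL, 128); // reclaimed on loop tick
 *       void *lofi = n_lofi_alloc(64);     // pseudo alloca(3)
 *       n_lofi_free(lofi);                 // LOFI wants explicit LIFO freeing
 *       n_memory_reset();                  // the loop tick reclaims memory
 *    }
 *    n_memory_autorec_pop(&a);             // arena lifetime ends
 */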

/* Maximum allocation (directly) handled by A-R-Storage */
#define a_MEMORY_ARS_MAX (n_MEMORY_AUTOREC_SIZE / 2 + n_MEMORY_AUTOREC_SIZE / 4)
#define a_MEMORY_LOFI_MAX a_MEMORY_ARS_MAX

n_CTA(a_MEMORY_ARS_MAX > 1024,
   "Auto-reclaimed memory requires a larger buffer size"); /* Anyway > 42! */
n_CTA(n_ISPOW2(n_MEMORY_AUTOREC_SIZE),
   "Buffers should be POW2 (may be wasteful on native allocators otherwise)");

/* Alignment of ARS memory.  Simply go for pointer alignment */
#define a_MEMORY_ARS_ROUNDUP(S) n_ALIGN_SMALL(S)
#define a_MEMORY_LOFI_ROUNDUP(S) a_MEMORY_ARS_ROUNDUP(S)

#ifdef HAVE_MEMORY_DEBUG
n_CTA(sizeof(char) == sizeof(ui8_t), "But POSIX says a byte is 8 bit");

# define a_MEMORY_HOPE_SIZE (2 * 8 * sizeof(char))

/* We use address-induced canary values, inspiration (though not the
 * invention) and primes from maxv@netbsd.org, src/sys/kern/subr_kmem.c */
# define a_MEMORY_HOPE_LOWER(S,P) \
do{\
   ui64_t __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   __h__ >>= 56;\
   (S) = (ui8_t)__h__;\
}while(0)

# define a_MEMORY_HOPE_UPPER(S,P) \
do{\
   ui32_t __i__;\
   ui64_t __x__, __h__ = (uintptr_t)(P);\
   __h__ *= ((ui64_t)0x9E37FFFFu << 32) | 0xFFFC0000u;\
   for(__i__ = 56; __i__ != 0; __i__ -= 8)\
      if((__x__ = (__h__ >> __i__)) != 0){\
         (S) = (ui8_t)__x__;\
         break;\
      }\
   if(__i__ == 0)\
      (S) = 0xAAu;\
}while(0)

# define a_MEMORY_HOPE_SET(T,C) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   __xp.p_vp = (C).p_vp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (C).p_cp += 8;\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_LOWER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
   __xp.p_ui8p += 8 + __xc->mc_user_size;\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[0], &__xp.p_ui8p[0]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[1], &__xp.p_ui8p[1]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[2], &__xp.p_ui8p[2]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[3], &__xp.p_ui8p[3]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[4], &__xp.p_ui8p[4]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[5], &__xp.p_ui8p[5]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[6], &__xp.p_ui8p[6]);\
   a_MEMORY_HOPE_UPPER(__xp.p_ui8p[7], &__xp.p_ui8p[7]);\
}while(0)
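
/* With that, a debug-enabled chunk has this layout (a sketch; the pointer the
 * user sees is the one past the lower canary, as produced by the
 * "(C).p_cp += 8" adjustment in a_MEMORY_HOPE_SET() above):
 *
 *    [a_memory_chunk][8 lower canary bytes][mc_user_size bytes][8 upper bytes]
 *                    |                     |
 *                    canaries start here   pointer handed to the user
 *
 * Since every canary byte is derived from its own address, a block that is
 * written out of bounds (or copied elsewhere) no longer verifies in
 * a_MEMORY_HOPE_GET() */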

# define a_MEMORY_HOPE_GET_TRACE(T,C,BAD) \
do{\
   (C).p_cp += 8;\
   a_MEMORY_HOPE_GET(T, C, BAD);\
   (C).p_cp += 8;\
}while(0)

# define a_MEMORY_HOPE_GET(T,C,BAD) \
do{\
   union a_memory_ptr __xp;\
   struct a_memory_chunk *__xc;\
   ui32_t __i;\
   ui8_t __m;\
\
   __xp.p_vp = (C).p_vp;\
   __xp.p_cp -= 8;\
   (C).p_cp = __xp.p_cp;\
   __xc = (struct a_memory_chunk*)(__xp.T - 1);\
   (BAD) = FAL0;\
   __i = 0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_LOWER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         (C).p_cp + 8, __i, mdbg_file, mdbg_line);\
   }\
   __xp.p_ui8p += 8 + __xc->mc_user_size;\
   __i = 0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[0]);\
   if(__xp.p_ui8p[0] != __m) __i |= 1<<0;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[1]);\
   if(__xp.p_ui8p[1] != __m) __i |= 1<<1;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[2]);\
   if(__xp.p_ui8p[2] != __m) __i |= 1<<2;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[3]);\
   if(__xp.p_ui8p[3] != __m) __i |= 1<<3;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[4]);\
   if(__xp.p_ui8p[4] != __m) __i |= 1<<4;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[5]);\
   if(__xp.p_ui8p[5] != __m) __i |= 1<<5;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[6]);\
   if(__xp.p_ui8p[6] != __m) __i |= 1<<6;\
   a_MEMORY_HOPE_UPPER(__m, &__xp.p_ui8p[7]);\
   if(__xp.p_ui8p[7] != __m) __i |= 1<<7;\
   if(__i != 0){\
      (BAD) = TRU1;\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         (C).p_cp + 8, __i, mdbg_file, mdbg_line);\
   }\
   if(BAD)\
      n_alert("   ..canary last seen: %s, line %u",\
         __xc->mc_file, __xc->mc_line);\
}while(0)
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
struct a_memory_chunk{
   char const *mc_file;
   ui16_t mc_line;
   ui8_t mc_isfree;
   ui8_t mc__dummy[1];
   ui32_t mc_user_size;
   ui32_t mc_size;
};

/* The heap memory free() may be delayed to detect double frees.
 * It is primitive, but ok: speed and memory usage do not matter here */
struct a_memory_heap_chunk{
   struct a_memory_chunk mhc_super;
   struct a_memory_heap_chunk *mhc_prev;
   struct a_memory_heap_chunk *mhc_next;
};
#endif /* HAVE_MEMORY_DEBUG */

struct a_memory_ars_lofi_chunk{
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk malc_super;
#endif
   struct a_memory_ars_lofi_chunk *malc_last; /* Bit 1 set: it's a heap alloc */
};

union a_memory_ptr{
   void *p_vp;
   char *p_cp;
   ui8_t *p_ui8p;
#ifdef HAVE_MEMORY_DEBUG
   struct a_memory_chunk *p_c;
   struct a_memory_heap_chunk *p_hc;
#endif
   struct a_memory_ars_lofi_chunk *p_alc;
};

struct a_memory_ars_ctx{
   struct a_memory_ars_ctx *mac_outer;
   struct a_memory_ars_buffer *mac_top;   /* Alloc stack */
   struct a_memory_ars_buffer *mac_full;  /* Alloc stack, completely filled */
   size_t mac_recur;                      /* srelax_hold() recursion */
   struct a_memory_ars_huge *mac_huge;    /* Huge allocation bypass list */
   struct a_memory_ars_lofi *mac_lofi;    /* Pseudo alloca */
   struct a_memory_ars_lofi_chunk *mac_lofi_top;
};
n_CTA(n_MEMORY_AUTOREC_TYPE_SIZEOF >= sizeof(struct a_memory_ars_ctx),
   "Our command loops do not provide enough memory for auto-reclaimed storage");

struct a_memory_ars_buffer{
   struct a_memory_ars_buffer *mab_last;
   char *mab_bot;    /* For _autorec_fixate().  Only used for the global _ctx */
   char *mab_relax;  /* If !NULL, used by srelax() instead of .mab_bot */
   char *mab_caster; /* Point of casting memory, NULL if full */
   char mab_buf[n_MEMORY_AUTOREC_SIZE - (4 * sizeof(void*))];
};
n_CTA(sizeof(struct a_memory_ars_buffer) == n_MEMORY_AUTOREC_SIZE,
   "Resulting structure size is not the expected one");
#ifdef HAVE_MEMORY_DEBUG
n_CTA(a_MEMORY_ARS_MAX + a_MEMORY_HOPE_SIZE + sizeof(struct a_memory_chunk)
      < n_SIZEOF_FIELD(struct a_memory_ars_buffer, mab_buf),
   "Memory layout of auto-reclaimed storage does not work out that way");
#endif

/* Requests that exceed a_MEMORY_ARS_MAX are always served by the normal
 * memory allocator (which panics if memory cannot be served).  This huge
 * bypass exists as a security fallback only */
struct a_memory_ars_huge{
   struct a_memory_ars_huge *mah_last;
   char mah_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};

struct a_memory_ars_lofi{
   struct a_memory_ars_lofi *mal_last;
   char *mal_caster;
   char *mal_max;
   char mal_buf[n_VFIELD_SIZE(a_MEMORY_ARS_ROUNDUP(1))];
};
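
/* An aside on the .malc_last tag bit used below (a sketch, not code of this
 * file): because every LOFI chunk is a_MEMORY_LOFI_ROUNDUP()ed to at least
 * pointer alignment, the least significant bit of a chunk address is always
 * zero and can thus carry the "served by the heap" flag:
 *
 *    p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
 *          ((uintptr_t)p.p_alc->malc_last | 0x1);          // tag
 *    isheap = ((uintptr_t)p.p_alc->malc_last & 0x1) != 0;  // test
 *    last = (struct a_memory_ars_lofi_chunk*)
 *          ((uintptr_t)p.p_alc->malc_last & ~0x1);         // untag
 */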

#ifdef HAVE_MEMORY_DEBUG
static size_t a_memory_heap_aall, a_memory_heap_acur, a_memory_heap_amax,
      a_memory_heap_mall, a_memory_heap_mcur, a_memory_heap_mmax;
static struct a_memory_heap_chunk *a_memory_heap_list, *a_memory_heap_free;

static size_t a_memory_ars_ball, a_memory_ars_bcur, a_memory_ars_bmax,
      a_memory_ars_hall, a_memory_ars_hcur, a_memory_ars_hmax,
      a_memory_ars_aall, a_memory_ars_mall;

static size_t a_memory_lofi_ball, a_memory_lofi_bcur, a_memory_lofi_bmax,
      a_memory_lofi_aall, a_memory_lofi_acur, a_memory_lofi_amax,
      a_memory_lofi_mall, a_memory_lofi_mcur, a_memory_lofi_mmax;
#endif

/* The anonymous global topmost auto-reclaimed storage instance, and the
 * current top of the stack for recursions, `source's etc. */
static struct a_memory_ars_ctx a_memory_ars_global;
static struct a_memory_ars_ctx *a_memory_ars_top;

SINLINE void a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp);

/* Reset an ars_ctx */
static void a_memory_ars_reset(struct a_memory_ars_ctx *macp);

SINLINE void
a_memory_lofi_free(struct a_memory_ars_ctx *macp, void *vp){
   struct a_memory_ars_lofi *malp;
   union a_memory_ptr p;
   NYD2_ENTER;

   p.p_vp = vp;
#ifdef HAVE_MEMORY_DEBUG
   --a_memory_lofi_acur;
   a_memory_lofi_mcur -= p.p_c->mc_user_size;
#endif

   /* The heap allocations are released immediately */
   if((uintptr_t)p.p_alc->malc_last & 0x1){
      malp = macp->mac_lofi;
      macp->mac_lofi = malp->mal_last;
      macp->mac_lofi_top = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last & ~0x1);
      free(malp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }else{
      macp->mac_lofi_top = p.p_alc->malc_last;

      /* The normal arena ones only if the arena is empty, except for when
       * it is the last one - that we keep until _autorec_pop() or exit(3) */
      if(p.p_cp == (malp = macp->mac_lofi)->mal_buf){
         if(malp->mal_last != NULL){
            macp->mac_lofi = malp->mal_last;
            free(malp);
#ifdef HAVE_MEMORY_DEBUG
            --a_memory_lofi_bcur;
#endif
         }
      }else
         malp->mal_caster = p.p_cp;
   }
   NYD2_LEAVE;
}

static void
a_memory_ars_reset(struct a_memory_ars_ctx *macp){
   union{
      struct a_memory_ars_lofi_chunk *alcp;
      struct a_memory_ars_lofi *alp;
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   NYD2_ENTER;

   /* Simply move all buffers away from .mac_full */
   for(m.abp = macp->mac_full; m.abp != NULL; m.abp = m2.abp){
      m2.abp = m.abp->mab_last;
      m.abp->mab_last = macp->mac_top;
      macp->mac_top = m.abp;
   }
   macp->mac_full = NULL;

   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;){
      struct a_memory_ars_buffer *x;

      x = m.abp;
      m.abp = m.abp->mab_last;

      /* Give away all buffers that are not covered by autorec_fixate() */
      if(x->mab_bot == x->mab_buf){
         if(m2.abp == NULL)
            macp->mac_top = m.abp;
         else
            m2.abp->mab_last = m.abp;
         free(x);
#ifdef HAVE_MEMORY_DEBUG
         --a_memory_ars_bcur;
#endif
      }else{
         m2.abp = x;
         x->mab_caster = x->mab_bot;
         x->mab_relax = NULL;
#ifdef HAVE_MEMORY_DEBUG
         memset(x->mab_caster, 0377,
            PTR2SIZE(&x->mab_buf[sizeof(x->mab_buf)] - x->mab_caster));
#endif
      }
   }

   while((m.ahp = macp->mac_huge) != NULL){
      macp->mac_huge = m.ahp->mah_last;
      free(m.ahp);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_ars_hcur;
#endif
   }

   /* "alloca(3)" memory goes away, too.  XXX Must be last as long as we jump */
#ifdef HAVE_MEMORY_DEBUG
   if(macp->mac_lofi_top != NULL)
      n_alert("There still is LOFI memory upon ARS reset!");
#endif
   while((m.alcp = macp->mac_lofi_top) != NULL)
      a_memory_lofi_free(macp, m.alcp);
   NYD2_LEAVE;
}

FL void
n_memory_reset(void){
#ifdef HAVE_MEMORY_DEBUG
   union a_memory_ptr p;
   size_t c, s;
#endif
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   n_memory_check();

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* First of all reset auto-reclaimed storage so that heap freed during this
    * can be handled in a second step */
   /* TODO v15 active recursion can only happen after a jump */
   if(macp->mac_recur > 0){
      macp->mac_recur = 1;
      srelax_rele();
   }
   a_memory_ars_reset(macp);

   /* Now we are ready to deal with the heap */
#ifdef HAVE_MEMORY_DEBUG
   c = s = 0;

   for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;){
      void *vp;

      vp = p.p_hc;
      ++c;
      s += p.p_c->mc_size;
      p.p_hc = p.p_hc->mhc_next;
      free(vp);
   }
   a_memory_heap_free = NULL;

   if((n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)) && c > 0)
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
#endif
   NYD_LEAVE;
}

#ifndef HAVE_MEMORY_DEBUG
FL void *
n_alloc(size_t s){
   void *rv;
   NYD2_ENTER;

   if(s == 0)
      s = 1;
   if((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void *
n_realloc(void *vp, size_t s){
   void *rv;
   NYD2_ENTER;

   if(vp == NULL)
      rv = n_alloc(s);
   else{
      if(s == 0)
         s = 1;
      if((rv = realloc(vp, s)) == NULL)
         n_panic(_("no memory"));
   }
   NYD2_LEAVE;
   return rv;
}

FL void *
n_calloc(size_t nmemb, size_t size){
   void *rv;
   NYD2_ENTER;

   if(size == 0)
      size = 1;
   if((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));
   NYD2_LEAVE;
   return rv;
}

FL void
(n_free)(void *vp){
   NYD2_ENTER;
   free(vp);
   NYD2_LEAVE;
}
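
/* An aside on the "(n_free)(...)"-style definitions here and below (a sketch;
 * the concrete macro layer lives in the headers): parenthesizing the name
 * suppresses expansion of a like-named function-style macro, so that a header
 * can wrap call sites, e.g.
 *
 *    #define n_free(VP) (n_free)(VP, __FILE__, __LINE__)
 *
 * and callers transparently provide the debug file/line arguments while this
 * file still defines (and can call) the real function */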

#else /* !HAVE_MEMORY_DEBUG */

FL void *
(n_alloc)(size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_alloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(s)) == NULL)
      n_panic(_("no memory"));

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_realloc)(void *vp, size_t s n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
jforced:
      p.p_vp = (n_alloc)(s, mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   if(isbad)
      goto jforced;
   if(p.p_c->mc_isfree){
      n_err("n_realloc(): region freed!  At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jforced;
   }

   if(p.p_hc == a_memory_heap_list)
      a_memory_heap_list = p.p_hc->mhc_next;
   else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(s > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_realloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)s) == 0)
      s = 1;
   s += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)s;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
jleave:
   NYD2_LEAVE;
   return p.p_vp;
}

FL void *
(n_calloc)(size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   ui32_t user_s;
   NYD2_ENTER;

   if(nmemb == 0)
      nmemb = 1;
   if(size > UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE)
      n_panic("n_calloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if((user_s = (ui32_t)size) == 0)
      size = 1;
   if((UI32_MAX - sizeof(struct a_memory_heap_chunk) - a_MEMORY_HOPE_SIZE) /
         nmemb < size)
      n_panic("n_calloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size *= nmemb;
   size += sizeof(struct a_memory_heap_chunk) + a_MEMORY_HOPE_SIZE;

   if((p.p_vp = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_vp, 0, size);

   p.p_hc->mhc_prev = NULL;
   if((p.p_hc->mhc_next = a_memory_heap_list) != NULL)
      a_memory_heap_list->mhc_prev = p.p_hc;

   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = (user_s > 0) ? user_s *= nmemb : 0;
   p.p_c->mc_size = (ui32_t)size;

   a_memory_heap_list = p.p_hc++;
   a_MEMORY_HOPE_SET(p_hc, p);

   ++a_memory_heap_aall;
   ++a_memory_heap_acur;
   a_memory_heap_amax = n_MAX(a_memory_heap_amax, a_memory_heap_acur);
   a_memory_heap_mall += user_s;
   a_memory_heap_mcur += user_s;
   a_memory_heap_mmax = n_MAX(a_memory_heap_mmax, a_memory_heap_mcur);
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_free)(void *vp n_MEMORY_DEBUG_ARGS){
   union a_memory_ptr p;
   bool_t isbad;
   NYD2_ENTER;

   if((p.p_vp = vp) == NULL){
      n_err("n_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
      goto jleave;
   }

   a_MEMORY_HOPE_GET(p_hc, p, isbad);
   if(p.p_c->mc_isfree){
      n_err("n_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }

   if(p.p_hc == a_memory_heap_list){
      if((a_memory_heap_list = p.p_hc->mhc_next) != NULL)
         a_memory_heap_list->mhc_prev = NULL;
   }else
      p.p_hc->mhc_prev->mhc_next = p.p_hc->mhc_next;
   if(p.p_hc->mhc_next != NULL)
      p.p_hc->mhc_next->mhc_prev = p.p_hc->mhc_prev;

   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(vp, 0377, p.p_c->mc_user_size);

   --a_memory_heap_acur;
   a_memory_heap_mcur -= p.p_c->mc_user_size;

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      p.p_hc->mhc_next = a_memory_heap_free;
      a_memory_heap_free = p.p_hc;
   }else
      free(p.p_vp);
jleave:
   NYD2_LEAVE;
}
#endif /* HAVE_MEMORY_DEBUG */

FL void
n_memory_autorec_fixate(void){
   struct a_memory_ars_buffer *mabp;
   NYD_ENTER;

   for(mabp = a_memory_ars_global.mac_top; mabp != NULL; mabp = mabp->mab_last)
      mabp->mab_bot = mabp->mab_caster;
   for(mabp = a_memory_ars_global.mac_full; mabp != NULL; mabp = mabp->mab_last)
      mabp->mab_bot = mabp->mab_caster;
   NYD_LEAVE;
}

FL void
n_memory_autorec_push(void *vp){
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   macp = vp;
   memset(macp, 0, sizeof *macp);
   macp->mac_outer = a_memory_ars_top;
   a_memory_ars_top = macp;
   NYD_LEAVE;
}

FL void
n_memory_autorec_pop(void *vp){
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_ctx *macp;
   NYD_ENTER;

   if((macp = vp) == NULL)
      macp = &a_memory_ars_global;
   else{
      /* XXX May not be ARS top upon jump */
      while(a_memory_ars_top != macp){
         DBG( n_err("ARS pop %p to reach freed context\n", a_memory_ars_top); )
         n_memory_autorec_pop(a_memory_ars_top);
      }
      a_memory_ars_top = macp->mac_outer;
   }

   a_memory_ars_reset(macp);
   assert(macp->mac_full == NULL);
   assert(macp->mac_huge == NULL);

   for(mabp = macp->mac_top; mabp != NULL;){
      vp = mabp;
      mabp = mabp->mab_last;
      free(vp);
   }

   /* We (may) have kept one buffer for our pseudo alloca(3) */
   if(macp->mac_lofi != NULL){
      assert(macp->mac_lofi->mal_last == NULL);
      free(macp->mac_lofi);
#ifdef HAVE_MEMORY_DEBUG
      --a_memory_lofi_bcur;
#endif
   }

   memset(macp, 0, sizeof *macp);
   NYD_LEAVE;
}

FL void *
n_memory_autorec_current(void){
   return (a_memory_ars_top != NULL ? a_memory_ars_top : &a_memory_ars_global);
}

FL void *
(n_autorec_alloc)(void *vp, size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   union{
      struct a_memory_ars_buffer *abp;
      struct a_memory_ars_huge *ahp;
   } m, m2;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = vp) == NULL && (macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
#ifdef HAVE_MEMORY_DEBUG
   size += sizeof(struct a_memory_chunk) + a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_ARS_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(size > a_MEMORY_ARS_MAX)){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_autorec_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
      goto jhuge;
   }

   /* Search for a buffer with enough free space to serve request */
   for(m2.abp = NULL, m.abp = macp->mac_top; m.abp != NULL;
         m2.abp = m.abp, m.abp = m.abp->mab_last){
      if((p.p_cp = m.abp->mab_caster) <=
            &m.abp->mab_buf[sizeof(m.abp->mab_buf) - size]){
         /* Alignment is the one thing, the other is what is usually allocated,
          * and here about 40 bytes seems to be a good cut to avoid non-usable
          * casters.  Reown buffers supposed to be "full" to .mac_full */
         if(n_UNLIKELY((m.abp->mab_caster = &p.p_cp[size]) >=
               &m.abp->mab_buf[sizeof(m.abp->mab_buf) - 42])){
            if(m2.abp == NULL)
               macp->mac_top = m.abp->mab_last;
            else
               m2.abp->mab_last = m.abp->mab_last;
            m.abp->mab_last = macp->mac_full;
            macp->mac_full = m.abp;
         }
         goto jleave;
      }
   }

   /* Need a new buffer XXX "page" pool */
   m.abp = n_alloc(sizeof *m.abp);
   m.abp->mab_last = macp->mac_top;
   m.abp->mab_caster = &(m.abp->mab_bot = m.abp->mab_buf)[size];
   m.abp->mab_relax = NULL; /* Thus indicates allocation after srelax_hold() */
   macp->mac_top = m.abp;
   p.p_cp = m.abp->mab_bot;

#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_ball;
   ++a_memory_ars_bcur;
   a_memory_ars_bmax = n_MAX(a_memory_ars_bmax, a_memory_ars_bcur);
#endif

jleave:
#ifdef HAVE_MEMORY_DEBUG
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_c;
   a_MEMORY_HOPE_SET(p_c, p);

   ++a_memory_ars_aall;
   a_memory_ars_mall += user_s;
#endif
   NYD2_LEAVE;
   return p.p_vp;

jhuge:
   m.ahp = n_alloc(n_VSTRUCT_SIZEOF(struct a_memory_ars_huge, mah_buf) + size);
   m.ahp->mah_last = macp->mac_huge;
   macp->mac_huge = m.ahp;
   p.p_cp = m.ahp->mah_buf;
#ifdef HAVE_MEMORY_DEBUG
   ++a_memory_ars_hall;
   ++a_memory_ars_hcur;
   a_memory_ars_hmax = n_MAX(a_memory_ars_hmax, a_memory_ars_hcur);
#endif
   goto jleave;
}

FL void *
(n_autorec_calloc)(void *vp, size_t nmemb, size_t size n_MEMORY_DEBUG_ARGS){
   void *rv;
   NYD2_ENTER;

   size *= nmemb; /* XXX overflow, but only used for struct inits */
   rv = (n_autorec_alloc)(vp, size n_MEMORY_DEBUG_ARGSCALL);
   memset(rv, 0, size);
   NYD2_LEAVE;
   return rv;
}

FL void
srelax_hold(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if(macp->mac_recur++ == 0){
      struct a_memory_ars_buffer *mabp;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = mabp->mab_caster;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_hold(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
srelax_rele(void){
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);

   if(--macp->mac_recur == 0){
      struct a_memory_ars_buffer *mabp;

      macp->mac_recur = 1;
      srelax();
      macp->mac_recur = 0;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last)
         mabp->mab_relax = NULL;
   }
#ifdef HAVE_DEVEL
   else
      n_err("srelax_rele(): recursion >0\n");
#endif
   NYD2_LEAVE;
}

FL void
srelax(void){
   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system.  We are presumably in
    * an iteration over all messages of a mailbox, and it would be quite
    * counterproductive to give the system allocator a chance to waste time */
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   assert(macp->mac_recur > 0);
   n_memory_check();

   if(macp->mac_recur == 1){
      struct a_memory_ars_buffer *mabp, *x, *y;

      /* Buffers in the full list may become usable again! */
      for(x = NULL, mabp = macp->mac_full; mabp != NULL; mabp = y){
         y = mabp->mab_last;

         if(mabp->mab_relax == NULL ||
               mabp->mab_relax < &mabp->mab_buf[sizeof(mabp->mab_buf) - 42]){
            if(x == NULL)
               macp->mac_full = y;
            else
               x->mab_last = y;
            mabp->mab_last = macp->mac_top;
            macp->mac_top = mabp;
         }else
            x = mabp;
      }

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         mabp->mab_caster = (mabp->mab_relax != NULL)
               ? mabp->mab_relax : mabp->mab_bot;
#ifdef HAVE_MEMORY_DEBUG
         memset(mabp->mab_caster, 0377,
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
#endif
      }
   }
   NYD2_LEAVE;
}
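
/* A sketch of the intended srelax() usage pattern (the caller shown is
 * hypothetical; the real iterations live in the message handling code):
 *
 *    srelax_hold();                 // snapshot the current caster positions
 *    for(mp = first; mp != NULL; mp = next_message(mp)){
 *       process(mp);                // may n_autorec_alloc() a lot
 *       srelax();                   // drop back to the snapshot positions
 *    }
 *    srelax_rele();                 // end relaxation, clear .mab_relax
 */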

FL void *
(n_lofi_alloc)(size_t size n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   ui32_t user_s;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_lofi *malp;
   bool_t isheap;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

#ifdef HAVE_MEMORY_DEBUG
   user_s = (ui32_t)size;
#endif
   if(size == 0)
      ++size;
   size += sizeof(struct a_memory_ars_lofi_chunk);
#ifdef HAVE_MEMORY_DEBUG
   size += a_MEMORY_HOPE_SIZE;
#endif
   size = a_MEMORY_LOFI_ROUNDUP(size);

   /* Huge allocations are special */
   if(n_UNLIKELY(isheap = (size > a_MEMORY_LOFI_MAX))){
#ifdef HAVE_MEMORY_DEBUG
      n_alert("n_lofi_alloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);
#endif
   }else if((malp = macp->mac_lofi) != NULL &&
         ((p.p_cp = malp->mal_caster) <= &malp->mal_max[-size])){
      malp->mal_caster = &p.p_cp[size];
      goto jleave;
   }

   /* Need a new buffer */
   /* C99 */{
      size_t i;

      i = n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf) + size;
      i = n_MAX(i, n_MEMORY_AUTOREC_SIZE);
      malp = n_alloc(i);
      malp->mal_last = macp->mac_lofi;
      malp->mal_caster = &malp->mal_buf[size];
      i -= n_VSTRUCT_SIZEOF(struct a_memory_ars_lofi, mal_buf);
      malp->mal_max = &malp->mal_buf[i];
      macp->mac_lofi = malp;
      p.p_cp = malp->mal_buf;

#ifdef HAVE_MEMORY_DEBUG
      ++a_memory_lofi_ball;
      ++a_memory_lofi_bcur;
      a_memory_lofi_bmax = n_MAX(a_memory_lofi_bmax, a_memory_lofi_bcur);
#endif
   }

jleave:
   p.p_alc->malc_last = macp->mac_lofi_top;
   macp->mac_lofi_top = p.p_alc;
   if(isheap)
      p.p_alc->malc_last = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)p.p_alc->malc_last | 0x1);

#ifndef HAVE_MEMORY_DEBUG
   ++p.p_alc;
#else
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_user_size = user_s;
   p.p_c->mc_size = (ui32_t)size;
   ++p.p_alc;
   a_MEMORY_HOPE_SET(p_alc, p);

   ++a_memory_lofi_aall;
   ++a_memory_lofi_acur;
   a_memory_lofi_amax = n_MAX(a_memory_lofi_amax, a_memory_lofi_acur);
   a_memory_lofi_mall += user_s;
   a_memory_lofi_mcur += user_s;
   a_memory_lofi_mmax = n_MAX(a_memory_lofi_mmax, a_memory_lofi_mcur);
#endif
   NYD2_LEAVE;
   return p.p_vp;
}

FL void
(n_lofi_free)(void *vp n_MEMORY_DEBUG_ARGS){
#ifdef HAVE_MEMORY_DEBUG
   bool_t isbad;
#endif
   union a_memory_ptr p;
   struct a_memory_ars_ctx *macp;
   NYD2_ENTER;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   if((p.p_vp = vp) == NULL){
#ifdef HAVE_MEMORY_DEBUG
      n_err("n_lofi_free(NULL) from %s, line %d\n", mdbg_file, mdbg_line);
#endif
      goto jleave;
   }

#ifdef HAVE_MEMORY_DEBUG
   a_MEMORY_HOPE_GET(p_alc, p, isbad);
   if(p.p_c->mc_isfree){
      n_err("n_lofi_free(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
   p.p_c->mc_isfree = TRU1;
   memset(vp, 0377, p.p_c->mc_user_size);

   if(p.p_alc != macp->mac_lofi_top){
      n_err("n_lofi_free(): this is not alloca top at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);
      goto jleave;
   }
#endif /* HAVE_MEMORY_DEBUG */

   a_memory_lofi_free(macp, --p.p_alc);
jleave:
   NYD2_LEAVE;
}

#ifdef HAVE_MEMORY_DEBUG
FL int
c_memtrace(void *vp){
   /* For a_MEMORY_HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_lofi *malp;
   struct a_memory_ars_ctx *macp;
   bool_t isbad;
   union a_memory_ptr p, xp;
   size_t lines;
   FILE *fp;
   NYD_ENTER;

   if((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL){
      n_perr("tmpfile", 0);
      goto jleave;
   }
   lines = 0;

   fprintf(fp,
      "Last-Out-First-In (alloca) storage:\n"
      "       Buffer cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "        Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_lofi_bcur, a_memory_lofi_bmax, a_memory_lofi_ball,
      a_memory_lofi_acur, a_memory_lofi_amax, a_memory_lofi_aall,
      a_memory_lofi_mcur, a_memory_lofi_mmax, a_memory_lofi_mall);
   lines += 7;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, "  Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(malp = macp->mac_lofi; malp != NULL;){
         fprintf(fp, "    Buffer %p%s, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)malp, ((uintptr_t)malp->mal_last & 0x1 ? " (huge)" : ""),
            PTR2SIZE(malp->mal_caster - &malp->mal_buf[0]),
            PTR2SIZE(malp->mal_max - malp->mal_caster));
         ++lines;
         malp = malp->mal_last;
         malp = (struct a_memory_ars_lofi*)((uintptr_t)malp & ~1);
      }

      for(malcp = macp->mac_lofi_top; malcp != NULL;){
         p.p_alc = malcp;
         malcp = (struct a_memory_ars_lofi_chunk*)
               ((uintptr_t)malcp->malc_last & ~0x1);
         xp = p;
         ++xp.p_alc;
         a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
         fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (LOFI): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         ++lines;
      }
   }

   fprintf(fp,
      "\nAuto-reclaimed storage:\n"
      "           Buffers cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Huge allocations cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "  Allocations all: %" PRIuZ ", Bytes all: %" PRIuZ "\n\n",
      a_memory_ars_bcur, a_memory_ars_bmax, a_memory_ars_ball,
      a_memory_ars_hcur, a_memory_ars_hmax, a_memory_ars_hall,
      a_memory_ars_aall, a_memory_ars_mall);
   lines += 7;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;
   for(; macp != NULL; macp = macp->mac_outer){
      fprintf(fp, "  Evaluation stack context %p (outer: %p):\n",
         (void*)macp, (void*)macp->mac_outer);
      ++lines;

      for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, "    Buffer %p, %" PRIuZ "/%" PRIuZ " used/free:\n",
            (void*)mabp,
            PTR2SIZE(mabp->mab_caster - &mabp->mab_buf[0]),
            PTR2SIZE(&mabp->mab_buf[sizeof(mabp->mab_buf)] - mabp->mab_caster));
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, top): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }

      for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
         fprintf(fp, "    Buffer %p, full:\n", (void*)mabp);
         ++lines;

         for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
               ++lines, p.p_cp += p.p_c->mc_size){
            xp = p;
            ++xp.p_c;
            a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
            fprintf(fp, "      %s%p (%u bytes): %s, line %u\n",
               (isbad ? "! CANARY ERROR (ARS, full): " : ""), xp.p_vp,
               p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   fprintf(fp,
      "\nHeap memory buffers:\n"
      "  Allocation cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      "       Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_memory_heap_acur, a_memory_heap_amax, a_memory_heap_aall,
      a_memory_heap_mcur, a_memory_heap_mmax, a_memory_heap_mall);
   lines += 4;

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL;
         ++lines, p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      fprintf(fp, "  %s%p (%u bytes): %s, line %u\n",
         (isbad ? "! CANARY ERROR (heap): " : ""), xp.p_vp,
         p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
   }

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      fprintf(fp, "Heap buffers lingering for free():\n");
      ++lines;

      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            ++lines, p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         fprintf(fp, "  %s%p (%u bytes): %s, line %u\n",
            (isbad ? "! CANARY ERROR (free): " : ""), xp.p_vp,
            p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   page_or_print(fp, lines);
   Fclose(fp);
   vp = NULL;
jleave:
   NYD_LEAVE;
   return (vp != NULL);
}

FL bool_t
n__memory_check(char const *mdbg_file, int mdbg_line){
   union a_memory_ptr p, xp;
   struct a_memory_ars_buffer *mabp;
   struct a_memory_ars_lofi_chunk *malcp;
   struct a_memory_ars_ctx *macp;
   bool_t anybad, isbad;
   NYD_ENTER;

   anybad = FAL0;

   if((macp = a_memory_ars_top) == NULL)
      macp = &a_memory_ars_global;

   /* Alloca */

   for(malcp = macp->mac_lofi_top; malcp != NULL;){
      p.p_alc = malcp;
      malcp = (struct a_memory_ars_lofi_chunk*)
            ((uintptr_t)malcp->malc_last & ~0x1);
      xp = p;
      ++xp.p_alc;
      a_MEMORY_HOPE_GET_TRACE(p_alc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (LOFI): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   /* Auto-reclaimed */

   for(mabp = macp->mac_top; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, top): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   for(mabp = macp->mac_full; mabp != NULL; mabp = mabp->mab_last){
      for(p.p_cp = mabp->mab_buf; p.p_cp < mabp->mab_caster;
            p.p_cp += p.p_c->mc_size){
         xp = p;
         ++xp.p_c;
         a_MEMORY_HOPE_GET_TRACE(p_c, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (ARS, full): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   /* Heap */

   for(p.p_hc = a_memory_heap_list; p.p_hc != NULL; p.p_hc = p.p_hc->mhc_next){
      xp = p;
      ++xp.p_hc;
      a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
      if(isbad){
         anybad = TRU1;
         n_err(
            "! CANARY ERROR (heap): %p (%u bytes): %s, line %u\n",
            xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
      }
   }

   if(n_poption & (n_PO_DEBUG | n_PO_MEMDEBUG)){
      for(p.p_hc = a_memory_heap_free; p.p_hc != NULL;
            p.p_hc = p.p_hc->mhc_next){
         xp = p;
         ++xp.p_hc;
         a_MEMORY_HOPE_GET_TRACE(p_hc, xp, isbad);
         if(isbad){
            anybad = TRU1;
            n_err(
               "! CANARY ERROR (free): %p (%u bytes): %s, line %u\n",
               xp.p_vp, p.p_c->mc_user_size, p.p_c->mc_file, p.p_c->mc_line);
         }
      }
   }

   if(anybad && ok_blook(memdebug))
      n_panic("Memory errors encountered");
   NYD_LEAVE;
   return anybad;
}
#endif /* HAVE_MEMORY_DEBUG */