/*
 * Copyright (c) 2005 by Internet Systems Consortium, Inc. ("ISC")
 * Copyright (c) 1997,1999 by Internet Software Consortium.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* When this symbol is defined, allocations via memget are made slightly
   bigger and some debugging info is stuck before and after the region given
   back to the caller. */
/* #define DEBUGGING_MEMCLUSTER */
#define MEMCLUSTER_ATEND
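
/*
 * When MEMCLUSTER_ATEND is defined, __memput_record() (below) appends a
 * freed block to the tail of its free list instead of pushing it onto the
 * head, so recently freed memory is reused last.
 */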

#if !defined(LINT) && !defined(CODECENTER)
static const char rcsid[] = "$Id: memcluster.c,v 1.11 2006/08/30 23:34:38 marka Exp $";
#endif /* not lint */
30 #include "port_before.h"
32 #include <sys/types.h>
34 #include <sys/param.h>
37 #include <netinet/in.h>
38 #include <arpa/inet.h>
39 #include <arpa/nameser.h>
47 #include <isc/memcluster.h>
48 #include <isc/assertions.h>
50 #include "port_after.h"

#ifdef MEMCLUSTER_RECORD
#ifndef DEBUGGING_MEMCLUSTER
#define DEBUGGING_MEMCLUSTER
#endif
#endif

#define DEF_MAX_SIZE		1100
#define DEF_MEM_TARGET		4096

typedef u_int32_t fence_t;

typedef struct memcluster_element {
	void *			next;
#if defined(DEBUGGING_MEMCLUSTER)
#if defined(MEMCLUSTER_RECORD)
	const char *		file;
	int			line;
#endif
	size_t			size;
	fence_t			fencepost;
#endif
} memcluster_element;

#define SMALL_SIZE_LIMIT sizeof(memcluster_element)
#define P_SIZE sizeof(void *)
#define FRONT_FENCEPOST 0xfebafeba
#define BACK_FENCEPOST 0xabefabef
#define FENCEPOST_SIZE 4
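
/*
 * Sketch of a DEBUGGING_MEMCLUSTER allocation as handed out by
 * __memget_record() below (sizes not to scale):
 *
 *	+----------------------+---------------------+------------------+
 *	| memcluster_element   | caller's data       | back fencepost   |
 *	| header (fencepost =  | ("size" bytes)      | (fence_t copy of |
 *	|  FRONT_FENCEPOST)    |                     |  BACK_FENCEPOST) |
 *	+----------------------+---------------------+------------------+
 *	^e                     ^pointer returned to the caller
 */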

#ifndef MEMCLUSTER_LITTLE_MALLOC
#define MEMCLUSTER_BIG_MALLOC 1
#define NUM_BASIC_BLOCKS 64
#endif

struct stats {
	u_long			gets;
	u_long			totalgets;
	u_long			blocks;
	u_long			freefrags;
};

#ifdef DO_PTHREADS
#include <pthread.h>
static pthread_mutex_t	memlock = PTHREAD_MUTEX_INITIALIZER;
#define MEMLOCK		(void)pthread_mutex_lock(&memlock)
#define MEMUNLOCK	(void)pthread_mutex_unlock(&memlock)
#else
/*
 * Catch bad lock usage in non threaded build.
 */
static unsigned int	memlock = 0;
#define MEMLOCK		do { INSIST(memlock == 0); memlock = 1; } while (0)
#define MEMUNLOCK	do { INSIST(memlock == 1); memlock = 0; } while (0)
#endif	/* DO_PTHREADS */

static size_t			max_size;
static size_t			mem_target;
#ifndef MEMCLUSTER_BIG_MALLOC
static size_t			mem_target_half;
static size_t			mem_target_fudge;
#endif
static memcluster_element **	freelists;
#ifdef MEMCLUSTER_RECORD
static memcluster_element **	activelists;
#endif
#ifdef MEMCLUSTER_BIG_MALLOC
static memcluster_element *	basic_blocks;
#endif
static struct stats *		stats;

static size_t			quantize(size_t);
#if defined(DEBUGGING_MEMCLUSTER)
static void			check(unsigned char *, int, size_t);
#endif

int
meminit(size_t init_max_size, size_t target_size) {

#if defined(DEBUGGING_MEMCLUSTER)
	INSIST(sizeof(fence_t) == FENCEPOST_SIZE);
#endif
	if (freelists != NULL) {
		errno = EEXIST;
		return (-1);
	}
	if (init_max_size == 0U)
		max_size = DEF_MAX_SIZE;
	else
		max_size = init_max_size;
	if (target_size == 0U)
		mem_target = DEF_MEM_TARGET;
	else
		mem_target = target_size;
#ifndef MEMCLUSTER_BIG_MALLOC
	mem_target_half = mem_target / 2;
	mem_target_fudge = mem_target + mem_target / 4;
#endif
	freelists = malloc(max_size * sizeof (memcluster_element *));
	stats = malloc((max_size + 1) * sizeof (struct stats));
	if (freelists == NULL || stats == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	memset(freelists, 0,
	       max_size * sizeof (memcluster_element *));
	memset(stats, 0, (max_size + 1) * sizeof (struct stats));
#ifdef MEMCLUSTER_RECORD
	activelists = malloc((max_size + 1) * sizeof (memcluster_element *));
	if (activelists == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	memset(activelists, 0,
	       (max_size + 1) * sizeof (memcluster_element *));
#endif
#ifdef MEMCLUSTER_BIG_MALLOC
	basic_blocks = NULL;
#endif
	return (0);
}
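
/*
 * Typical use of this allocator (illustrative sketch only; the memget()
 * and memput() wrappers are assumed to be supplied by <isc/memcluster.h>):
 *
 *	if (meminit(0, 0) == -1)        <- 0,0 selects DEF_MAX_SIZE and
 *		return (-1);               DEF_MEM_TARGET
 *	p = memget(len);
 *	if (p == NULL)
 *		return (-1);
 *	...
 *	memput(p, len);                 <- must pass the same "len" that
 *	memstats(stdout);                  was given to memget()
 */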

void *
__memget(size_t size) {
	return (__memget_record(size, NULL, 0));
}

void *
__memget_record(size_t size, const char *file, int line) {
	size_t new_size = quantize(size);
#if defined(DEBUGGING_MEMCLUSTER)
	memcluster_element *e;
	char *p;
	fence_t fp = BACK_FENCEPOST;
#endif
	void *ret;

	MEMLOCK;

#if !defined(MEMCLUSTER_RECORD)
	UNUSED(file);
	UNUSED(line);
#endif
	if (freelists == NULL) {
		if (meminit(0, 0) == -1) {
			MEMUNLOCK;
			return (NULL);
		}
	}
	if (size == 0U) {
		MEMUNLOCK;
		errno = EINVAL;
		return (NULL);
	}
	if (size >= max_size || new_size >= max_size) {
		/* memget() was called on something beyond our upper limit. */
		stats[max_size].gets++;
		stats[max_size].totalgets++;
#if defined(DEBUGGING_MEMCLUSTER)
		e = malloc(new_size);
		if (e == NULL) {
			MEMUNLOCK;
			errno = ENOMEM;
			return (NULL);
		}
		e->size = size;
#ifdef MEMCLUSTER_RECORD
		e->file = file;
		e->line = line;
		e->next = activelists[max_size];
		activelists[max_size] = e;
#endif
		MEMUNLOCK;
		e->fencepost = FRONT_FENCEPOST;
		p = (char *)e + sizeof *e + size;
		memcpy(p, &fp, sizeof fp);
		return ((char *)e + sizeof *e);
#else
		MEMUNLOCK;
		return (malloc(size));
#endif
	}

	/*
	 * If there are no blocks in the free list for this size, get a chunk
	 * of memory and then break it up into "new_size"-sized blocks, adding
	 * them to the free list.
	 */
	if (freelists[new_size] == NULL) {
		int i, frags;
		size_t total_size;
		void *new;
		char *curr, *next;

#ifdef MEMCLUSTER_BIG_MALLOC
		if (basic_blocks == NULL) {
			new = malloc(NUM_BASIC_BLOCKS * mem_target);
			if (new == NULL) {
				MEMUNLOCK;
				errno = ENOMEM;
				return (NULL);
			}
			curr = new;
			next = curr + mem_target;
			for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
				((memcluster_element *)curr)->next = next;
				curr = next;
				next += mem_target;
			}
			/*
			 * curr is now pointing at the last block in the
			 * array.
			 */
			((memcluster_element *)curr)->next = NULL;
			basic_blocks = new;
		}
		total_size = mem_target;
		new = basic_blocks;
		basic_blocks = basic_blocks->next;
#else
		if (new_size > mem_target_half)
			total_size = mem_target_fudge;
		else
			total_size = mem_target;
		new = malloc(total_size);
		if (new == NULL) {
			MEMUNLOCK;
			errno = ENOMEM;
			return (NULL);
		}
#endif

		frags = total_size / new_size;
		stats[new_size].blocks++;
		stats[new_size].freefrags += frags;
		/* Set up a linked-list of blocks of size "new_size". */
		curr = new;
		next = curr + new_size;
		for (i = 0; i < (frags - 1); i++) {
#if defined (DEBUGGING_MEMCLUSTER)
			memset(curr, 0xa5, new_size);
#endif
			((memcluster_element *)curr)->next = next;
			curr = next;
			next += new_size;
		}
		/* curr is now pointing at the last block in the array. */
#if defined (DEBUGGING_MEMCLUSTER)
		memset(curr, 0xa5, new_size);
#endif
		((memcluster_element *)curr)->next = freelists[new_size];
		freelists[new_size] = new;
	}
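
	/*
	 * Example with the defaults (illustrative): mem_target = 4096 and a
	 * request that quantizes to new_size = 64 carves a block into
	 * frags = 4096 / 64 = 64 fragments, all of which start out on
	 * freelists[64] and are counted as freefrags.
	 */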

	/* The free list uses the "rounded-up" size "new_size". */
#if defined (DEBUGGING_MEMCLUSTER)
	e = freelists[new_size];
	ret = (char *)e + sizeof *e;
	/*
	 * Check to see if this buffer has been written to while on free list.
	 */
	check(ret, 0xa5, new_size - sizeof *e);
	/*
	 * Mark memory we are returning.
	 */
	memset(ret, 0xe5, size);
#else
	ret = freelists[new_size];
#endif
	freelists[new_size] = freelists[new_size]->next;
#if defined(DEBUGGING_MEMCLUSTER)
	e->size = size;
	e->fencepost = FRONT_FENCEPOST;
#ifdef MEMCLUSTER_RECORD
	e->file = file;
	e->line = line;
	e->next = activelists[size];
	activelists[size] = e;
#endif
	p = (char *)e + sizeof *e + size;
	memcpy(p, &fp, sizeof fp);
#endif

	/*
	 * The stats[] uses the _actual_ "size" requested by the
	 * caller, with the caveat (in the code above) that "size" >= the
	 * max. size (max_size) ends up getting recorded as a call to
	 * max_size.
	 */
	stats[size].gets++;
	stats[size].totalgets++;
	stats[new_size].freefrags--;
	MEMUNLOCK;
#if defined(DEBUGGING_MEMCLUSTER)
	return ((char *)e + sizeof *e);
#else
	return (ret);
#endif
}

/*
 * This is a call from an external caller,
 * so we want to count this as a user "put".
 */
void
__memput(void *mem, size_t size) {
	__memput_record(mem, size, NULL, 0);
}

void
__memput_record(void *mem, size_t size, const char *file, int line) {
	size_t new_size = quantize(size);
#if defined (DEBUGGING_MEMCLUSTER)
	memcluster_element *e;
	memcluster_element *el;
#ifdef MEMCLUSTER_RECORD
	memcluster_element *prev;
#endif
	fence_t fp;
	char *p;
#endif

	MEMLOCK;

#if !defined (MEMCLUSTER_RECORD)
	UNUSED(file);
	UNUSED(line);
#endif

	REQUIRE(freelists != NULL);
#if defined (DEBUGGING_MEMCLUSTER)
	e = (memcluster_element *) ((char *)mem - sizeof *e);
	INSIST(e->fencepost == FRONT_FENCEPOST);
	INSIST(e->size == size);
	p = (char *)e + sizeof *e + size;
	memcpy(&fp, p, sizeof fp);
	INSIST(fp == BACK_FENCEPOST);
	INSIST(((u_long)mem % 4) == 0);
#ifdef MEMCLUSTER_RECORD
	prev = NULL;
	if (size == max_size || new_size >= max_size)
		el = activelists[max_size];
	else
		el = activelists[size];
	while (el != NULL && el != e) {
		prev = el;
		el = el->next;
	}
	INSIST(el != NULL);	/*%< double free */
	if (prev == NULL) {
		if (size == max_size || new_size >= max_size)
			activelists[max_size] = el->next;
		else
			activelists[size] = el->next;
	} else
		prev->next = el->next;
#endif
#endif

	if (size == max_size || new_size >= max_size) {
		/* memput() called on something beyond our upper limit */
#if defined(DEBUGGING_MEMCLUSTER)
		free(e);
#else
		free(mem);
#endif

		INSIST(stats[max_size].gets != 0U);
		stats[max_size].gets--;
		MEMUNLOCK;
		return;
	}

	/* The free list uses the "rounded-up" size "new_size": */
#if defined(DEBUGGING_MEMCLUSTER)
	memset(mem, 0xa5, new_size - sizeof *e); /*%< catch write after free */
	e->size = 0;	/*%< catch double memput() */
#ifdef MEMCLUSTER_RECORD
	e->file = file;
	e->line = line;
#endif
#ifdef MEMCLUSTER_ATEND
	e->next = NULL;
	el = freelists[new_size];
	while (el != NULL && el->next != NULL)
		el = el->next;
	if (el != NULL)
		el->next = e;
	else
		freelists[new_size] = e;
#else
	e->next = freelists[new_size];
	freelists[new_size] = (void *)e;
#endif
#else
	((memcluster_element *)mem)->next = freelists[new_size];
	freelists[new_size] = (memcluster_element *)mem;
#endif

	/*
	 * The stats[] uses the _actual_ "size" requested by the
	 * caller, with the caveat (in the code above) that "size" >= the
	 * max. size (max_size) ends up getting recorded as a call to
	 * max_size.
	 */
	INSIST(stats[size].gets != 0U);
	stats[size].gets--;
	stats[new_size].freefrags++;
	MEMUNLOCK;
}

void *
__memget_debug(size_t size, const char *file, int line) {
	void *ptr;

	ptr = __memget_record(size, file, line);
	fprintf(stderr, "%s:%d: memget(%lu) -> %p\n", file, line,
		(u_long)size, ptr);
	return (ptr);
}

void
__memput_debug(void *ptr, size_t size, const char *file, int line) {
	fprintf(stderr, "%s:%d: memput(%p, %lu)\n", file, line, ptr,
		(u_long)size);
	__memput_record(ptr, size, file, line);
}

/*
 * Print the stats[] on the stream "out" with suitable formatting.
 */
void
memstats(FILE *out) {
	size_t i;
#ifdef MEMCLUSTER_RECORD
	memcluster_element *e;
#endif

	MEMLOCK;

	if (freelists == NULL) {
		MEMUNLOCK;
		return;
	}
	for (i = 1; i <= max_size; i++) {
		const struct stats *s = &stats[i];

		if (s->totalgets == 0U && s->gets == 0U)
			continue;
		fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
			(i == max_size) ? ">=" : "  ",
			(unsigned long)i, s->totalgets, s->gets);
		if (s->blocks != 0U)
			fprintf(out, " (%lu bl, %lu ff)",
				s->blocks, s->freefrags);
		fputc('\n', out);
	}
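
	/*
	 * A line from the loop above might read (illustrative numbers only):
	 *
	 *	   64:         120 gets,           2 rem (3 bl, 190 ff)
	 *
	 * i.e. 120 total memget()s of size 64, 2 still outstanding, backed
	 * by 3 blocks with 190 free fragments remaining on that free list.
	 */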
#ifdef MEMCLUSTER_RECORD
	fprintf(out, "Active Memory:\n");
	for (i = 1; i <= max_size; i++) {
		if ((e = activelists[i]) != NULL)
			while (e != NULL) {
				fprintf(out, "%s:%d %p:%lu\n",
					e->file != NULL ? e->file :
					"<UNKNOWN>", e->line,
					(char *)e + sizeof *e,
					(u_long)e->size);
				e = e->next;
			}
	}
#endif
	MEMUNLOCK;
}

int
memactive(void) {
	size_t i;

	if (stats == NULL)
		return (0);
	for (i = 1; i <= max_size; i++)
		if (stats[i].gets != 0U)
			return (1);
	return (0);
}

/*
 * Round up size to a multiple of sizeof(void *).  This guarantees that a
 * block is at least sizeof (void *), and that we won't violate alignment
 * restrictions, both of which are needed to make lists of blocks.
 */
static size_t
quantize(size_t size) {
	int remainder;

	/*
	 * If there is no remainder for the integer division of size by
	 * P_SIZE, then we already have a good size; if not, then we need
	 * to round up the result in order to get a size big enough to
	 * satisfy the request _and_ aligned on P_SIZE boundaries.
	 */
	remainder = size % P_SIZE;
	if (remainder != 0)
		size += P_SIZE - remainder;
#if defined(DEBUGGING_MEMCLUSTER)
	return (size + SMALL_SIZE_LIMIT + sizeof (int));
#else
	return (size);
#endif
}
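
/*
 * Example (illustrative, assuming a platform where sizeof (void *) == 8):
 * quantize(13) rounds up to 16, so requests of 9 through 16 bytes all share
 * the same free list; with DEBUGGING_MEMCLUSTER defined, SMALL_SIZE_LIMIT +
 * sizeof (int) is added on top for the element header and back fencepost.
 */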

#if defined(DEBUGGING_MEMCLUSTER)
static void
check(unsigned char *a, int value, size_t len) {
	size_t i;

	for (i = 0; i < len; i++)
		INSIST(a[i] == value);
}
#endif