2 * tc.alloc.c (Caltech) 2/21/82
3 * Chris Kingsley, kingsley@cit-20.
5 * This is a very fast storage allocator. It allocates blocks of a small
6 * number of different sizes, and keeps free lists of each size. Blocks that
7 * don't exactly fit are passed up to the next larger size. In this
8 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
9 * This is designed for use in a program that uses vast quantities of memory,
10 * but bombs when it runs out.
13 * Copyright (c) 1980, 1991 The Regents of the University of California.
14 * All rights reserved.
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
44 #if defined(HAVE_SBRK) && !defined(__APPLE__)
51 static char *memtop
= NULL
; /* PWP: top of current memory */
52 static char *membot
= NULL
; /* PWP: bottom of allocatable memory */
57 # define malloc fmalloc
59 # define calloc fcalloc
60 # define realloc frealloc
61 #endif /* WINNT_NATIVE */
63 #if !defined(DEBUG) || defined(SYSMALLOC)
67 static const char msg
[] = "Out of memory\n";
69 TCSH_IGNORE(write(didfds
? 2 : SHDIAG
, msg
, strlen(msg
)));
80 * Lots of os routines are busted and try to free invalid pointers.
81 * Although our free routine is smart enough and it will pick bad
82 * pointers most of the time, in cases where we know we are going to get
83 * a bad pointer, we'd rather leak.
90 typedef unsigned char U_char
; /* we don't really have signed chars */
91 typedef unsigned int U_int
;
92 typedef unsigned short U_short
;
93 typedef unsigned long U_long
;
97 * The overhead on a block is at least 4 bytes. When free, this space
98 * contains a pointer to the next free block, and the bottom two bits must
99 * be zero. When in use, the first byte is set to MAGIC, and the second
100 * byte is the size index. The remaining bytes are for alignment.
101 * If range checking is enabled and the size of the block fits
102 * in two bytes, then the top two bytes hold the size of the requested block
103 * plus the range checking words, and the header word MINUS ONE.
107 #define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)
110 union overhead
*ov_next
; /* when free */
112 U_char ovu_magic
; /* magic number */
113 U_char ovu_index
; /* bucket # */
115 U_short ovu_size
; /* actual block size */
116 U_int ovu_rmagic
; /* range magic number */
119 #define ov_magic ovu.ovu_magic
120 #define ov_index ovu.ovu_index
121 #define ov_size ovu.ovu_size
122 #define ov_rmagic ovu.ovu_rmagic
125 #define MAGIC 0xfd /* magic # on accounting info */
126 #define RMAGIC 0x55555555 /* magic # on range info */
128 #define RSLOP sizeof (U_int)
141 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
142 * smallest allocatable block is 8 bytes. The overhead information
143 * precedes the data area returned to the user.
145 #define NBUCKETS ((sizeof(long) << 3) - 3)
146 static union overhead
*nextf
[NBUCKETS
] IZERO_STRUCT
;
149 * nmalloc[i] is the difference between the number of mallocs and frees
150 * for a given block size.
152 static U_int nmalloc
[NBUCKETS
] IZERO_STRUCT
;
155 static int findbucket (union overhead
*, int);
156 static void morecore (int);
161 # define CHECK(a, str, p) \
164 xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
168 # define CHECK(a, str, p) \
171 xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
177 malloc(size_t nbytes
)
185 * Convert amount of memory requested into closest block size stored in
186 * hash buckets which satisfies request. Account for space used per block
191 * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
192 * so we get one more...
193 * From Michael Schroeder: This is not true. It depends on the
194 * timezone string. In Europe it can overwrite the 13th byte on a
196 * So we punt and we always allocate an extra byte.
201 nbytes
= MEMALIGN(MEMALIGN(sizeof(union overhead
)) + nbytes
+ RSLOP
);
202 shiftr
= (nbytes
- 1) >> 2;
204 /* apart from this loop, this is O(1) */
205 while ((shiftr
>>= 1) != 0)
208 * If nothing in hash bucket right now, request more memory from the
211 if (nextf
[bucket
] == NULL
)
213 if ((p
= nextf
[bucket
]) == NULL
) {
219 xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes
);
223 return ((memalign_t
) 0);
225 /* remove from linked list */
226 nextf
[bucket
] = nextf
[bucket
]->ov_next
;
228 p
->ov_index
= bucket
;
232 * Record allocated size of block and bound space with magic numbers.
234 p
->ov_size
= (p
->ov_index
<= 13) ? (U_short
)nbytes
- 1 : 0;
235 p
->ov_rmagic
= RMAGIC
;
236 *((U_int
*) (((caddr_t
) p
) + nbytes
- RSLOP
)) = RMAGIC
;
238 return ((memalign_t
) (((caddr_t
) p
) + MEMALIGN(sizeof(union overhead
))));
241 return ((memalign_t
) 0);
243 return ((memalign_t
) 0);
249 * Allocate more memory to the indicated bucket.
255 int rnu
; /* 2^rnu bytes will be requested */
256 int nblks
; /* become nblks blocks of the desired size */
262 * Ensure memory is allocated on a page boundary. Should make getpagesize
265 op
= (union overhead
*) sbrk(0);
266 memtop
= (char *) op
;
269 if ((long) op
& 0x3ff) {
270 memtop
= sbrk((int) (1024 - ((long) op
& 0x3ff)));
271 memtop
+= (long) (1024 - ((long) op
& 0x3ff));
274 /* take 2k unless the block is bigger than that */
275 rnu
= (bucket
<= 8) ? 11 : bucket
+ 3;
276 nblks
= 1 << (rnu
- (bucket
+ 3)); /* how many blocks to get */
277 memtop
= sbrk(1 << rnu
); /* PWP */
278 op
= (union overhead
*) memtop
;
282 memtop
+= (long) (1 << rnu
);
284 * Round up to minimum allocation size boundary and deduct from block count
287 if (((U_long
) op
) & ROUNDUP
) {
288 op
= (union overhead
*) (((U_long
) op
+ (ROUNDUP
+ 1)) & ~ROUNDUP
);
292 * Add new memory allocated to that on free list for this hash bucket.
295 siz
= 1 << (bucket
+ 3);
296 while (--nblks
> 0) {
297 op
->ov_next
= (union overhead
*) (((caddr_t
) op
) + siz
);
298 op
= (union overhead
*) (((caddr_t
) op
) + siz
);
313 * the don't free flag is there so that we avoid os bugs in routines
314 * that free invalid pointers!
316 if (cp
== NULL
|| dont_free
)
318 CHECK(!memtop
|| !membot
,
319 CGETS(19, 2, "free(%p) called before any allocations."), cp
);
320 CHECK(cp
> (ptr_t
) memtop
,
321 CGETS(19, 3, "free(%p) above top of memory."), cp
);
322 CHECK(cp
< (ptr_t
) membot
,
323 CGETS(19, 4, "free(%p) below bottom of memory."), cp
);
324 op
= (union overhead
*) (((caddr_t
) cp
) - MEMALIGN(sizeof(union overhead
)));
325 CHECK(op
->ov_magic
!= MAGIC
,
326 CGETS(19, 5, "free(%p) bad block."), cp
);
329 if (op
->ov_index
<= 13)
330 CHECK(*(U_int
*) ((caddr_t
) op
+ op
->ov_size
+ 1 - RSLOP
) != RMAGIC
,
331 CGETS(19, 6, "free(%p) bad range check."), cp
);
333 CHECK(op
->ov_index
>= NBUCKETS
,
334 CGETS(19, 7, "free(%p) bad block index."), cp
);
336 op
->ov_next
= nextf
[size
];
348 calloc(size_t i
, size_t j
)
356 /* Stop gcc 5.x from optimizing malloc+memset = calloc */
360 return ((memalign_t
) cp
);
363 return ((memalign_t
) 0);
365 return ((memalign_t
) 0);
370 * When a program attempts "storage compaction" as mentioned in the
371 * old malloc man page, it realloc's an already freed block. Usually
372 * this is the last block it freed; occasionally it might be farther
373 * back. We have to search all the free lists for the block in order
374 * to determine its bucket: 1st we make one pass thru the lists
375 * checking only the first block in each; if that fails we search
376 * ``realloc_srchlen'' blocks in each list for a match (the variable
377 * is extern so the caller can modify it). If that fails we just copy
378 * however many bytes was given to realloc() and hope it's not huge.
381 /* 4 should be plenty, -1 =>'s whole list */
382 static int realloc_srchlen
= 4;
386 realloc(ptr_t cp
, size_t nbytes
)
396 return (malloc(nbytes
));
397 op
= (union overhead
*) (((caddr_t
) cp
) - MEMALIGN(sizeof(union overhead
)));
398 if (op
->ov_magic
== MAGIC
) {
404 * Already free, doing "compaction".
406 * Search for the old block of memory on the free list. First, check the
407 * most common case (last element free'd), then (this failing) the last
408 * ``realloc_srchlen'' items free'd. If all lookups fail, then assume
409 * the size of the memory block being realloc'd is the smallest
412 if ((i
= findbucket(op
, 1)) < 0 &&
413 (i
= findbucket(op
, realloc_srchlen
)) < 0)
416 onb
= MEMALIGN(nbytes
+ MEMALIGN(sizeof(union overhead
)) + RSLOP
);
418 /* avoid the copy if same size block */
419 if (was_alloced
&& (onb
<= (U_int
) (1 << (i
+ 3))) &&
420 (onb
> (U_int
) (1 << (i
+ 2)))) {
422 /* JMR: formerly this wasn't updated ! */
423 nbytes
= MEMALIGN(MEMALIGN(sizeof(union overhead
))+nbytes
+RSLOP
);
424 *((U_int
*) (((caddr_t
) op
) + nbytes
- RSLOP
)) = RMAGIC
;
425 op
->ov_rmagic
= RMAGIC
;
426 op
->ov_size
= (op
->ov_index
<= 13) ? (U_short
)nbytes
- 1 : 0;
428 return ((memalign_t
) cp
);
430 if ((res
= malloc(nbytes
)) == NULL
)
431 return ((memalign_t
) NULL
);
432 if (cp
!= res
) { /* common optimization */
434 * christos: this used to copy nbytes! It should copy the
435 * smaller of the old and new size
437 onb
= (1 << (i
+ 3)) - MEMALIGN(sizeof(union overhead
)) - RSLOP
;
438 (void) memmove(res
, cp
, onb
< nbytes
? onb
: nbytes
);
442 return ((memalign_t
) res
);
445 return ((memalign_t
) 0);
447 return ((memalign_t
) 0);
452 * On linux, _nss_nis_setnetgrent() calls this function to determine
453 * the usable size of the pointer passed, but this is not a portable
454 * API, so we cannot use our malloc replacement without providing one.
455 * Thanks a lot glibc!
459 #elif defined(__DragonFly__)
460 #define M_U_S_CONST const
464 size_t malloc_usable_size(M_U_S_CONST
void *);
466 malloc_usable_size(M_U_S_CONST
void *ptr
)
468 const union overhead
*op
= (const union overhead
*)
469 (((const char *) ptr
) - MEMALIGN(sizeof(*op
)));
470 if (op
->ov_magic
== MAGIC
)
471 return 1 << (op
->ov_index
+ 3);
479 * Search ``srchlen'' elements of each free list for a block whose
480 * header starts at ``freep''. If srchlen is -1 search the whole list.
481 * Return bucket number, or -1 if not found.
484 findbucket(union overhead
*freep
, int srchlen
)
490 for (i
= 0; i
< NBUCKETS
; i
++) {
492 for (p
= nextf
[i
]; p
&& j
!= srchlen
; p
= p
->ov_next
) {
504 #else /* SYSMALLOC */
507 ** ``Protected versions'' of malloc, realloc, calloc, and free
511 ** 1. malloc(0) is bad
513 ** 3. realloc(0, n) is bad
514 ** 4. realloc(n, 0) is bad
516 ** Also we call our error routine if we run out of memory.
528 #endif /* USE_SBRK */
530 if ((ptr
= malloc(n
)) == NULL
)
533 if (memtop
< ((char *) ptr
) + n
)
534 memtop
= ((char *) ptr
) + n
;
537 #endif /* !USE_SBRK */
538 return ((memalign_t
) ptr
);
542 srealloc(ptr_t p
, size_t n
)
551 #endif /* USE_SBRK */
553 if ((ptr
= (p
? realloc(p
, n
) : malloc(n
))) == NULL
)
556 if (memtop
< ((char *) ptr
) + n
)
557 memtop
= ((char *) ptr
) + n
;
560 #endif /* !USE_SBRK */
561 return ((memalign_t
) ptr
);
565 scalloc(size_t s
, size_t n
)
575 #endif /* USE_SBRK */
577 if ((ptr
= malloc(n
)) == NULL
)
583 if (memtop
< ((char *) ptr
) + n
)
584 memtop
= ((char *) ptr
) + n
;
587 #endif /* !USE_SBRK */
589 return ((memalign_t
) ptr
);
599 #endif /* SYSMALLOC */
602 * mstats - print out statistics about malloc
604 * Prints two lines of numbers, one showing the length of the free list
605 * for each size category, the second showing the number of mallocs -
606 * frees for each size category.
610 showall(Char
**v
, struct command
*c
)
615 int totfree
= 0, totused
= 0;
617 xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname
);
618 for (i
= 0; i
< NBUCKETS
; i
++) {
619 for (j
= 0, p
= nextf
[i
]; p
; p
= p
->ov_next
, j
++)
622 totfree
+= j
* (1 << (i
+ 3));
624 xprintf("\n%s:\t", CGETS(19, 9, "used"));
625 for (i
= 0; i
< NBUCKETS
; i
++) {
626 xprintf(" %4d", nmalloc
[i
]);
627 totused
+= nmalloc
[i
] * (1 << (i
+ 3));
629 xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
631 xprintf(CGETS(19, 11,
632 "\tAllocated memory from 0x%lx to 0x%lx. Real top at 0x%lx\n"),
633 (unsigned long) membot
, (unsigned long) memtop
,
634 (unsigned long) sbrk(0));
635 #else /* SYSMALLOC */
636 #if !defined(HAVE_MALLINFO) && !defined(HAVE_MALLINFO2)
639 #endif /* USE_SBRK */
640 xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
641 (unsigned long) membot
, (unsigned long) memtop
,
642 (unsigned long) (memtop
- membot
));
644 # if defined(HAVE_MALLINFO2)
653 xprintf(CGETS(19, 13, "%s current memory allocation:\n"), progname
);
654 xprintf(CGETS(19, 14, "Total space allocated from system: %zu\n"),
656 xprintf(CGETS(19, 15, "Number of non-inuse chunks: %zu\n"),
658 xprintf(CGETS(19, 16, "Number of mmapped regions: %zu\n"),
660 xprintf(CGETS(19, 17, "Total space in mmapped regions: %zu\n"),
662 xprintf(CGETS(19, 18, "Total allocated space: %zu\n"),
663 (size_t)mi
.uordblks
);
664 xprintf(CGETS(19, 19, "Total non-inuse space: %zu\n"),
665 (size_t)mi
.fordblks
);
666 xprintf(CGETS(19, 20, "Top-most, releasable space: %zu\n"),
667 (size_t)mi
.keepcost
);
668 #endif /* HAVE_MALLINFO || HAVE_MALLINFO2 */
669 #endif /* SYSMALLOC */
675 /* jemalloc defines these */
/*
 * NOTE(review): no-op replacements for jemalloc's fork-synchronization
 * hooks. Because this file supplies its own malloc()/free(), the real
 * jemalloc implementations of these symbols are not linked in; these
 * empty stubs presumably satisfy the pthread_atfork-style references on
 * platforms whose libc is jemalloc-based — TODO confirm on FreeBSD/
 * DragonFly builds. Prototypes precede the definitions to silence
 * missing-prototype warnings.
 */
676 void _malloc_prefork(void);
677 void _malloc_postfork(void);
678 void _malloc_postfork_child(void);
679 void _malloc_prefork(void) {}
680 void _malloc_postfork(void) {}
681 void _malloc_postfork_child(void) {}