/* $Header: /src/pub/tcsh/tc.alloc.c,v 3.36 2002/03/08 17:36:47 christos Exp $ */
/*
 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 */
/*-
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"

RCSID("$Id: tc.alloc.c,v 3.36 2002/03/08 17:36:47 christos Exp $")
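/*
 * Illustration (editor's note, not in the original source): with 4 bytes of
 * per-block overhead, a 60-byte request is served from a 64-byte block
 * (2^6 - 4 usable bytes); with range checking enabled the larger header and
 * trailing check word leave 2^6 - 12 usable bytes.  Requests that do not fit
 * a size class exactly are simply rounded up to the next power of two.
 */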
static char *memtop = NULL;	/* PWP: top of current memory */
static char *membot = NULL;	/* PWP: bottom of allocatable memory */

int     dont_free = 0;		/* when set, free() becomes a no-op */
#if defined(_VMS_POSIX) || defined(_AMIGA_MEMORY)
# define NO_SBRK
#endif

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */
#ifndef SYSMALLOC

/*
 * Lots of os routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough and it will pick bad
 * pointers most of the time, in cases where we know we are going to get
 * a bad pointer, we'd rather leak.
 */
typedef unsigned char	U_char;	/* we don't really have signed chars */
typedef unsigned int	U_int;
typedef unsigned short	U_short;
typedef unsigned long	U_long;
/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */
#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)
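/*
 * Editor's note (illustrative, not in the original): ROUNDUP is the
 * alignment mask used by MEMALIGN and is assumed here to be 2^k - 1.
 * Taking ROUNDUP == 7 (8-byte alignment) as an example:
 *
 *	MEMALIGN(10) == (10 + 7) & ~7 == 17 & ~7 == 16
 *	MEMALIGN(16) == (16 + 7) & ~7 == 23 & ~7 == 16
 *
 * i.e. sizes are rounded up to the next multiple of ROUNDUP + 1.
 */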
union overhead {
    union overhead *ov_next;	/* when free */
    struct {
	U_char	ovu_magic;	/* magic number */
	U_char	ovu_index;	/* bucket # */
	U_short	ovu_size;	/* actual block size */
	U_int	ovu_rmagic;	/* range magic number */
    }       ovu;
#define ov_magic	ovu.ovu_magic
#define ov_index	ovu.ovu_index
#define ov_size		ovu.ovu_size
#define ov_rmagic	ovu.ovu_rmagic
};
#define MAGIC		0xfd		/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#define RSLOP		sizeof (U_int)
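/*
 * Layout sketch (editor's illustration, not in the original source).
 * A block handed out by malloc() below looks like this, where "data" is the
 * pointer the caller sees:
 *
 *	+------------------------------+---------------- - - -+--------+
 *	| union overhead               | user data            | RMAGIC |
 *	| ov_magic ov_index ov_size... |                       | (RSLOP)|
 *	+------------------------------+---------------- - - -+--------+
 *	^                              ^
 *	op                             data == op + MEMALIGN(sizeof(union overhead))
 *
 * free() recovers the header with the inverse step:
 *	op = (union overhead *) (((caddr_t) data) - MEMALIGN(sizeof(union overhead)));
 */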
/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define NBUCKETS ((sizeof(long) << 3) - 3)
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;
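/*
 * Editor's note (illustrative): the bucket -> block-size mapping implied by
 * the 2^(i+3) rule above is
 *
 *	i:          0    1    2    3    4     5     6   ...
 *	block size: 8   16   32   64  128   256   512   ...
 *
 * so with a 32-bit long NBUCKETS is 29 and the largest class is 2^31 bytes;
 * with a 64-bit long it is 61.
 */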
static int	findbucket	__P((union overhead *, int));
static void	morecore	__P((int));
#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot);	\
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot);	\
	return; \
    }
#endif
memalign_t
malloc(nbytes)
    register size_t nbytes;
{
#ifndef lint
    register union overhead *p;
    register int bucket = 0;
    register unsigned shiftr;
    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request.  Account for space used per block
     * for accounting.
     */
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;

    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
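    /*
     * Editor's worked example (illustrative, assuming for the sake of the
     * numbers that MEMALIGN(sizeof(union overhead)) == 8, RSLOP == 4 and
     * ROUNDUP == 7): a request for 100 bytes becomes
     *
     *	nbytes = 100 + 1 (extra byte)              = 101
     *	nbytes = MEMALIGN(8 + 101 + 4)             = 120
     *	shiftr = (120 - 1) >> 2 = 29 -> 14 -> 7 -> 3 -> 1 -> 0  => bucket = 4
     *
     * so the request is served from nextf[4], the 2^(4+3) = 128-byte class.
     */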
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = (union overhead *) nextf[bucket]) == NULL) {
	xprintf(CGETS(19, 1, "nbytes=%d: Out of memory\n"), nbytes);
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
    /*
     * Record allocated size of block and bound space with magic numbers.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
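#if 0
/*
 * Editor's sketch (not part of tcsh, disabled so it does not affect the
 * build): a minimal check of the invariants malloc() above establishes.
 * The helper name alloc_selftest is hypothetical.
 */
static void
alloc_selftest()
{
    char *cp = (char *) malloc(100);

    if (cp != NULL) {
	/* recover the overhead header exactly as free() does */
	union overhead *op = (union overhead *)
	    (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));

	if (op->ov_magic == MAGIC && op->ov_index < NBUCKETS)
	    free(cp);	/* block goes back onto nextf[op->ov_index] */
    }
}
#endif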
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(bucket)
    register int bucket;
{
    register union overhead *op;
    register int rnu;		/* 2^rnu bytes will be requested */
    register int nblks;		/* become nblks blocks of the desired size */
    register int siz;

    if (nextf[bucket])
	return;
    /*
     * Ensure memory is allocated on a page boundary.  Should make
     * getpagesize() call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;

    if ((long) op & 0x3ff) {
	memtop = (char *) sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
    memtop = (char *) sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room! */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to minimum allocation size boundary and deduct from block count
     * to reflect.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}
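/*
 * Editor's worked example (illustrative): for bucket 2 (32-byte blocks)
 * morecore() takes rnu = 11, i.e. sbrk(2048), and carves the new memory into
 * nblks = 1 << (11 - 5) = 64 blocks of 32 bytes, chained through ov_next
 * onto nextf[2].  For buckets above 8 exactly one block of 2^(bucket+3)
 * bytes is requested.
 */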
void
free(cp)
    ptr_t   cp;
{
    register U_int size;
    register union overhead *op;

    /*
     * the don't free flag is there so that we avoid os bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%lx) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%lx) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%lx) below bottom of memory."), cp);
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%lx) bad block."), cp);

    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%lx) bad range check."), cp);
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%lx) bad block index."), cp);
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;
    nmalloc[size]--;
}
memalign_t
calloc(i, j)
    size_t  i, j;
{
#ifndef lint
    register char *cp, *scp;

    i *= (size_t) j;
    scp = cp = (char *) xmalloc((size_t) i);
    if (i != 0)
	do
	    *cp++ = 0;
	while (--i);

    return ((memalign_t) scp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match.  If that fails
 * we just copy however many bytes was given to realloc() and hope it's
 * not huge.
 */
/* 4 should be plenty, -1 =>'s whole list */
static int     realloc_srchlen = 4;
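/*
 * Editor's note (illustrative): with the default realloc_srchlen of 4, a
 * realloc() of an already-freed pointer costs at most NBUCKETS probes on the
 * first pass (one list head per bucket) plus 4 * NBUCKETS probes on the
 * second, before falling back to treating the block as the smallest size.
 * Setting realloc_srchlen to -1 makes findbucket() walk each list to its end.
 */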
memalign_t
realloc(cp, nbytes)
    ptr_t   cp;
    size_t  nbytes;
{
#ifndef lint
    register U_int onb;
    union overhead *op;
    ptr_t res;
    register int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd.  If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove((ptr_t) res, (ptr_t) cp,
		       (size_t) (onb < nbytes ? onb : nbytes));
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
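/*
 * Editor's worked example (illustrative, assuming MEMALIGN(sizeof(union
 * overhead)) == 8, RSLOP == 4 and ROUNDUP == 7): a pointer obtained from a
 * 100-byte malloc() lives in bucket 4 (128-byte blocks).  realloc()ing it to
 * 110 bytes gives onb = MEMALIGN(110 + 8 + 4) = 128, which satisfies
 * 64 < onb <= 128, so the same pointer is returned and only the size and
 * range-check words are refreshed.  Growing it to 130 bytes gives
 * onb = 144 > 128, so a new block is malloc()ed, the old contents copied,
 * and the old block freed.
 */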
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(freep, srchlen)
    union overhead *freep;
    int     srchlen;
{
    register union overhead *p;
    register int i, j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);
	    j++;
	}
    }
    return (-1);
}

#else /* SYSMALLOC */
/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free.
 **
 ** On many systems:
 **
 ** 1. malloc(0) is bad
 ** 2. free(0) is bad
 ** 3. realloc(0, n) is bad
 ** 4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/
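/*
 * Editor's summary (illustrative) of how the wrappers below enforce the
 * rules above: a size of 0 is bumped to 1 before calling the system
 * allocator, a NULL pointer passed to the realloc wrapper degenerates to a
 * plain malloc, and an allocation failure raises the shell's error routine
 * (stderror) instead of handing NULL back to the caller.
 */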
memalign_t
smalloc(n)
    size_t  n;
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifndef NO_SBRK
    if (membot == NULL)
	membot = (char*) sbrk(0);
#endif /* !NO_SBRK */

    if ((ptr = malloc(n)) == (ptr_t) 0) {
	stderror(ERR_NOMEM);
    }
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = (char*) ptr;
    return ((memalign_t) ptr);
}
memalign_t
srealloc(p, n)
    ptr_t   p;
    size_t  n;
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifndef NO_SBRK
    if (membot == NULL)
	membot = (char*) sbrk(0);
#endif /* !NO_SBRK */

    if ((ptr = (p ? realloc(p, n) : malloc(n))) == (ptr_t) 0) {
	stderror(ERR_NOMEM);
    }
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = (char*) ptr;
    return ((memalign_t) ptr);
}
memalign_t
scalloc(s, n)
    size_t  s, n;
{
    char   *sptr;
    size_t  i;
    ptr_t   ptr;

    n *= s;
    n = n ? n : 1;

#ifndef NO_SBRK
    if (membot == NULL)
	membot = (char*) sbrk(0);
#endif /* !NO_SBRK */

    if ((ptr = malloc(n)) == (ptr_t) 0) {
	stderror(ERR_NOMEM);
    }

    /* calloc semantics: hand back zeroed memory */
    sptr = (char *) ptr;
    for (i = 0; i < n; i++)
	*sptr++ = 0;

    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = (char*) ptr;

    return ((memalign_t) ptr);
}

void
sfree(p)
    ptr_t   p;
{
    if (p && !dont_free)
	free(p);
}
#endif /* SYSMALLOC */
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
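/*
 * Editor's note (illustrative): the report printed below looks roughly like
 *
 *	tcsh current memory allocation:
 *	free:	   0    3    1    0 ...
 *	used:	  17   42    9    2 ...
 *		Total in use: 1224, total free: 80
 *
 * where column i corresponds to the 2^(i+3)-byte bucket; the numbers shown
 * here are made up for the sake of the example.
 */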
/*ARGSUSED*/
void
showall(v, c)
    Char  **v;
    struct command *c;
{
#ifndef SYSMALLOC
    register int i, j;
    register union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4d", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf(CGETS(19, 9, "\nused:\t"));
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4u", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else /* SYSMALLOC */
#ifndef NO_SBRK
    memtop = (char *) sbrk(0);
#endif /* !NO_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}