/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Memory management: malloc(), realloc(), free().
 *
 * The following #-parameters may be redefined:
 *	SEGMENTED: if defined, memory requests are assumed to be
 *		non-contiguous across calls of GETCORE's.
 *	GETCORE: a function to get more core memory. If not SEGMENTED,
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is (char *)(-1).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 * This algorithm is based on a best fit strategy with lists of
 * free elts maintained in a self-adjusting binary tree. Each list
 * contains all elts of the same size. The tree is ordered by size.
 * For results on self-adjusting trees, see the paper:
 *	Self-Adjusting Binary Search Trees,
 *	DD Sleator & RE Tarjan, JACM 1985.
 *
 * The header of a block contains the size of the data part in bytes.
 * Since the size of a block is 0%4, the low two bits of the header
 * are free and used as follows:
 *
 *	BIT0:	1 for busy (block is in use), 0 for free.
 *	BIT1:	if the block is busy, this bit is 1 if the
 *		preceding block in contiguous memory is free.
 *		Otherwise, it is always 0.
 */
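
/*
 * For example, a busy 16-byte block whose contiguous predecessor is
 * free carries the header word 16|BIT0|BIT1; clearing the two low
 * bits (CLRBITS01) recovers the plain byte count.
 */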

#include "lint.h"
#include "mallint.h"
#include "mtlib.h"

/*
 * Some abusers of the system (notably java1.2) acquire __malloc_lock
 * in order to prevent threads from holding it while they are being
 * suspended via thr_suspend() or thr_suspend_allmutators().
 * This never worked when alternate malloc() libraries were used
 * because they don't use __malloc_lock for their locking strategy.
 * We leave __malloc_lock as an external variable to satisfy these
 * old programs, but we define a new lock, private to libc, to do the
 * real locking: libc_malloc_lock.  This puts libc's malloc() package
 * on the same footing as all other malloc packages.
 */
mutex_t __malloc_lock = DEFAULTMUTEX;
mutex_t libc_malloc_lock = DEFAULTMUTEX;

static TREE	*Root,		/* root of the free tree */
		*Bottom,	/* the last free chunk in the arena */
		*_morecore(size_t);	/* function to get more core */

static char	*Baddr;		/* current high address of the arena */
static char	*Lfree;		/* last freed block with data intact */

static void	t_delete(TREE *);
static void	t_splay(TREE *);
static void	realfree(void *);
static void	cleanfree(void *);
static void	*_malloc_unlocked(size_t);

#define	FREESIZE (1<<5)	/* size for preserving free blocks until next malloc */
#define	FREEMASK (FREESIZE-1)

static void	*flist[FREESIZE];	/* list of blocks to be freed on next malloc */
static int	freeidx;		/* index of free blocks in flist % FREESIZE */
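
/*
 * flist and freeidx form a ring buffer of delayed frees: free() parks
 * a block here (and notes it in Lfree) and the next malloc()/realloc()
 * releases the parked blocks via cleanfree()/realfree().  See free().
 */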

/*
 * Interfaces used only by atfork_init() functions.
 */
void
malloc_locks(void)
{
	(void) mutex_lock(&libc_malloc_lock);
}

void
malloc_unlocks(void)
{
	(void) mutex_unlock(&libc_malloc_lock);
}

/*
 * Allocation of small blocks
 */
static TREE	*List[MINSIZE/WORDSIZE-1];	/* lists of small blocks */
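
/*
 * List[i] chains free blocks of size (i+1)*WORDSIZE; _smalloc() refills
 * an empty chain by carving NPS blocks out of one larger allocation.
 */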

static void *
_smalloc(size_t size)
{
	TREE	*tp;
	size_t	i;

	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */
	if (size == 0)
		size = WORDSIZE;

	/* list to use */
	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {
		TREE *np;
		int n;
		/* number of blocks to get at one time */
#define	NPS (WORDSIZE*8)
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((List[i] = _malloc_unlocked((size + WORDSIZE) * NPS)) == 0)
			return (0);

		/* make them into a link list */
		for (n = 0, np = List[i]; n < NPS; ++n) {
			tp = np;
			SIZE(tp) = size;
			np = NEXT(tp);
			AFTER(tp) = np;
		}
		AFTER(tp) = NULL;
	}

	/* allocate from the head of the queue */
	tp = List[i];
	List[i] = AFTER(tp);
	SETBIT0(SIZE(tp));
	return (DATA(tp));
}

void *
malloc(size_t size)
{
	void *ret;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}
	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	ret = _malloc_unlocked(size);
	(void) mutex_unlock(&libc_malloc_lock);
	return (ret);
}

static void *
_malloc_unlocked(size_t size)
{
	size_t	n;
	TREE	*tp, *sp;
	size_t	o_bit1;

	COUNT(nmalloc);
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* see if the last free block can be used */
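	/*
	 * Lfree is the most recently freed block and its contents are
	 * still intact.  On an exact size match (or a big-enough block
	 * for a non-small request) it is reclaimed directly: freeidx
	 * steps back one slot and the ring entry is cleared.
	 */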
	if (Lfree) {
		sp = BLOCK(Lfree);
		n = SIZE(sp);
		CLRBITS01(n);
		if (n == size) {
			/*
			 * exact match, use it as is
			 */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			return (DATA(sp));
		} else if (size >= MINSIZE && n > size) {
			/*
			 * got a big enough piece
			 */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			o_bit1 = SIZE(sp) & BIT1;
			SIZE(sp) = n;
			goto leftover;
		}
	}
	o_bit1 = 0;

	/* perform frees of space since last malloc */
	cleanfree(NULL);

	/* small blocks */
	if (size < MINSIZE)
		return (_smalloc(size));

	/* search for an elt of the right size */
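	/*
	 * Best-fit descent: a node large enough for the request is
	 * remembered in sp (n tracks the smallest such size seen) and
	 * the search continues left for a tighter fit; otherwise it
	 * goes right.  If nothing fits, the last node visited is
	 * splayed to the root so repeated failing searches stay cheap.
	 */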
	sp = NULL;
	n = 0;
	if (Root) {
		tp = Root;
		for (;;) {
			/* branch left */
			if (SIZE(tp) >= size) {
				if (n == 0 || n >= SIZE(tp)) {
					sp = tp;
					n = SIZE(tp);
				}
				if (LEFT(tp))
					tp = LEFT(tp);
				else
					break;
			} else { /* branch right */
				if (RIGHT(tp))
					tp = RIGHT(tp);
				else
					break;
			}
		}

		if (sp) {
			t_delete(sp);
		} else if (tp != Root) {
			/* make the searched-to element the root */
			t_splay(tp);
			Root = tp;
		}
	}

	/* if none in the tree fitted */
	if (!sp) {
		if (Bottom && size <= SIZE(Bottom)) {
			sp = Bottom;
			CLRBITS01(SIZE(sp));
		} else if ((sp = _morecore(size)) == NULL) /* no more memory */
			return (NULL);
	}

	/* tell the forward neighbor that we're busy */
	CLRBIT1(SIZE(NEXT(sp)));

	ASSERT(ISBIT0(SIZE(NEXT(sp))));

leftover:
	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		n -= WORDSIZE;
		SIZE(sp) = size;
		tp = NEXT(sp);
		SIZE(tp) = n|BIT0;
		realfree(DATA(tp));
	} else if (BOTTOM(sp))
		Bottom = NULL;

	/* return the allocated space */
	SIZE(sp) |= BIT0 | o_bit1;
	return (DATA(sp));
}

/*
 * realloc().
 *
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
void *
realloc(void *old, size_t size)
{
	TREE	*tp, *np;
	size_t	ts;
	char	*new;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}
	assert_no_libc_locks_held();
	COUNT(nrealloc);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* pointer to the block */
	(void) mutex_lock(&libc_malloc_lock);
	if (old == NULL) {
		new = _malloc_unlocked(size);
		(void) mutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/* perform frees of space since last malloc */
	cleanfree(old);

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	tp = BLOCK(old);
	ts = SIZE(tp);

	/* if the block was freed, data has been destroyed. */
	if (!ISBIT0(ts)) {
		(void) mutex_unlock(&libc_malloc_lock);
		return (NULL);
	}

	/* nothing to do if the size is unchanged */
	CLRBITS01(SIZE(tp));
	if (size == SIZE(tp)) {
		SIZE(tp) = ts;
		(void) mutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		/* free if size is zero */
		if (size == 0) {
			SETOLD01(SIZE(tp), ts);
			_free_unlocked(old);
			(void) mutex_unlock(&libc_malloc_lock);
			return (NULL);
		} else {
			goto call_malloc;
		}
	}

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		np = NEXT(tp);
		if (!ISBIT0(SIZE(np))) {
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			if (np != Bottom)
				t_delete(np);
			else
				Bottom = NULL;
			CLRBIT1(SIZE(NEXT(np)));
		}

#ifndef SEGMENTED
		/* not enough & at TRUE end of memory, try extending core */
		if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
			Bottom = tp;
			if ((tp = _morecore(size)) == NULL) {
				tp = Bottom;
				Bottom = NULL;
			}
		}
#endif
	}

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		size_t n;

chop_big:
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			n -= WORDSIZE;
			SIZE(tp) = size;
			np = NEXT(tp);
			SIZE(np) = n|BIT0;
			realfree(DATA(np));
		} else if (BOTTOM(tp))
			Bottom = NULL;

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		(void) mutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* call malloc to get a new block */
call_malloc:
	SETOLD01(SIZE(tp), ts);
	if ((new = _malloc_unlocked(size)) != NULL) {
		CLRBITS01(ts);
		if (ts > size)
			ts = size;
		MEMCOPY(new, old, ts);
		_free_unlocked(old);
		(void) mutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	CLRBITS01(SIZE(tp));
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp)) {			/* case 1. */
			SETOLD01(SIZE(tp), ts);
			(void) mutex_unlock(&libc_malloc_lock);
			return (old);
		} else if (size < MINSIZE) {		/* case 2. */
			size = MINSIZE;
			goto call_malloc;
		}
	} else if (size < MINSIZE) {			/* case 3. */
		size = MINSIZE;
		goto chop_big;
	} else if (ISBIT1(ts) &&			/* case 4. */
	    (SIZE(np = LAST(tp)) + SIZE(tp) + WORDSIZE) >= size) {
		ASSERT(!ISBIT0(SIZE(np)));
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		/*
		 * Since the copy may overlap, use memmove() if available.
		 * Otherwise, copy by hand.
		 */
		(void) memmove(DATA(np), old, SIZE(tp));
		old = DATA(np);
		tp = np;
		CLRBIT1(ts);
		goto chop_big;
	}
	SETOLD01(SIZE(tp), ts);
	(void) mutex_unlock(&libc_malloc_lock);
	return (NULL);
}

/*
 * realfree().
 *
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying. This strategy does not guarantee the amortized
 * O(nlogn) behaviour for the insert/delete/find set of operations
 * on the tree. In practice, however, free is much more infrequent
 * than malloc/realloc and the tree searches performed by these
 * functions adequately keep the tree in balance.
 */
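
/*
 * Free blocks of equal size are chained off a single tree node as a
 * doubly linked list (LINKFOR/LINKBAK); members other than the head
 * are marked with SETNOTREE and carry no tree pointers of their own.
 */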
static void
realfree(void *old)
{
	TREE	*tp, *sp, *np;
	size_t	ts, size;

	COUNT(nfree);

	/* pointer to the block */
	tp = BLOCK(old);
	ts = SIZE(tp);
	if (!ISBIT0(ts))
		return;
	CLRBITS01(SIZE(tp));

	/* small block, put it in the right linked list */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = List[ts];
		List[ts] = tp;
		return;
	}

	/* see if coalescing with next block is warranted */
	np = NEXT(tp);
	if (!ISBIT0(SIZE(np))) {
		if (np != Bottom)
			t_delete(np);
		SIZE(tp) += SIZE(np) + WORDSIZE;
	}

	/* the same with the preceding block */
	if (ISBIT1(ts)) {
		np = LAST(tp);
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		tp = np;
	}

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* the last word of the block contains self's address */
	*(SELFP(tp)) = tp;

	/* set bottom block, or insert in the free tree */
	if (BOTTOM(tp))
		Bottom = tp;
	else {
		/* search for the place to insert */
		if (Root) {
			size = SIZE(tp);
			np = Root;
			for (;;) {
				if (SIZE(np) > size) {
					if (LEFT(np))
						np = LEFT(np);
					else {
						LEFT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else if (SIZE(np) < size) {
					if (RIGHT(np))
						np = RIGHT(np);
					else {
						RIGHT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else {
					if ((sp = PARENT(np)) != NULL) {
						if (np == LEFT(sp))
							LEFT(sp) = tp;
						else
							RIGHT(sp) = tp;
						PARENT(tp) = sp;
					} else
						Root = tp;

					/* insert to head of list */
					if ((sp = LEFT(np)) != NULL)
						PARENT(sp) = tp;
					LEFT(tp) = sp;

					if ((sp = RIGHT(np)) != NULL)
						PARENT(sp) = tp;
					RIGHT(tp) = sp;

					/* doubly link list */
					LINKFOR(tp) = np;
					LINKBAK(np) = tp;
					SETNOTREE(np);

					break;
				}
			}
		} else
			Root = tp;
	}

	/* tell next block that this one is free */
	SETBIT1(SIZE(NEXT(tp)));

	ASSERT(ISBIT0(SIZE(NEXT(tp))));
}

/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
static TREE *
_morecore(size_t size)
{
	TREE	*tp;
	ssize_t	n, offset;
	char	*addr;
	ssize_t	nsize;

	/* compute new amount of memory to get */
	tp = Bottom;
	n = (ssize_t)size + 2 * WORDSIZE;
	addr = GETCORE(0);

	if (addr == ERRCORE)
		return (NULL);

	/* need to pad size out so that addr is aligned */
	if ((((uintptr_t)addr) % ALIGN) != 0)
		offset = ALIGN - (uintptr_t)addr % ALIGN;
	else
		offset = 0;

#ifndef SEGMENTED
	/* if not segmented memory, what we need may be smaller */
	if (addr == Baddr) {
		n -= WORDSIZE;
		if (tp != NULL)
			n -= SIZE(tp);
	}
#endif

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	nsize = n + offset;

	/* check if nsize request could overflow in GETCORE */
	if ((size_t)nsize > MAX_MALLOC - (uintptr_t)addr) {
		errno = ENOMEM;
		return (NULL);
	}

	if ((size_t)nsize <= MAX_GETCORE) {
		if (GETCORE(nsize) == ERRCORE)
			return (NULL);
	} else {
		intptr_t	delta;
		/*
		 * The value required is too big for GETCORE() to deal with
		 * in one go, so use GETCORE() at most 2 times instead.
		 * Argument to GETCORE() must be multiple of ALIGN.
		 * If not, GETCORE(-MAX_GETCORE) will not return brk point
		 * to previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */
		delta = MAX_GETCORE;
		while (delta > 0) {
			if (GETCORE(delta) == ERRCORE) {
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
				return (NULL);
			}
			nsize -= MAX_GETCORE;
			delta = nsize;
		}
	}

	/* contiguous memory */
	if (addr == Baddr) {
		ASSERT(offset == 0);
		if (tp) {
			addr = (char *)tp;
			n += SIZE(tp) + 2 * WORDSIZE;
		} else {
			addr = Baddr - WORDSIZE;
			n += WORDSIZE;
		}
	} else
		addr += offset;

	/* new bottom address */
	Baddr = addr + n;

	/* new bottom block */
	tp = (TREE *)(uintptr_t)addr;
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
	}

	return (tp);
}

/*
 * Tree rotation functions (BU: bottom-up, TD: top-down)
 */
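
/*
 * In these macros y is a child of x and z a child of y.  LEFT1 and
 * RIGHT1 are single rotations; BULEFT2 and BURIGHT2 perform the two
 * bottom-up rotations of a zig-zig splay step; TDLEFT2 and TDRIGHT2
 * are top-down variants (t_delete() uses TDLEFT2 while walking down
 * the right spine of a subtree).
 */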

#define	LEFT1(x, y)	\
	if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
	if ((PARENT(y) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
		else RIGHT(PARENT(y)) = y;\
	LEFT(y) = x; PARENT(x) = y

#define	RIGHT1(x, y)	\
	if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
	if ((PARENT(y) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
		else RIGHT(PARENT(y)) = y;\
	RIGHT(y) = x; PARENT(x) = y

#define	BULEFT2(x, y, z)	\
	if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
	if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	LEFT(z) = y; PARENT(y) = z; LEFT(y) = x; PARENT(x) = y

#define	BURIGHT2(x, y, z)	\
	if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
	if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	RIGHT(z) = y; PARENT(y) = z; RIGHT(y) = x; PARENT(x) = y

#define	TDLEFT2(x, y, z)	\
	if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	PARENT(x) = z; LEFT(z) = x;

#define	TDRIGHT2(x, y, z)	\
	if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	PARENT(x) = z; RIGHT(z) = x;

/*
 * Delete a tree element
 */
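/*
 * A list member is simply unlinked.  Otherwise op is splayed to the
 * root and replaced either by the next same-size list member or, if
 * none, by the join of its two subtrees: the rightmost node of the
 * left subtree is rotated up and the right subtree is hung off it.
 */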
static void
t_delete(TREE *op)
{
	TREE	*tp, *sp, *gp;

	/* if this is a non-tree node */
	if (ISNOTREE(op)) {
		tp = LINKBAK(op);
		if ((sp = LINKFOR(op)) != NULL)
			LINKBAK(sp) = tp;
		LINKFOR(tp) = sp;
		return;
	}

	/* make op the root of the tree */
	if (PARENT(op))
		t_splay(op);

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		PARENT(tp) = NULL;
		if ((sp = LEFT(op)) != NULL)
			PARENT(sp) = tp;
		LEFT(tp) = sp;

		if ((sp = RIGHT(op)) != NULL)
			PARENT(sp) = tp;
		RIGHT(tp) = sp;

		Root = tp;
		return;
	}

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		PARENT(tp) = NULL;

		if (RIGHT(op)) {
			/* make the right-end of the left subtree its root */
			while ((sp = RIGHT(tp)) != NULL) {
				if ((gp = RIGHT(sp)) != NULL) {
					TDLEFT2(tp, sp, gp);
					tp = gp;
				} else {
					LEFT1(tp, sp);
					tp = sp;
				}
			}

			/* hook the right subtree of op to the above elt */
			RIGHT(tp) = RIGHT(op);
			PARENT(RIGHT(tp)) = tp;
		}
	} else if ((tp = RIGHT(op)) != NULL)	/* no left subtree */
		PARENT(tp) = NULL;

	Root = tp;
}

/*
 * Bottom up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */
static void
t_splay(TREE *tp)
{
	TREE	*pp, *gp;

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		/* grandparent of tp */
		gp = PARENT(pp);

		/* tp is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
			} else {
				RIGHT1(pp, tp);
			}
		} else {
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);
			} else {
				LEFT1(pp, tp);
			}
		}
	}
}

/*
 * free().
 * Performs a delayed free of the block pointed to
 * by old. The pointer to old is saved on a list, flist,
 * until the next malloc or realloc. At that time, all the
 * blocks pointed to in flist are actually freed via
 * realfree(). This allows the contents of free blocks to
 * remain undisturbed until the next malloc or realloc.
 */
void
free(void *old)
{
	if (!primary_link_map) {
		errno = ENOTSUP;
		return;
	}
	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	_free_unlocked(old);
	(void) mutex_unlock(&libc_malloc_lock);
}

void
_free_unlocked(void *old)
{
	int	i;

	if (old == NULL)
		return;

	/*
	 * Make sure the same data block is not freed twice.
	 * 3 cases are checked.  It returns immediately if any
	 * one of the conditions is true.
	 *	1. Last freed.
	 *	2. Not in use or freed already.
	 *	3. In the free list.
	 */
	if (old == Lfree)
		return;
	if (!ISBIT0(SIZE(BLOCK(old))))
		return;
	for (i = 0; i < freeidx; i++)
		if (old == flist[i])
			return;

	if (flist[freeidx] != NULL)
		realfree(flist[freeidx]);
	flist[freeidx] = Lfree = old;
	freeidx = (freeidx + 1) & FREEMASK;	/* one forward */
}

/*
 * cleanfree() frees all the blocks pointed to by flist.
 *
 * realloc() should work if it is called with a pointer
 * to a block that was freed since the last call to malloc() or
 * realloc().  If cleanfree() is called from realloc(), ptr
 * is set to the old block and that block should not be
 * freed since it is actually being reallocated.
 */
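/*
 * The loop below walks flist backwards from freeidx, wrapping at the
 * front of the array, and stops at the first empty slot; every entry
 * visited is cleared, and every one except ptr itself is released.
 */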
static void
cleanfree(void *ptr)
{
	char	**flp;

	flp = (char **)&(flist[freeidx]);
	for (;;) {
		if (flp == (char **)&(flist[0]))
			flp = (char **)&(flist[FREESIZE]);
		if (*--flp == NULL)
			break;
		if (*flp != ptr)
			realfree(*flp);
		*flp = NULL;
	}
	freeidx = 0;
	Lfree = NULL;
}