/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"
typedef POINTER_TYPE *POINTER;
/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();
#ifdef DOUG_LEA_MALLOC
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */
#else /* not emacs */

typedef void *POINTER;

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;
static void r_alloc_init ();
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;
/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
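
/* Worked example of the rounding macros (illustrative values, not from
   this file): assuming page_size == 4096 and MEM_ALIGN == 8,
   ROUNDUP (5000) yields 8192, ROUNDUP (4096) stays 4096, and
   MEM_ROUNDUP (13) yields 16.  */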
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif
/***********************************************************************
			 Implementation using sbrk
 ***********************************************************************/
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
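
/* Layout sketch of one heap (illustrative, not to scale):

     start            bloc_start                           free       end
       |  malloc data     | bloc 1 | bloc 2 | ... | bloc N |  unused  |

   Growing a bloc shifts every bloc after it upward in memory.  */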
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;
#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */
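
/* For example, with three live blocs the abutment invariant above reads
   first_bloc->data + first_bloc->size == first_bloc->next->data, and
   likewise for each following pair, so shifting the chain is one
   safe_bcopy per bloc with no gaps to account for.  */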
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.  It's the caller's responsibility to keep
   track of what space is in use.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
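
/* For instance (an illustrative call, not from this file): obtain
   (break_value, 4096) returns an address within some heap at which 4096
   contiguous bytes fit, growing the last heap via real_morecore if our
   reserve is short; a second, identical call before anything else moves
   typically returns the same address.  */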
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
		   - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return NIL_BLOC;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  next = before;
  next->prev = bloc;
  bloc->next = next;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
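
/* Illustrative consequence of the hysteresis (a sketch, not a precise
   account): a request for SIZE bytes that cannot be satisfied from the
   current heaps obtains roughly ROUNDUP (SIZE) plus extra_bytes and
   page_size from the system, so a run of small morecore calls triggers
   at most one relocation of the blocs.  */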
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;

	  last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
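
/* Hypothetical usage sketch (caller names invented for illustration):

     POINTER buf;
     if (r_alloc (&buf, 1024) == 0)
       handle_out_of_memory ();   hypothetical error handler
     ...ralloc may later move the data and update buf in place...
     r_re_alloc (&buf, 4096);     grow; data may relocate, buf follows
     r_alloc_free (&buf);         release; buf is set to 0
*/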
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	  else
	    *ptr = bloc->data;
	}
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	{
	  if (!(*b)->variable)
	    free_bloc (*b);
	  else
	    b = &(*b)->next;
	}
    }
}
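
/* Illustrative pairing of the two calls (hypothetical caller):

     r_alloc_freeze (4096);   blocs hold still; room is made for about
                              4096 bytes of non-relocatable heap
     ...code that keeps raw pointers into bloc data...
     r_alloc_thaw ();         relocation permitted again; blocs freed
                              while frozen are reclaimed here
*/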
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/***********************************************************************
			    Initialization
 ***********************************************************************/
/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */