/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;
/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();
#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */
#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;
static void r_alloc_init ();
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;
/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;
/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
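
/* Worked example for the rounding macros (illustrative only, assuming
   page_size == 4096 and MEM_ALIGN == 8): ROUNDUP (4097) yields 8192
   while ROUNDUP (4096) stays 4096; MEM_ROUNDUP (13) yields 16; and
   ALIGNED (8192) is true because the low bits are all zero.  */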
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
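
/* Sketch of one heap's layout (illustrative only; addresses grow to
   the right):

     start         bloc_start              free              end
       | malloc data |  relocatable blocs   |  unused reserve  |

   The space from `free' to `end' has been obtained from the system
   but not yet handed out to malloc or to any bloc.  */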
typedef struct heap
{
  struct heap *next, *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;
#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;
/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  Such blocs may only appear while r_alloc_freeze > 0,
   and will be freed when the arena is thawed.  Currently these blocs are
   not reusable while the arena is frozen, which is very inefficient.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;
#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
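
/* Illustration of the abutment invariant above, with hypothetical
   numbers: if bloc A has A->data == 0x1000 and A->size == 0x200, then
   A->next->data must be 0x1200.  Freeing or resizing a bloc therefore
   slides all later blocs so the chain stays densely packed.  */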
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
static int use_relocatable_buffers;
/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;
  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();
  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }
  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;
      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;
	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;
	  address = bloc_start;
	  already_available = 0;
	}
      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
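
/* Usage sketch for obtain (illustrative only): get_bloc below calls

     new_bloc->data = obtain (break_value, size);

   asking for SIZE bytes at the current end of ralloc data.  obtain
   either satisfies this from an existing heap's reserve or grows the
   last heap (possibly creating a new one) via real_morecore; the
   caller must then advance break_value itself, since obtain does not
   track what is in use.  */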
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;
  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }
  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;
      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}
      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */
/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return NIL_BLOC;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;
  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;
  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;
  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;
  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}
      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}
      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
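
/* Note on the two-phase relocation protocol (descriptive comment
   only): relocate_blocs merely plans, storing each bloc's destination
   in new_data.  A separate loop in the callers, of the shape

     for (b = last_bloc; b != NIL_BLOC; b = b->prev)
       {
         safe_bcopy (b->data, b->new_data, b->size);
         *b->variable = b->data = b->new_data;
       }

   performs the copies and updates the owner's pointer through
   b->variable.  Separating planning from copying lets a caller abort
   cleanly when obtain fails, before any bytes have moved.  */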
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;
  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }
  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}
      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }
  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;
  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;
  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }
  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);
== first_bloc
&& bloc
== last_bloc
)
738 first_bloc
= last_bloc
= NIL_BLOC
;
740 else if (bloc
== last_bloc
)
742 last_bloc
= bloc
->prev
;
743 last_bloc
->next
= NIL_BLOC
;
745 else if (bloc
== first_bloc
)
747 first_bloc
= bloc
->next
;
748 first_bloc
->prev
= NIL_BLOC
;
752 bloc
->next
->prev
= bloc
->prev
;
753 bloc
->prev
->next
= bloc
->next
;
  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);
      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}
      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}
      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;
	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;
	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;

	  last_heap = first_heap;
	}
      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;
      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return virtual_break_value;
}
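
/* Behavior sketch for r_alloc_sbrk (illustrative only).  It mimics
   sbrk: a positive SIZE grows the malloc region, relocating blocs
   upward when they are in the way; a negative SIZE shrinks it,
   sliding blocs downward and possibly returning pages to the system
   via relinquish; a SIZE of 0 just reports the current break:

     POINTER brk = r_alloc_sbrk (0);     query the virtual break
     r_alloc_sbrk (16 * 1024);           grow malloc space by 16K
     r_alloc_sbrk (-8 * 1024);           give 8K back
*/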
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
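
/* Usage sketch (illustrative only; `buf' is a hypothetical caller
   variable, not part of this file):

     POINTER buf;
     if (r_alloc (&buf, 1024) == 0)
       ...handle allocation failure...
     ...use the 1024 bytes at buf...
     r_free (&buf);

   The contract is that the caller always reaches the data through the
   registered variable: the allocator may move the bloc at any
   allocation point and rewrite buf through the stored POINTER *, so
   cached copies of the old address become stale.  */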
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_free (ptr);
      return r_alloc (ptr, 0);
    }
  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();
< bloc
->size
)
1025 /* Wouldn't it be useful to actually resize the bloc here? */
1026 /* I think so too, but not if it's too expensive... */
1027 if ((bloc
->size
- MEM_ROUNDUP (size
) >= page_size
)
1028 && r_alloc_freeze_level
== 0)
1030 resize_bloc (bloc
, MEM_ROUNDUP (size
));
1031 /* Never mind if this fails, just do nothing... */
1032 /* It *should* be infallible! */
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	  *ptr = bloc->data;
	}
    }

  return *ptr;
}
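
/* Usage sketch for r_re_alloc (illustrative only, continuing the
   hypothetical `buf' example above):

     if (r_re_alloc (&buf, 4096) == 0)
       ...the original 1024-byte bloc is untouched; handle failure...

   On success, *PTR (here buf) already points at the possibly-moved,
   zero-extended data, so callers never compare old and new
   addresses.  */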
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the
     resize and bcopy is done only once.  Afterwards, all unreferenced
     blocs are already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	{
	  if (!(*b)->variable)
	    free_bloc (*b);
	  else
	    b = &(*b)->next;
	}
    }
}
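
/* Pairing sketch (illustrative only): freeze and thaw calls nest, so
   a caller needing 64K of heap that is guaranteed not to relocate can
   write

     r_alloc_freeze (64 * 1024);
     ...malloc and use up to 64K; blocs hold still...
     r_alloc_thaw ();

   Any blocs freed while frozen are only reclaimed by the final
   thaw.  */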
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/***********************************************************************
			    Initialization
 ***********************************************************************/
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif
/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);
#endif
#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif
#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif
  use_relocatable_buffers = 1;
}