/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
      2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD	-2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

typedef size_t SIZE;
typedef void *POINTER;

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
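
/* Worked examples of the rounding arithmetic above (illustrative only,
   assuming page_size == 4096 and sizeof (double) == 8):

     ROUNDUP (5000)    => 8192   next page boundary at or above 5000
     ROUNDUP (4096)    => 4096   already page-aligned, unchanged
     MEM_ROUNDUP (13)  => 16     next multiple of MEM_ALIGN  */
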
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif

/***********************************************************************
		     Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
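
/* Layout of a single heap, as described above (diagram added for
   illustration; the span from `free' to `end' is unused reserve):

   start               bloc_start                   free          end
     |   malloc data       |    relocatable blocs    |   unused    |
     +---------------------+-------------------------+-------------+  */
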
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  Such blocs may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;
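
/* A hypothetical check of the abutment invariant stated above
   (not called anywhere; shown for illustration only):

     bloc_ptr b;
     for (b = first_bloc; b && b->next; b = b->next)
       assert ((char *) b->data + b->size == (char *) b->next->data);  */
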
#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
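
/* Note on usage (added commentary): obtain only finds or creates address
   space; it does not mark it in use.  For example, get_bloc below calls
   obtain (break_value, size) and then advances break_value itself, so the
   new bloc ends up abutting the data already in use.  */
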
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return NIL_BLOC;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
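
/* Added note: relocate_blocs only plans a move; it never copies data.
   Callers commit the plan themselves, typically with a loop of the form
   shown here (illustration of the idiom shared by resize_bloc and
   r_alloc_sbrk below):

     for (b = last_bloc; b != NIL_BLOC; b = b->prev)
       {
	 safe_bcopy (b->data, b->new_data, b->size);
	 *b->variable = b->data = b->new_data;
       }  */
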
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
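
/* How the hook gets installed (see r_alloc_init below; sketch added
   here for illustration):

     real_morecore = __morecore;    -- save malloc's own morecore
     __morecore = r_alloc_sbrk;     -- malloc now asks us for more core  */
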
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return virtual_break_value;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   If the arena is frozen (see r_alloc_freeze), a best fit of unused blocs
   could be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
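
/* Hypothetical usage sketch of the public entry points (illustration
   only; not called from this file):

     POINTER p;
     r_alloc (&p, 1024);      -- p now points to 1024 relocatable bytes
     r_re_alloc (&p, 4096);   -- the data may move; p is updated via &p
     r_alloc_free (&p);       -- the bloc is freed and p is set to 0

   The allocator records &p in the bloc's `variable' field, so it can
   update p whenever the bloc relocates; the caller must therefore keep
   p at a stable address for the bloc's lifetime.  */
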
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   If the arena is frozen, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
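
/* Hypothetical freeze/thaw usage (illustration only): code that hands
   out raw pointers into relocatable data should pin the arena first:

     r_alloc_freeze (0);   -- blocs hold still; calls may nest
     ... use pointers into bloc data safely ...
     r_alloc_thaw ();      -- unused blocs are reclaimed at level 0  */
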
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/***********************************************************************
			       Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */