/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
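
/* Worked examples of the rounding macros above (added for illustration;
   not part of the original source, and kept under #if 0 so they are
   never compiled).  The figures assume page_size == 4096 and
   MEM_ALIGN == sizeof (double) == 8:

     ROUNDUP (1)      => 4096   (next page boundary)
     ROUNDUP (4096)   => 4096   (already page-aligned)
     ROUNDUP (5000)   => 8192
     MEM_ROUNDUP (13) => 16     (next multiple of MEM_ALIGN)  */
#if 0
static void
rounding_examples ()
{
  page_size = 4096;		/* assumed page size for this sketch */

  if (ROUNDUP (5000) != 8192
      || ROUNDUP (4096) != 4096
      || MEM_ROUNDUP (13) != 16
      || ! ALIGNED (ROUNDUP (5000)))
    abort ();
}
#endif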

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif


/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
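
/* Illustrative sketch of a single heap's layout (added here for
   clarity; not in the original source):

   start              bloc_start                 free               end
     |<- malloc data ->|<-- relocatable blocs -->|<- unused space ->|

   The relations start <= bloc_start <= free <= end always hold.  */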
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
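
/* A minimal sketch (added for illustration, kept under #if 0 so it is
   never compiled) of the list invariant described above: blocs are kept
   in address order and abut, so b->data + b->size == b->next->data
   holds for every bloc with a successor.  */
#if 0
static void
check_bloc_abutment ()
{
  bloc_ptr b;

  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    {
      /* Each bloc's data must end exactly where the next one begins.  */
      if ((char *) b->data + b->size != (char *) b->next->data)
	abort ();
    }
}
#endif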


/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
		   - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}


/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return NIL_BLOC;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}


/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
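
/* An illustrative caller of the interface above (not from the original
   source, and kept under #if 0 so it is never compiled).  The key point
   is that the caller hands ralloc the address of its own pointer
   variable: whenever blocs are relocated, ralloc updates that variable
   through the stored `variable' field, so the caller must always reload
   the pointer rather than cache copies of it.  */
#if 0
static void
example_usage ()
{
  POINTER buffer;

  /* Allocate 10000 relocatable bytes; BUFFER now points at them.  */
  if (r_alloc (&buffer, 10000) == NIL)
    abort ();

  /* Grow the area.  BUFFER may point somewhere else afterwards.  */
  if (r_re_alloc (&buffer, 20000) == NIL)
    abort ();

  /* Release it; BUFFER is set to 0.  */
  r_alloc_free (&buffer);
}
#endif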
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	{
	  if (!(*b)->variable)
	    free_bloc (*b);
	  else
	    b = &(*b)->next;
	}
    }
}
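
/* A sketch of the freeze/thaw protocol (not from the original source,
   never compiled).  While the freeze level is positive, bloc data is
   guaranteed not to move, so a raw pointer into relocatable storage may
   be cached across calls that could otherwise trigger relocation.  */
#if 0
static void
example_freeze ()
{
  POINTER buffer;
  char *p;

  r_alloc (&buffer, 1000);

  r_alloc_freeze (0);		/* pin all blocs in place */
  p = (char *) buffer;		/* now safe to cache this raw pointer */
  /* ... work with p; no bloc relocates while frozen ...  */
  r_alloc_thaw ();		/* p may be invalidated from here on */
}
#endif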

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (old, new)
     POINTER *old, *new;
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by NEW.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed? OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}


/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */