/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
		 2005, 2006, 2007, 2008, 2009, 2010  Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
30 #include "lisp.h" /* Needed for VALBITS. */
31 #include "blockinput.h"
35 typedef POINTER_TYPE
*POINTER
;
38 #ifdef DOUG_LEA_MALLOC
40 extern int mallopt (int, int);
41 #else /* not DOUG_LEA_MALLOC */
43 extern size_t __malloc_extra_blocks
;
44 #endif /* SYSTEM_MALLOC */
45 #endif /* not DOUG_LEA_MALLOC */
52 typedef void *POINTER
;
57 #endif /* not emacs */
60 #include "getpagesize.h"
62 #define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (long int);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
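
/* For illustration only: assuming page_size is 4096 and MEM_ALIGN is
   8 (sizeof (double) on most ports),

     ROUNDUP (4097)       => 8192    next page boundary at or above
     ROUND_TO_PAGE (4097) => 4096    page boundary at or below
     ALIGNED (8192)       => 1       page-aligned
     MEM_ROUNDUP (13)     => 16      next multiple of MEM_ALIGN  */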
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (long int);
#endif
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
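
/* Sketch of a single heap's layout, per the description above
   (illustrative only; addresses increase to the right):

     start          bloc_start                free             end
       | malloc data |   relocatable blocs     |  unused space  |

   bloc_start and free coincide in a heap that holds no blocs.  */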
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;
#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;
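
/* Example of the abutment invariant above, with hypothetical
   addresses in a single heap (sizes are MEM_ROUNDUP'd):

     first_bloc:   data = 0x10000, size = 0x400
     middle bloc:  data = 0x10400, size = 0x100
     last_bloc:    data = 0x10500, size = 0x200

   so b->data + b->size == b->next->data throughout, and break_value
   is 0x10700, just past last_bloc.  */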
#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

          if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
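
/* Worked example of the arithmetic above (hypothetical numbers):
   with size = 10000, extra_bytes = 53248 (ROUNDUP (50000) under 4 KB
   pages), and already_available = 2000,

     get = 10000 + 53248 - 2000 = 61248

   which the second assignment rounds up so the new last_heap->end
   stays page-aligned: if the old end was page-aligned, get becomes
   61440, i.e. 15 whole pages.  The extra_bytes padding is what lets
   many small requests be satisfied without another real_morecore
   call.  */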
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *) last_heap->end - excess);
          last_heap->end = (char *) last_heap->end - excess;
        }

      if ((*real_morecore) (- excess) == 0)
        {
          /* If the system didn't want that much memory back, adjust
             the end of the last heap to reflect that.  This can occur
             if break_value is still within the original data segment.  */
          last_heap->end = (char *) last_heap->end + excess;
          /* Make sure that the result of the adjustment is accurate.
             It should be, for the else clause above; the other case,
             which returns the entire last heap to the system, seems
             unlikely to trigger this mode of failure.  */
          if (last_heap->end != (*real_morecore) (0))
            abort ();
        }
    }
}
/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use (void)
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
         Don't abort here, as callers might be expecting this, but
         callers that always expect a bloc to be returned should abort
         if one isn't to avoid a memory corruption bug that is
         difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
             : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          memmove (bloc->new_data, bloc->data, old_size);
          memset (bloc->new_data + old_size, 0, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  return 1;
}
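
/* Note on the copy order above: when a bloc grows, everything after
   it slides toward higher addresses, so the copies run from last_bloc
   back down to BLOC (highest first) so that no source region is
   clobbered before it has been copied.  When a bloc shrinks,
   everything slides downward, and the copies run forward from BLOC
   instead.  */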
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could
         not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
                    - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found an additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
                 ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
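
/* Worked example (hypothetical addresses): suppose virtual_break_value
   is 0x20000, first_heap->bloc_start is 0x21000, and malloc asks its
   __morecore hook for 0x3000 bytes.  Then get = ROUNDUP (0x3000) =
   0x3000, address = 0x20000, and new_bloc_start works out to 0x23000,
   which is above 0x21000 - so every bloc is first relocated upward,
   and the zeroed region starting at 0x20000 is handed back to
   malloc.  */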
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
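
/* Typical use of the interface, for illustration only (error handling
   omitted; the data may move whenever r_alloc_sbrk runs, so callers
   must always go through the registered pointer variable and never
   cache the raw address across allocations):

     POINTER p;
     r_alloc (&p, 1024);       on success, *p points to 1024 bytes
     r_re_alloc (&p, 4096);    grow; other blocs may be relocated
     r_alloc_free (&p);        frees the bloc and stores 0 in p  */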
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (long int size)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and memcpy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}
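
/* Illustrative pairing, for reference (hypothetical caller):

     r_alloc_freeze (8192);    try to set aside ~8 KB, then pin blocs
     ... allocate; raw pointers into bloc data stay valid here ...
     r_alloc_thaw ();          unpin; blocs freed while frozen are
                               collected once the level reaches zero

   Freezes nest: each r_alloc_freeze increments r_alloc_freeze_level
   and each r_alloc_thaw decrements it, so only the outermost thaw
   actually re-enables relocation.  */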
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
        break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
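
/* Illustration (hypothetical variables): after buffer-swap-text
   exchanges the text of two buffers, the bloc once registered to
   a_ptr is reached through b_ptr and vice versa.  The records are
   repaired with two calls:

     r_alloc_reset_variable (&a_ptr, &b_ptr);
     r_alloc_reset_variable (&b_ptr, &a_ptr);

   After both calls each bloc's `variable' field again names the
   variable that really points at its data.  */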
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
          (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */