/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
29 #include "lisp.h" /* Needed for VALBITS. */
30 #include "blockinput.h"
34 typedef POINTER_TYPE
*POINTER
;
37 #ifdef DOUG_LEA_MALLOC
39 extern int mallopt (int, int);
40 #else /* not DOUG_LEA_MALLOC */
42 extern size_t __malloc_extra_blocks
;
43 #endif /* SYSTEM_MALLOC */
44 #endif /* not DOUG_LEA_MALLOC */
51 typedef void *POINTER
;
56 #endif /* not emacs */
59 #include "getpagesize.h"
61 #define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (long int);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
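
/* For illustration only (not used by the allocator): assuming a page
   size of 4096 and sizeof (double) == 8, the macros above behave
   like this.

     ROUNDUP (1)        => 4096
     ROUNDUP (4096)     => 4096
     ROUNDUP (4097)     => 8192
     MEM_ROUNDUP (0x13) => 0x18

   Both add (boundary - 1) and then mask off the low-order bits,
   which is valid because both boundaries are powers of two.  */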
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (long int);
#endif
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
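
/* An illustrative picture of one heap (a sketch, not to scale):

   start           bloc_start                free           end
     |  malloc data   |   relocatable blocs    |   unused     |

   `free' grows toward `end' as blocs are added; `bloc_start' moves
   upward when malloc needs more room and the blocs are relocated.  */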
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
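
/* A minimal consistency-check sketch (illustrative only; nothing in
   this file calls it).  It walks the bloc list and verifies the
   adjacency invariant documented above: consecutive blocs abut.

     static void
     check_bloc_adjacency (void)
     {
       bloc_ptr b;
       for (b = first_bloc; b && b->next; b = b->next)
	 if ((char *) b->data + b->size != (char *) b->next->data)
	   abort ();
     }
*/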
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  register heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
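
/* Illustrative call pattern (a sketch, not code from this file):
   obtain only finds space, it does not reserve it, so the caller
   must advance break_value itself, as get_bloc does below.

     POINTER p = obtain (break_value, SIZE_NEEDED);
     if (p != 0)
       break_value = (char *) p + SIZE_NEEDED;

   SIZE_NEEDED stands in for whatever byte count the caller wants;
   calling obtain twice with the same arguments typically returns
   the same address.  */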
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
		   - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);
      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;
	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
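
/* Usage sketch (illustrative only; N stands for whatever byte count
   the caller needs):

     POINTER buf;
     if (r_alloc (&buf, N) == 0)
       ... handle allocation failure ...
     ... use the data; relocation may move it and update buf ...
     r_alloc_free (&buf);

   Callers must always reload the registered variable (buf above)
   rather than keeping a stale copy of the pointer.  */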
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }

  return *ptr;
}
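
/* Illustrative resize pattern (a sketch; assumes buf was registered
   via r_alloc above):

     if (r_re_alloc (&buf, NEW_SIZE) == 0)
       ... *buf is unchanged; handle the failure ...

   Shrinking only takes effect when at least page_size bytes can be
   recovered, per the check above; growing relocates the blocs that
   follow this one.  */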
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
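
/* Illustrative swap (a sketch; x and y are POINTER variables, each
   registered with its own bloc via r_alloc):

     POINTER tmp = x;
     x = y;
     y = tmp;
     r_alloc_reset_variable (&x, &y);
     r_alloc_reset_variable (&y, &x);

   Each call finds the bloc whose data the new variable now points
   at, verifies the bloc was owned by the old variable, and records
   the new owner.  */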
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}