/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef DOUG_LEA_MALLOC
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#endif /* not emacs */
#include "getpagesize.h"

typedef size_t SIZE;
typedef void *POINTER;
#define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
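/* For example, r_alloc_init below sets this to ROUNDUP (50000), i.e.
   50000 rounded up to a multiple of page_size.  */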
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ROUNDUP(size) (((size_t) (size) + page_size - 1) \
		       & ~((size_t)(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
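/* For example, with a page_size of 4096, ROUNDUP (4097) is 8192 and
   ROUNDUP (4096) stays 4096; MEM_ROUNDUP likewise rounds an address up
   to the next multiple of sizeof (double), leaving aligned values
   unchanged.  */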
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (ptrdiff_t);
#endif
/***********************************************************************
			Implementation using sbrk
 ***********************************************************************/
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
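/* Schematically, a single heap looks like this (addresses grow to the
   right); the boundaries are the struct heap members defined below:

     start ...... bloc_start ................ free ............ end
       malloc data          relocatable blocs        unused space   */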
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;
/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
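/* For example, if first_bloc->data is at address A with size S, then
   first_bloc->next->data (when non-nil) is exactly A + S: within a heap
   the blocs form one contiguous run starting at that heap's bloc_start.  */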
static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  If it does, we
	     cannot return it to the system.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    emacs_abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    emacs_abort ();
	}
    }
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere! */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere! */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
static POINTER
r_alloc_sbrk (ptrdiff_t size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;

	  last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
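/* Typical use (an illustrative sketch, not code from this file): the
   caller keeps a POINTER variable and always re-reads it before use,
   because the allocator may move the bloc and update the variable.

     POINTER p;
     if (r_alloc (&p, 1000) == 0)
       handle_oom ();           (hypothetical error handler)
     ... use p, re-reading it before each access ...
     r_alloc_free (&p);         (also stores 0 in p)  */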
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
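/* For example, growing a bloc with r_re_alloc (&p, new_size) may move it
   and every bloc after it, updating p and those blocs' variables; the
   bytes added beyond the old size are zero-filled by resize_bloc.  */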
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    emacs_abort (); /* Already freed? OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
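/* For example, if a bloc was allocated through variable A but, after a
   text swap, variable B now holds its data pointer, the call
   r_alloc_reset_variable (&A, &B) moves the bloc's record from A to B
   (A and B here are illustrative names, not variables in this file).  */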
void
r_alloc_inhibit_buffer_relocation (int inhibit)
{
  if (use_relocatable_buffers > 1)
    use_relocatable_buffers = 1;
  if (inhibit)
    use_relocatable_buffers--;
  else if (use_relocatable_buffers < 1)
    use_relocatable_buffers++;
}
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    emacs_abort ();

  extra_bytes = ROUNDUP (50000);
#endif
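  /* With 4096-byte pages, ROUNDUP (50000) above is 53248, so each trip
     to the system asks for roughly 50 KB more than strictly needed.  */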

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis so that we move all
     the relocatable blocks much less often.  The number used to be
     64, but alloc.c would override that with 32 in code that was
     removed when SYNC_INPUT became the only input handling mode.
     That code was conditioned on !DOUG_LEA_MALLOC, so the call to
     mallopt above is left unchanged.  (Actually, I think there's no
     system nowadays that uses DOUG_LEA_MALLOC and also uses
     REL_ALLOC.)  */
  __malloc_extra_blocks = 32;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}