1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
4 This file is part of GNU Emacs.
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
23 rather than all of them. This means allowing for a possible
24 hole between the first bloc and the end of malloc storage. */
29 #include "lisp.h" /* Needed for VALBITS. */
33 /* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
36 #if 0 /* Arithmetic on void* is a GCC extension. */
38 typedef void *POINTER
;
45 typedef char *POINTER
;
50 /* Unconditionally use char * for this. */
51 typedef char *POINTER
;
53 typedef unsigned long SIZE
;
55 /* Declared in dispnew.c, this version doesn't screw up if regions
57 extern void safe_bcopy ();
59 extern int __malloc_extra_blocks
;
66 typedef void *POINTER
;
72 #define safe_bcopy(x, y, z) memmove (y, x, z)
73 #define bzero(x, len) memset (x, 0, len)
75 #endif /* not emacs */
77 #include "getpagesize.h"
79 #define NIL ((POINTER) 0)
81 /* A flag to indicate whether we have initialized ralloc yet. For
82 Emacs's sake, please do not make this local to malloc_init; on some
83 machines, the dumping procedure makes all static variables
84 read-only. On these machines, the word static is #defined to be
85 the empty string, meaning that r_alloc_initialized becomes an
86 automatic variable, and loses its value each time Emacs is started up. */
87 static int r_alloc_initialized
= 0;
89 static void r_alloc_init ();
91 /* Declarations for working with the malloc, ralloc, and system breaks. */
93 /* Function to set the real break value. */
94 static POINTER (*real_morecore
) ();
96 /* The break value, as seen by malloc. */
97 static POINTER virtual_break_value
;
99 /* The address of the end of the last data in use by ralloc,
100 including relocatable blocs as well as malloc data. */
101 static POINTER break_value
;
103 /* This is the size of a page. We round memory requests to this boundary. */
104 static int page_size
;
106 /* Whenever we get memory from the system, get this many extra bytes. This
107 must be a multiple of page_size. */
108 static int extra_bytes
;
110 /* Macros for rounding. Note that rounding to any value is possible
111 by changing the definition of PAGE. */
112 #define PAGE (getpagesize ())
113 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
114 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
116 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
118 #define MEM_ALIGN sizeof(double)
119 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
122 /* Data structures of heaps and blocs. */
124 /* The relocatable objects, or blocs, and the malloc data
125 both reside within one or more heaps.
126 Each heap contains malloc data, running from `start' to `bloc_start',
127 and relocatable objects, running from `bloc_start' to `free'.
129 Relocatable objects may relocate within the same heap
130 or may move into another heap; the heaps themselves may grow
133 We try to make just one heap and make it larger as necessary.
134 But sometimes we can't do that, because we can't get contiguous
135 space to add onto the heap. When that happens, we start a new heap. */
141 /* Start of memory range of this heap. */
143 /* End of memory range of this heap. */
145 /* Start of relocatable data in this heap. */
147 /* Start of unused space in this heap. */
149 /* First bloc in this heap. */
150 struct bp
*first_bloc
;
151 /* Last bloc in this heap. */
152 struct bp
*last_bloc
;
155 #define NIL_HEAP ((heap_ptr) 0)
156 #define HEAP_PTR_SIZE (sizeof (struct heap))
158 /* This is the first heap object.
159 If we need additional heap objects, each one resides at the beginning of
160 the space it covers. */
161 static struct heap heap_base
;
163 /* Head and tail of the list of heaps. */
164 static heap_ptr first_heap
, last_heap
;
166 /* These structures are allocated in the malloc arena.
167 The linked list is kept in order of increasing '.data' members.
168 The data blocks abut each other; if b->next is non-nil, then
169 b->data + b->size == b->next->data.
171 An element with variable==NIL denotes a freed block, which has not yet
172 been collected. They may only appear while r_alloc_freeze > 0, and will be
173 freed when the arena is thawed. Currently, these blocs are not reusable,
174 while the arena is frozen. Very inefficient. */
183 POINTER new_data
; /* temporarily used for relocation */
184 struct heap
*heap
; /* Heap this bloc is in. */
187 #define NIL_BLOC ((bloc_ptr) 0)
188 #define BLOC_PTR_SIZE (sizeof (struct bp))
190 /* Head and tail of the list of relocatable blocs. */
191 static bloc_ptr first_bloc
, last_bloc
;
193 static int use_relocatable_buffers
;
195 /* If >0, no relocation whatsoever takes place. */
196 static int r_alloc_freeze_level
;
199 /* Functions to get and return memory from the system. */
201 /* Find the heap that ADDRESS falls within. */
209 for (heap
= last_heap
; heap
; heap
= heap
->prev
)
211 if (heap
->start
<= address
&& address
<= heap
->end
)
218 /* Find SIZE bytes of space in a heap.
219 Try to get them at ADDRESS (which must fall within some heap's range)
220 if we can get that many within one heap.
222 If enough space is not presently available in our reserve, this means
223 getting more page-aligned space from the system. If the returned space
224 is not contiguous to the last heap, allocate a new heap, and append it
226 obtain does not try to keep track of whether space is in use
227 or not in use. It just returns the address of SIZE bytes that
228 fall within a single heap. If you call obtain twice in a row
229 with the same arguments, you typically get the same value.
230 to the heap list. It's the caller's responsibility to keep
231 track of what space is in use.
233 Return the address of the space if all went well, or zero if we couldn't
234 allocate the memory. */
237 obtain (address
, size
)
242 SIZE already_available
;
244 /* Find the heap that ADDRESS falls within. */
245 for (heap
= last_heap
; heap
; heap
= heap
->prev
)
247 if (heap
->start
<= address
&& address
<= heap
->end
)
254 /* If we can't fit SIZE bytes in that heap,
255 try successive later heaps. */
256 while (heap
&& address
+ size
> heap
->end
)
259 if (heap
== NIL_HEAP
)
261 address
= heap
->bloc_start
;
264 /* If we can't fit them within any existing heap,
266 if (heap
== NIL_HEAP
)
268 POINTER
new = (*real_morecore
)(0);
271 already_available
= (char *)last_heap
->end
- (char *)address
;
273 if (new != last_heap
->end
)
275 /* Someone else called sbrk. Make a new heap. */
277 heap_ptr new_heap
= (heap_ptr
) MEM_ROUNDUP (new);
278 POINTER bloc_start
= (POINTER
) MEM_ROUNDUP ((POINTER
)(new_heap
+ 1));
280 if ((*real_morecore
) (bloc_start
- new) != new)
283 new_heap
->start
= new;
284 new_heap
->end
= bloc_start
;
285 new_heap
->bloc_start
= bloc_start
;
286 new_heap
->free
= bloc_start
;
287 new_heap
->next
= NIL_HEAP
;
288 new_heap
->prev
= last_heap
;
289 new_heap
->first_bloc
= NIL_BLOC
;
290 new_heap
->last_bloc
= NIL_BLOC
;
291 last_heap
->next
= new_heap
;
292 last_heap
= new_heap
;
294 address
= bloc_start
;
295 already_available
= 0;
298 /* Add space to the last heap (which we may have just created).
299 Get some extra, so we can come here less often. */
301 get
= size
+ extra_bytes
- already_available
;
302 get
= (char *) ROUNDUP ((char *)last_heap
->end
+ get
)
303 - (char *) last_heap
->end
;
305 if ((*real_morecore
) (get
) != last_heap
->end
)
308 last_heap
->end
+= get
;
314 /* Return unused heap space to the system
315 if there is a lot of unused space now.
316 This can make the last heap smaller;
317 it can also eliminate the last heap entirely. */
325 /* Add the amount of space beyond break_value
326 in all heaps which extend beyond break_value at all. */
328 for (h
= last_heap
; h
&& break_value
< h
->end
; h
= h
->prev
)
330 excess
+= (char *) h
->end
- (char *) ((break_value
< h
->bloc_start
)
331 ? h
->bloc_start
: break_value
);
334 if (excess
> extra_bytes
* 2 && (*real_morecore
) (0) == last_heap
->end
)
336 /* Keep extra_bytes worth of empty space.
337 And don't free anything unless we can free at least extra_bytes. */
338 excess
-= extra_bytes
;
340 if ((char *)last_heap
->end
- (char *)last_heap
->bloc_start
<= excess
)
342 /* This heap should have no blocs in it. */
343 if (last_heap
->first_bloc
!= NIL_BLOC
344 || last_heap
->last_bloc
!= NIL_BLOC
)
347 /* Return the last heap, with its header, to the system. */
348 excess
= (char *)last_heap
->end
- (char *)last_heap
->start
;
349 last_heap
= last_heap
->prev
;
350 last_heap
->next
= NIL_HEAP
;
354 excess
= (char *) last_heap
->end
355 - (char *) ROUNDUP ((char *)last_heap
->end
- excess
);
356 last_heap
->end
-= excess
;
359 if ((*real_morecore
) (- excess
) == 0)
364 /* Return the total size in use by relocating allocator,
365 above where malloc gets space. */
368 r_alloc_size_in_use ()
370 return break_value
- virtual_break_value
;
373 /* The meat - allocating, freeing, and relocating blocs. */
375 /* Find the bloc referenced by the address in PTR. Returns a pointer
382 register bloc_ptr p
= first_bloc
;
384 while (p
!= NIL_BLOC
)
386 if (p
->variable
== ptr
&& p
->data
== *ptr
)
395 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
396 Returns a pointer to the new bloc, or zero if we couldn't allocate
397 memory for the new block. */
403 register bloc_ptr new_bloc
;
404 register heap_ptr heap
;
406 if (! (new_bloc
= (bloc_ptr
) malloc (BLOC_PTR_SIZE
))
407 || ! (new_bloc
->data
= obtain (break_value
, size
)))
415 break_value
= new_bloc
->data
+ size
;
417 new_bloc
->size
= size
;
418 new_bloc
->next
= NIL_BLOC
;
419 new_bloc
->variable
= (POINTER
*) NIL
;
420 new_bloc
->new_data
= 0;
422 /* Record in the heap that this space is in use. */
423 heap
= find_heap (new_bloc
->data
);
424 heap
->free
= break_value
;
426 /* Maintain the correspondence between heaps and blocs. */
427 new_bloc
->heap
= heap
;
428 heap
->last_bloc
= new_bloc
;
429 if (heap
->first_bloc
== NIL_BLOC
)
430 heap
->first_bloc
= new_bloc
;
432 /* Put this bloc on the doubly-linked list of blocs. */
435 new_bloc
->prev
= last_bloc
;
436 last_bloc
->next
= new_bloc
;
437 last_bloc
= new_bloc
;
441 first_bloc
= last_bloc
= new_bloc
;
442 new_bloc
->prev
= NIL_BLOC
;
448 /* Calculate new locations of blocs in the list beginning with BLOC,
449 relocating it to start at ADDRESS, in heap HEAP. If enough space is
450 not presently available in our reserve, call obtain for
453 Store the new location of each bloc in its new_data field.
454 Do not touch the contents of blocs or break_value. */
457 relocate_blocs (bloc
, heap
, address
)
462 register bloc_ptr b
= bloc
;
464 /* No need to ever call this if arena is frozen, bug somewhere! */
465 if (r_alloc_freeze_level
)
470 /* If bloc B won't fit within HEAP,
471 move to the next heap and try again. */
472 while (heap
&& address
+ b
->size
> heap
->end
)
475 if (heap
== NIL_HEAP
)
477 address
= heap
->bloc_start
;
480 /* If BLOC won't fit in any heap,
481 get enough new space to hold BLOC and all following blocs. */
482 if (heap
== NIL_HEAP
)
484 register bloc_ptr tb
= b
;
487 /* Add up the size of all the following blocs. */
488 while (tb
!= NIL_BLOC
)
496 /* Get that space. */
497 address
= obtain (address
, s
);
504 /* Record the new address of this bloc
505 and update where the next bloc can start. */
506 b
->new_data
= address
;
515 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
516 This is necessary if we put the memory of space of BLOC
517 before that of BEFORE. */
520 reorder_bloc (bloc
, before
)
521 bloc_ptr bloc
, before
;
525 /* Splice BLOC out from where it is. */
534 /* Splice it in before BEFORE. */
545 /* Update the records of which heaps contain which blocs, starting
546 with heap HEAP and bloc BLOC. */
549 update_heap_bloc_correspondence (bloc
, heap
)
555 /* Initialize HEAP's status to reflect blocs before BLOC. */
556 if (bloc
!= NIL_BLOC
&& bloc
->prev
!= NIL_BLOC
&& bloc
->prev
->heap
== heap
)
558 /* The previous bloc is in HEAP. */
559 heap
->last_bloc
= bloc
->prev
;
560 heap
->free
= bloc
->prev
->data
+ bloc
->prev
->size
;
564 /* HEAP contains no blocs before BLOC. */
565 heap
->first_bloc
= NIL_BLOC
;
566 heap
->last_bloc
= NIL_BLOC
;
567 heap
->free
= heap
->bloc_start
;
570 /* Advance through blocs one by one. */
571 for (b
= bloc
; b
!= NIL_BLOC
; b
= b
->next
)
573 /* Advance through heaps, marking them empty,
574 till we get to the one that B is in. */
577 if (heap
->bloc_start
<= b
->data
&& b
->data
<= heap
->end
)
580 /* We know HEAP is not null now,
581 because there has to be space for bloc B. */
582 heap
->first_bloc
= NIL_BLOC
;
583 heap
->last_bloc
= NIL_BLOC
;
584 heap
->free
= heap
->bloc_start
;
587 /* Update HEAP's status for bloc B. */
588 heap
->free
= b
->data
+ b
->size
;
590 if (heap
->first_bloc
== NIL_BLOC
)
591 heap
->first_bloc
= b
;
593 /* Record that B is in HEAP. */
597 /* If there are any remaining heaps and no blocs left,
598 mark those heaps as empty. */
602 heap
->first_bloc
= NIL_BLOC
;
603 heap
->last_bloc
= NIL_BLOC
;
604 heap
->free
= heap
->bloc_start
;
609 /* Resize BLOC to SIZE bytes. This relocates the blocs
610 that come after BLOC in memory. */
613 resize_bloc (bloc
, size
)
622 /* No need to ever call this if arena is frozen, bug somewhere! */
623 if (r_alloc_freeze_level
)
626 if (bloc
== NIL_BLOC
|| size
== bloc
->size
)
629 for (heap
= first_heap
; heap
!= NIL_HEAP
; heap
= heap
->next
)
631 if (heap
->bloc_start
<= bloc
->data
&& bloc
->data
<= heap
->end
)
635 if (heap
== NIL_HEAP
)
638 old_size
= bloc
->size
;
641 /* Note that bloc could be moved into the previous heap. */
642 address
= (bloc
->prev
? bloc
->prev
->data
+ bloc
->prev
->size
643 : first_heap
->bloc_start
);
646 if (heap
->bloc_start
<= address
&& address
<= heap
->end
)
651 if (! relocate_blocs (bloc
, heap
, address
))
653 bloc
->size
= old_size
;
659 for (b
= last_bloc
; b
!= bloc
; b
= b
->prev
)
664 b
->data
= b
->new_data
;
668 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
669 *b
->variable
= b
->data
= b
->new_data
;
675 bloc
->data
= bloc
->new_data
;
679 safe_bcopy (bloc
->data
, bloc
->new_data
, old_size
);
680 bzero (bloc
->new_data
+ old_size
, size
- old_size
);
681 *bloc
->variable
= bloc
->data
= bloc
->new_data
;
686 for (b
= bloc
; b
!= NIL_BLOC
; b
= b
->next
)
691 b
->data
= b
->new_data
;
695 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
696 *b
->variable
= b
->data
= b
->new_data
;
701 update_heap_bloc_correspondence (bloc
, heap
);
703 break_value
= (last_bloc
? last_bloc
->data
+ last_bloc
->size
704 : first_heap
->bloc_start
);
708 /* Free BLOC from the chain of blocs, relocating any blocs above it.
709 This may return space to the system. */
715 heap_ptr heap
= bloc
->heap
;
717 if (r_alloc_freeze_level
)
719 bloc
->variable
= (POINTER
*) NIL
;
723 resize_bloc (bloc
, 0);
725 if (bloc
== first_bloc
&& bloc
== last_bloc
)
727 first_bloc
= last_bloc
= NIL_BLOC
;
729 else if (bloc
== last_bloc
)
731 last_bloc
= bloc
->prev
;
732 last_bloc
->next
= NIL_BLOC
;
734 else if (bloc
== first_bloc
)
736 first_bloc
= bloc
->next
;
737 first_bloc
->prev
= NIL_BLOC
;
741 bloc
->next
->prev
= bloc
->prev
;
742 bloc
->prev
->next
= bloc
->next
;
745 /* Update the records of which blocs are in HEAP. */
746 if (heap
->first_bloc
== bloc
)
748 if (bloc
->next
!= 0 && bloc
->next
->heap
== heap
)
749 heap
->first_bloc
= bloc
->next
;
751 heap
->first_bloc
= heap
->last_bloc
= NIL_BLOC
;
753 if (heap
->last_bloc
== bloc
)
755 if (bloc
->prev
!= 0 && bloc
->prev
->heap
== heap
)
756 heap
->last_bloc
= bloc
->prev
;
758 heap
->first_bloc
= heap
->last_bloc
= NIL_BLOC
;
765 /* Interface routines. */
767 /* Obtain SIZE bytes of storage from the free pool, or the system, as
768 necessary. If relocatable blocs are in use, this means relocating
769 them. This function gets plugged into the GNU malloc's __morecore
772 We provide hysteresis, never relocating by less than extra_bytes.
774 If we're out of memory, we should return zero, to imitate the other
775 __morecore hook values - in particular, __default_morecore in the
776 GNU malloc package. */
785 if (! r_alloc_initialized
)
788 if (! use_relocatable_buffers
)
789 return (*real_morecore
) (size
);
792 return virtual_break_value
;
796 /* Allocate a page-aligned space. GNU malloc would reclaim an
797 extra space if we passed an unaligned one. But we could
798 not always find a space which is contiguous to the previous. */
799 POINTER new_bloc_start
;
800 heap_ptr h
= first_heap
;
801 SIZE get
= ROUNDUP (size
);
803 address
= (POINTER
) ROUNDUP (virtual_break_value
);
805 /* Search the list upward for a heap which is large enough. */
806 while ((char *) h
->end
< (char *) MEM_ROUNDUP ((char *)address
+ get
))
811 address
= (POINTER
) ROUNDUP (h
->start
);
814 /* If not found, obtain more space. */
817 get
+= extra_bytes
+ page_size
;
819 if (! obtain (address
, get
))
822 if (first_heap
== last_heap
)
823 address
= (POINTER
) ROUNDUP (virtual_break_value
);
825 address
= (POINTER
) ROUNDUP (last_heap
->start
);
829 new_bloc_start
= (POINTER
) MEM_ROUNDUP ((char *)address
+ get
);
831 if (first_heap
->bloc_start
< new_bloc_start
)
833 /* This is no clean solution - no idea how to do it better. */
834 if (r_alloc_freeze_level
)
837 /* There is a bug here: if the above obtain call succeeded, but the
838 relocate_blocs call below does not succeed, we need to free
839 the memory that we got with obtain. */
841 /* Move all blocs upward. */
842 if (! relocate_blocs (first_bloc
, h
, new_bloc_start
))
845 /* Note that (POINTER)(h+1) <= new_bloc_start since
846 get >= page_size, so the following does not destroy the heap
848 for (b
= last_bloc
; b
!= NIL_BLOC
; b
= b
->prev
)
850 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
851 *b
->variable
= b
->data
= b
->new_data
;
854 h
->bloc_start
= new_bloc_start
;
856 update_heap_bloc_correspondence (first_bloc
, h
);
860 /* Give up managing heaps below the one the new
861 virtual_break_value points to. */
862 first_heap
->prev
= NIL_HEAP
;
863 first_heap
->next
= h
->next
;
864 first_heap
->start
= h
->start
;
865 first_heap
->end
= h
->end
;
866 first_heap
->free
= h
->free
;
867 first_heap
->first_bloc
= h
->first_bloc
;
868 first_heap
->last_bloc
= h
->last_bloc
;
869 first_heap
->bloc_start
= h
->bloc_start
;
871 if (first_heap
->next
)
872 first_heap
->next
->prev
= first_heap
;
874 last_heap
= first_heap
;
877 bzero (address
, size
);
881 SIZE excess
= (char *)first_heap
->bloc_start
882 - ((char *)virtual_break_value
+ size
);
884 address
= virtual_break_value
;
886 if (r_alloc_freeze_level
== 0 && excess
> 2 * extra_bytes
)
888 excess
-= extra_bytes
;
889 first_heap
->bloc_start
890 = (POINTER
) MEM_ROUNDUP ((char *)first_heap
->bloc_start
- excess
);
892 relocate_blocs (first_bloc
, first_heap
, first_heap
->bloc_start
);
894 for (b
= first_bloc
; b
!= NIL_BLOC
; b
= b
->next
)
896 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
897 *b
->variable
= b
->data
= b
->new_data
;
901 if ((char *)virtual_break_value
+ size
< (char *)first_heap
->start
)
903 /* We found an additional space below the first heap */
904 first_heap
->start
= (POINTER
) ((char *)virtual_break_value
+ size
);
908 virtual_break_value
= (POINTER
) ((char *)address
+ size
);
909 break_value
= (last_bloc
910 ? last_bloc
->data
+ last_bloc
->size
911 : first_heap
->bloc_start
);
918 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
919 the data is returned in *PTR. PTR is thus the address of some variable
920 which will use the data area.
922 The allocation of 0 bytes is valid.
923 In case r_alloc_freeze is set, a best fit of unused blocs could be done
924 before allocating a new area. Not yet done.
926 If we can't allocate the necessary memory, set *PTR to zero, and
934 register bloc_ptr new_bloc
;
936 if (! r_alloc_initialized
)
939 new_bloc
= get_bloc (MEM_ROUNDUP (size
));
942 new_bloc
->variable
= ptr
;
943 *ptr
= new_bloc
->data
;
951 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
952 Store 0 in *PTR to show there's no block allocated. */
956 register POINTER
*ptr
;
958 register bloc_ptr dead_bloc
;
960 if (! r_alloc_initialized
)
963 dead_bloc
= find_bloc (ptr
);
964 if (dead_bloc
== NIL_BLOC
)
967 free_bloc (dead_bloc
);
971 refill_memory_reserve ();
975 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
976 Do this by shifting all blocks above this one up in memory, unless
977 SIZE is less than or equal to the current bloc size, in which case
980 In case r_alloc_freeze is set, a new bloc is allocated, and the
981 memory copied to it. Not very efficient. We could traverse the
982 bloc_list for a best fit of free blocs first.
984 Change *PTR to reflect the new bloc, and return this value.
986 If more memory cannot be allocated, then leave *PTR unchanged, and
990 r_re_alloc (ptr
, size
)
994 register bloc_ptr bloc
;
996 if (! r_alloc_initialized
)
1000 return r_alloc (ptr
, size
);
1004 return r_alloc (ptr
, 0);
1007 bloc
= find_bloc (ptr
);
1008 if (bloc
== NIL_BLOC
)
1011 if (size
< bloc
->size
)
1013 /* Wouldn't it be useful to actually resize the bloc here? */
1014 /* I think so too, but not if it's too expensive... */
1015 if ((bloc
->size
- MEM_ROUNDUP (size
) >= page_size
)
1016 && r_alloc_freeze_level
== 0)
1018 resize_bloc (bloc
, MEM_ROUNDUP (size
));
1019 /* Never mind if this fails, just do nothing... */
1020 /* It *should* be infallible! */
1023 else if (size
> bloc
->size
)
1025 if (r_alloc_freeze_level
)
1028 new_bloc
= get_bloc (MEM_ROUNDUP (size
));
1031 new_bloc
->variable
= ptr
;
1032 *ptr
= new_bloc
->data
;
1033 bloc
->variable
= (POINTER
*) NIL
;
1040 if (! resize_bloc (bloc
, MEM_ROUNDUP (size
)))
1047 /* Disable relocations, after making room for at least SIZE bytes
1048 of non-relocatable heap if possible. The relocatable blocs are
1049 guaranteed to hold still until thawed, even if this means that
1050 malloc must return a null pointer. */
1053 r_alloc_freeze (size
)
1056 if (! r_alloc_initialized
)
1059 /* If already frozen, we can't make any more room, so don't try. */
1060 if (r_alloc_freeze_level
> 0)
1062 /* If we can't get the amount requested, half is better than nothing. */
1063 while (size
> 0 && r_alloc_sbrk (size
) == 0)
1065 ++r_alloc_freeze_level
;
1067 r_alloc_sbrk (-size
);
1074 if (! r_alloc_initialized
)
1077 if (--r_alloc_freeze_level
< 0)
1080 /* This frees all unused blocs. It is not too inefficient, as the resize
1081 and bcopy is done only once. Afterwards, all unreferenced blocs are
1082 already shrunk to zero size. */
1083 if (!r_alloc_freeze_level
)
1085 bloc_ptr
*b
= &first_bloc
;
1087 if (!(*b
)->variable
)
1095 /* The hook `malloc' uses for the function which gets more space
1097 extern POINTER (*__morecore
) ();
1099 /* Initialize various things for memory allocation. */
1104 if (r_alloc_initialized
)
1107 r_alloc_initialized
= 1;
1108 real_morecore
= __morecore
;
1109 __morecore
= r_alloc_sbrk
;
1111 first_heap
= last_heap
= &heap_base
;
1112 first_heap
->next
= first_heap
->prev
= NIL_HEAP
;
1113 first_heap
->start
= first_heap
->bloc_start
1114 = virtual_break_value
= break_value
= (*real_morecore
) (0);
1115 if (break_value
== NIL
)
1119 extra_bytes
= ROUNDUP (50000);
1121 /* Give GNU malloc's morecore some hysteresis
1122 so that we move all the relocatable blocks much less often. */
1123 __malloc_extra_blocks
= 64;
1125 first_heap
->end
= (POINTER
) ROUNDUP (first_heap
->start
);
1127 /* The extra call to real_morecore guarantees that the end of the
1128 address space is a multiple of page_size, even if page_size is
1129 not really the page size of the system running the binary in
1130 which page_size is stored. This allows a binary to be built on a
1131 system with one page size and run on a system with a smaller page
1133 (*real_morecore
) (first_heap
->end
- first_heap
->start
);
1135 /* Clear the rest of the last page; this memory is in our address space
1136 even though it is after the sbrk value. */
1137 /* Doubly true, with the additional call that explicitly adds the
1138 rest of that page to the address space. */
1139 bzero (first_heap
->start
, first_heap
->end
- first_heap
->start
);
1140 virtual_break_value
= break_value
= first_heap
->bloc_start
= first_heap
->end
;
1141 use_relocatable_buffers
= 1;
1153 if (!r_alloc_initialized
)
1156 assert (first_heap
);
1157 assert (last_heap
->end
<= (POINTER
) sbrk (0));
1158 assert ((POINTER
) first_heap
< first_heap
->start
);
1159 assert (first_heap
->start
<= virtual_break_value
);
1160 assert (virtual_break_value
<= first_heap
->end
);
1162 for (h
= first_heap
; h
; h
= h
->next
)
1164 assert (h
->prev
== ph
);
1165 assert ((POINTER
) ROUNDUP (h
->end
) == h
->end
);
1166 assert ((POINTER
) MEM_ROUNDUP (h
->start
) == h
->start
);
1167 assert ((POINTER
) MEM_ROUNDUP (h
->bloc_start
) == h
->bloc_start
);
1168 assert (h
->start
<= h
->bloc_start
&& h
->bloc_start
<= h
->end
);
1172 assert (ph
->end
< h
->start
);
1173 assert (h
->start
<= (POINTER
)h
&& (POINTER
)(h
+1) <= h
->bloc_start
);
1176 if (h
->bloc_start
<= break_value
&& break_value
<= h
->end
)
1183 assert (last_heap
== ph
);
1185 for (b
= first_bloc
; b
; b
= b
->next
)
1187 assert (b
->prev
== pb
);
1188 assert ((POINTER
) MEM_ROUNDUP (b
->data
) == b
->data
);
1189 assert ((SIZE
) MEM_ROUNDUP (b
->size
) == b
->size
);
1192 for (h
= first_heap
; h
; h
= h
->next
)
1194 if (h
->bloc_start
<= b
->data
&& b
->data
+ b
->size
<= h
->end
)
1201 if (pb
&& pb
->data
+ pb
->size
!= b
->data
)
1203 assert (ph
&& b
->data
== h
->bloc_start
);
1206 if (ph
->bloc_start
<= pb
->data
1207 && pb
->data
+ pb
->size
<= ph
->end
)
1209 assert (pb
->data
+ pb
->size
+ b
->size
> ph
->end
);
1214 assert (ph
->bloc_start
+ b
->size
> ph
->end
);
1222 assert (last_bloc
== pb
);
1225 assert (last_bloc
->data
+ last_bloc
->size
== break_value
);
1227 assert (first_heap
->bloc_start
== break_value
);