/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

extern int __malloc_extra_blocks;
#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
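
/* Worked examples of the rounding macros above, assuming a 4096-byte
   page and an 8-byte double (typical values, but not guaranteed on
   every machine):

     ROUNDUP (1)       == 4096      ROUNDUP (4096)     == 4096
     ROUNDUP (4097)    == 8192
     MEM_ROUNDUP (13)  == 16        MEM_ROUNDUP (16)   == 16

   Both macros assume page_size and MEM_ALIGN are powers of two.  */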
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
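
/* A sketch of one heap's layout, following the description above
   (addresses increase to the right; `free' can be below `end' when
   the heap has unused room at its top):

     start           bloc_start             free              end
       |  malloc data    | relocatable blocs |     unused      |
       +-----------------+-------------------+-----------------+  */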
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing `.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable while the arena is frozen.  Very inefficient.  */
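
/* For example, a bloc created by r_alloc (&var, size) has
   bloc->variable == &var and var == *bloc->variable == bloc->data;
   whenever the bloc's data is relocated, the new data address is
   stored through bloc->variable, so var always tracks the data.  */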
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
		   - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero (bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
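
#if 0
/* A minimal usage sketch of the interface above; not part of the
   allocator (hence #if 0), and the names example_data and
   example_usage are hypothetical.  The caller hands the allocator
   the address of its own pointer variable; the allocator stores
   through that address whenever the bloc relocates, so the variable
   always points at the current location of the data.  */

static POINTER example_data;

static void
example_usage ()
{
  if (r_alloc (&example_data, 1024) == 0)	/* Allocate 1024 bytes.  */
    abort ();
  example_data[0] = 'x';			/* Use it like ordinary storage.  */
  if (r_re_alloc (&example_data, 4096) == 0)	/* Grow it; the data may move.  */
    abort ();
  r_alloc_free (&example_data);			/* Free it; example_data becomes 0.  */
}
#endif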
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
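
/* Freeze and thaw nest: each r_alloc_freeze must be balanced by an
   r_alloc_thaw, and relocation stays disabled until the level drops
   back to zero.  A hypothetical pairing:

     r_alloc_freeze (8192);	make room, then hold all blocs still
     ... code that keeps raw pointers into bloc data ...
     r_alloc_thaw ();		level 0 again; frozen-freed blocs reclaimed  */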
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */