/* Source: GNU Emacs repository, src/ralloc.c.  */
1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
4 This file is part of GNU Emacs.
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /* NOTES:
22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
23 rather than all of them. This means allowing for a possible
24 hole between the first bloc and the end of malloc storage. */
26 #ifdef emacs
28 #include <config.h>
29 #include "lisp.h" /* Needed for VALBITS. */
31 #undef NULL
33 /* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
36 #if 0 /* Arithmetic on void* is a GCC extension. */
37 #ifdef __STDC__
38 typedef void *POINTER;
39 #else
41 #ifdef HAVE_CONFIG_H
42 #include "config.h"
43 #endif
45 typedef char *POINTER;
47 #endif
48 #endif /* 0 */
50 /* Unconditionally use char * for this. */
51 typedef char *POINTER;
53 typedef unsigned long SIZE;
55 /* Declared in dispnew.c, this version doesn't screw up if regions
56 overlap. */
57 extern void safe_bcopy ();
59 #include "getpagesize.h"
61 #else /* Not emacs. */
63 #include <stddef.h>
65 typedef size_t SIZE;
66 typedef void *POINTER;
68 #include <unistd.h>
69 #include <malloc.h>
70 #include <string.h>
72 #define safe_bcopy(x, y, z) memmove (y, x, z)
74 #endif /* emacs. */
76 #define NIL ((POINTER) 0)
78 /* A flag to indicate whether we have initialized ralloc yet. For
79 Emacs's sake, please do not make this local to malloc_init; on some
80 machines, the dumping procedure makes all static variables
81 read-only. On these machines, the word static is #defined to be
82 the empty string, meaning that r_alloc_initialized becomes an
83 automatic variable, and loses its value each time Emacs is started up. */
84 static int r_alloc_initialized = 0;
86 static void r_alloc_init ();
88 /* Declarations for working with the malloc, ralloc, and system breaks. */
90 /* Function to set the real break value. */
91 static POINTER (*real_morecore) ();
93 /* The break value, as seen by malloc. */
94 static POINTER virtual_break_value;
96 /* The address of the end of the last data in use by ralloc,
97 including relocatable blocs as well as malloc data. */
98 static POINTER break_value;
100 /* This is the size of a page. We round memory requests to this boundary. */
101 static int page_size;
103 /* Whenever we get memory from the system, get this many extra bytes. This
104 must be a multiple of page_size. */
105 static int extra_bytes;
107 /* Macros for rounding. Note that rounding to any value is possible
108 by changing the definition of PAGE. */
109 #define PAGE (getpagesize ())
110 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
111 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
112 & ~(page_size - 1))
113 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
115 #define MEM_ALIGN sizeof(double)
116 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
117 & ~(MEM_ALIGN - 1))
119 /* Data structures of heaps and blocs. */
121 /* The relocatable objects, or blocs, and the malloc data
122 both reside within one or more heaps.
123 Each heap contains malloc data, running from `start' to `bloc_start',
124 and relocatable objects, running from `bloc_start' to `free'.
126 Relocatable objects may relocate within the same heap
127 or may move into another heap; the heaps themselves may grow
128 but they never move.
130 We try to make just one heap and make it larger as necessary.
131 But sometimes we can't do that, because we can't get contiguous
132 space to add onto the heap. When that happens, we start a new heap. */
134 typedef struct heap
136 struct heap *next;
137 struct heap *prev;
138 /* Start of memory range of this heap. */
139 POINTER start;
140 /* End of memory range of this heap. */
141 POINTER end;
142 /* Start of relocatable data in this heap. */
143 POINTER bloc_start;
144 /* Start of unused space in this heap. */
145 POINTER free;
146 /* First bloc in this heap. */
147 struct bp *first_bloc;
148 /* Last bloc in this heap. */
149 struct bp *last_bloc;
150 } *heap_ptr;
152 #define NIL_HEAP ((heap_ptr) 0)
153 #define HEAP_PTR_SIZE (sizeof (struct heap))
155 /* This is the first heap object.
156 If we need additional heap objects, each one resides at the beginning of
157 the space it covers. */
158 static struct heap heap_base;
160 /* Head and tail of the list of heaps. */
161 static heap_ptr first_heap, last_heap;
163 /* These structures are allocated in the malloc arena.
164 The linked list is kept in order of increasing '.data' members.
165 The data blocks abut each other; if b->next is non-nil, then
166 b->data + b->size == b->next->data. */
167 typedef struct bp
169 struct bp *next;
170 struct bp *prev;
171 POINTER *variable;
172 POINTER data;
173 SIZE size;
174 POINTER new_data; /* tmporarily used for relocation */
175 /* Heap this bloc is in. */
176 struct heap *heap;
177 } *bloc_ptr;
179 #define NIL_BLOC ((bloc_ptr) 0)
180 #define BLOC_PTR_SIZE (sizeof (struct bp))
182 /* Head and tail of the list of relocatable blocs. */
183 static bloc_ptr first_bloc, last_bloc;
186 /* Functions to get and return memory from the system. */
188 /* Find the heap that ADDRESS falls within. */
190 static heap_ptr
191 find_heap (address)
192 POINTER address;
194 heap_ptr heap;
196 for (heap = last_heap; heap; heap = heap->prev)
198 if (heap->start <= address && address <= heap->end)
199 return heap;
202 return NIL_HEAP;
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep
   track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
223 static POINTER
224 obtain (address, size)
225 POINTER address;
226 SIZE size;
228 heap_ptr heap;
229 SIZE already_available;
231 /* Find the heap that ADDRESS falls within. */
232 for (heap = last_heap; heap; heap = heap->prev)
234 if (heap->start <= address && address <= heap->end)
235 break;
238 if (! heap)
239 abort ();
241 /* If we can't fit SIZE bytes in that heap,
242 try successive later heaps. */
243 while (heap && address + size > heap->end)
245 heap = heap->next;
246 if (heap == NIL_HEAP)
247 break;
248 address = heap->bloc_start;
251 /* If we can't fit them within any existing heap,
252 get more space. */
253 if (heap == NIL_HEAP)
255 POINTER new = (*real_morecore)(0);
256 SIZE get;
258 already_available = (char *)last_heap->end - (char *)address;
260 if (new != last_heap->end)
262 /* Someone else called sbrk. Make a new heap. */
264 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
265 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
267 if ((*real_morecore) (bloc_start - new) != new)
268 return 0;
270 new_heap->start = new;
271 new_heap->end = bloc_start;
272 new_heap->bloc_start = bloc_start;
273 new_heap->free = bloc_start;
274 new_heap->next = NIL_HEAP;
275 new_heap->prev = last_heap;
276 new_heap->first_bloc = NIL_BLOC;
277 new_heap->last_bloc = NIL_BLOC;
278 last_heap->next = new_heap;
279 last_heap = new_heap;
281 address = bloc_start;
282 already_available = 0;
285 /* Add space to the last heap (which we may have just created).
286 Get some extra, so we can come here less often. */
288 get = size + extra_bytes - already_available;
289 get = (char *) ROUNDUP ((char *)last_heap->end + get)
290 - (char *) last_heap->end;
292 if ((*real_morecore) (get) != last_heap->end)
293 return 0;
295 last_heap->end += get;
298 return address;
301 /* Return unused heap space to the system
302 if there is a lot of unused space now.
303 This can make the last heap smaller;
304 it can also eliminate the last heap entirely. */
306 static void
307 relinquish ()
309 register heap_ptr h;
310 int excess = 0;
312 /* Add the amount of space beyond break_value
313 in all heaps which have extend beyond break_value at all. */
315 for (h = last_heap; h && break_value < h->end; h = h->prev)
317 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
318 ? h->bloc_start : break_value);
321 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
323 /* Keep extra_bytes worth of empty space.
324 And don't free anything unless we can free at least extra_bytes. */
325 excess -= extra_bytes;
327 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
329 /* This heap should have no blocs in it. */
330 if (last_heap->first_bloc != NIL_BLOC
331 || last_heap->last_bloc != NIL_BLOC)
332 abort ();
334 /* Return the last heap, with its header, to the system. */
335 excess = (char *)last_heap->end - (char *)last_heap->start;
336 last_heap = last_heap->prev;
337 last_heap->next = NIL_HEAP;
339 else
341 excess = (char *) last_heap->end
342 - (char *) ROUNDUP ((char *)last_heap->end - excess);
343 last_heap->end -= excess;
346 if ((*real_morecore) (- excess) == 0)
347 abort ();
351 /* The meat - allocating, freeing, and relocating blocs. */
353 /* Find the bloc referenced by the address in PTR. Returns a pointer
354 to that block. */
356 static bloc_ptr
357 find_bloc (ptr)
358 POINTER *ptr;
360 register bloc_ptr p = first_bloc;
362 while (p != NIL_BLOC)
364 if (p->variable == ptr && p->data == *ptr)
365 return p;
367 p = p->next;
370 return p;
373 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
374 Returns a pointer to the new bloc, or zero if we couldn't allocate
375 memory for the new block. */
377 static bloc_ptr
378 get_bloc (size)
379 SIZE size;
381 register bloc_ptr new_bloc;
382 register heap_ptr heap;
384 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
385 || ! (new_bloc->data = obtain (break_value, size)))
387 if (new_bloc)
388 free (new_bloc);
390 return 0;
393 break_value = new_bloc->data + size;
395 new_bloc->size = size;
396 new_bloc->next = NIL_BLOC;
397 new_bloc->variable = (POINTER *) NIL;
398 new_bloc->new_data = 0;
400 /* Record in the heap that this space is in use. */
401 heap = find_heap (new_bloc->data);
402 heap->free = break_value;
404 /* Maintain the correspondence between heaps and blocs. */
405 new_bloc->heap = heap;
406 heap->last_bloc = new_bloc;
407 if (heap->first_bloc == NIL_BLOC)
408 heap->first_bloc = new_bloc;
410 /* Put this bloc on the doubly-linked list of blocs. */
411 if (first_bloc)
413 new_bloc->prev = last_bloc;
414 last_bloc->next = new_bloc;
415 last_bloc = new_bloc;
417 else
419 first_bloc = last_bloc = new_bloc;
420 new_bloc->prev = NIL_BLOC;
423 return new_bloc;
426 /* Calculate new locations of blocs in the list beginning with BLOC,
427 relocating it to start at ADDRESS, in heap HEAP. If enough space is
428 not presently available in our reserve, call obtain for
429 more space.
431 Store the new location of each bloc in its new_data field.
432 Do not touch the contents of blocs or break_value. */
434 static int
435 relocate_blocs (bloc, heap, address)
436 bloc_ptr bloc;
437 heap_ptr heap;
438 POINTER address;
440 register bloc_ptr b = bloc;
442 while (b)
444 /* If bloc B won't fit within HEAP,
445 move to the next heap and try again. */
446 while (heap && address + b->size > heap->end)
448 heap = heap->next;
449 if (heap == NIL_HEAP)
450 break;
451 address = heap->bloc_start;
454 /* If BLOC won't fit in any heap,
455 get enough new space to hold BLOC and all following blocs. */
456 if (heap == NIL_HEAP)
458 register bloc_ptr tb = b;
459 register SIZE s = 0;
461 /* Add up the size of all the following blocs. */
462 while (tb != NIL_BLOC)
464 s += tb->size;
465 tb = tb->next;
468 /* Get that space. */
469 address = obtain (address, s);
470 if (address == 0)
471 return 0;
473 heap = last_heap;
476 /* Record the new address of this bloc
477 and update where the next bloc can start. */
478 b->new_data = address;
479 address += b->size;
480 b = b->next;
483 return 1;
486 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
487 This is necessary if we put the memory of space of BLOC
488 before that of BEFORE. */
490 static void
491 reorder_bloc (bloc, before)
492 bloc_ptr bloc, before;
494 bloc_ptr prev, next;
496 /* Splice BLOC out from where it is. */
497 prev = bloc->prev;
498 next = bloc->next;
500 if (prev)
501 prev->next = next;
502 if (next)
503 next->prev = prev;
505 /* Splice it in before BEFORE. */
506 prev = before->prev;
508 if (prev)
509 prev->next = bloc;
510 bloc->prev = prev;
512 before->prev = bloc;
513 bloc->next = before;
516 /* Update the records of which heaps contain which blocs, starting
517 with heap HEAP and bloc BLOC. */
519 static void
520 update_heap_bloc_correspondence (bloc, heap)
521 bloc_ptr bloc;
522 heap_ptr heap;
524 register bloc_ptr b;
526 /* Initialize HEAP's status to reflect blocs before BLOC. */
527 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
529 /* The previous bloc is in HEAP. */
530 heap->last_bloc = bloc->prev;
531 heap->free = bloc->prev->data + bloc->prev->size;
533 else
535 /* HEAP contains no blocs before BLOC. */
536 heap->first_bloc = NIL_BLOC;
537 heap->last_bloc = NIL_BLOC;
538 heap->free = heap->bloc_start;
541 /* Advance through blocs one by one. */
542 for (b = bloc; b != NIL_BLOC; b = b->next)
544 /* Advance through heaps, marking them empty,
545 till we get to the one that B is in. */
546 while (heap)
548 if (heap->bloc_start <= b->data && b->data <= heap->end)
549 break;
550 heap = heap->next;
551 /* We know HEAP is not null now,
552 because there has to be space for bloc B. */
553 heap->first_bloc = NIL_BLOC;
554 heap->last_bloc = NIL_BLOC;
555 heap->free = heap->bloc_start;
558 /* Update HEAP's status for bloc B. */
559 heap->free = b->data + b->size;
560 heap->last_bloc = b;
561 if (heap->first_bloc == NIL_BLOC)
562 heap->first_bloc = b;
564 /* Record that B is in HEAP. */
565 b->heap = heap;
568 /* If there are any remaining heaps and no blocs left,
569 mark those heaps as empty. */
570 heap = heap->next;
571 while (heap)
573 heap->first_bloc = NIL_BLOC;
574 heap->last_bloc = NIL_BLOC;
575 heap->free = heap->bloc_start;
576 heap = heap->next;
580 /* Resize BLOC to SIZE bytes. This relocates the blocs
581 that come after BLOC in memory. */
583 static int
584 resize_bloc (bloc, size)
585 bloc_ptr bloc;
586 SIZE size;
588 register bloc_ptr b;
589 heap_ptr heap;
590 POINTER address;
591 SIZE old_size;
593 if (bloc == NIL_BLOC || size == bloc->size)
594 return 1;
596 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
598 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
599 break;
602 if (heap == NIL_HEAP)
603 abort ();
605 old_size = bloc->size;
606 bloc->size = size;
608 /* Note that bloc could be moved into the previous heap. */
609 address = (bloc->prev ? bloc->prev->data + bloc->prev->size
610 : first_heap->bloc_start);
611 while (heap)
613 if (heap->bloc_start <= address && address <= heap->end)
614 break;
615 heap = heap->prev;
618 if (! relocate_blocs (bloc, heap, address))
620 bloc->size = old_size;
621 return 0;
624 if (size > old_size)
626 for (b = last_bloc; b != bloc; b = b->prev)
628 safe_bcopy (b->data, b->new_data, b->size);
629 *b->variable = b->data = b->new_data;
631 safe_bcopy (bloc->data, bloc->new_data, old_size);
632 bzero (bloc->new_data + old_size, size - old_size);
633 *bloc->variable = bloc->data = bloc->new_data;
635 else
637 for (b = bloc; b != NIL_BLOC; b = b->next)
639 safe_bcopy (b->data, b->new_data, b->size);
640 *b->variable = b->data = b->new_data;
644 update_heap_bloc_correspondence (bloc, heap);
646 break_value = (last_bloc ? last_bloc->data + last_bloc->size
647 : first_heap->bloc_start);
648 return 1;
651 /* Free BLOC from the chain of blocs, relocating any blocs above it.
652 This may return space to the system. */
654 static void
655 free_bloc (bloc)
656 bloc_ptr bloc;
658 heap_ptr heap = bloc->heap;
660 resize_bloc (bloc, 0);
662 if (bloc == first_bloc && bloc == last_bloc)
664 first_bloc = last_bloc = NIL_BLOC;
666 else if (bloc == last_bloc)
668 last_bloc = bloc->prev;
669 last_bloc->next = NIL_BLOC;
671 else if (bloc == first_bloc)
673 first_bloc = bloc->next;
674 first_bloc->prev = NIL_BLOC;
676 else
678 bloc->next->prev = bloc->prev;
679 bloc->prev->next = bloc->next;
682 /* Update the records of which blocs are in HEAP. */
683 if (heap->first_bloc == bloc)
685 if (bloc->next->heap == heap)
686 heap->first_bloc = bloc->next;
687 else
688 heap->first_bloc = heap->last_bloc = NIL_BLOC;
690 if (heap->last_bloc == bloc)
692 if (bloc->prev->heap == heap)
693 heap->last_bloc = bloc->prev;
694 else
695 heap->first_bloc = heap->last_bloc = NIL_BLOC;
698 relinquish ();
699 free (bloc);
702 /* Interface routines. */
704 static int use_relocatable_buffers;
705 static int r_alloc_freeze_level;
707 /* Obtain SIZE bytes of storage from the free pool, or the system, as
708 necessary. If relocatable blocs are in use, this means relocating
709 them. This function gets plugged into the GNU malloc's __morecore
710 hook.
712 We provide hysteresis, never relocating by less than extra_bytes.
714 If we're out of memory, we should return zero, to imitate the other
715 __morecore hook values - in particular, __default_morecore in the
716 GNU malloc package. */
718 POINTER
719 r_alloc_sbrk (size)
720 long size;
722 register bloc_ptr b;
723 POINTER address;
725 if (! use_relocatable_buffers)
726 return (*real_morecore) (size);
728 if (size == 0)
729 return virtual_break_value;
731 if (size > 0)
733 /* Allocate a page-aligned space. GNU malloc would reclaim an
734 extra space if we passed an unaligned one. But we could
735 not always find a space which is contiguos to the previous. */
736 POINTER new_bloc_start;
737 heap_ptr h = first_heap;
738 SIZE get = ROUNDUP (size);
740 address = (POINTER) ROUNDUP (virtual_break_value);
742 /* Search the list upward for a heap which is large enough. */
743 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
745 h = h->next;
746 if (h == NIL_HEAP)
747 break;
748 address = (POINTER) ROUNDUP (h->start);
751 /* If not found, obtain more space. */
752 if (h == NIL_HEAP)
754 get += extra_bytes + page_size;
756 if (r_alloc_freeze_level > 0 || ! obtain (address, get))
757 return 0;
759 if (first_heap == last_heap)
760 address = (POINTER) ROUNDUP (virtual_break_value);
761 else
762 address = (POINTER) ROUNDUP (last_heap->start);
763 h = last_heap;
766 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
768 if (first_heap->bloc_start < new_bloc_start)
770 /* Move all blocs upward. */
771 if (r_alloc_freeze_level > 0
772 || ! relocate_blocs (first_bloc, h, new_bloc_start))
773 return 0;
775 /* Note that (POINTER)(h+1) <= new_bloc_start since
776 get >= page_size, so the following does not destroy the heap
777 header. */
778 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
780 safe_bcopy (b->data, b->new_data, b->size);
781 *b->variable = b->data = b->new_data;
784 h->bloc_start = new_bloc_start;
786 update_heap_bloc_correspondence (first_bloc, h);
789 if (h != first_heap)
791 /* Give up managing heaps below the one the new
792 virtual_break_value points to. */
793 first_heap->prev = NIL_HEAP;
794 first_heap->next = h->next;
795 first_heap->start = h->start;
796 first_heap->end = h->end;
797 first_heap->free = h->free;
798 first_heap->first_bloc = h->first_bloc;
799 first_heap->last_bloc = h->last_bloc;
800 first_heap->bloc_start = h->bloc_start;
802 if (first_heap->next)
803 first_heap->next->prev = first_heap;
804 else
805 last_heap = first_heap;
808 bzero (address, size);
810 else /* size < 0 */
812 SIZE excess = (char *)first_heap->bloc_start
813 - ((char *)virtual_break_value + size);
815 address = virtual_break_value;
817 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
819 excess -= extra_bytes;
820 first_heap->bloc_start
821 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
823 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
825 for (b = first_bloc; b != NIL_BLOC; b = b->next)
827 safe_bcopy (b->data, b->new_data, b->size);
828 *b->variable = b->data = b->new_data;
832 if ((char *)virtual_break_value + size < (char *)first_heap->start)
834 /* We found an additional space below the first heap */
835 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
839 virtual_break_value = (POINTER) ((char *)address + size);
840 break_value = (last_bloc
841 ? last_bloc->data + last_bloc->size
842 : first_heap->bloc_start);
843 if (size < 0)
844 relinquish ();
846 return address;
849 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
850 the data is returned in *PTR. PTR is thus the address of some variable
851 which will use the data area.
853 If we can't allocate the necessary memory, set *PTR to zero, and
854 return zero. */
856 POINTER
857 r_alloc (ptr, size)
858 POINTER *ptr;
859 SIZE size;
861 register bloc_ptr new_bloc;
863 if (! r_alloc_initialized)
864 r_alloc_init ();
866 new_bloc = get_bloc (MEM_ROUNDUP (size));
867 if (new_bloc)
869 new_bloc->variable = ptr;
870 *ptr = new_bloc->data;
872 else
873 *ptr = 0;
875 return *ptr;
878 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
879 Store 0 in *PTR to show there's no block allocated. */
881 void
882 r_alloc_free (ptr)
883 register POINTER *ptr;
885 register bloc_ptr dead_bloc;
887 dead_bloc = find_bloc (ptr);
888 if (dead_bloc == NIL_BLOC)
889 abort ();
891 free_bloc (dead_bloc);
892 *ptr = 0;
895 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
896 Do this by shifting all blocks above this one up in memory, unless
897 SIZE is less than or equal to the current bloc size, in which case
898 do nothing.
900 Change *PTR to reflect the new bloc, and return this value.
902 If more memory cannot be allocated, then leave *PTR unchanged, and
903 return zero. */
905 POINTER
906 r_re_alloc (ptr, size)
907 POINTER *ptr;
908 SIZE size;
910 register bloc_ptr bloc;
912 bloc = find_bloc (ptr);
913 if (bloc == NIL_BLOC)
914 abort ();
916 if (size <= bloc->size)
917 /* Wouldn't it be useful to actually resize the bloc here? */
918 return *ptr;
920 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
921 return 0;
923 return *ptr;
926 /* Disable relocations, after making room for at least SIZE bytes
927 of non-relocatable heap if possible. The relocatable blocs are
928 guaranteed to hold still until thawed, even if this means that
929 malloc must return a null pointer. */
931 void
932 r_alloc_freeze (size)
933 long size;
935 /* If already frozen, we can't make any more room, so don't try. */
936 if (r_alloc_freeze_level > 0)
937 size = 0;
938 /* If we can't get the amount requested, half is better than nothing. */
939 while (size > 0 && r_alloc_sbrk (size) == 0)
940 size /= 2;
941 ++r_alloc_freeze_level;
942 if (size > 0)
943 r_alloc_sbrk (-size);
946 void
947 r_alloc_thaw ()
949 if (--r_alloc_freeze_level < 0)
950 abort ();
953 /* The hook `malloc' uses for the function which gets more space
954 from the system. */
955 extern POINTER (*__morecore) ();
957 /* Initialize various things for memory allocation. */
959 static void
960 r_alloc_init ()
962 if (r_alloc_initialized)
963 return;
965 r_alloc_initialized = 1;
966 real_morecore = __morecore;
967 __morecore = r_alloc_sbrk;
969 first_heap = last_heap = &heap_base;
970 first_heap->next = first_heap->prev = NIL_HEAP;
971 first_heap->start = first_heap->bloc_start
972 = virtual_break_value = break_value = (*real_morecore) (0);
973 if (break_value == NIL)
974 abort ();
976 page_size = PAGE;
977 extra_bytes = ROUNDUP (50000);
979 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
981 /* The extra call to real_morecore guarantees that the end of the
982 address space is a multiple of page_size, even if page_size is
983 not really the page size of the system running the binary in
984 which page_size is stored. This allows a binary to be built on a
985 system with one page size and run on a system with a smaller page
986 size. */
987 (*real_morecore) (first_heap->end - first_heap->start);
989 /* Clear the rest of the last page; this memory is in our address space
990 even though it is after the sbrk value. */
991 /* Doubly true, with the additional call that explicitly adds the
992 rest of that page to the address space. */
993 bzero (first_heap->start, first_heap->end - first_heap->start);
994 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
995 use_relocatable_buffers = 1;
997 #ifdef DEBUG
998 #include <assert.h>
1001 r_alloc_check ()
1003 int found = 0;
1004 heap_ptr h, ph = 0;
1005 bloc_ptr b, pb = 0;
1007 if (!r_alloc_initialized)
1008 return;
1010 assert (first_heap);
1011 assert (last_heap->end <= (POINTER) sbrk (0));
1012 assert ((POINTER) first_heap < first_heap->start);
1013 assert (first_heap->start <= virtual_break_value);
1014 assert (virtual_break_value <= first_heap->end);
1016 for (h = first_heap; h; h = h->next)
1018 assert (h->prev == ph);
1019 assert ((POINTER) ROUNDUP (h->end) == h->end);
1020 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
1021 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
1022 assert (h->start <= h->bloc_start && h->bloc_start <= h->end);
1024 if (ph)
1026 assert (ph->end < h->start);
1027 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
1030 if (h->bloc_start <= break_value && break_value <= h->end)
1031 found = 1;
1033 ph = h;
1036 assert (found);
1037 assert (last_heap == ph);
1039 for (b = first_bloc; b; b = b->next)
1041 assert (b->prev == pb);
1042 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
1043 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
1045 ph = 0;
1046 for (h = first_heap; h; h = h->next)
1048 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
1049 break;
1050 ph = h;
1053 assert (h);
1055 if (pb && pb->data + pb->size != b->data)
1057 assert (ph && b->data == h->bloc_start);
1058 while (ph)
1060 if (ph->bloc_start <= pb->data
1061 && pb->data + pb->size <= ph->end)
1063 assert (pb->data + pb->size + b->size > ph->end);
1064 break;
1066 else
1068 assert (ph->bloc_start + b->size > ph->end);
1070 ph = ph->prev;
1073 pb = b;
1076 assert (last_bloc == pb);
1078 if (last_bloc)
1079 assert (last_bloc->data + last_bloc->size == break_value);
1080 else
1081 assert (first_heap->bloc_start == break_value);
1083 #endif /* DEBUG */