(imenu--cleanup): Set alist to its default just once, at the beginning.
[emacs.git] / src / ralloc.c
blobd5248f2cb0ad339d6abbfc6a2dbe7b631b90804b
1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
4 This file is part of GNU Emacs.
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /* NOTES:
22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
23 rather than all of them. This means allowing for a possible
24 hole between the first bloc and the end of malloc storage. */
26 #ifdef emacs
28 #include <config.h>
29 #include "lisp.h" /* Needed for VALBITS. */
31 #undef NULL
33 /* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
36 #if 0 /* Arithmetic on void* is a GCC extension. */
37 #ifdef __STDC__
38 typedef void *POINTER;
39 #else
41 #ifdef HAVE_CONFIG_H
42 #include "config.h"
43 #endif
45 typedef char *POINTER;
47 #endif
48 #endif /* 0 */
50 /* Unconditionally use char * for this. */
51 typedef char *POINTER;
53 typedef unsigned long SIZE;
55 /* Declared in dispnew.c, this version doesn't screw up if regions
56 overlap. */
57 extern void safe_bcopy ();
59 extern int __malloc_extra_blocks;
61 #else /* not emacs */
63 #include <stddef.h>
65 typedef size_t SIZE;
66 typedef void *POINTER;
68 #include <unistd.h>
69 #include <malloc.h>
70 #include <string.h>
72 #define safe_bcopy(x, y, z) memmove (y, x, z)
73 #define bzero(x, len) memset (x, 0, len)
75 #endif /* not emacs */
77 #include "getpagesize.h"
79 #define NIL ((POINTER) 0)
81 /* A flag to indicate whether we have initialized ralloc yet. For
82 Emacs's sake, please do not make this local to malloc_init; on some
83 machines, the dumping procedure makes all static variables
84 read-only. On these machines, the word static is #defined to be
85 the empty string, meaning that r_alloc_initialized becomes an
86 automatic variable, and loses its value each time Emacs is started up. */
87 static int r_alloc_initialized = 0;
89 static void r_alloc_init ();
91 /* Declarations for working with the malloc, ralloc, and system breaks. */
93 /* Function to set the real break value. */
94 static POINTER (*real_morecore) ();
96 /* The break value, as seen by malloc. */
97 static POINTER virtual_break_value;
99 /* The address of the end of the last data in use by ralloc,
100 including relocatable blocs as well as malloc data. */
101 static POINTER break_value;
103 /* This is the size of a page. We round memory requests to this boundary. */
104 static int page_size;
106 /* Whenever we get memory from the system, get this many extra bytes. This
107 must be a multiple of page_size. */
108 static int extra_bytes;
110 /* Macros for rounding. Note that rounding to any value is possible
111 by changing the definition of PAGE. */
112 #define PAGE (getpagesize ())
113 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
114 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
115 & ~(page_size - 1))
116 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
118 #define MEM_ALIGN sizeof(double)
119 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
120 & ~(MEM_ALIGN - 1))
122 /* Data structures of heaps and blocs. */
124 /* The relocatable objects, or blocs, and the malloc data
125 both reside within one or more heaps.
126 Each heap contains malloc data, running from `start' to `bloc_start',
127 and relocatable objects, running from `bloc_start' to `free'.
129 Relocatable objects may relocate within the same heap
130 or may move into another heap; the heaps themselves may grow
131 but they never move.
   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
137 typedef struct heap
139 struct heap *next;
140 struct heap *prev;
141 /* Start of memory range of this heap. */
142 POINTER start;
143 /* End of memory range of this heap. */
144 POINTER end;
145 /* Start of relocatable data in this heap. */
146 POINTER bloc_start;
147 /* Start of unused space in this heap. */
148 POINTER free;
149 /* First bloc in this heap. */
150 struct bp *first_bloc;
151 /* Last bloc in this heap. */
152 struct bp *last_bloc;
153 } *heap_ptr;
155 #define NIL_HEAP ((heap_ptr) 0)
156 #define HEAP_PTR_SIZE (sizeof (struct heap))
158 /* This is the first heap object.
159 If we need additional heap objects, each one resides at the beginning of
160 the space it covers. */
161 static struct heap heap_base;
163 /* Head and tail of the list of heaps. */
164 static heap_ptr first_heap, last_heap;
166 /* These structures are allocated in the malloc arena.
167 The linked list is kept in order of increasing '.data' members.
168 The data blocks abut each other; if b->next is non-nil, then
169 b->data + b->size == b->next->data. */
170 typedef struct bp
172 struct bp *next;
173 struct bp *prev;
174 POINTER *variable;
175 POINTER data;
176 SIZE size;
177 POINTER new_data; /* tmporarily used for relocation */
178 /* Heap this bloc is in. */
179 struct heap *heap;
180 } *bloc_ptr;
182 #define NIL_BLOC ((bloc_ptr) 0)
183 #define BLOC_PTR_SIZE (sizeof (struct bp))
185 /* Head and tail of the list of relocatable blocs. */
186 static bloc_ptr first_bloc, last_bloc;
189 /* Functions to get and return memory from the system. */
191 /* Find the heap that ADDRESS falls within. */
193 static heap_ptr
194 find_heap (address)
195 POINTER address;
197 heap_ptr heap;
199 for (heap = last_heap; heap; heap = heap->prev)
201 if (heap->start <= address && address <= heap->end)
202 return heap;
205 return NIL_HEAP;
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
226 static POINTER
227 obtain (address, size)
228 POINTER address;
229 SIZE size;
231 heap_ptr heap;
232 SIZE already_available;
234 /* Find the heap that ADDRESS falls within. */
235 for (heap = last_heap; heap; heap = heap->prev)
237 if (heap->start <= address && address <= heap->end)
238 break;
241 if (! heap)
242 abort ();
244 /* If we can't fit SIZE bytes in that heap,
245 try successive later heaps. */
246 while (heap && address + size > heap->end)
248 heap = heap->next;
249 if (heap == NIL_HEAP)
250 break;
251 address = heap->bloc_start;
254 /* If we can't fit them within any existing heap,
255 get more space. */
256 if (heap == NIL_HEAP)
258 POINTER new = (*real_morecore)(0);
259 SIZE get;
261 already_available = (char *)last_heap->end - (char *)address;
263 if (new != last_heap->end)
265 /* Someone else called sbrk. Make a new heap. */
267 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
268 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
270 if ((*real_morecore) (bloc_start - new) != new)
271 return 0;
273 new_heap->start = new;
274 new_heap->end = bloc_start;
275 new_heap->bloc_start = bloc_start;
276 new_heap->free = bloc_start;
277 new_heap->next = NIL_HEAP;
278 new_heap->prev = last_heap;
279 new_heap->first_bloc = NIL_BLOC;
280 new_heap->last_bloc = NIL_BLOC;
281 last_heap->next = new_heap;
282 last_heap = new_heap;
284 address = bloc_start;
285 already_available = 0;
288 /* Add space to the last heap (which we may have just created).
289 Get some extra, so we can come here less often. */
291 get = size + extra_bytes - already_available;
292 get = (char *) ROUNDUP ((char *)last_heap->end + get)
293 - (char *) last_heap->end;
295 if ((*real_morecore) (get) != last_heap->end)
296 return 0;
298 last_heap->end += get;
301 return address;
304 /* Return unused heap space to the system
305 if there is a lot of unused space now.
306 This can make the last heap smaller;
307 it can also eliminate the last heap entirely. */
309 static void
310 relinquish ()
312 register heap_ptr h;
313 int excess = 0;
315 /* Add the amount of space beyond break_value
316 in all heaps which have extend beyond break_value at all. */
318 for (h = last_heap; h && break_value < h->end; h = h->prev)
320 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
321 ? h->bloc_start : break_value);
324 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
326 /* Keep extra_bytes worth of empty space.
327 And don't free anything unless we can free at least extra_bytes. */
328 excess -= extra_bytes;
330 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
332 /* This heap should have no blocs in it. */
333 if (last_heap->first_bloc != NIL_BLOC
334 || last_heap->last_bloc != NIL_BLOC)
335 abort ();
337 /* Return the last heap, with its header, to the system. */
338 excess = (char *)last_heap->end - (char *)last_heap->start;
339 last_heap = last_heap->prev;
340 last_heap->next = NIL_HEAP;
342 else
344 excess = (char *) last_heap->end
345 - (char *) ROUNDUP ((char *)last_heap->end - excess);
346 last_heap->end -= excess;
349 if ((*real_morecore) (- excess) == 0)
350 abort ();
354 /* Return the total size in use by relocating allocator,
355 above where malloc gets space. */
357 long
358 r_alloc_size_in_use ()
360 return break_value - virtual_break_value;
363 /* The meat - allocating, freeing, and relocating blocs. */
365 /* Find the bloc referenced by the address in PTR. Returns a pointer
366 to that block. */
368 static bloc_ptr
369 find_bloc (ptr)
370 POINTER *ptr;
372 register bloc_ptr p = first_bloc;
374 while (p != NIL_BLOC)
376 if (p->variable == ptr && p->data == *ptr)
377 return p;
379 p = p->next;
382 return p;
385 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
386 Returns a pointer to the new bloc, or zero if we couldn't allocate
387 memory for the new block. */
389 static bloc_ptr
390 get_bloc (size)
391 SIZE size;
393 register bloc_ptr new_bloc;
394 register heap_ptr heap;
396 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
397 || ! (new_bloc->data = obtain (break_value, size)))
399 if (new_bloc)
400 free (new_bloc);
402 return 0;
405 break_value = new_bloc->data + size;
407 new_bloc->size = size;
408 new_bloc->next = NIL_BLOC;
409 new_bloc->variable = (POINTER *) NIL;
410 new_bloc->new_data = 0;
412 /* Record in the heap that this space is in use. */
413 heap = find_heap (new_bloc->data);
414 heap->free = break_value;
416 /* Maintain the correspondence between heaps and blocs. */
417 new_bloc->heap = heap;
418 heap->last_bloc = new_bloc;
419 if (heap->first_bloc == NIL_BLOC)
420 heap->first_bloc = new_bloc;
422 /* Put this bloc on the doubly-linked list of blocs. */
423 if (first_bloc)
425 new_bloc->prev = last_bloc;
426 last_bloc->next = new_bloc;
427 last_bloc = new_bloc;
429 else
431 first_bloc = last_bloc = new_bloc;
432 new_bloc->prev = NIL_BLOC;
435 return new_bloc;
438 /* Calculate new locations of blocs in the list beginning with BLOC,
439 relocating it to start at ADDRESS, in heap HEAP. If enough space is
440 not presently available in our reserve, call obtain for
441 more space.
443 Store the new location of each bloc in its new_data field.
444 Do not touch the contents of blocs or break_value. */
446 static int
447 relocate_blocs (bloc, heap, address)
448 bloc_ptr bloc;
449 heap_ptr heap;
450 POINTER address;
452 register bloc_ptr b = bloc;
454 while (b)
456 /* If bloc B won't fit within HEAP,
457 move to the next heap and try again. */
458 while (heap && address + b->size > heap->end)
460 heap = heap->next;
461 if (heap == NIL_HEAP)
462 break;
463 address = heap->bloc_start;
466 /* If BLOC won't fit in any heap,
467 get enough new space to hold BLOC and all following blocs. */
468 if (heap == NIL_HEAP)
470 register bloc_ptr tb = b;
471 register SIZE s = 0;
473 /* Add up the size of all the following blocs. */
474 while (tb != NIL_BLOC)
476 s += tb->size;
477 tb = tb->next;
480 /* Get that space. */
481 address = obtain (address, s);
482 if (address == 0)
483 return 0;
485 heap = last_heap;
488 /* Record the new address of this bloc
489 and update where the next bloc can start. */
490 b->new_data = address;
491 address += b->size;
492 b = b->next;
495 return 1;
498 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
499 This is necessary if we put the memory of space of BLOC
500 before that of BEFORE. */
502 static void
503 reorder_bloc (bloc, before)
504 bloc_ptr bloc, before;
506 bloc_ptr prev, next;
508 /* Splice BLOC out from where it is. */
509 prev = bloc->prev;
510 next = bloc->next;
512 if (prev)
513 prev->next = next;
514 if (next)
515 next->prev = prev;
517 /* Splice it in before BEFORE. */
518 prev = before->prev;
520 if (prev)
521 prev->next = bloc;
522 bloc->prev = prev;
524 before->prev = bloc;
525 bloc->next = before;
528 /* Update the records of which heaps contain which blocs, starting
529 with heap HEAP and bloc BLOC. */
531 static void
532 update_heap_bloc_correspondence (bloc, heap)
533 bloc_ptr bloc;
534 heap_ptr heap;
536 register bloc_ptr b;
538 /* Initialize HEAP's status to reflect blocs before BLOC. */
539 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
541 /* The previous bloc is in HEAP. */
542 heap->last_bloc = bloc->prev;
543 heap->free = bloc->prev->data + bloc->prev->size;
545 else
547 /* HEAP contains no blocs before BLOC. */
548 heap->first_bloc = NIL_BLOC;
549 heap->last_bloc = NIL_BLOC;
550 heap->free = heap->bloc_start;
553 /* Advance through blocs one by one. */
554 for (b = bloc; b != NIL_BLOC; b = b->next)
556 /* Advance through heaps, marking them empty,
557 till we get to the one that B is in. */
558 while (heap)
560 if (heap->bloc_start <= b->data && b->data <= heap->end)
561 break;
562 heap = heap->next;
563 /* We know HEAP is not null now,
564 because there has to be space for bloc B. */
565 heap->first_bloc = NIL_BLOC;
566 heap->last_bloc = NIL_BLOC;
567 heap->free = heap->bloc_start;
570 /* Update HEAP's status for bloc B. */
571 heap->free = b->data + b->size;
572 heap->last_bloc = b;
573 if (heap->first_bloc == NIL_BLOC)
574 heap->first_bloc = b;
576 /* Record that B is in HEAP. */
577 b->heap = heap;
580 /* If there are any remaining heaps and no blocs left,
581 mark those heaps as empty. */
582 heap = heap->next;
583 while (heap)
585 heap->first_bloc = NIL_BLOC;
586 heap->last_bloc = NIL_BLOC;
587 heap->free = heap->bloc_start;
588 heap = heap->next;
592 /* Resize BLOC to SIZE bytes. This relocates the blocs
593 that come after BLOC in memory. */
595 static int
596 resize_bloc (bloc, size)
597 bloc_ptr bloc;
598 SIZE size;
600 register bloc_ptr b;
601 heap_ptr heap;
602 POINTER address;
603 SIZE old_size;
605 if (bloc == NIL_BLOC || size == bloc->size)
606 return 1;
608 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
610 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
611 break;
614 if (heap == NIL_HEAP)
615 abort ();
617 old_size = bloc->size;
618 bloc->size = size;
620 /* Note that bloc could be moved into the previous heap. */
621 address = (bloc->prev ? bloc->prev->data + bloc->prev->size
622 : first_heap->bloc_start);
623 while (heap)
625 if (heap->bloc_start <= address && address <= heap->end)
626 break;
627 heap = heap->prev;
630 if (! relocate_blocs (bloc, heap, address))
632 bloc->size = old_size;
633 return 0;
636 if (size > old_size)
638 for (b = last_bloc; b != bloc; b = b->prev)
640 safe_bcopy (b->data, b->new_data, b->size);
641 *b->variable = b->data = b->new_data;
643 safe_bcopy (bloc->data, bloc->new_data, old_size);
644 bzero (bloc->new_data + old_size, size - old_size);
645 *bloc->variable = bloc->data = bloc->new_data;
647 else
649 for (b = bloc; b != NIL_BLOC; b = b->next)
651 safe_bcopy (b->data, b->new_data, b->size);
652 *b->variable = b->data = b->new_data;
656 update_heap_bloc_correspondence (bloc, heap);
658 break_value = (last_bloc ? last_bloc->data + last_bloc->size
659 : first_heap->bloc_start);
660 return 1;
663 /* Free BLOC from the chain of blocs, relocating any blocs above it.
664 This may return space to the system. */
666 static void
667 free_bloc (bloc)
668 bloc_ptr bloc;
670 heap_ptr heap = bloc->heap;
672 resize_bloc (bloc, 0);
674 if (bloc == first_bloc && bloc == last_bloc)
676 first_bloc = last_bloc = NIL_BLOC;
678 else if (bloc == last_bloc)
680 last_bloc = bloc->prev;
681 last_bloc->next = NIL_BLOC;
683 else if (bloc == first_bloc)
685 first_bloc = bloc->next;
686 first_bloc->prev = NIL_BLOC;
688 else
690 bloc->next->prev = bloc->prev;
691 bloc->prev->next = bloc->next;
694 /* Update the records of which blocs are in HEAP. */
695 if (heap->first_bloc == bloc)
697 if (bloc->next != 0 && bloc->next->heap == heap)
698 heap->first_bloc = bloc->next;
699 else
700 heap->first_bloc = heap->last_bloc = NIL_BLOC;
702 if (heap->last_bloc == bloc)
704 if (bloc->prev != 0 && bloc->prev->heap == heap)
705 heap->last_bloc = bloc->prev;
706 else
707 heap->first_bloc = heap->last_bloc = NIL_BLOC;
710 relinquish ();
711 free (bloc);
714 /* Interface routines. */
716 static int use_relocatable_buffers;
717 static int r_alloc_freeze_level;
719 /* Obtain SIZE bytes of storage from the free pool, or the system, as
720 necessary. If relocatable blocs are in use, this means relocating
721 them. This function gets plugged into the GNU malloc's __morecore
722 hook.
724 We provide hysteresis, never relocating by less than extra_bytes.
726 If we're out of memory, we should return zero, to imitate the other
727 __morecore hook values - in particular, __default_morecore in the
728 GNU malloc package. */
730 POINTER
731 r_alloc_sbrk (size)
732 long size;
734 register bloc_ptr b;
735 POINTER address;
737 if (! r_alloc_initialized)
738 r_alloc_init ();
740 if (! use_relocatable_buffers)
741 return (*real_morecore) (size);
743 if (size == 0)
744 return virtual_break_value;
746 if (size > 0)
748 /* Allocate a page-aligned space. GNU malloc would reclaim an
749 extra space if we passed an unaligned one. But we could
750 not always find a space which is contiguos to the previous. */
751 POINTER new_bloc_start;
752 heap_ptr h = first_heap;
753 SIZE get = ROUNDUP (size);
755 address = (POINTER) ROUNDUP (virtual_break_value);
757 /* Search the list upward for a heap which is large enough. */
758 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
760 h = h->next;
761 if (h == NIL_HEAP)
762 break;
763 address = (POINTER) ROUNDUP (h->start);
766 /* If not found, obtain more space. */
767 if (h == NIL_HEAP)
769 get += extra_bytes + page_size;
771 if (r_alloc_freeze_level > 0 || ! obtain (address, get))
772 return 0;
774 if (first_heap == last_heap)
775 address = (POINTER) ROUNDUP (virtual_break_value);
776 else
777 address = (POINTER) ROUNDUP (last_heap->start);
778 h = last_heap;
781 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
783 if (first_heap->bloc_start < new_bloc_start)
785 /* Move all blocs upward. */
786 if (r_alloc_freeze_level > 0
787 || ! relocate_blocs (first_bloc, h, new_bloc_start))
788 return 0;
790 /* Note that (POINTER)(h+1) <= new_bloc_start since
791 get >= page_size, so the following does not destroy the heap
792 header. */
793 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
795 safe_bcopy (b->data, b->new_data, b->size);
796 *b->variable = b->data = b->new_data;
799 h->bloc_start = new_bloc_start;
801 update_heap_bloc_correspondence (first_bloc, h);
804 if (h != first_heap)
806 /* Give up managing heaps below the one the new
807 virtual_break_value points to. */
808 first_heap->prev = NIL_HEAP;
809 first_heap->next = h->next;
810 first_heap->start = h->start;
811 first_heap->end = h->end;
812 first_heap->free = h->free;
813 first_heap->first_bloc = h->first_bloc;
814 first_heap->last_bloc = h->last_bloc;
815 first_heap->bloc_start = h->bloc_start;
817 if (first_heap->next)
818 first_heap->next->prev = first_heap;
819 else
820 last_heap = first_heap;
823 bzero (address, size);
825 else /* size < 0 */
827 SIZE excess = (char *)first_heap->bloc_start
828 - ((char *)virtual_break_value + size);
830 address = virtual_break_value;
832 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
834 excess -= extra_bytes;
835 first_heap->bloc_start
836 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
838 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
840 for (b = first_bloc; b != NIL_BLOC; b = b->next)
842 safe_bcopy (b->data, b->new_data, b->size);
843 *b->variable = b->data = b->new_data;
847 if ((char *)virtual_break_value + size < (char *)first_heap->start)
849 /* We found an additional space below the first heap */
850 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
854 virtual_break_value = (POINTER) ((char *)address + size);
855 break_value = (last_bloc
856 ? last_bloc->data + last_bloc->size
857 : first_heap->bloc_start);
858 if (size < 0)
859 relinquish ();
861 return address;
864 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
865 the data is returned in *PTR. PTR is thus the address of some variable
866 which will use the data area.
868 If we can't allocate the necessary memory, set *PTR to zero, and
869 return zero. */
871 POINTER
872 r_alloc (ptr, size)
873 POINTER *ptr;
874 SIZE size;
876 register bloc_ptr new_bloc;
878 if (! r_alloc_initialized)
879 r_alloc_init ();
881 new_bloc = get_bloc (MEM_ROUNDUP (size));
882 if (new_bloc)
884 new_bloc->variable = ptr;
885 *ptr = new_bloc->data;
887 else
888 *ptr = 0;
890 return *ptr;
893 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
894 Store 0 in *PTR to show there's no block allocated. */
896 void
897 r_alloc_free (ptr)
898 register POINTER *ptr;
900 register bloc_ptr dead_bloc;
902 if (! r_alloc_initialized)
903 r_alloc_init ();
905 dead_bloc = find_bloc (ptr);
906 if (dead_bloc == NIL_BLOC)
907 abort ();
909 free_bloc (dead_bloc);
910 *ptr = 0;
912 #ifdef emacs
913 refill_memory_reserve ();
914 #endif
917 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
918 Do this by shifting all blocks above this one up in memory, unless
919 SIZE is less than or equal to the current bloc size, in which case
920 do nothing.
922 Change *PTR to reflect the new bloc, and return this value.
924 If more memory cannot be allocated, then leave *PTR unchanged, and
925 return zero. */
927 POINTER
928 r_re_alloc (ptr, size)
929 POINTER *ptr;
930 SIZE size;
932 register bloc_ptr bloc;
934 if (! r_alloc_initialized)
935 r_alloc_init ();
937 bloc = find_bloc (ptr);
938 if (bloc == NIL_BLOC)
939 abort ();
941 if (size <= bloc->size)
942 /* Wouldn't it be useful to actually resize the bloc here? */
943 return *ptr;
945 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
946 return 0;
948 return *ptr;
951 /* Disable relocations, after making room for at least SIZE bytes
952 of non-relocatable heap if possible. The relocatable blocs are
953 guaranteed to hold still until thawed, even if this means that
954 malloc must return a null pointer. */
956 void
957 r_alloc_freeze (size)
958 long size;
960 if (! r_alloc_initialized)
961 r_alloc_init ();
963 /* If already frozen, we can't make any more room, so don't try. */
964 if (r_alloc_freeze_level > 0)
965 size = 0;
966 /* If we can't get the amount requested, half is better than nothing. */
967 while (size > 0 && r_alloc_sbrk (size) == 0)
968 size /= 2;
969 ++r_alloc_freeze_level;
970 if (size > 0)
971 r_alloc_sbrk (-size);
974 void
975 r_alloc_thaw ()
977 if (--r_alloc_freeze_level < 0)
978 abort ();
981 /* The hook `malloc' uses for the function which gets more space
982 from the system. */
983 extern POINTER (*__morecore) ();
985 /* Initialize various things for memory allocation. */
987 static void
988 r_alloc_init ()
990 if (r_alloc_initialized)
991 return;
993 r_alloc_initialized = 1;
994 real_morecore = __morecore;
995 __morecore = r_alloc_sbrk;
997 first_heap = last_heap = &heap_base;
998 first_heap->next = first_heap->prev = NIL_HEAP;
999 first_heap->start = first_heap->bloc_start
1000 = virtual_break_value = break_value = (*real_morecore) (0);
1001 if (break_value == NIL)
1002 abort ();
1004 page_size = PAGE;
1005 extra_bytes = ROUNDUP (50000);
1007 /* Give GNU malloc's morecore some hysteresis
1008 so that we move all the relocatable blocks much less often. */
1009 __malloc_extra_blocks = 64;
1011 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
1013 /* The extra call to real_morecore guarantees that the end of the
1014 address space is a multiple of page_size, even if page_size is
1015 not really the page size of the system running the binary in
1016 which page_size is stored. This allows a binary to be built on a
1017 system with one page size and run on a system with a smaller page
1018 size. */
1019 (*real_morecore) (first_heap->end - first_heap->start);
1021 /* Clear the rest of the last page; this memory is in our address space
1022 even though it is after the sbrk value. */
1023 /* Doubly true, with the additional call that explicitly adds the
1024 rest of that page to the address space. */
1025 bzero (first_heap->start, first_heap->end - first_heap->start);
1026 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
1027 use_relocatable_buffers = 1;
#ifdef DEBUG
#include <assert.h>

/* Consistency check over all heaps and blocs: list linkage, alignment,
   containment of each bloc in some heap, and the break_value invariant.  */
void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  /* Heaps after the first hold their own header at the front.  */
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      /* Find the heap containing this bloc's data.  */
      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  /* A gap between consecutive blocs is only legal at a heap
	     boundary, and only because the bloc didn't fit earlier.  */
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */