/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
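/* A minimal sketch (not part of the original file) of what the macros
   above compute, assuming page_size == 4096 and MEM_ALIGN == 8.  Kept
   under #if 0, like the disabled POINTER block above, so it is never
   compiled.  */
#if 0
#include <assert.h>

static void
rounding_examples ()
{
  assert (ROUNDUP (1) == 4096);		/* up to the next page boundary */
  assert (ROUNDUP (4096) == 4096);	/* page multiples are unchanged */
  assert (ALIGNED (8192));		/* multiple of the page size */
  assert (! ALIGNED (8193));
  assert (ROUND_TO_PAGE (8193) == 8192);	/* truncate downward */

  assert (MEM_ROUNDUP (13) == 16);	/* up to a multiple of 8 */
  assert (MEM_ROUNDUP (16) == 16);
}
#endif /* 0 */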
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
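/* Sketch of one heap's layout, per the description above (addresses
   increase to the right; not to scale):

   start             bloc_start                     free           end
     |-- malloc data ----|-- relocatable blocs --------|-- unused ----|
*/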
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;
#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  /* Heap this bloc is in.  */
  struct heap *heap;
} *bloc_ptr;
#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
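/* Sketch of the abutment invariant stated above struct bp (a diagram,
   not code; addresses increase to the right):

   first_bloc                                          last_bloc
       |                                                   |
   [ b0->data, b0->size ][ b1->data, b1->size ][ b2->data, b2->size ]
     where b1->data == b0->data + b0->size, and so on.  */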
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep
   track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
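#if 0
/* A minimal sketch (not in the original file) of the contract
   documented above obtain: since obtain does not record space as in
   use, two back-to-back calls with identical arguments typically
   return the identical address.  Never compiled.  */
static void
obtain_contract_example ()
{
  POINTER p1 = obtain (break_value, 1000);
  POINTER p2 = obtain (break_value, 1000);
  /* Typically p1 == p2 here; tracking what is in use is the
     caller's job.  */
}
#endif /* 0 */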
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      s += tb->size;
	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      address += b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      /* Growing: following blocs move to higher addresses, so copy
	 from the last bloc downward to avoid clobbering data that
	 has not been moved yet.  */
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
      safe_bcopy (bloc->data, bloc->new_data, old_size);
      bzero (bloc->new_data + old_size, size - old_size);
      *bloc->variable = bloc->data = bloc->new_data;
    }
  else
    {
      /* Shrinking: blocs move to lower addresses, so copy forward.  */
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

static int use_relocatable_buffers;
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (r_alloc_freeze_level > 0 || ! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* Move all blocs upward.  */
	  if (r_alloc_freeze_level > 0
	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}

      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size <= bloc->size)
    /* Wouldn't it be useful to actually resize the bloc here?  */
    return *ptr;

  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
    return 0;

  return *ptr;
}
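#if 0
/* A minimal usage sketch (not in the original file) of the interface
   above: callers register the address of a variable that will hold the
   bloc's data pointer, and must reread that variable after any
   allocation, since blocs can move.  `my_buffer' and `example_usage'
   are hypothetical names.  Never compiled.  */
static POINTER my_buffer;

static void
example_usage ()
{
  if (r_alloc (&my_buffer, 1024) == 0)
    abort ();			/* allocation failed */

  my_buffer[0] = 'x';		/* always access data via my_buffer */

  if (r_re_alloc (&my_buffer, 4096) == 0)
    abort ();			/* bloc is unchanged on failure */

  r_alloc_free (&my_buffer);	/* also stores 0 in my_buffer */
}
#endif /* 0 */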
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{
  if (--r_alloc_freeze_level < 0)
    abort ();
}
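#if 0
/* Sketch (not in the original file) of pairing freeze and thaw:
   between the two calls, raw pointers into relocatable data stay
   valid because no bloc will move, at the cost that malloc may fail
   instead.  `example_freeze' is a hypothetical name.  */
static void
example_freeze ()
{
  r_alloc_freeze (0);		/* no extra room requested */
  /* ... code that holds raw pointers into blocs ... */
  r_alloc_thaw ();
}
#endif /* 0 */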
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */