/* emacs.git: src/ralloc.c */
/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;
/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();
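/* For example (not in the original file), shifting a buffer down by
   one byte overlaps source and destination; a plain bcopy may corrupt
   such a copy, while safe_bcopy, like memmove, handles it:

     safe_bcopy (buf + 1, buf, len - 1);  */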
#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else
extern int __malloc_extra_blocks;
#endif

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
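/* A worked example of the rounding macros (not in the original file),
   assuming the common case of page_size == 4096 and MEM_ALIGN == 8:

     ROUNDUP (1)      => 4096
     ROUNDUP (4096)   => 4096
     ROUNDUP (4097)   => 8192
     MEM_ROUNDUP (13) => 16

   ALIGNED (addr) is nonzero exactly when addr is a multiple of
   page_size.  Both macros require the alignment to be a power of 2.  */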
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
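/* A sketch of one heap's layout under the description above (not in
   the original file; addresses increase to the right, and `free' may
   lie below `end' when the heap holds unused reserve):

   start              bloc_start                 free            end
     |   malloc data      |   relocatable blocs    |   unused     |
     +--------------------+------------------------+--------------+
*/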
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
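#if 0
/* A minimal sketch (not part of the original file) of checking the
   list invariant stated above: blocs are kept in increasing order of
   `data', and each bloc abuts its successor exactly.  */
static void
check_bloc_invariant ()
{
  bloc_ptr b;

  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    if (b->data + b->size != b->next->data)
      abort ();
}
#endif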
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;


/* Functions to get and return memory from the system.  */
/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.  It's the caller's responsibility to keep
   track of what space is in use.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

          if ((*real_morecore) (bloc_start - new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end += get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *) last_heap->end - excess);
          last_heap->end -= excess;
        }

      if ((*real_morecore) (- excess) == 0)
        abort ();
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}


/* The meat - allocating, freeing, and relocating blocs.  */
/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc, or NIL_BLOC if there is no such bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address += b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
             : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          safe_bcopy (bloc->data, bloc->new_data, old_size);
          bzero (bloc->new_data + old_size, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could
         not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
        - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found an additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
                 ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
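#if 0
/* A sketch (not part of the original file) of how this function is
   normally reached: r_alloc_init installs it as GNU malloc's
   __morecore hook, so it is malloc, not Emacs code, that calls it.  */
extern POINTER (*__morecore) ();

static void
example_install_hook ()
{
  __morecore = r_alloc_sbrk;	/* As done in r_alloc_init below.  */
}
#endif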
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
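#if 0
/* A usage sketch (not part of the original file, assuming the emacs
   build where POINTER is char *).  The caller passes the address of
   its own pointer variable; ralloc stores through that address again
   whenever the bloc is relocated, so the variable must remain valid
   for the bloc's whole lifetime.  */
static POINTER my_data;

static void
example_alloc_and_free ()
{
  if (r_alloc (&my_data, 1024) == 0)
    abort ();			/* Out of memory.  */

  my_data[0] = 'x';		/* Use the (relocatable) storage.  */

  r_alloc_free (&my_data);	/* Sets my_data to 0.  */
}
#endif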
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}
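#if 0
/* A sketch of growing a bloc (not part of the original file),
   continuing the usage sketch above.  On success, *PTR is updated to
   the new, possibly moved, data and also returned; on failure, *PTR
   is left unchanged and NIL comes back.  */
static void
example_grow ()
{
  extern POINTER my_data;	/* The hypothetical bloc from the sketch above.  */

  if (r_re_alloc (&my_data, 4096) == NIL)
    abort ();			/* my_data still points at the old bloc.  */
}
#endif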
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the
     resize and bcopy is done only once.  Afterwards, all unreferenced
     blocs are already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}
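#if 0
/* A usage sketch (not part of the original file): while the arena is
   frozen, blocs are guaranteed not to move, so raw pointers into
   relocatable data stay valid until the matching thaw.  */
static void
example_freeze ()
{
  r_alloc_freeze (0);		/* No extra non-relocatable room needed.  */
  /* ...safely hand out pointers into blocs here...  */
  r_alloc_thaw ();		/* Blocs freed while frozen are collected now.  */
}
#endif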
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}
#endif
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */