/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD	-2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif	/* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
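
/* For instance, assuming page_size == 4096 and MEM_ALIGN == 8:

     ROUNDUP (5000)     == 8192    ((5000 + 4095) & ~4095)
     ALIGNED (8192)     is true; ALIGNED (8200) is false
     MEM_ROUNDUP (13)   == 16      ((13 + 7) & ~7)

   Both roundings assume the boundary is a power of two.  */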
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif


/***********************************************************************
		     Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
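
/* Roughly, then, each heap is laid out like this, with addresses
   increasing to the right:

     start          bloc_start                free            end
       |  malloc data   |   relocatable blocs   |    unused    |

   (Any heap after the first also keeps its own `struct heap' header
   at `start'; see below.)  */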
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */
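
/* For example, with three blocs b1, b2, b3 in one heap:

     b1->data     b2->data     b3->data
        |<--size-->|<--size-->|<--size-->|

   b1->data + b1->size == b2->data, and likewise down the chain.  */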
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;


/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
    POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
    POINTER address;
    SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
    bloc_ptr bloc;
    heap_ptr heap;
    POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
    bloc_ptr bloc;
    SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
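
/* Roughly, when GNU malloc needs another page, it calls __morecore
   (i.e. this function) with SIZE == page_size: the blocs are copied
   upward by one page, first_heap->bloc_start and virtual_break_value
   advance by one page, and malloc receives the zeroed page that
   previously held the start of the relocatable data.  */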
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could
   be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
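
#if 0
/* A minimal usage sketch (illustrative, not compiled): the caller owns
   a POINTER variable; ralloc stores the bloc's address through it and
   rewrites it whenever the bloc is relocated, so the variable must be
   re-read after any call that may move blocs.  */
static void
example_usage ()
{
  POINTER text;

  r_alloc (&text, 4096);	/* text now points at 4096 usable bytes.  */
  r_re_alloc (&text, 8192);	/* May move the data and update text.  */
  r_alloc_free (&text);		/* Frees the bloc and zeroes text.  */
}
#endif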
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
    long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy are done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
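
#if 0
/* Sketch of the freeze protocol (illustrative, not compiled): while
   the arena is frozen, bloc data is guaranteed not to move, so a raw
   pointer into relocatable storage stays valid until the matching
   thaw.  */
static void
example_freeze ()
{
  POINTER buf;
  char *p;

  r_alloc (&buf, 1024);
  r_alloc_freeze (0);		/* Pin all blocs; reserve no extra room.  */
  p = (char *) buf;		/* Safe to hold across allocations now.  */
  /* ... use p ...  */
  r_alloc_thaw ();		/* Blocs may move again; re-fetch via buf.  */
}
#endif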
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */