(Rmail Display): Document rmail-redecode-body.
[emacs.git] / src / ralloc.c
blob2e9004baa2f4b83407d70276aa0b3bfd17d7f08c
1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.
4 This file is part of GNU Emacs.
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
21 /* NOTES:
23 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
24 rather than all of them. This means allowing for a possible
25 hole between the first bloc and the end of malloc storage. */
27 #ifdef emacs
29 #include <config.h>
30 #include "lisp.h" /* Needed for VALBITS. */
32 #ifdef HAVE_UNISTD_H
33 #include <unistd.h>
34 #endif
36 typedef POINTER_TYPE *POINTER;
37 typedef size_t SIZE;
39 /* Declared in dispnew.c, this version doesn't screw up if regions
40 overlap. */
42 extern void safe_bcopy ();
44 #ifdef DOUG_LEA_MALLOC
45 #define M_TOP_PAD -2
46 extern int mallopt ();
47 #else /* not DOUG_LEA_MALLOC */
48 #ifndef SYSTEM_MALLOC
49 extern size_t __malloc_extra_blocks;
50 #endif /* SYSTEM_MALLOC */
51 #endif /* not DOUG_LEA_MALLOC */
53 #else /* not emacs */
55 #include <stddef.h>
57 typedef size_t SIZE;
58 typedef void *POINTER;
60 #include <unistd.h>
61 #include <malloc.h>
63 #define safe_bcopy(x, y, z) memmove (y, x, z)
64 #define bzero(x, len) memset (x, 0, len)
66 #endif /* not emacs */
69 #include "getpagesize.h"
71 #define NIL ((POINTER) 0)
73 /* A flag to indicate whether we have initialized ralloc yet. For
74 Emacs's sake, please do not make this local to malloc_init; on some
75 machines, the dumping procedure makes all static variables
76 read-only. On these machines, the word static is #defined to be
77 the empty string, meaning that r_alloc_initialized becomes an
78 automatic variable, and loses its value each time Emacs is started
79 up. */
81 static int r_alloc_initialized = 0;
83 static void r_alloc_init ();
86 /* Declarations for working with the malloc, ralloc, and system breaks. */
88 /* Function to set the real break value. */
89 POINTER (*real_morecore) ();
91 /* The break value, as seen by malloc. */
92 static POINTER virtual_break_value;
94 /* The address of the end of the last data in use by ralloc,
95 including relocatable blocs as well as malloc data. */
96 static POINTER break_value;
98 /* This is the size of a page. We round memory requests to this boundary. */
99 static int page_size;
101 /* Whenever we get memory from the system, get this many extra bytes. This
102 must be a multiple of page_size. */
103 static int extra_bytes;
105 /* Macros for rounding. Note that rounding to any value is possible
106 by changing the definition of PAGE. */
107 #define PAGE (getpagesize ())
108 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
109 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
110 & ~(page_size - 1))
111 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
113 #define MEM_ALIGN sizeof(double)
114 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
115 & ~(MEM_ALIGN - 1))
118 /***********************************************************************
119 Implementation using sbrk
120 ***********************************************************************/
122 /* Data structures of heaps and blocs. */
124 /* The relocatable objects, or blocs, and the malloc data
125 both reside within one or more heaps.
126 Each heap contains malloc data, running from `start' to `bloc_start',
127 and relocatable objects, running from `bloc_start' to `free'.
129 Relocatable objects may relocate within the same heap
130 or may move into another heap; the heaps themselves may grow
131 but they never move.
133 We try to make just one heap and make it larger as necessary.
134 But sometimes we can't do that, because we can't get contiguous
135 space to add onto the heap. When that happens, we start a new heap. */
137 typedef struct heap
139 struct heap *next;
140 struct heap *prev;
141 /* Start of memory range of this heap. */
142 POINTER start;
143 /* End of memory range of this heap. */
144 POINTER end;
145 /* Start of relocatable data in this heap. */
146 POINTER bloc_start;
147 /* Start of unused space in this heap. */
148 POINTER free;
149 /* First bloc in this heap. */
150 struct bp *first_bloc;
151 /* Last bloc in this heap. */
152 struct bp *last_bloc;
153 } *heap_ptr;
155 #define NIL_HEAP ((heap_ptr) 0)
156 #define HEAP_PTR_SIZE (sizeof (struct heap))
158 /* This is the first heap object.
159 If we need additional heap objects, each one resides at the beginning of
160 the space it covers. */
161 static struct heap heap_base;
163 /* Head and tail of the list of heaps. */
164 static heap_ptr first_heap, last_heap;
166 /* These structures are allocated in the malloc arena.
167 The linked list is kept in order of increasing '.data' members.
168 The data blocks abut each other; if b->next is non-nil, then
169 b->data + b->size == b->next->data.
171 An element with variable==NIL denotes a freed block, which has not yet
172 been collected. They may only appear while r_alloc_freeze > 0, and will be
173 freed when the arena is thawed. Currently, these blocs are not reusable,
174 while the arena is frozen. Very inefficient. */
176 typedef struct bp
178 struct bp *next;
179 struct bp *prev;
180 POINTER *variable;
181 POINTER data;
182 SIZE size;
183 POINTER new_data; /* temporarily used for relocation */
184 struct heap *heap; /* Heap this bloc is in. */
185 } *bloc_ptr;
187 #define NIL_BLOC ((bloc_ptr) 0)
188 #define BLOC_PTR_SIZE (sizeof (struct bp))
190 /* Head and tail of the list of relocatable blocs. */
191 static bloc_ptr first_bloc, last_bloc;
193 static int use_relocatable_buffers;
195 /* If >0, no relocation whatsoever takes place. */
196 static int r_alloc_freeze_level;
199 /* Functions to get and return memory from the system. */
201 /* Find the heap that ADDRESS falls within. */
203 static heap_ptr
204 find_heap (address)
205 POINTER address;
207 heap_ptr heap;
209 for (heap = last_heap; heap; heap = heap->prev)
211 if (heap->start <= address && address <= heap->end)
212 return heap;
215 return NIL_HEAP;
218 /* Find SIZE bytes of space in a heap.
219 Try to get them at ADDRESS (which must fall within some heap's range)
220 if we can get that many within one heap.
222 If enough space is not presently available in our reserve, this means
223 getting more page-aligned space from the system. If the returned space
224 is not contiguous to the last heap, allocate a new heap, and append it
226 obtain does not try to keep track of whether space is in use
227 or not in use. It just returns the address of SIZE bytes that
228 fall within a single heap. If you call obtain twice in a row
229 with the same arguments, you typically get the same value.
230 to the heap list. It's the caller's responsibility to keep
231 track of what space is in use.
233 Return the address of the space if all went well, or zero if we couldn't
234 allocate the memory. */
236 static POINTER
237 obtain (address, size)
238 POINTER address;
239 SIZE size;
241 heap_ptr heap;
242 SIZE already_available;
244 /* Find the heap that ADDRESS falls within. */
245 for (heap = last_heap; heap; heap = heap->prev)
247 if (heap->start <= address && address <= heap->end)
248 break;
251 if (! heap)
252 abort ();
254 /* If we can't fit SIZE bytes in that heap,
255 try successive later heaps. */
256 while (heap && (char *) address + size > (char *) heap->end)
258 heap = heap->next;
259 if (heap == NIL_HEAP)
260 break;
261 address = heap->bloc_start;
264 /* If we can't fit them within any existing heap,
265 get more space. */
266 if (heap == NIL_HEAP)
268 POINTER new = (*real_morecore)(0);
269 SIZE get;
271 already_available = (char *)last_heap->end - (char *)address;
273 if (new != last_heap->end)
275 /* Someone else called sbrk. Make a new heap. */
277 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
278 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
280 if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
281 return 0;
283 new_heap->start = new;
284 new_heap->end = bloc_start;
285 new_heap->bloc_start = bloc_start;
286 new_heap->free = bloc_start;
287 new_heap->next = NIL_HEAP;
288 new_heap->prev = last_heap;
289 new_heap->first_bloc = NIL_BLOC;
290 new_heap->last_bloc = NIL_BLOC;
291 last_heap->next = new_heap;
292 last_heap = new_heap;
294 address = bloc_start;
295 already_available = 0;
298 /* Add space to the last heap (which we may have just created).
299 Get some extra, so we can come here less often. */
301 get = size + extra_bytes - already_available;
302 get = (char *) ROUNDUP ((char *)last_heap->end + get)
303 - (char *) last_heap->end;
305 if ((*real_morecore) (get) != last_heap->end)
306 return 0;
308 last_heap->end = (char *) last_heap->end + get;
311 return address;
314 /* Return unused heap space to the system
315 if there is a lot of unused space now.
316 This can make the last heap smaller;
317 it can also eliminate the last heap entirely. */
319 static void
320 relinquish ()
322 register heap_ptr h;
323 int excess = 0;
325 /* Add the amount of space beyond break_value
326 in all heaps which have extend beyond break_value at all. */
328 for (h = last_heap; h && break_value < h->end; h = h->prev)
330 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
331 ? h->bloc_start : break_value);
334 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
336 /* Keep extra_bytes worth of empty space.
337 And don't free anything unless we can free at least extra_bytes. */
338 excess -= extra_bytes;
340 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
342 /* This heap should have no blocs in it. */
343 if (last_heap->first_bloc != NIL_BLOC
344 || last_heap->last_bloc != NIL_BLOC)
345 abort ();
347 /* Return the last heap, with its header, to the system. */
348 excess = (char *)last_heap->end - (char *)last_heap->start;
349 last_heap = last_heap->prev;
350 last_heap->next = NIL_HEAP;
352 else
354 excess = (char *) last_heap->end
355 - (char *) ROUNDUP ((char *)last_heap->end - excess);
356 last_heap->end = (char *) last_heap->end - excess;
359 if ((*real_morecore) (- excess) == 0)
361 /* If the system didn't want that much memory back, adjust
362 the end of the last heap to reflect that. This can occur
363 if break_value is still within the original data segment. */
364 last_heap->end = (char *) last_heap->end + excess;
365 /* Make sure that the result of the adjustment is accurate.
366 It should be, for the else clause above; the other case,
367 which returns the entire last heap to the system, seems
368 unlikely to trigger this mode of failure. */
369 if (last_heap->end != (*real_morecore) (0))
370 abort ();
375 /* Return the total size in use by relocating allocator,
376 above where malloc gets space. */
378 long
379 r_alloc_size_in_use ()
381 return (char *) break_value - (char *) virtual_break_value;
384 /* The meat - allocating, freeing, and relocating blocs. */
386 /* Find the bloc referenced by the address in PTR. Returns a pointer
387 to that block. */
389 static bloc_ptr
390 find_bloc (ptr)
391 POINTER *ptr;
393 register bloc_ptr p = first_bloc;
395 while (p != NIL_BLOC)
397 if (p->variable == ptr && p->data == *ptr)
398 return p;
400 p = p->next;
403 return p;
406 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
407 Returns a pointer to the new bloc, or zero if we couldn't allocate
408 memory for the new block. */
410 static bloc_ptr
411 get_bloc (size)
412 SIZE size;
414 register bloc_ptr new_bloc;
415 register heap_ptr heap;
417 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
418 || ! (new_bloc->data = obtain (break_value, size)))
420 if (new_bloc)
421 free (new_bloc);
423 return 0;
426 break_value = (char *) new_bloc->data + size;
428 new_bloc->size = size;
429 new_bloc->next = NIL_BLOC;
430 new_bloc->variable = (POINTER *) NIL;
431 new_bloc->new_data = 0;
433 /* Record in the heap that this space is in use. */
434 heap = find_heap (new_bloc->data);
435 heap->free = break_value;
437 /* Maintain the correspondence between heaps and blocs. */
438 new_bloc->heap = heap;
439 heap->last_bloc = new_bloc;
440 if (heap->first_bloc == NIL_BLOC)
441 heap->first_bloc = new_bloc;
443 /* Put this bloc on the doubly-linked list of blocs. */
444 if (first_bloc)
446 new_bloc->prev = last_bloc;
447 last_bloc->next = new_bloc;
448 last_bloc = new_bloc;
450 else
452 first_bloc = last_bloc = new_bloc;
453 new_bloc->prev = NIL_BLOC;
456 return new_bloc;
459 /* Calculate new locations of blocs in the list beginning with BLOC,
460 relocating it to start at ADDRESS, in heap HEAP. If enough space is
461 not presently available in our reserve, call obtain for
462 more space.
464 Store the new location of each bloc in its new_data field.
465 Do not touch the contents of blocs or break_value. */
467 static int
468 relocate_blocs (bloc, heap, address)
469 bloc_ptr bloc;
470 heap_ptr heap;
471 POINTER address;
473 register bloc_ptr b = bloc;
475 /* No need to ever call this if arena is frozen, bug somewhere! */
476 if (r_alloc_freeze_level)
477 abort();
479 while (b)
481 /* If bloc B won't fit within HEAP,
482 move to the next heap and try again. */
483 while (heap && (char *) address + b->size > (char *) heap->end)
485 heap = heap->next;
486 if (heap == NIL_HEAP)
487 break;
488 address = heap->bloc_start;
491 /* If BLOC won't fit in any heap,
492 get enough new space to hold BLOC and all following blocs. */
493 if (heap == NIL_HEAP)
495 register bloc_ptr tb = b;
496 register SIZE s = 0;
498 /* Add up the size of all the following blocs. */
499 while (tb != NIL_BLOC)
501 if (tb->variable)
502 s += tb->size;
504 tb = tb->next;
507 /* Get that space. */
508 address = obtain (address, s);
509 if (address == 0)
510 return 0;
512 heap = last_heap;
515 /* Record the new address of this bloc
516 and update where the next bloc can start. */
517 b->new_data = address;
518 if (b->variable)
519 address = (char *) address + b->size;
520 b = b->next;
523 return 1;
526 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
527 This is necessary if we put the memory of space of BLOC
528 before that of BEFORE. */
530 static void
531 reorder_bloc (bloc, before)
532 bloc_ptr bloc, before;
534 bloc_ptr prev, next;
536 /* Splice BLOC out from where it is. */
537 prev = bloc->prev;
538 next = bloc->next;
540 if (prev)
541 prev->next = next;
542 if (next)
543 next->prev = prev;
545 /* Splice it in before BEFORE. */
546 prev = before->prev;
548 if (prev)
549 prev->next = bloc;
550 bloc->prev = prev;
552 before->prev = bloc;
553 bloc->next = before;
556 /* Update the records of which heaps contain which blocs, starting
557 with heap HEAP and bloc BLOC. */
559 static void
560 update_heap_bloc_correspondence (bloc, heap)
561 bloc_ptr bloc;
562 heap_ptr heap;
564 register bloc_ptr b;
566 /* Initialize HEAP's status to reflect blocs before BLOC. */
567 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
569 /* The previous bloc is in HEAP. */
570 heap->last_bloc = bloc->prev;
571 heap->free = (char *) bloc->prev->data + bloc->prev->size;
573 else
575 /* HEAP contains no blocs before BLOC. */
576 heap->first_bloc = NIL_BLOC;
577 heap->last_bloc = NIL_BLOC;
578 heap->free = heap->bloc_start;
581 /* Advance through blocs one by one. */
582 for (b = bloc; b != NIL_BLOC; b = b->next)
584 /* Advance through heaps, marking them empty,
585 till we get to the one that B is in. */
586 while (heap)
588 if (heap->bloc_start <= b->data && b->data <= heap->end)
589 break;
590 heap = heap->next;
591 /* We know HEAP is not null now,
592 because there has to be space for bloc B. */
593 heap->first_bloc = NIL_BLOC;
594 heap->last_bloc = NIL_BLOC;
595 heap->free = heap->bloc_start;
598 /* Update HEAP's status for bloc B. */
599 heap->free = (char *) b->data + b->size;
600 heap->last_bloc = b;
601 if (heap->first_bloc == NIL_BLOC)
602 heap->first_bloc = b;
604 /* Record that B is in HEAP. */
605 b->heap = heap;
608 /* If there are any remaining heaps and no blocs left,
609 mark those heaps as empty. */
610 heap = heap->next;
611 while (heap)
613 heap->first_bloc = NIL_BLOC;
614 heap->last_bloc = NIL_BLOC;
615 heap->free = heap->bloc_start;
616 heap = heap->next;
620 /* Resize BLOC to SIZE bytes. This relocates the blocs
621 that come after BLOC in memory. */
623 static int
624 resize_bloc (bloc, size)
625 bloc_ptr bloc;
626 SIZE size;
628 register bloc_ptr b;
629 heap_ptr heap;
630 POINTER address;
631 SIZE old_size;
633 /* No need to ever call this if arena is frozen, bug somewhere! */
634 if (r_alloc_freeze_level)
635 abort();
637 if (bloc == NIL_BLOC || size == bloc->size)
638 return 1;
640 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
642 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
643 break;
646 if (heap == NIL_HEAP)
647 abort ();
649 old_size = bloc->size;
650 bloc->size = size;
652 /* Note that bloc could be moved into the previous heap. */
653 address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
654 : (char *) first_heap->bloc_start);
655 while (heap)
657 if (heap->bloc_start <= address && address <= heap->end)
658 break;
659 heap = heap->prev;
662 if (! relocate_blocs (bloc, heap, address))
664 bloc->size = old_size;
665 return 0;
668 if (size > old_size)
670 for (b = last_bloc; b != bloc; b = b->prev)
672 if (!b->variable)
674 b->size = 0;
675 b->data = b->new_data;
677 else
679 safe_bcopy (b->data, b->new_data, b->size);
680 *b->variable = b->data = b->new_data;
683 if (!bloc->variable)
685 bloc->size = 0;
686 bloc->data = bloc->new_data;
688 else
690 safe_bcopy (bloc->data, bloc->new_data, old_size);
691 bzero ((char *) bloc->new_data + old_size, size - old_size);
692 *bloc->variable = bloc->data = bloc->new_data;
695 else
697 for (b = bloc; b != NIL_BLOC; b = b->next)
699 if (!b->variable)
701 b->size = 0;
702 b->data = b->new_data;
704 else
706 safe_bcopy (b->data, b->new_data, b->size);
707 *b->variable = b->data = b->new_data;
712 update_heap_bloc_correspondence (bloc, heap);
714 break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
715 : (char *) first_heap->bloc_start);
716 return 1;
719 /* Free BLOC from the chain of blocs, relocating any blocs above it.
720 This may return space to the system. */
722 static void
723 free_bloc (bloc)
724 bloc_ptr bloc;
726 heap_ptr heap = bloc->heap;
728 if (r_alloc_freeze_level)
730 bloc->variable = (POINTER *) NIL;
731 return;
734 resize_bloc (bloc, 0);
736 if (bloc == first_bloc && bloc == last_bloc)
738 first_bloc = last_bloc = NIL_BLOC;
740 else if (bloc == last_bloc)
742 last_bloc = bloc->prev;
743 last_bloc->next = NIL_BLOC;
745 else if (bloc == first_bloc)
747 first_bloc = bloc->next;
748 first_bloc->prev = NIL_BLOC;
750 else
752 bloc->next->prev = bloc->prev;
753 bloc->prev->next = bloc->next;
756 /* Update the records of which blocs are in HEAP. */
757 if (heap->first_bloc == bloc)
759 if (bloc->next != 0 && bloc->next->heap == heap)
760 heap->first_bloc = bloc->next;
761 else
762 heap->first_bloc = heap->last_bloc = NIL_BLOC;
764 if (heap->last_bloc == bloc)
766 if (bloc->prev != 0 && bloc->prev->heap == heap)
767 heap->last_bloc = bloc->prev;
768 else
769 heap->first_bloc = heap->last_bloc = NIL_BLOC;
772 relinquish ();
773 free (bloc);
776 /* Interface routines. */
778 /* Obtain SIZE bytes of storage from the free pool, or the system, as
779 necessary. If relocatable blocs are in use, this means relocating
780 them. This function gets plugged into the GNU malloc's __morecore
781 hook.
783 We provide hysteresis, never relocating by less than extra_bytes.
785 If we're out of memory, we should return zero, to imitate the other
786 __morecore hook values - in particular, __default_morecore in the
787 GNU malloc package. */
789 POINTER
790 r_alloc_sbrk (size)
791 long size;
793 register bloc_ptr b;
794 POINTER address;
796 if (! r_alloc_initialized)
797 r_alloc_init ();
799 if (! use_relocatable_buffers)
800 return (*real_morecore) (size);
802 if (size == 0)
803 return virtual_break_value;
805 if (size > 0)
807 /* Allocate a page-aligned space. GNU malloc would reclaim an
808 extra space if we passed an unaligned one. But we could
809 not always find a space which is contiguous to the previous. */
810 POINTER new_bloc_start;
811 heap_ptr h = first_heap;
812 SIZE get = ROUNDUP (size);
814 address = (POINTER) ROUNDUP (virtual_break_value);
816 /* Search the list upward for a heap which is large enough. */
817 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
819 h = h->next;
820 if (h == NIL_HEAP)
821 break;
822 address = (POINTER) ROUNDUP (h->start);
825 /* If not found, obtain more space. */
826 if (h == NIL_HEAP)
828 get += extra_bytes + page_size;
830 if (! obtain (address, get))
831 return 0;
833 if (first_heap == last_heap)
834 address = (POINTER) ROUNDUP (virtual_break_value);
835 else
836 address = (POINTER) ROUNDUP (last_heap->start);
837 h = last_heap;
840 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
842 if (first_heap->bloc_start < new_bloc_start)
844 /* This is no clean solution - no idea how to do it better. */
845 if (r_alloc_freeze_level)
846 return NIL;
848 /* There is a bug here: if the above obtain call succeeded, but the
849 relocate_blocs call below does not succeed, we need to free
850 the memory that we got with obtain. */
852 /* Move all blocs upward. */
853 if (! relocate_blocs (first_bloc, h, new_bloc_start))
854 return 0;
856 /* Note that (POINTER)(h+1) <= new_bloc_start since
857 get >= page_size, so the following does not destroy the heap
858 header. */
859 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
861 safe_bcopy (b->data, b->new_data, b->size);
862 *b->variable = b->data = b->new_data;
865 h->bloc_start = new_bloc_start;
867 update_heap_bloc_correspondence (first_bloc, h);
869 if (h != first_heap)
871 /* Give up managing heaps below the one the new
872 virtual_break_value points to. */
873 first_heap->prev = NIL_HEAP;
874 first_heap->next = h->next;
875 first_heap->start = h->start;
876 first_heap->end = h->end;
877 first_heap->free = h->free;
878 first_heap->first_bloc = h->first_bloc;
879 first_heap->last_bloc = h->last_bloc;
880 first_heap->bloc_start = h->bloc_start;
882 if (first_heap->next)
883 first_heap->next->prev = first_heap;
884 else
885 last_heap = first_heap;
888 bzero (address, size);
890 else /* size < 0 */
892 SIZE excess = (char *)first_heap->bloc_start
893 - ((char *)virtual_break_value + size);
895 address = virtual_break_value;
897 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
899 excess -= extra_bytes;
900 first_heap->bloc_start
901 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
903 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
905 for (b = first_bloc; b != NIL_BLOC; b = b->next)
907 safe_bcopy (b->data, b->new_data, b->size);
908 *b->variable = b->data = b->new_data;
912 if ((char *)virtual_break_value + size < (char *)first_heap->start)
914 /* We found an additional space below the first heap */
915 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
919 virtual_break_value = (POINTER) ((char *)address + size);
920 break_value = (last_bloc
921 ? (char *) last_bloc->data + last_bloc->size
922 : (char *) first_heap->bloc_start);
923 if (size < 0)
924 relinquish ();
926 return address;
930 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
931 the data is returned in *PTR. PTR is thus the address of some variable
932 which will use the data area.
934 The allocation of 0 bytes is valid.
935 In case r_alloc_freeze is set, a best fit of unused blocs could be done
936 before allocating a new area. Not yet done.
938 If we can't allocate the necessary memory, set *PTR to zero, and
939 return zero. */
941 POINTER
942 r_alloc (ptr, size)
943 POINTER *ptr;
944 SIZE size;
946 register bloc_ptr new_bloc;
948 if (! r_alloc_initialized)
949 r_alloc_init ();
951 new_bloc = get_bloc (MEM_ROUNDUP (size));
952 if (new_bloc)
954 new_bloc->variable = ptr;
955 *ptr = new_bloc->data;
957 else
958 *ptr = 0;
960 return *ptr;
963 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
964 Store 0 in *PTR to show there's no block allocated. */
966 void
967 r_alloc_free (ptr)
968 register POINTER *ptr;
970 register bloc_ptr dead_bloc;
972 if (! r_alloc_initialized)
973 r_alloc_init ();
975 dead_bloc = find_bloc (ptr);
976 if (dead_bloc == NIL_BLOC)
977 abort ();
979 free_bloc (dead_bloc);
980 *ptr = 0;
982 #ifdef emacs
983 refill_memory_reserve ();
984 #endif
987 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
988 Do this by shifting all blocks above this one up in memory, unless
989 SIZE is less than or equal to the current bloc size, in which case
990 do nothing.
992 In case r_alloc_freeze is set, a new bloc is allocated, and the
993 memory copied to it. Not very efficient. We could traverse the
994 bloc_list for a best fit of free blocs first.
996 Change *PTR to reflect the new bloc, and return this value.
998 If more memory cannot be allocated, then leave *PTR unchanged, and
999 return zero. */
1001 POINTER
1002 r_re_alloc (ptr, size)
1003 POINTER *ptr;
1004 SIZE size;
1006 register bloc_ptr bloc;
1008 if (! r_alloc_initialized)
1009 r_alloc_init ();
1011 if (!*ptr)
1012 return r_alloc (ptr, size);
1013 if (!size)
1015 r_alloc_free (ptr);
1016 return r_alloc (ptr, 0);
1019 bloc = find_bloc (ptr);
1020 if (bloc == NIL_BLOC)
1021 abort ();
1023 if (size < bloc->size)
1025 /* Wouldn't it be useful to actually resize the bloc here? */
1026 /* I think so too, but not if it's too expensive... */
1027 if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1028 && r_alloc_freeze_level == 0)
1030 resize_bloc (bloc, MEM_ROUNDUP (size));
1031 /* Never mind if this fails, just do nothing... */
1032 /* It *should* be infallible! */
1035 else if (size > bloc->size)
1037 if (r_alloc_freeze_level)
1039 bloc_ptr new_bloc;
1040 new_bloc = get_bloc (MEM_ROUNDUP (size));
1041 if (new_bloc)
1043 new_bloc->variable = ptr;
1044 *ptr = new_bloc->data;
1045 bloc->variable = (POINTER *) NIL;
1047 else
1048 return NIL;
1050 else
1052 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1053 return NIL;
1056 return *ptr;
1059 /* Disable relocations, after making room for at least SIZE bytes
1060 of non-relocatable heap if possible. The relocatable blocs are
1061 guaranteed to hold still until thawed, even if this means that
1062 malloc must return a null pointer. */
1064 void
1065 r_alloc_freeze (size)
1066 long size;
1068 if (! r_alloc_initialized)
1069 r_alloc_init ();
1071 /* If already frozen, we can't make any more room, so don't try. */
1072 if (r_alloc_freeze_level > 0)
1073 size = 0;
1074 /* If we can't get the amount requested, half is better than nothing. */
1075 while (size > 0 && r_alloc_sbrk (size) == 0)
1076 size /= 2;
1077 ++r_alloc_freeze_level;
1078 if (size > 0)
1079 r_alloc_sbrk (-size);
1082 void
1083 r_alloc_thaw ()
1086 if (! r_alloc_initialized)
1087 r_alloc_init ();
1089 if (--r_alloc_freeze_level < 0)
1090 abort ();
1092 /* This frees all unused blocs. It is not too inefficient, as the resize
1093 and bcopy is done only once. Afterwards, all unreferenced blocs are
1094 already shrunk to zero size. */
1095 if (!r_alloc_freeze_level)
1097 bloc_ptr *b = &first_bloc;
1098 while (*b)
1099 if (!(*b)->variable)
1100 free_bloc (*b);
1101 else
1102 b = &(*b)->next;
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

/* Consistency check of the heap list and bloc list invariants:
   heaps are ordered, non-overlapping, and properly aligned; blocs are
   contiguous within each heap; break_value sits at the end of the
   last bloc (or at first_heap->bloc_start when there are none).  */
void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
1221 /***********************************************************************
1222 Initialization
1223 ***********************************************************************/
1225 /* The hook `malloc' uses for the function which gets more space
1226 from the system. */
1228 #ifndef SYSTEM_MALLOC
1229 extern POINTER (*__morecore) ();
1230 #endif
1233 /* Initialize various things for memory allocation. */
1235 static void
1236 r_alloc_init ()
1238 if (r_alloc_initialized)
1239 return;
1240 r_alloc_initialized = 1;
1242 page_size = PAGE;
1243 #ifndef SYSTEM_MALLOC
1244 real_morecore = __morecore;
1245 __morecore = r_alloc_sbrk;
1247 first_heap = last_heap = &heap_base;
1248 first_heap->next = first_heap->prev = NIL_HEAP;
1249 first_heap->start = first_heap->bloc_start
1250 = virtual_break_value = break_value = (*real_morecore) (0);
1251 if (break_value == NIL)
1252 abort ();
1254 extra_bytes = ROUNDUP (50000);
1255 #endif
1257 #ifdef DOUG_LEA_MALLOC
1258 mallopt (M_TOP_PAD, 64 * 4096);
1259 #else
1260 #ifndef SYSTEM_MALLOC
1261 /* Give GNU malloc's morecore some hysteresis
1262 so that we move all the relocatable blocks much less often. */
1263 __malloc_extra_blocks = 64;
1264 #endif
1265 #endif
1267 #ifndef SYSTEM_MALLOC
1268 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
1270 /* The extra call to real_morecore guarantees that the end of the
1271 address space is a multiple of page_size, even if page_size is
1272 not really the page size of the system running the binary in
1273 which page_size is stored. This allows a binary to be built on a
1274 system with one page size and run on a system with a smaller page
1275 size. */
1276 (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);
1278 /* Clear the rest of the last page; this memory is in our address space
1279 even though it is after the sbrk value. */
1280 /* Doubly true, with the additional call that explicitly adds the
1281 rest of that page to the address space. */
1282 bzero (first_heap->start,
1283 (char *) first_heap->end - (char *) first_heap->start);
1284 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
1285 #endif
1287 use_relocatable_buffers = 1;