(r_alloc_init): Conditionalize on SYSTEM_MALLOC, not REL_ALLOC_MMAP.
src/ralloc.c
/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern int __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */
#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & ~(page_size - 1))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
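
/* Worked example (illustrative): with a 4096-byte page,
   ROUNDUP (5000) == 8192 and ROUNDUP (4096) == 4096; on a machine
   where MEM_ALIGN == sizeof (double) == 8, MEM_ROUNDUP (13) == 16.  */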
/***********************************************************************
		     Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
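
/* Illustrative picture of one heap, following the description above:

   start          bloc_start                   free           end
     |                |                          |              |
     v                v                          v              v
     +----------------+--------------------------+--------------+
     |  malloc data   |    relocatable blocs     |    unused    |
     +----------------+--------------------------+--------------+  */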
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing `.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
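
/* For example (sketch): with two live blocs A and B, the invariants
   above give

     A->data + A->size == B->data	(blocs abut)
     *A->variable == A->data		(user pointer tracks the bloc)

   which is why relocation can update user code's pointers in place.  */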
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we
   couldn't allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
		   - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
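
/* For reference, this hook is installed by r_alloc_init (see the
   Initialization section below), essentially:

     real_morecore = __morecore;	remember the system's morecore
     __morecore = r_alloc_sbrk;		malloc now calls us instead

   so every request malloc makes for more core passes through here.  */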
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
#ifndef REL_ALLOC_MMAP
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
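
/* Usage sketch (illustrative only; `text' is a hypothetical caller
   variable, not from this file):

     static POINTER text;

     if (r_alloc (&text, nbytes) == 0)
       handle the allocation failure;
     ... use text, re-reading it after any allocation, since a
     relocation may have updated it through the registered address ...
     r_alloc_free (&text);	(also stores 0 in text)  */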
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }

  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */

void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}

      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

#endif /* not REL_ALLOC_MMAP */
/***********************************************************************
		     Implementation based on mmap
 ***********************************************************************/

#ifdef REL_ALLOC_MMAP

#include <sys/types.h>
#include <sys/mman.h>
#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define MAP_ANON MAP_ANONYMOUS
#else
#define MAP_ANON 0
#endif
#endif
#include <stdio.h>
#include <errno.h>
#if !MAP_ANON
#include <fcntl.h>
#endif
/* Memory is allocated in regions which are mapped using mmap(2).
   The current implementation lets the system select mapped
   addresses; we're not using MAP_FIXED in general, except when
   trying to enlarge regions.

   Each mapped region starts with a mmap_region structure, the user
   area starts after that structure, aligned to MEM_ALIGN.

	+------------------------+
	| struct mmap_region +   |
	| padding                |
	+------------------------+
	| user data              |
	|                        |
	|                        |
	+------------------------+  */
struct mmap_region
{
  /* User-specified size.  */
  size_t nbytes_specified;

  /* Number of bytes mapped.  */
  size_t nbytes_mapped;

  /* Pointer to the location holding the address of the memory
     allocated with the mmap'd block.  The variable actually points
     after this structure.  */
  POINTER_TYPE **var;

  /* Next and previous in list of all mmap'd regions.  */
  struct mmap_region *next, *prev;
};
/* Doubly-linked list of mmap'd regions.  */

static struct mmap_region *mmap_regions;

/* Temporary storage for mmap_set_vars, see there.  */

static struct mmap_region *mmap_regions_1;

/* File descriptor for mmap.  If we don't have anonymous mapping,
   /dev/zero will be opened on it.  */

static int mmap_fd = -1;

/* Value is X rounded up to the next multiple of N.  */

#define ROUND(X, N)	(((X) + (N) - 1) / (N) * (N))
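
/* E.g. ROUND (1, page_size) == page_size, and
   ROUND (page_size + 1, page_size) == 2 * page_size.  */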
/* Size of mmap_region structure plus padding.  */

#define MMAP_REGION_STRUCT_SIZE \
     ROUND (sizeof (struct mmap_region), MEM_ALIGN)

/* Given a pointer P to the start of the user-visible part of a mapped
   region, return a pointer to the start of the region.  */

#define MMAP_REGION(P) \
     ((struct mmap_region *) ((char *) (P) - MMAP_REGION_STRUCT_SIZE))

/* Given a pointer P to the start of a mapped region, return a pointer
   to the start of the user-visible part of the region.  */

#define MMAP_USER_AREA(P) \
     ((POINTER_TYPE *) ((char *) (P) + MMAP_REGION_STRUCT_SIZE))
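
/* Illustratively: if sizeof (struct mmap_region) were 36 with
   MEM_ALIGN == 8, then MMAP_REGION_STRUCT_SIZE == 40, the user area
   of a region R starts at (char *) R + 40, and
   MMAP_REGION (MMAP_USER_AREA (R)) == R.  */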
/* Function prototypes.  */

static int mmap_free P_ ((struct mmap_region *));
static int mmap_enlarge P_ ((struct mmap_region *, int));
static struct mmap_region *mmap_find P_ ((POINTER_TYPE *, POINTER_TYPE *));
POINTER_TYPE *r_alloc P_ ((POINTER_TYPE **, size_t));
POINTER_TYPE *r_re_alloc P_ ((POINTER_TYPE **, size_t));
void r_alloc_free P_ ((POINTER_TYPE **ptr));


void
r_alloc_init_fd ()
{
#if !MAP_ANON
  /* No anonymous mmap -- we need the file descriptor.  */
  mmap_fd = open ("/dev/zero", O_RDONLY);
  if (mmap_fd < 0)
    fatal ("cannot open /dev/zero");
#endif
}
/* Return a region overlapping address range START...END, or null if
   none.  END is exclusive, i.e. the last byte in the range
   is at END - 1.  */

static struct mmap_region *
mmap_find (start, end)
     POINTER_TYPE *start, *end;
{
  struct mmap_region *r;
  char *s = (char *) start, *e = (char *) end;

  for (r = mmap_regions; r; r = r->next)
    {
      char *rstart = (char *) r;
      char *rend = rstart + r->nbytes_mapped;

      if (/* First byte of range, i.e. START, in this region?  */
	  (s >= rstart && s < rend)
	  /* Last byte of range, i.e. END - 1, in this region?  */
	  || (e > rstart && e <= rend)
	  /* First byte of this region in the range?  */
	  || (rstart >= s && rstart < e)
	  /* Last byte of this region in the range?  */
	  || (rend > s && rend <= e))
	break;
    }

  return r;
}
/* Unmap region R, which points to the start of a mapped region.
   Value is non-zero if successful.  */

static int
mmap_free (r)
     struct mmap_region *r;
{
  if (r->next)
    r->next->prev = r->prev;
  if (r->prev)
    r->prev->next = r->next;
  else
    mmap_regions = r->next;

  if (munmap (r, r->nbytes_mapped) == -1)
    {
      fprintf (stderr, "munmap: %s\n", emacs_strerror (errno));
      return 0;
    }

  return 1;
}
/* Enlarge region R by NPAGES pages.  NPAGES < 0 means shrink R.
   Value is non-zero if successful.  */

static int
mmap_enlarge (r, npages)
     struct mmap_region *r;
     int npages;
{
  char *region_end = (char *) r + r->nbytes_mapped;
  size_t nbytes;
  int success = 1;

  if (npages < 0)
    {
      /* Unmap pages at the end of the region.  */
      nbytes = - npages * page_size;
      if (munmap (region_end - nbytes, nbytes) == -1)
	{
	  fprintf (stderr, "munmap: %s\n", emacs_strerror (errno));
	  success = 0;
	}
      else
	r->nbytes_mapped -= nbytes;
    }
  else if (npages > 0)
    {
      nbytes = npages * page_size;

      /* Try to map additional pages at the end of the region.  We
	 cannot do this if the address range is already occupied by
	 something else because mmap deletes any previous mapping.
	 I'm not sure this is worth doing, let's see.  */
      if (mmap_find (region_end, region_end + nbytes))
	success = 0;
      else
	{
	  POINTER_TYPE *p;

	  p = mmap (region_end, nbytes, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, mmap_fd, 0);
	  if (p == MAP_FAILED)
	    {
	      fprintf (stderr, "mmap: %s\n", emacs_strerror (errno));
	      success = 0;
	    }
	  else if (p != (POINTER_TYPE *) region_end)
	    {
	      /* Kernels are free to choose a different address.  In
		 that case, unmap what we've mapped above; we have
		 no use for it.  */
	      if (munmap (p, nbytes) == -1)
		fprintf (stderr, "munmap: %s\n", emacs_strerror (errno));
	      success = 0;
	    }
	  else
	    r->nbytes_mapped += nbytes;
	}
    }
  else
    success = 0;

  return success;
}
/* Set or reset variables holding references to mapped regions.  If
   RESTORE_P is zero, set all variables to null.  If RESTORE_P is
   non-zero, set all variables to the start of the user-areas
   of mapped regions.

   This function is called from Fdump_emacs to ensure that the dumped
   Emacs doesn't contain references to memory that won't be mapped
   when Emacs starts.  */

void
mmap_set_vars (restore_p)
     int restore_p;
{
  struct mmap_region *r;
  static int fd;

  if (restore_p)
    {
      mmap_regions = mmap_regions_1;
      for (r = mmap_regions; r; r = r->next)
	*r->var = MMAP_USER_AREA (r);
      mmap_fd = fd;
    }
  else
    {
      for (r = mmap_regions; r; r = r->next)
	*r->var = NULL;
      mmap_regions_1 = mmap_regions;
      mmap_regions = NULL;
      /* Save the descriptor so the restore case above has
	 something to restore.  */
      fd = mmap_fd;
      mmap_fd = -1;
    }
}
/* Return total number of bytes mapped.  */

size_t
mmap_mapped_bytes ()
{
  struct mmap_region *r;
  size_t n = 0;

  for (r = mmap_regions; r; r = r->next)
    n += r->nbytes_mapped;

  return n;
}
/* Allocate a block of storage large enough to hold NBYTES bytes of
   data.  A pointer to the data is returned in *VAR.  VAR is thus the
   address of some variable which will use the data area.

   The allocation of 0 bytes is valid.

   If we can't allocate the necessary memory, set *VAR to null, and
   return null.  */

POINTER_TYPE *
r_alloc (var, nbytes)
     POINTER_TYPE **var;
     size_t nbytes;
{
  void *p;
  size_t map;

  if (!r_alloc_initialized)
    r_alloc_init ();
#if defined (REL_ALLOC_MMAP) && !MAP_ANON
  if (mmap_fd == -1)
    r_alloc_init_fd ();
#endif

  map = ROUND (nbytes + MMAP_REGION_STRUCT_SIZE, page_size);
  p = mmap (NULL, map, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
	    mmap_fd, 0);

  if (p == MAP_FAILED)
    {
      if (errno != ENOMEM)
	fprintf (stderr, "mmap: %s\n", emacs_strerror (errno));
      p = NULL;
    }
  else
    {
      struct mmap_region *r = (struct mmap_region *) p;

      r->nbytes_specified = nbytes;
      r->nbytes_mapped = map;
      r->var = var;
      r->prev = NULL;
      r->next = mmap_regions;
      if (r->next)
	r->next->prev = r;
      mmap_regions = r;

      p = MMAP_USER_AREA (p);
    }

  return *var = p;
}
/* Given a pointer at address VAR to data allocated with r_alloc,
   resize it to size NBYTES.  Change *VAR to reflect the new block,
   and return this value.  If more memory cannot be allocated, then
   leave *VAR unchanged, and return null.  */

POINTER_TYPE *
r_re_alloc (var, nbytes)
     POINTER_TYPE **var;
     size_t nbytes;
{
  POINTER_TYPE *result;

  if (!r_alloc_initialized)
    r_alloc_init ();
#if defined (REL_ALLOC_MMAP) && !MAP_ANON
  if (mmap_fd == -1)
    r_alloc_init_fd ();
#endif

  if (*var == NULL)
    result = r_alloc (var, nbytes);
  else if (nbytes == 0)
    {
      r_alloc_free (var);
      result = r_alloc (var, nbytes);
    }
  else
    {
      struct mmap_region *r = MMAP_REGION (*var);
      size_t room = r->nbytes_mapped - MMAP_REGION_STRUCT_SIZE;

      if (room < nbytes)
	{
	  /* Must enlarge.  */
	  POINTER_TYPE *old_ptr = *var;

	  /* Try to map additional pages at the end of the region.
	     If that fails, allocate a new region, copy data
	     from the old region, then free it.  */
	  if (mmap_enlarge (r, ROUND (nbytes - room, page_size) / page_size))
	    {
	      r->nbytes_specified = nbytes;
	      *var = result = old_ptr;
	    }
	  else if (r_alloc (var, nbytes))
	    {
	      bcopy (old_ptr, *var, r->nbytes_specified);
	      mmap_free (MMAP_REGION (old_ptr));
	      result = *var;
	      r = MMAP_REGION (result);
	      r->nbytes_specified = nbytes;
	    }
	  else
	    {
	      *var = old_ptr;
	      result = NULL;
	    }
	}
      else if (room - nbytes >= page_size)
	{
	  /* Shrinking by at least a page.  Let's give some memory
	     back to the system.  Note the cast: ROOM and NBYTES are
	     size_t, and the page count passed down must be negative.  */
	  mmap_enlarge (r, - (int) ((room - nbytes) / page_size));
	  result = *var;
	  r->nbytes_specified = nbytes;
	}
      else
	{
	  /* Leave it alone.  */
	  result = *var;
	  r->nbytes_specified = nbytes;
	}
    }

  return result;
}
/* Free a block of relocatable storage whose data is pointed to by
   *VAR.  Store 0 in *VAR to show there's no block allocated.  */

void
r_alloc_free (var)
     POINTER_TYPE **var;
{
  if (!r_alloc_initialized)
    r_alloc_init ();
#if defined (REL_ALLOC_MMAP) && !MAP_ANON
  if (mmap_fd == -1)
    r_alloc_init_fd ();
#endif

  if (*var)
    {
      mmap_free (MMAP_REGION (*var));
      *var = NULL;
    }
}

#endif /* REL_ALLOC_MMAP */
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
  r_alloc_init_fd ();
}