/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */

#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
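/* Worked example of the rounding macros above (illustrative only;
   assumes the common case page_size == 4096 and sizeof (double) == 8):

     ROUNDUP (1)       => 4096    -- one byte still costs a whole page
     ROUNDUP (4096)    => 4096    -- already page-aligned
     ROUNDUP (4097)    => 8192
     MEM_ROUNDUP (13)  => 16      -- next multiple of MEM_ALIGN
     MEM_ROUNDUP (16)  => 16

   Both macros rely on page_size and MEM_ALIGN being powers of two,
   so the add-then-mask idiom rounds up without a division.  */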
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
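/* A sketch of one heap's address range (the names are the fields of
   struct heap below; layout only, not to scale):

     start         bloc_start             free              end
       |               |                    |                |
       v               v                    v                v
       +---------------+--------------------+----------------+
       |  malloc data  | relocatable blocs  |  unused space  |
       +---------------+--------------------+----------------+

   Growing the malloc data pushes bloc_start up, which is what forces
   the blocs above it to relocate.  */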
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
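/* The `variable' field above is the heart of the scheme: each bloc
   records the address of the single pointer variable that refers to
   its data, so relocation can update the caller's pointer in place.
   A minimal sketch (hypothetical caller, not part of this file):

     POINTER text;
     r_alloc (&text, 1000);   -- the new bloc's variable == &text
     ...                      -- after any relocation, text already
                              -- points at the moved data

   This is why a caller must not stash a copy of such a pointer and
   expect the copy to stay valid across allocations.  */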
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
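/* A sketch of how the pieces connect once r_alloc_init has swapped
   the hooks (real_morecore and __morecore are the variables declared
   earlier in this file):

     GNU malloc wants more core
       -> (*__morecore) (size)       which is r_alloc_sbrk (size)
            -> relocate blocs out of the way, then
            -> (*real_morecore) (n)  the system's original sbrk-style hook

   So malloc's arena always grows below bloc_start, and the relocatable
   blocs slide upward to make room.  */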
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */
POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
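/* Illustrative lifecycle of a relocatable block (hypothetical caller
   code, not part of this file; Emacs's buffer code is the real user):

     POINTER buf;

     if (r_alloc (&buf, 4096) == 0)
       handle_failure ();        -- buf is set to 0 on failure
     ...
     if (r_re_alloc (&buf, 8192) == 0)
       handle_failure ();        -- buf is left unchanged on failure
     ...
     r_alloc_free (&buf);        -- also stores 0 in buf

   handle_failure is a hypothetical stand-in for the caller's error
   handling.  Note that buf may change value at any allocation, so it
   must be re-read rather than cached in a second variable.  */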
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
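/* Freeze and thaw must be balanced; a sketch of the intended pattern
   (hypothetical caller, for illustration only):

     r_alloc_freeze (65536);   -- blocs hold still from here on
     ...code that may keep raw pointers into bloc data...
     r_alloc_thaw ();          -- relocation permitted again

   r_alloc_thaw aborts if the freeze level would drop below zero, so
   each freeze needs exactly one matching thaw.  */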
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (old, new)
     POINTER *old, *new;
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by *NEW.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
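/* A sketch of the pointer swap this supports (hypothetical code; the
   real caller is buffer-swap-text):

     POINTER a_text, b_text;           -- each refers to its own bloc
     POINTER tmp = a_text;
     a_text = b_text;                  -- swap the two pointer values
     b_text = tmp;
     r_alloc_reset_variable (&a_text, &b_text);
     r_alloc_reset_variable (&b_text, &a_text);

   After the two calls, each bloc records the variable that now holds
   its data, so future relocations update the right pointer.  */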
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */