/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2015 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>

#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC and not HYBRID_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>
#include <malloc.h>

#endif /* not emacs */

#include "getpagesize.h"

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
void *(*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static void *virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static void *break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGE_ROUNDUP(size) (((size_t) (size) + page_size - 1) \
                            & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
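
/* Worked example of the rounding macros above (illustrative only,
   assuming page_size == 4096 and MEM_ALIGN == 8, i.e. sizeof (double)
   on typical machines):

     PAGE_ROUNDUP (1)    == (1 + 4095) & ~4095    == 4096
     PAGE_ROUNDUP (4096) == (4096 + 4095) & ~4095 == 4096
     MEM_ROUNDUP (13)    == (13 + 7) & ~7         == 16

   Both macros assume the rounding target is a power of 2, so adding
   target - 1 and masking off the low bits yields the next multiple.  */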

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
extern void *(*__morecore) (ptrdiff_t);
#endif


/***********************************************************************
		     Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
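
/* Sketch of one heap's layout, using the field names of struct heap
   below (illustrative; proportions are arbitrary):

     start            bloc_start               free            end
       | malloc data ..... | relocatable blocs | unused space |

   The malloc data grows by pushing bloc_start, and the blocs with it,
   upward; obtain and r_alloc_sbrk below do that work.  */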

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  void *start;
  /* End of memory range of this heap.  */
  void *end;
  /* Start of relocatable data in this heap.  */
  void *bloc_start;
  /* Start of unused space in this heap.  */
  void *free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NULL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently these blocs are
   not reusable while the arena is frozen.  Very inefficient.  */
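
/* Illustration of the abutment invariant stated above: if bloc B has
   B->data == p and B->size == n, then B->next->data == p + n, so the
   live blocs tile the range from first_bloc->data up to break_value
   with no gaps in between.  */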

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  void **variable;
  void *data;
  size_t size;
  void *new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;


/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (void *address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not.
   It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static void *
obtain (void *address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      void *new = real_morecore (0);
      size_t get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  void *bloc_start = (void *) MEM_ROUNDUP ((void *) (new_heap + 1));

	  if (real_morecore ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) PAGE_ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if (real_morecore (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && real_morecore (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  heap_ptr lh_prev;

	  /* This heap should have no blocs in it.  If it does, we
	     cannot return it to the system.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    return;

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  lh_prev = last_heap->prev;
	  /* If the system doesn't want that much memory back, leave
	     last_heap unaltered to reflect that.  This can occur if
	     break_value is still within the original data segment.  */
	  if (real_morecore (- excess) != 0)
	    {
	      last_heap = lh_prev;
	      last_heap->next = NIL_HEAP;
	    }
	}
      else
	{
	  excess = ((char *) last_heap->end
		    - (char *) PAGE_ROUNDUP ((char *) last_heap->end - excess));
	  /* If the system doesn't want that much memory back, leave
	     the end of the last heap unchanged to reflect that.  This
	     can occur if break_value is still within the original
	     data segment.  */
	  if (real_morecore (- excess) != 0)
	    last_heap->end = (char *) last_heap->end - excess;
	}
    }
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (void **ptr)
{
  bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size_t size)
{
  bloc_ptr new_bloc;
  heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = NULL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, void *address)
{
  bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  bloc_ptr tb = b;
	  size_t s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  bloc_ptr b;
  heap_ptr heap;
  void *address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = NULL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
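
/* Illustrative wiring (see r_alloc_init below for the real code):

     real_morecore = __morecore;   -- save the system sbrk wrapper
     __morecore = r_alloc_sbrk;    -- malloc's core requests come here

   After that, every request GNU malloc makes for more core arrives in
   this function, which satisfies it by sliding the relocatable blocs
   upward and handing malloc the space freed up below them.  */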

static void *
r_alloc_sbrk (ptrdiff_t size)
{
  bloc_ptr b;
  void *address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return real_morecore (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      void *new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = PAGE_ROUNDUP (size);

      address = (void *) PAGE_ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (void *) PAGE_ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (void *) PAGE_ROUNDUP (virtual_break_value);
	  else
	    address = (void *) PAGE_ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (void *) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NULL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (char *) (h + 1) <= (char *) new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = ((char *) first_heap->bloc_start
		       - ((char *) virtual_break_value + size));

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (void *) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (void *) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (void *) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

void *
r_alloc (void **ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
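
/* Usage sketch (illustrative, not code from this file): a caller owns
   a pointer variable and registers its ADDRESS, then always reloads
   the pointer through that variable, since any later allocation may
   relocate the data:

     static void *buf;                  -- hypothetical caller variable
     if (r_alloc (&buf, 8192) == NULL)
       ...handle allocation failure...
     ...use buf, re-reading it after any other ralloc call...
     r_alloc_free (&buf);               -- frees and stores 0 in buf

   This is the pattern buffer.c follows for buffer text when REL_ALLOC
   is in use.  */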

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (void **ptr)
{
  bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

void *
r_re_alloc (void **ptr, size_t size)
{
  bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = NULL;
	    }
	  else
	    return NULL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NULL;
	}
    }
  return *ptr;
}
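
/* Usage note (illustrative): growing a bloc relocates the blocs after
   it, so the registered variable must be reloaded after every call:

     void *buf;
     r_alloc (&buf, 100);      -- size is rounded up via MEM_ROUNDUP
     r_re_alloc (&buf, 10000); -- may move the data; buf is updated

   Caching the old value of buf across such calls would leave a
   dangling pointer.  */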

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (void *) sbrk (0));
  assert ((void *) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((void *) PAGE_ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((void *) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((void *) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (void *) h && (void *) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((void *) MEM_ROUNDUP (b->data) == b->data);
      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (void **old, void **new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by *NEW.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    emacs_abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}

void
r_alloc_inhibit_buffer_relocation (int inhibit)
{
  if (use_relocatable_buffers > 1)
    use_relocatable_buffers = 1;
  if (inhibit)
    use_relocatable_buffers--;
  else if (use_relocatable_buffers < 1)
    use_relocatable_buffers++;
}
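
/* The counter makes inhibition nest.  Starting from the initial value
   of 1 (relocation enabled):

     r_alloc_inhibit_buffer_relocation (1);  -- 1 -> 0,  relocation off
     r_alloc_inhibit_buffer_relocation (1);  -- 0 -> -1, still off
     r_alloc_inhibit_buffer_relocation (0);  -- -1 -> 0, still off
     r_alloc_inhibit_buffer_relocation (0);  -- 0 -> 1,  back on

   r_alloc_sbrk falls back to real_morecore whenever the value
   is <= 0.  */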

/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = real_morecore (0);
  if (break_value == NULL)
    emacs_abort ();

  extra_bytes = PAGE_ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  block_input ();
  mallopt (M_TOP_PAD, 64 * 4096);
  unblock_input ();
#else
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  /* Give GNU malloc's morecore some hysteresis so that we move all
     the relocatable blocks much less often.  The number used to be
     64, but alloc.c would override that with 32 in code that was
     removed when SYNC_INPUT became the only input handling mode.
     That code was conditioned on !DOUG_LEA_MALLOC, so the call to
     mallopt above is left unchanged.  (Actually, I think there's no
     system nowadays that uses DOUG_LEA_MALLOC and also uses
     REL_ALLOC.)  */
  __malloc_extra_blocks = 32;
#endif
#endif

#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  first_heap->end = (void *) PAGE_ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  real_morecore ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}