/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2017 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#include <config.h>

#include <stddef.h>

#ifdef emacs
# include "lisp.h"
# include "blockinput.h"
# include <unistd.h>
#endif

#include "getpagesize.h"
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
void *(*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static void *virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static void *break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */

#define PAGE (getpagesize ())
#define PAGE_ROUNDUP(size) (((size_t) (size) + page_size - 1) \
                            & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
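
/* For instance, assuming a 4096-byte page and an 8-byte double (both
   typical, but host-dependent), PAGE_ROUNDUP (5000) yields 8192 and
   MEM_ROUNDUP (13) yields 16: each macro adds alignment minus one and
   masks off the low bits, which is correct because the alignment is a
   power of two.  */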
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifdef HAVE_MALLOC_H
# include <malloc.h>
#endif
#ifndef DOUG_LEA_MALLOC
extern void *(*__morecore) (ptrdiff_t);
#endif
/***********************************************************************
                         Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
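
/* Schematically, each heap's address range looks like this
   (addresses increase to the right; `free' may be below `end' when
   space has been obtained from the system but not yet used by any
   bloc):

   start            bloc_start                 free             end
     |  malloc data  |    relocatable blocs    |  unused space  |
*/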
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  void *start;
  /* End of memory range of this heap.  */
  void *end;
  /* Start of relocatable data in this heap.  */
  void *bloc_start;
  /* Start of unused space in this heap.  */
  void *free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NULL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  void **variable;
  void *data;
  size_t size;
  void *new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (void *address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static void *
obtain (void *address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      void *new = real_morecore (0);
      size_t get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          void *bloc_start = (void *) MEM_ROUNDUP ((void *) (new_heap + 1));

          if (real_morecore ((char *) bloc_start - (char *) new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) PAGE_ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if (real_morecore (get) != last_heap->end)
        return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && real_morecore (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          heap_ptr lh_prev;

          /* This heap should have no blocs in it.  If it does, we
             cannot return it to the system.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            return;

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          lh_prev = last_heap->prev;
          /* If the system doesn't want that much memory back, leave
             last_heap unaltered to reflect that.  This can occur if
             break_value is still within the original data segment.  */
          if (real_morecore (- excess) != 0)
            {
              last_heap = lh_prev;
              last_heap->next = NIL_HEAP;
            }
        }
      else
        {
          excess = ((char *) last_heap->end
                    - (char *) PAGE_ROUNDUP ((char *) last_heap->end - excess));
          /* If the system doesn't want that much memory back, leave
             the end of the last heap unchanged to reflect that.  This
             can occur if break_value is still within the original
             data segment.  */
          if (real_morecore (- excess) != 0)
            last_heap->end = (char *) last_heap->end - excess;
        }
    }
}
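
/* To illustrate the hysteresis above with made-up numbers: assuming
   4 KB pages, extra_bytes is PAGE_ROUNDUP (50000) == 53248, so
   nothing is returned until more than 106496 bytes sit unused beyond
   break_value, and even then 53248 of them are kept.  This keeps the
   break from oscillating on small allocate/free cycles.  */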
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (void **ptr)
{
  bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
         Don't abort here, as callers might be expecting this, but
         callers that always expect a bloc to be returned should abort
         if one isn't to avoid a memory corruption bug that is
         difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size_t size)
{
  bloc_ptr new_bloc;
  heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = NULL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, void *address)
{
  bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          bloc_ptr tb = b;
          size_t s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  bloc_ptr b;
  heap_ptr heap;
  void *address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
             : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          if (bloc->new_data != bloc->data)
            memmove (bloc->new_data, bloc->data, old_size);
          memset ((char *) bloc->new_data + old_size, 0, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = NULL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
        {
          if (bloc->next != 0 && bloc->next->heap == heap)
            heap->first_bloc = bloc->next;
          else
            heap->first_bloc = heap->last_bloc = NIL_BLOC;
        }
      if (heap->last_bloc == bloc)
        {
          if (bloc->prev != 0 && bloc->prev->heap == heap)
            heap->last_bloc = bloc->prev;
          else
            heap->first_bloc = heap->last_bloc = NIL_BLOC;
        }
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
static void *
r_alloc_sbrk (ptrdiff_t size)
{
  bloc_ptr b;
  void *address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return real_morecore (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could
         not always find a space which is contiguous to the previous.  */
      void *new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = PAGE_ROUNDUP (size);

      address = (void *) PAGE_ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (void *) PAGE_ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (void *) PAGE_ROUNDUP (virtual_break_value);
          else
            address = (void *) PAGE_ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (void *) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NULL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (char *) (h + 1) <= (char *) new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = ((char *) first_heap->bloc_start
                       - ((char *) virtual_break_value + size));

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (void *) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found an additional space below the first heap.  */
          first_heap->start = (void *) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (void *) ((char *) address + size);
  break_value = (last_bloc
                 ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

void *
r_alloc (void **ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
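
/* A minimal usage sketch, kept disabled; the variable and function
   names below are hypothetical.  The caller passes the address of the
   variable that will hold the data, so the allocator can update that
   variable whenever the bloc is relocated behind the caller's back.  */
#if 0
static char *my_text;	/* hypothetical relocatable buffer */

static void
my_text_example (void)
{
  if (r_alloc ((void **) &my_text, 1024) == NULL)
    return;				/* allocation failed */
  my_text[0] = 'x';			/* always access via my_text */
  if (r_re_alloc ((void **) &my_text, 4096) == NULL)
    return;				/* grow failed; my_text unchanged */
  r_alloc_free ((void **) &my_text);	/* frees and sets my_text to 0 */
}
#endif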
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (void **ptr)
{
  bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

void *
r_re_alloc (void **ptr, size_t size)
{
  bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = NULL;
            }
          else
            return NULL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NULL;
        }
    }
  return *ptr;
}
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (void *) sbrk (0));
  assert ((void *) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((void *) PAGE_ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((void *) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((void *) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (void *) h && (void *) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((void *) MEM_ROUNDUP (b->data) == b->data);
      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (void **old, void **new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
        break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    emacs_abort (); /* Already freed? OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
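
/* A sketch of the intended use, with hypothetical variable names:
   after buffer-swap-text exchanges the text pointers a and b of two
   buffers, each bloc still records the variable it was allocated
   with, so consistency would be restored by two matching calls:

     r_alloc_reset_variable (&a, &b);
     r_alloc_reset_variable (&b, &a);

   after which each bloc's `variable' field again names the pointer
   that currently holds its data.  */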
void
r_alloc_inhibit_buffer_relocation (int inhibit)
{
  if (use_relocatable_buffers > 1)
    use_relocatable_buffers = 1;
  if (inhibit)
    use_relocatable_buffers--;
  else if (use_relocatable_buffers < 1)
    use_relocatable_buffers++;
}
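
/* The counter above makes inhibition nest: starting from
   use_relocatable_buffers == 1, two inhibit calls drive it to -1, and
   two matching uninhibit calls are needed before it returns to 1.
   While the value is <= 0, r_alloc_sbrk passes requests straight
   through to real_morecore instead of relocating blocs.  */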
/***********************************************************************
                            Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = real_morecore (0);
  if (break_value == NULL)
    emacs_abort ();

  extra_bytes = PAGE_ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  block_input ();
  mallopt (M_TOP_PAD, 64 * 4096);
  unblock_input ();
#else
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  /* Give GNU malloc's morecore some hysteresis so that we move all
     the relocatable blocks much less often.  The number used to be
     64, but alloc.c would override that with 32 in code that was
     removed when SYNC_INPUT became the only input handling mode.
     That code was conditioned on !DOUG_LEA_MALLOC, so the call to
     mallopt above is left unchanged.  (Actually, I think there's no
     system nowadays that uses DOUG_LEA_MALLOC and also uses
     REL_ALLOC.)  */
  __malloc_extra_blocks = 32;
#endif
#endif

#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  first_heap->end = (void *) PAGE_ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  real_morecore ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
          (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}