/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
                 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */
#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
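
/* Worked examples (illustrative, assuming a 4096-byte page and
   8-byte doubles; the real values come from getpagesize () and
   sizeof (double) on the host):

     ROUNDUP (5000)       == 8192   -- next page_size multiple
     ROUND_TO_PAGE (5000) == 4096   -- previous page_size multiple
     MEM_ROUNDUP (13)     == 16     -- next MEM_ALIGN multiple

   Both bit masks rely on the rounding quantum being a power of two.  */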
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif
/***********************************************************************
			 Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
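
/* Illustrative layout of one heap (not to scale):

     start           bloc_start                free             end
       |  malloc data     |  relocatable blocs   |  unused space  |

   `free' can grow up to `end'; beyond that the heap must be extended
   via real_morecore, or a new heap must be started.  */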
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently these blocs are
   not reusable while the arena is frozen.  Very inefficient.  */
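
/* For example (addresses purely illustrative), three live blocs abut:

     bloc A: data = 0x10000, size = 0x200
     bloc B: data = 0x10200  (== A->data + A->size), size = 0x100
     bloc C: data = 0x10300  (== B->data + B->size), ...  */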
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Return a pointer
   to that bloc, or NIL_BLOC if there is none.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
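
/* A note on the contract (a sketch, inferred from sbrk semantics and
   the code below): for SIZE > 0, return the page-aligned start of the
   newly provided space; for SIZE == 0, return the current virtual
   break; for SIZE < 0, shrink the arena.  The break is "virtual"
   because the relocatable blocs sitting above malloc's data are
   hidden from the caller.  */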
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
	- ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could
   be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
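
/* Usage sketch (illustrative only; `buf' is a hypothetical caller
   variable, registered so the allocator can update it on relocation):

     static POINTER buf;

     if (r_alloc (&buf, 1024) == 0)
       ...handle allocation failure...
     ...use the 1024 bytes at buf...
     r_re_alloc (&buf, 2048);   -- may move the data and update buf
     r_alloc_free (&buf);       -- frees the bloc and sets buf to 0

   Any call that allocates may relocate blocs, so the data must be
   reached only through the registered variable, never through a
   saved copy of an old pointer value.  */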
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
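
/* Typical pairing (illustrative):

     r_alloc_freeze (0);   -- pin every bloc in place
     ...code that holds raw C pointers into relocatable data...
     r_alloc_thaw ();      -- relocation resumes; blocs freed while
                              frozen are reclaimed here

   Freezes nest: blocs can move again only when the level returns
   to zero.  */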
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}

      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */