/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */

/* Allocate reclaim list for kind:      */
/* Return TRUE on success.              */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));

    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate a large block of size lw words.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
ptr_t GC_alloc_large(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    struct hblk * h;
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    ptr_t result;

    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lw, k, flags);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lw, k, flags);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lw, k, flags);
    }
    if (h == 0) {
        result = 0;
    } else {
        /* Despite the name, this holds the block size in words. */
        int total_bytes = BYTES_TO_WORDS(n_blocks * HBLKSIZE);
        if (n_blocks > 1) {
            GC_large_allocd_bytes += n_blocks * HBLKSIZE;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = (ptr_t) (h -> hb_body);
        GC_words_wasted += total_bytes - lw;
    }
    return result;
}

/* Allocate a large block of size lw words.  Clear if appropriate.      */
ptr_t GC_alloc_large_and_clear(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    ptr_t result = GC_alloc_large(lw, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First a version that assumes we already      */
/* hold the lock:                               */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
    register word lw;
    register ptr_t op;
    register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = MIN_WORDS;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                  if (!GC_is_initialized) GC_init_inner();
                  if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                  return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                  GC_init_inner();
                  return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        /* Here everything is in a consistent state.    */
        /* We assume the following assignment is        */
        /* atomic.  If we get aborted                   */
        /* after the assignment, we lose an object,     */
        /* but that's benign.                           */
        /* Volatile declarations may need to be added   */
        /* to prevent the compiler from breaking things.*/
        /* If we only execute the second of the         */
        /* following assignments, we lose the free      */
        /* list, but that should still be OK, at least  */
        /* for garbage collected memory.                */
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
        lw = ROUNDED_UP_WORDS(lb);
        op = (ptr_t)GC_alloc_large_and_clear(lw, k, 0);
    }
    GC_words_allocd += lw;

out:
    return op;
}

/* Allocate a composite object of size lb bytes.  The caller guarantees */
/* that pointers past the first page are not relevant.  Caller holds    */
/* allocation lock.                                                     */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register word lw;
    ptr_t op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    op = (ptr_t)GC_alloc_large_and_clear(lw, k, IGNORE_OFF_PAGE);
    GC_words_allocd += lw;
    return op;
}

ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
        DISABLE_SIGNALS();
        LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
        UNLOCK();
        ENABLE_SIGNALS();
    } else {
        word lw;
        word n_blocks;
        GC_bool init;

        lw = ROUNDED_UP_WORDS(lb);
        n_blocks = OBJ_SZ_TO_BLOCKS(lw);
        init = GC_obj_kinds[k].ok_init;
        DISABLE_SIGNALS();
        LOCK();
        result = (ptr_t)GC_alloc_large(lw, k, 0);
        if (0 != result) {
            if (GC_debugging_started) {
                BZERO(result, n_blocks * HBLKSIZE);
            } else {
#               ifdef THREADS
                  /* Clear any memory that might be used for GC descriptors */
                  /* before we release the lock.                            */
                  ((word *)result)[0] = 0;
                  ((word *)result)[1] = 0;
                  ((word *)result)[lw-1] = 0;
                  ((word *)result)[lw-2] = 0;
#               endif
            }
        }
        GC_words_allocd += lw;
        UNLOCK();
        ENABLE_SIGNALS();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}

#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of   */
/* the stack.                                                           */

/* Allocate lb bytes of atomic (pointer-free) data. */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t * opp;
    register word lw;
    DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_aobjfreelist[lw]);
        FASTLOCK();
        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

/* Allocate lb bytes of composite (pointerful) data. */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_objfreelist[lw]);
        FASTLOCK();
        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* See above comment on signals. */
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                          <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                          >= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, NORMAL));
    }
}
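
/* Illustrative usage sketch (hypothetical client code, not part of the */
/* collector): pointerful objects are allocated with GC_malloc so the   */
/* collector scans them; pointer-free payloads use GC_malloc_atomic,    */
/* which is cheaper to collect because it is never scanned.  The type   */
/* and function names below are invented for the example, and the block */
/* is guarded out so it is not compiled into the library.               */
#if 0
#include <string.h>
#include "gc.h"

struct example_node {
    struct example_node * next; /* scanned: allocated with GC_malloc */
    char * name;                /* points to atomic (unscanned) data  */
};

static struct example_node * example_cons(const char * name,
                                          struct example_node * next)
{
    struct example_node * n =
        (struct example_node *)GC_malloc(sizeof(struct example_node));
    /* The name buffer holds no pointers, so atomic allocation is safe. */
    n -> name = (char *)GC_malloc_atomic(strlen(name) + 1);
    strcpy(n -> name, name);
    n -> next = next;
    return n;
}
#endif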

# ifdef REDIRECT_MALLOC
#   ifdef __STDC__
      GC_PTR malloc(size_t lb)
#   else
      GC_PTR malloc(lb)
      size_t lb;
#   endif
    {
      /* It might help to manually inline the GC_malloc call here.      */
      /* But any decent compiler should reduce the extra procedure call */
      /* to at most a jump instruction in this case.                    */
#     if defined(I386) && defined(GC_SOLARIS_THREADS)
        /*
         * Thread initialisation can call malloc before
         * we're ready for it.
         * It's not clear that this is enough to help matters.
         * The thread implementation may well call malloc at other
         * inopportune times.
         */
        if (!GC_is_initialized) return sbrk(lb);
#     endif /* I386 && GC_SOLARIS_THREADS */
      return((GC_PTR)REDIRECT_MALLOC(lb));
    }

#   ifdef __STDC__
      GC_PTR calloc(size_t n, size_t lb)
#   else
      GC_PTR calloc(n, lb)
      size_t n, lb;
#   endif
    {
      /* Relies on the redirection target returning zeroed memory, */
      /* as GC_malloc does.                                         */
      return((GC_PTR)REDIRECT_MALLOC(n*lb));
    }

#   ifndef strdup
#     include <string.h>
#     ifdef __STDC__
        char *strdup(const char *s)
#     else
        char *strdup(s)
        char *s;
#     endif
      {
        size_t len = strlen(s) + 1;     /* includes the terminating NUL */
        char * result = ((char *)REDIRECT_MALLOC(len));
        BCOPY(s, result, len);
        return result;
      }
#   endif /* !defined(strdup) */
    /* If strdup is macro defined, we assume that it actually calls malloc, */
    /* and thus the right thing will happen even without overriding it.     */
    /* This seems to be true on most Linux systems.                         */

# endif /* REDIRECT_MALLOC */
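
/* Illustrative sketch (an assumption about typical use, not taken from */
/* this file): REDIRECT_MALLOC is normally supplied on the compiler     */
/* command line, e.g. -DREDIRECT_MALLOC=GC_malloc, in which case the    */
/* definition above expands roughly as follows.  Guarded out; for       */
/* exposition only.                                                     */
#if 0
GC_PTR malloc(size_t lb)
{
    return((GC_PTR)GC_malloc(lb));      /* REDIRECT_MALLOC(lb) */
}
#endif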

/* Explicitly deallocate an object p. */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    GC_ASSERT(GC_base(p) == p);
#   if defined(REDIRECT_MALLOC) && \
       (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
        || defined(__MINGW32__)) /* Should this be MSWIN32 in general? */
      /* For Solaris, we have to redirect malloc calls during    */
      /* initialization.  For the others, this seems to happen   */
      /* implicitly.                                             */
      /* Don't try to deallocate that memory.                    */
      if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (EXPECT((sz <= MAXOBJSZ), 1)) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        GC_mem_freed += sz;
        /* A signal here can make GC_mem_freed and GC_non_gc_bytes */
        /* inconsistent.  We claim this is benign.                 */
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        /* It's unnecessary to clear the mark bit.  If the     */
        /* object is reallocated, it doesn't matter.  O.w. the */
        /* collector will do it, since it's on a free list.    */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}
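
/* Illustrative usage sketch (hypothetical client code, guarded out):   */
/* GC_free is optional under the collector and must be applied only to  */
/* the base pointer returned by the allocator, per the GC_base          */
/* assertion above.                                                     */
#if 0
#include "gc.h"

static void example_explicit_free(void)
{
    char * p = (char *)GC_malloc(128);
    /* ... use p ... */
    GC_free(p); /* optional: an unreachable p would be reclaimed anyway */
}
#endif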

/* Explicitly deallocate an object p when we already hold the lock.     */
/* Only used for internally allocated objects, so we can take some      */
/* shortcuts.                                                           */
#ifdef THREADS
void GC_free_inner(GC_PTR p)
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
    }
}
#endif /* THREADS */

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
#   define REDIRECT_FREE GC_free
# endif
# ifdef REDIRECT_FREE
#   ifdef __STDC__
      void free(GC_PTR p)
#   else
      void free(p)
      GC_PTR p;
#   endif
    {
#     ifndef IGNORE_FREE
        REDIRECT_FREE(p);
#     endif
    }
# endif /* REDIRECT_FREE */