/**********************************************************************

  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000 Information-technology Promotion Agency, Japan

**********************************************************************/
#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"

# include "ruby/ruby.h"

#define sighandler_t ruby_sighandler_t
/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
/* LIST_HEAD conflicts with sys/queue.h on macOS */
# include <sys/user.h>
#endif
/* MALLOC_HEADERS_END */
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "debug_counter.h"
#include "eval_intern.h"

#include "internal/class.h"
#include "internal/complex.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"

#include "ruby/debug.h"

#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"

#include "transient_heap.h"

#include "vm_callinfo.h"
#include "ractor_core.h"
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap
static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
{
    size_t z;
    bool p;
#if 0

#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);

#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
    p = dz > SIZE_MAX;
    z = (size_t)dz;

#else
    z = x + y;
    p = z < y;

#endif
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };
}
static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}
static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}
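/*
 * Convention used by the overflow helpers above: in struct
 * rbimpl_size_mul_overflow_tag, `left` is the overflow flag and `right`
 * carries the (possibly wrapped) arithmetic result. A minimal caller
 * sketch (hypothetical names, for illustration only):
 *
 *   struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(n, elem_size, header_size);
 *   if (t.left) { handle_overflow(); }
 *   else       { use_byte_count(t.right); }
 */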
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
    UNREACHABLE_RETURN(0);
}
size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}
static size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
    UNREACHABLE_RETURN(0);
}
size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}
static size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
    UNREACHABLE_RETURN(0);
}
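/*
 * The *_or_raise wrappers let callers compute byte counts without
 * hand-written overflow checks: on overflow they raise `exc`, except
 * while the GC itself is running, where rb_memerror() is used instead.
 * Hypothetical usage sketch:
 *
 *   size_t bytes = rb_size_mul_or_raise(len, sizeof(VALUE), rb_eArgError);
 */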
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#endif
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#endif
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#endif
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 is disable */
#endif
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#endif

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO  0.20
#endif
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#endif
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO  0.65
#endif

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#endif
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#endif

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#endif
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
#endif

#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#endif
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#endif
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#endif

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
typedef struct {
    size_t heap_init_slots;
    size_t heap_free_slots;
    double growth_factor;
    size_t growth_max_slots;

    double heap_free_slots_min_ratio;
    double heap_free_slots_goal_ratio;
    double heap_free_slots_max_ratio;
    double oldobject_limit_factor;

    size_t malloc_limit_min;
    size_t malloc_limit_max;
    double malloc_limit_growth_factor;

    size_t oldmalloc_limit_min;
    size_t oldmalloc_limit_max;
    double oldmalloc_limit_growth_factor;

    VALUE gc_stress;
} ruby_gc_params_t;
static ruby_gc_params_t gc_params = {
    GC_HEAP_INIT_SLOTS,
    GC_HEAP_FREE_SLOTS,
    GC_HEAP_GROWTH_FACTOR,
    GC_HEAP_GROWTH_MAX_SLOTS,

    GC_HEAP_FREE_SLOTS_MIN_RATIO,
    GC_HEAP_FREE_SLOTS_GOAL_RATIO,
    GC_HEAP_FREE_SLOTS_MAX_RATIO,
    GC_HEAP_OLDOBJECT_LIMIT_FACTOR,

    GC_MALLOC_LIMIT_MIN,
    GC_MALLOC_LIMIT_MAX,
    GC_MALLOC_LIMIT_GROWTH_FACTOR,

    GC_OLDMALLOC_LIMIT_MIN,
    GC_OLDMALLOC_LIMIT_MAX,
    GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,

    FALSE,
};
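/*
 * Note: these compiled-in defaults are normally tuned at boot from the
 * corresponding RUBY_GC_* environment variables (e.g.
 * RUBY_GC_HEAP_GROWTH_FACTOR); that parsing code is outside this excerpt.
 */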
/* RGENGC_DEBUG:
 * enable to embed GC debugging information.
 * 1: basic information
 * 2: remember set operation
 */
#ifndef RGENGC_DEBUG
# ifdef RUBY_DEVEL
#  define RGENGC_DEBUG -1
# else
#  define RGENGC_DEBUG 0
# endif
#endif
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
#elif defined(HAVE_VA_ARGS_MACRO)
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
#else
# define RGENGC_DEBUG_ENABLED(level) 0
#endif
int ruby_rgengc_debug;
/* RGENGC_CHECK_MODE
 * 0: disable all assertions
 * 1: enable assertions (to debug RGenGC)
 * 2: enable internal consistency check at each GC (for debugging)
 * 3: enable internal consistency check at each GC step (for debugging)
 * 4: enable liveness check
 * 5: show all references
 */
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#endif

// Note: using RUBY_ASSERT_WHEN() would expand the macro in expr (info by nobu).
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)

/* RGENGC_OLD_NEWOBJ_CHECK
 * 0: disable all assertions
 * >0: make an OLD object on new object creation.
 *
 * Makes one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
 */
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0
#endif
/* RGENGC_PROFILE
 * 0: disable RGenGC profiling
 * 1: enable profiling for basic information
 * 2: enable profiling for each type
 */
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#endif

/* RGENGC_ESTIMATE_OLDMALLOC
 * Enable/disable estimating the increase in malloc'ed size by old objects.
 * If the estimate exceeds the threshold, a full GC is invoked.
 * 0: disable estimation.
 * 1: enable estimation.
 */
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#endif

/* RGENGC_FORCE_MAJOR_GC
 * Force major/full GC if this macro is not 0.
 */
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#endif

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#endif
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#endif
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#endif
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#endif
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#endif
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#endif
#else
#define MALLOC_ALLOCATED_SIZE 0
#endif
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#endif

#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0
#endif

#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#endif
typedef enum {
    GPR_FLAG_NONE               = 0x000,
    GPR_FLAG_MAJOR_BY_NOFREE    = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN    = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY     = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE     = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
#endif
    GPR_FLAG_MAJOR_MASK         = 0x0ff,

    GPR_FLAG_NEWOBJ             = 0x100,
    GPR_FLAG_MALLOC             = 0x200,
    GPR_FLAG_METHOD             = 0x400,
    GPR_FLAG_CAPI               = 0x800,
    GPR_FLAG_STRESS             = 0x1000,

    GPR_FLAG_IMMEDIATE_SWEEP    = 0x2000,
    GPR_FLAG_HAVE_FINALIZE      = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK     = 0x8000,
    GPR_FLAG_FULL_MARK          = 0x10000,
    GPR_FLAG_COMPACT            = 0x20000,

    GPR_DEFAULT_REASON =
        (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
         GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
} gc_profile_record_flag;
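/*
 * A GC "reason" is a bitwise OR of the flags above; for example, a major
 * GC triggered by the old-object count during allocation could be
 * recorded as (GPR_FLAG_MAJOR_BY_OLDGEN | GPR_FLAG_NEWOBJ |
 * GPR_FLAG_FULL_MARK) -- an illustrative combination, not output from a
 * real run.
 */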
typedef struct gc_profile_record {
    double gc_invoke_time;

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;
    size_t moved_objects;

#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
    long maxrss;
    long minflt;
    long majflt;
#endif
#endif
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#endif

#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#endif
} gc_profile_record;
#define FL_FROM_FREELIST FL_USER0

#define RMOVED(obj) ((struct RMoved *)(obj))
typedef struct RVALUE {
    union {
        struct {
            VALUE flags;        /* always 0 for freed obj */
            struct RVALUE *next;
        } free;
        struct RBasic  basic;
        struct RObject object;
        struct RFloat  flonum;
        struct RString string;
        struct RRegexp regexp;
        struct RTypedData typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RRational rational;
        struct RComplex complex;
        struct RSymbol symbol;
        struct vm_throw_data throw_data;
        struct vm_ifunc ifunc;
        struct rb_method_entry_struct ment;
        const rb_iseq_t iseq;
        struct rb_imemo_tmpbuf_struct alloc;
        struct {
            struct RBasic basic;
            VALUE v1;
            VALUE v2;
            VALUE v3;
        } values;
    } as;
#if GC_DEBUG
    const char *file;
    int line;
#endif
} RVALUE;

#if GC_DEBUG
STATIC_ASSERT(sizeof_rvalue, offsetof(RVALUE, file) == SIZEOF_VALUE * 5);
#else
STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == SIZEOF_VALUE * 5);
#endif
STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
typedef uintptr_t bits_t;
enum {
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};
#define popcount_bits rb_popcount_intptr
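/*
 * bits_t is one word of the per-page bitmaps: on an LP64 platform
 * BITS_SIZE is 8 and BITS_BITLENGTH is 64, so one bits_t word tracks 64
 * object slots.
 */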
struct heap_page_header {
    struct heap_page *page;
};

struct heap_page_body {
    struct heap_page_header header;
    /* RVALUE values[]; */
};

struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};
#define STACK_CHUNK_SIZE 500

typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

typedef struct mark_stack {
    stack_chunk_t *chunk;
    stack_chunk_t *cache;
    size_t unused_cache_size;
} mark_stack_t;
#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)

typedef struct rb_heap_struct {
    struct heap_page *free_pages;
    struct list_head pages;
    struct heap_page *sweeping_page; /* iterator for .pages */
    struct heap_page *compact_cursor;
    RVALUE * compact_cursor_index;
#if GC_ENABLE_INCREMENTAL_MARK
    struct heap_page *pooled_pages;
#endif
    size_t total_pages;  /* total page count in a heap */
    size_t total_slots;  /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
} rb_heap_t;

typedef struct rb_size_pool_struct {
    short slot_size;

    size_t allocatable_pages;

    /* Sweeping statistics */
    size_t freed_slots;
    size_t empty_slots;

    /* Global statistics */
    size_t force_major_gc_count;

    rb_heap_t eden_heap;
    rb_heap_t tomb_heap;
} rb_size_pool_t;
typedef struct rb_objspace {
    struct {
        size_t limit;
        size_t increase;
#if MALLOC_ALLOCATED_SIZE
        size_t allocated_size;
#endif
    } malloc_params;

    struct {
        unsigned int mode : 2;
        unsigned int immediate_sweep : 1;
        unsigned int dont_gc : 1;
        unsigned int dont_incremental : 1;
        unsigned int during_gc : 1;
        unsigned int during_compacting : 1;
        unsigned int gc_stressful : 1;
        unsigned int has_hook : 1;
        unsigned int during_minor_gc : 1;
#if GC_ENABLE_INCREMENTAL_MARK
        unsigned int during_incremental_marking : 1;
#endif
        unsigned int measure_gc : 1;
    } flags;

    rb_event_flag_t hook_events;
    size_t total_allocated_objects;
    VALUE next_object_id;

    rb_size_pool_t size_pools[SIZE_POOL_COUNT];

    struct {
        rb_atomic_t finalizing;
    } atomic_flags;

    mark_stack_t mark_stack;

    struct {
        struct heap_page **sorted;
        size_t allocated_pages;
        size_t allocatable_pages;
        size_t sorted_length;
        RVALUE *range[2];
        size_t freeable_pages;

        /* final */
        size_t final_slots;
        VALUE deferred_final;
    } heap_pages;

    st_table *finalizer_table;

    struct {
        int run;
        unsigned int latest_gc_info;
        gc_profile_record *records;
        gc_profile_record *current_record;

#if GC_PROFILE_MORE_DETAIL
        double prepare_time;
#endif

        size_t minor_gc_count;
        size_t major_gc_count;
        size_t compact_count;
        size_t read_barrier_faults;
#if RGENGC_PROFILE > 0
        size_t total_generated_normal_object_count;
        size_t total_generated_shady_object_count;
        size_t total_shade_operation_count;
        size_t total_promoted_count;
        size_t total_remembered_normal_object_count;
        size_t total_remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
        size_t generated_normal_object_count_types[RUBY_T_MASK];
        size_t generated_shady_object_count_types[RUBY_T_MASK];
        size_t shade_operation_count_types[RUBY_T_MASK];
        size_t promoted_types[RUBY_T_MASK];
        size_t remembered_normal_object_count_types[RUBY_T_MASK];
        size_t remembered_shady_object_count_types[RUBY_T_MASK];
#endif
#endif /* RGENGC_PROFILE */

        /* temporary profiling space */
        double gc_sweep_start_time;
        size_t total_allocated_objects_at_gc_start;
        size_t heap_used_at_gc_start;

        /* basic statistics */
        size_t total_freed_objects;
        size_t total_allocated_pages;
        size_t total_freed_pages;
        uint64_t total_time_ns;
        struct timespec start_time;
    } profile;

    struct gc_list *global_list;

    VALUE gc_stress_mode;

    struct {
        int need_major_gc;
        size_t last_major_gc;
        size_t uncollectible_wb_unprotected_objects;
        size_t uncollectible_wb_unprotected_objects_limit;
        size_t old_objects;
        size_t old_objects_limit;

#if RGENGC_ESTIMATE_OLDMALLOC
        size_t oldmalloc_increase;
        size_t oldmalloc_increase_limit;
#endif

#if RGENGC_CHECK_MODE >= 2
        struct st_table *allrefs_table;
#endif
    } rgengc;

    struct {
        size_t considered_count_table[T_MASK];
        size_t moved_count_table[T_MASK];
    } rcompactor;

#if GC_ENABLE_INCREMENTAL_MARK
    struct {
        size_t pooled_slots;
        size_t step_slots;
    } rincgc;
#endif

    st_table *id_to_obj_tbl;
    st_table *obj_to_id_tbl;

#if GC_DEBUG_STRESS_TO_CLASS
    VALUE stress_to_class;
#endif
} rb_objspace_t;
#if defined(__APPLE__) && defined(__LP64__) && !defined(HEAP_PAGE_ALIGN_LOG)
/* for slow mmap: 64KiB */
#define HEAP_PAGE_ALIGN_LOG 16
#endif

#ifndef HEAP_PAGE_ALIGN_LOG
/* default tiny heap size: 16KB */
#define HEAP_PAGE_ALIGN_LOG 14
#endif
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
enum {
    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
};
#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
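/*
 * Worked example for a typical 64-bit build (HEAP_PAGE_ALIGN_LOG == 14):
 * HEAP_PAGE_SIZE is 16384 bytes and sizeof(RVALUE) is 5 * SIZEOF_VALUE =
 * 40 bytes (see the STATIC_ASSERTs above), so HEAP_PAGE_OBJ_LIMIT is
 * (16384 - sizeof(struct heap_page_header)) / 40 = 409 slots, and each
 * per-page bitmap needs HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(16384,
 * 40), 64) = 7 words.
 */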
#ifdef HAVE_MMAP
# if HAVE_CONST_PAGE_SIZE
/* If PAGE_SIZE is available as a compile-time constant, compare it directly. */
static const bool USE_MMAP_ALIGNED_ALLOC = (PAGE_SIZE <= HEAP_PAGE_SIZE);
# elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
/* PAGE_SIZE <= HEAP_PAGE_SIZE */
static const bool USE_MMAP_ALIGNED_ALLOC = true;
# else
/* Otherwise, fall back to determining if we can use mmap at runtime. */
#  define USE_MMAP_ALIGNED_ALLOC (use_mmap_aligned_alloc != false)

static bool use_mmap_aligned_alloc;
# endif
#elif !defined(__MINGW32__) && !defined(_WIN32)
static const bool USE_MMAP_ALIGNED_ALLOC = false;
#endif
struct heap_page {
    short slot_size;
    short total_slots;
    short free_slots;
    struct {
        unsigned int before_sweep : 1;
        unsigned int has_remembered_objects : 1;
        unsigned int has_uncollectible_shady_objects : 1;
        unsigned int in_tomb : 1;
    } flags;

    rb_size_pool_t *size_pool;

    struct heap_page *free_next;
    RVALUE *start;
    RVALUE *freelist;
    struct list_node page_node;

    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
    /* the following three bitmaps are cleared at the beginning of full GC */
    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];

    /* If set, the object is not movable */
    bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
};
#define GET_PAGE_BODY(x)   ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x)   (GET_PAGE_HEADER(x)->page)

#define NUM_IN_PAGE(p)   (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p)  (NUM_IN_PAGE(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p)    ((bits_t)1 << BITMAP_OFFSET(p))

/* Bitmap Operations */
#define MARKED_IN_BITMAP(bits, p)    ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p)      ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p)     ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
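/*
 * Worked example of the bitmap addressing above (64-bit, 40-byte base
 * slots): an object at page offset 0x3200 has NUM_IN_PAGE = 0x3200/40 =
 * 320, so its flag sits at bitmap word 320/64 = 5, bit 320%64 = 0.
 * Because pages are HEAP_PAGE_ALIGN-aligned, GET_PAGE_BODY() recovers
 * the owning page from a raw object pointer just by masking the low
 * address bits.
 */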
#define GET_HEAP_MARK_BITS(x)           (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x)         (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)  (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x)        (&GET_HEAP_PAGE(x)->marking_bits[0])

#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)

#define ruby_initial_gc_stress gc_params.gc_stress
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define malloc_limit              objspace->malloc_params.limit
#define malloc_increase           objspace->malloc_params.increase
#define malloc_allocated_size     objspace->malloc_params.allocated_size
#define heap_pages_sorted         objspace->heap_pages.sorted
#define heap_allocated_pages      objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length  objspace->heap_pages.sorted_length
#define heap_pages_lomem          objspace->heap_pages.range[0]
#define heap_pages_himem          objspace->heap_pages.range[1]
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots    objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define size_pools                objspace->size_pools
#define during_gc                 objspace->flags.during_gc
#define finalizing                objspace->atomic_flags.finalizing
#define finalizer_table           objspace->finalizer_table
#define global_list               objspace->global_list
#define ruby_gc_stressful         objspace->flags.gc_stressful
#define ruby_gc_stress_mode       objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class           objspace->stress_to_class
#else
#define stress_to_class           0
#endif
#if 0
#define dont_gc_on()   (fprintf(stderr, "dont_gc_on@%s:%d\n",      __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off()  (fprintf(stderr, "dont_gc_off@%s:%d\n",     __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
#define dont_gc_val()  (objspace->flags.dont_gc)
#else
#define dont_gc_on()   (objspace->flags.dont_gc = 1)
#define dont_gc_off()  (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (((int)(b)), objspace->flags.dont_gc = (b))
#define dont_gc_val()  (objspace->flags.dont_gc)
#endif
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
#if RGENGC_CHECK_MODE > 0
    switch (mode) {
      case gc_mode_none:
      case gc_mode_marking:
      case gc_mode_sweeping:
        break;
      default:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    }
#endif
    return mode;
}

static inline bool
has_sweeping_pages(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
            return TRUE;
        }
    }
    return FALSE;
}
static inline size_t
heap_eden_total_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
    }
    return count;
}

static inline size_t
heap_eden_total_slots(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
    }
    return count;
}

static inline size_t
heap_tomb_total_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
    }
    return count;
}

static inline size_t
heap_allocatable_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pools[i].allocatable_pages;
    }
    return count;
}

static inline size_t
heap_allocatable_slots(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
    }
    return count;
}
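/*
 * Size pools double their slot size per index (slot_size = sizeof(RVALUE)
 * << i; see rb_objspace_alloc below), so a pool whose slots span k base
 * slots contributes HEAP_PAGE_OBJ_LIMIT / k slots per allocatable page.
 */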
#define gc_mode(objspace)           gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))

#define is_marking(objspace)        (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace)       (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace)   ((objspace)->flags.during_minor_gc == FALSE)
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#else
#define is_incremental_marking(objspace) FALSE
#endif
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#else
#define will_be_incremental_marking(objspace) FALSE
#endif
#define is_lazy_sweeping(objspace)  (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))

#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif
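/*
 * Example of the id encoding above: when long is pointer-sized, a heap
 * object at address 0x7f0012345670 gets object id 0x7f0012345671 (the
 * address with FIXNUM_FLAG, the low bit, set); obj_id_to_ref() clears
 * that bit to recover the reference. Address chosen for illustration.
 */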
#define RANY(o) ((RVALUE*)(o))

struct RZombie {
    struct RBasic basic;
    VALUE next;
    void (*dfree)(void *);
    void *data;
};

#define RZOMBIE(o) ((struct RZombie *)(o))
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

int ruby_disable_gc = 0;
int ruby_enable_autocompact = 0;
void rb_iseq_mark(const rb_iseq_t *iseq);
void rb_iseq_update_references(rb_iseq_t *iseq);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);

void rb_gcdebug_print_obj_condition(VALUE obj);
static VALUE define_final0(VALUE obj, VALUE block);

NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));

static void init_mark_stack(mark_stack_t *stack);

static int ready_to_gc(rb_objspace_t *objspace);

static int garbage_collect(rb_objspace_t *, unsigned int reason);

static int  gc_start(rb_objspace_t *objspace, unsigned int reason);
static void gc_rest(rb_objspace_t *objspace);

enum gc_enter_event {
    gc_enter_event_start,
    gc_enter_event_mark_continue,
    gc_enter_event_sweep_continue,
    gc_enter_event_rest,
    gc_enter_event_finalizer,
    gc_enter_event_rb_memerror,
};

static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);

static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static int  gc_marks_finish(rb_objspace_t *objspace);
static void gc_marks_rest(rb_objspace_t *objspace);
static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

static void gc_sweep(rb_objspace_t *objspace);
static void gc_sweep_start(rb_objspace_t *objspace);
static void gc_sweep_finish(rb_objspace_t *objspace);
static int  gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
static void gc_sweep_rest(rb_objspace_t *objspace);
static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);

static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
static int gc_mark_stacked_objects_all(rb_objspace_t *);
static void gc_grey(rb_objspace_t *objspace, VALUE ptr);

static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));

static void   push_mark_stack(mark_stack_t *, VALUE);
static int    pop_mark_stack(mark_stack_t *, VALUE *);
static size_t mark_stack_size(mark_stack_t *stack);
static void   shrink_stack_chunk_cache(mark_stack_t *stack);

static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
static int gc_verify_heap_pages(rb_objspace_t *objspace);

static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
static VALUE gc_disable_no_rest(rb_objspace_t *);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_timer_start(rb_objspace_t *);
static inline void gc_prof_timer_stop(rb_objspace_t *);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
static inline void gc_prof_set_heap_info(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
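/*
 * Compaction helper: after objects move, every stored reference must be
 * redirected to RMOVED(...)->destination. Illustrative use on a VALUE
 * field (not a specific call site from this excerpt):
 *
 *   UPDATE_IF_MOVED(objspace, RANY(obj)->as.basic.klass);
 */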
#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)

#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
#else
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
#endif
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);
/* tick timers:
 * 1 - TSC (H/W Time Stamp Counter)
 * 2 - getrusage
 */
#ifndef TICK_TYPE
#define TICK_TYPE 1
#endif

#if USE_TICK_T

#if TICK_TYPE == 1
/* the following code is only for internal tuning. */

/* Source code to use RDTSC is quoted and modified from
 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
 */

#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static inline tick_t
tick(void)
{
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
    return x;
}

#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
}

#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long long val = __builtin_ppc_get_timebase();
    return val;
}

#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
#define PRItick "lu"

static __inline__ tick_t
tick(void)
{
    unsigned long val;
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
    return val;
}

#elif defined(_WIN32) && defined(_MSC_VER)
#include <intrin.h>
typedef unsigned __int64 tick_t;
#define PRItick "llu"

static inline tick_t
tick(void)
{
    return __rdtsc();
}

#else /* use clock */
typedef clock_t tick_t;
#define PRItick "llu"

static inline tick_t
tick(void)
{
    return clock();
}
#endif /* TSC */

#elif TICK_TYPE == 2
typedef double tick_t;
#define PRItick "4.9f"

static inline tick_t
tick(void)
{
    return getrusage_time();
}
#else /* TICK_TYPE */
#error "choose tick type"
#endif /* TICK_TYPE */

#define MEASURE_LINE(expr) do { \
        volatile tick_t start_time = tick(); \
        volatile tick_t end_time; \
        expr; \
        end_time = tick(); \
        fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
    } while (0)

#else /* USE_TICK_T */
#define MEASURE_LINE(expr) expr
#endif /* USE_TICK_T */
static inline void *
asan_unpoison_object_temporary(VALUE obj)
{
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
    return ptr;
}
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f)  FL_CHECK2("FL_TEST2",  x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f)   FL_CHECK2("FL_SET2",   x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))

#define RVALUE_MARK_BITMAP(obj)           MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj)            MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj)     MARKED_IN_BITMAP((page)->mark_bits, (obj))

#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)  MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))

#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)  MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj)        MARKED_IN_BITMAP((page)->marking_bits, (obj))

#define RVALUE_OLD_AGE   3
#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
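/*
 * The age lives in the two FL_PROMOTED flag bits (shifted by
 * RVALUE_AGE_SHIFT, i.e. bits 5-6 of RBasic flags), so it counts 0..3;
 * once it reaches RVALUE_OLD_AGE (both bits set) the object is "old" and
 * is only reclaimed by a major GC. See RVALUE_FLAGS_AGE() and
 * RVALUE_OLD_P_RAW() below.
 */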
static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);

static int
RVALUE_FLAGS_AGE(VALUE flags)
{
    return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
}
static int
check_rvalue_consistency_force(const VALUE obj, int terminate)
{
    int err = 0;
    rb_objspace_t *objspace = &rb_objspace;

    RB_VM_LOCK_ENTER_NO_BARRIER();
    {
        if (SPECIAL_CONST_P(obj)) {
            fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
            err++;
        }
        else if (!is_pointer_to_heap(objspace, (void *)obj)) {
            /* check if it is in tomb_pages */
            struct heap_page *page = NULL;
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                    if (&page->start[0] <= (RVALUE *)obj &&
                        (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
                        fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                                (void *)obj, (void *)page);
                        err++;
                        goto skip;
                    }
                }
            }
            fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
            err++;
          skip:
            ;
        }
        else {
            const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
            const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
            const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
            const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
            const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

            if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
                fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
                err++;
            }
            if (BUILTIN_TYPE(obj) == T_NONE) {
                fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
                err++;
            }
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
                err++;
            }

            obj_memsize_of((VALUE)obj, FALSE);

            /* check generation
             *
             * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
             */
            if (age > 0 && wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
                err++;
            }

            if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
                err++;
            }

            if (!is_full_marking(objspace)) {
                if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                    fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                            obj_info(obj), age);
                    err++;
                }
                if (remembered_bit && age != RVALUE_OLD_AGE) {
                    fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                            obj_info(obj), age);
                    err++;
                }
            }

            /* check coloring
             *
             *               marking:false marking:true
             * marked:false  white         *invalid*
             * marked:true   black         grey
             */
            if (is_incremental_marking(objspace) && marking_bit) {
                if (!is_marking(objspace) && !mark_bit) {
                    fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
                    err++;
                }
            }
        }
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
    }
    return err;
}
#if RGENGC_CHECK_MODE == 0
static inline VALUE
check_rvalue_consistency(const VALUE obj)
{
    return obj;
}
#else
static VALUE
check_rvalue_consistency(const VALUE obj)
{
    check_rvalue_consistency_force(obj, TRUE);
    return obj;
}
#endif

static inline int
gc_object_moved_p(rb_objspace_t *objspace, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        return FALSE;
    }
    else {
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

        int ret = BUILTIN_TYPE(obj) == T_MOVED;
        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
        return ret;
    }
}
static inline int
RVALUE_MARKED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;
}

static inline int
RVALUE_PINNED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;
}

static inline int
RVALUE_WB_UNPROTECTED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
}

static inline int
RVALUE_MARKING(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
}

static inline int
RVALUE_REMEMBERED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
}

static inline int
RVALUE_UNCOLLECTIBLE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
}

static inline int
RVALUE_OLD_P_RAW(VALUE obj)
{
    const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
    return (RBASIC(obj)->flags & promoted) == promoted;
}

static inline int
RVALUE_OLD_P(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
}

#if RGENGC_CHECK_MODE || GC_DEBUG
static int
RVALUE_AGE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
}
#endif
static inline void
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
    objspace->rgengc.old_objects++;
    rb_transient_heap_promote(obj);

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
#endif
}

static inline void
RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
}

static inline VALUE
RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
{
    flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
    flags |= (age << RVALUE_AGE_SHIFT);
    return flags;
}

/* set age to age+1 */
static inline void
RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
{
    VALUE flags = RBASIC(obj)->flags;
    int age = RVALUE_FLAGS_AGE(flags);

    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    }

    age++;
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);

    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    }
    check_rvalue_consistency(obj);
}
/* set age to RVALUE_OLD_AGE */
static inline void
RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);
}

/* set age to RVALUE_OLD_AGE - 1 */
static inline void
RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);

    check_rvalue_consistency(obj);
}

static inline void
RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
}

static inline void
RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }

    RVALUE_DEMOTE_RAW(objspace, obj);

    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;
    }

    check_rvalue_consistency(obj);
}

static inline void
RVALUE_AGE_RESET_RAW(VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
}

static inline void
RVALUE_AGE_RESET(VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
}
static inline int
RVALUE_BLACK_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
}

static inline int
RVALUE_GREY_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
}

static inline int
RVALUE_WHITE_P(VALUE obj)
{
    return RVALUE_MARKED(obj) == FALSE;
}
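/*
 * Tri-color view of the two bitmaps, matching the coloring table in
 * check_rvalue_consistency_force(): white = unmarked, grey = marked and
 * still queued for scanning (marking bit set), black = marked and fully
 * scanned. Incremental marking relies on the write barrier to avoid
 * black objects pointing at white ones.
 */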
/*
  --------------------------- ObjectSpace -----------------------------
*/

static inline void *
calloc1(size_t n)
{
    return calloc(1, n);
}
rb_objspace_t *
rb_objspace_alloc(void)
{
    rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
    objspace->flags.measure_gc = 1;
    malloc_limit = gc_params.malloc_limit_min;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        size_pool->slot_size = sizeof(RVALUE) * (1 << i);

        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
        list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
    }

    return objspace;
}

static void free_stack_chunks(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);

void
rb_objspace_free(rb_objspace_t *objspace)
{
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");

    if (objspace->profile.records) {
        free(objspace->profile.records);
        objspace->profile.records = 0;
    }

    struct gc_list *list, *next;
    for (list = global_list; list; list = next) {
        next = list->next;
        xfree(list);
    }

    if (heap_pages_sorted) {
        size_t i;
        for (i = 0; i < heap_allocated_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        }
        free(heap_pages_sorted);
        heap_allocated_pages = 0;
        heap_pages_sorted_length = 0;
        heap_pages_lomem = 0;
        heap_pages_himem = 0;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
            SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
        }
    }
    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);
    free_stack_chunks(&objspace->mark_stack);
    free(objspace);
}
static void
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
{
    struct heap_page **sorted;
    size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);

    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
              next_length, size);

    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
    }
    else {
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
    }

    if (sorted == 0) {
        rb_memerror();
    }

    heap_pages_sorted_length = next_length;
}

static void
heap_pages_expand_sorted(rb_objspace_t *objspace)
{
    /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length
     * because heap_allocatable_pages contains heap_tomb->total_pages (recycle heap_tomb pages).
     * however, if there are pages which do not have empty slots, then try to create new pages
     * so that the additional allocatable_pages counts (heap_tomb->total_pages) are added.
     */
    size_t next_length = heap_allocatable_pages(objspace);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
    }

    if (next_length > heap_pages_sorted_length) {
        heap_pages_expand_sorted_to(objspace, next_length);
    }

    GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
}

static void
size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
{
    size_pool->allocatable_pages = s;
    heap_pages_expand_sorted(objspace);
}

static inline void
heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    ASSERT_vm_locking();

    RVALUE *p = (RVALUE *)obj;

    asan_unpoison_object(obj, false);

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);

    p->as.free.flags = 0;
    p->as.free.next = page->freelist;
    page->freelist = p;
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE *));

    if (RGENGC_CHECK_MODE &&
        /* obj should belong to page */
        !(&page->start[0] <= (RVALUE *)obj &&
          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
          obj % sizeof(RVALUE) == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    }

    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
}

static void
heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->free_pages;
    heap->free_pages = page;

    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE *));
}

#if GC_ENABLE_INCREMENTAL_MARK
static void
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->pooled_pages;
    heap->pooled_pages = page;
    objspace->rincgc.pooled_slots += page->free_slots;

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE *));
}
#endif

static void
heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;
}

static void rb_aligned_free(void *ptr, size_t size);

static void
heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
    heap_allocated_pages--;
    objspace->profile.total_freed_pages++;
    rb_aligned_free(GET_PAGE_BODY(page->start), HEAP_PAGE_SIZE);
    free(page);
}

static void
heap_pages_free_unused_pages(rb_objspace_t *objspace)
{
    size_t i, j;

    bool has_pages_in_tomb_heap = FALSE;
    for (i = 0; i < SIZE_POOL_COUNT; i++) {
        if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
            has_pages_in_tomb_heap = TRUE;
            break;
        }
    }

    if (has_pages_in_tomb_heap) {
        for (i = j = 1; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];

            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
                heap_page_free(objspace, page);
            }
            else {
                if (i != j) {
                    heap_pages_sorted[j] = page;
                }
                j++;
            }
        }

        struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
        uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
        GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
        heap_pages_himem = (RVALUE *)himem;

        GC_ASSERT(j == heap_allocated_pages);
    }
}
*
1958 heap_page_allocate(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
)
1960 uintptr_t start
, end
, p
;
1961 struct heap_page
*page
;
1962 struct heap_page_body
*page_body
= 0;
1963 uintptr_t hi
, lo
, mid
;
1964 size_t stride
= size_pool
->slot_size
;
1965 unsigned int limit
= (unsigned int)((HEAP_PAGE_SIZE
- sizeof(struct heap_page_header
)))/(int)stride
;
1967 /* assign heap_page body (contains heap_page_header and RVALUEs) */
1968 page_body
= (struct heap_page_body
*)rb_aligned_malloc(HEAP_PAGE_ALIGN
, HEAP_PAGE_SIZE
);
1969 if (page_body
== 0) {
1973 /* assign heap_page entry */
1974 page
= calloc1(sizeof(struct heap_page
));
1976 rb_aligned_free(page_body
, HEAP_PAGE_SIZE
);
1980 /* adjust obj_limit (object number available in this page) */
1981 start
= (uintptr_t)((VALUE
)page_body
+ sizeof(struct heap_page_header
));
1983 if ((VALUE
)start
% sizeof(RVALUE
) != 0) {
1984 int delta
= (int)sizeof(RVALUE
) - (start
% (int)sizeof(RVALUE
));
1985 start
= start
+ delta
;
1986 GC_ASSERT(NUM_IN_PAGE(start
) == 0 || NUM_IN_PAGE(start
) == 1);
1988 /* Find a num in page that is evenly divisible by `stride`.
1989 * This is to ensure that objects are aligned with bit planes.
1990 * In other words, ensure there are an even number of objects
1992 if (NUM_IN_PAGE(start
) == 1) {
1993 start
+= stride
- sizeof(RVALUE
);
1996 GC_ASSERT(NUM_IN_PAGE(start
) * sizeof(RVALUE
) % stride
== 0);
1998 limit
= (HEAP_PAGE_SIZE
- (int)(start
- (uintptr_t)page_body
))/(int)stride
;
2000 end
= start
+ (limit
* (int)stride
);
2002 /* setup heap_pages_sorted */
2004 hi
= (uintptr_t)heap_allocated_pages
;
2006 struct heap_page
*mid_page
;
2008 mid
= (lo
+ hi
) / 2;
2009 mid_page
= heap_pages_sorted
[mid
];
2010 if ((uintptr_t)mid_page
->start
< start
) {
2013 else if ((uintptr_t)mid_page
->start
> start
) {
2017 rb_bug("same heap page is allocated: %p at %"PRIuVALUE
, (void *)page_body
, (VALUE
)mid
);
2021 if (hi
< (uintptr_t)heap_allocated_pages
) {
2022 MEMMOVE(&heap_pages_sorted
[hi
+1], &heap_pages_sorted
[hi
], struct heap_page_header
*, heap_allocated_pages
- hi
);
2025 heap_pages_sorted
[hi
] = page
;
2027 heap_allocated_pages
++;
2029 GC_ASSERT(heap_eden_total_pages(objspace
) + heap_allocatable_pages(objspace
) <= heap_pages_sorted_length
);
2030 GC_ASSERT(heap_eden_total_pages(objspace
) + heap_tomb_total_pages(objspace
) == heap_allocated_pages
- 1);
2031 GC_ASSERT(heap_allocated_pages
<= heap_pages_sorted_length
);
2033 objspace
->profile
.total_allocated_pages
++;
2035 if (heap_allocated_pages
> heap_pages_sorted_length
) {
2036 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE
") > sorted(%"PRIdSIZE
")",
2037 heap_allocated_pages
, heap_pages_sorted_length
);
2040 if (heap_pages_lomem
== 0 || (uintptr_t)heap_pages_lomem
> start
) heap_pages_lomem
= (RVALUE
*)start
;
2041 if ((uintptr_t)heap_pages_himem
< end
) heap_pages_himem
= (RVALUE
*)end
;
2043 page
->start
= (RVALUE
*)start
;
2044 page
->total_slots
= limit
;
2045 page
->slot_size
= size_pool
->slot_size
;
2046 page
->size_pool
= size_pool
;
2047 page_body
->header
.page
= page
;
2049 for (p
= start
; p
!= end
; p
+= stride
) {
2050 gc_report(3, objspace
, "assign_heap_page: %p is added to freelist\n", (void *)p
);
2051 heap_page_add_freeobj(objspace
, page
, (VALUE
)p
);
2053 page
->free_slots
= limit
;
2055 asan_poison_memory_region(&page
->freelist
, sizeof(RVALUE
*));
static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    struct heap_page *page = 0, *next;

    list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, &size_pool->tomb_heap, page);
            asan_poison_memory_region(&page->freelist, sizeof(RVALUE *));
            return page;
        }
    }

    return NULL;
}

static struct heap_page *
heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    struct heap_page *page;
    const char *method = "recycle";

    size_pool->allocatable_pages--;

    page = heap_page_resurrect(objspace, size_pool);

    if (page == NULL) {
        page = heap_page_allocate(objspace, size_pool);
        method = "allocate";
    }
    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                   "heap_pages_sorted_length: %"PRIdSIZE", "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "tomb->total_pages: %"PRIdSIZE"\n",
                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
    return page;
}

static void
heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
{
    /* Adding to eden heap during incremental sweeping is forbidden */
    GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
    page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
    list_add_tail(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
}

static void
heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    struct heap_page *page = heap_page_create(objspace, size_pool);
    heap_add_page(objspace, size_pool, heap, page);
    heap_add_freepage(heap, page);
}

static void
heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
{
    size_t i;

    size_pool_allocatable_pages_set(objspace, size_pool, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, size_pool, heap);
    }

    GC_ASSERT(size_pool->allocatable_pages == 0);
}

static size_t
heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
{
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    size_t next_used;

    if (goal_ratio == 0.0) {
        next_used = (size_t)(used * gc_params.growth_factor);
    }
    else {
        /* Find `f' where free_slots = f * total_slots * goal_ratio
         * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
         */
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);

        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;

        next_used = (size_t)(f * used);

        if (0) {
            fprintf(stderr,
                    "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                    " G(%1.2f), f(%1.2f),"
                    " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                    free_slots, total_slots, free_slots/(double)total_slots,
                    goal_ratio, f, used, next_used);
        }
    }

    if (gc_params.growth_max_slots > 0) {
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;
    }

    size_t extend_page_count = next_used - used;
    /* Extend by at least 1 page. */
    if (extend_page_count == 0) extend_page_count = 1;

    return extend_page_count;
}
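/*
 * Worked example of the sizing rule above: with goal_ratio 0.40,
 * total_slots 10000 and free_slots 1000, f = 9000 / (0.6 * 10000) = 1.5,
 * so the heap grows to 1.5x its current used page count (clamped above
 * by growth_factor and raised to 1.1 when f would fall under 1.0).
 */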
static int
heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    if (size_pool->allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
                  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);

        GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
        GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

        heap_assign_page(objspace, size_pool, heap);
        return TRUE;
    }
    return FALSE;
}

static void
heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    GC_ASSERT(heap->free_pages == NULL);

    if (is_lazy_sweeping(objspace)) {
        gc_sweep_continue(objspace, size_pool, heap);
    }
    else if (is_incremental_marking(objspace)) {
        gc_marks_continue(objspace, size_pool, heap);
    }

    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
        gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
        rb_memerror();
    }
}
void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
    objspace->flags.has_hook = (objspace->hook_events != 0);
}

static void
gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
{
    const VALUE *pc = ec->cfp->pc;
    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
        /* increment PC because source line is calculated with PC-1 */
        ec->cfp->pc++;
    }
    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
    ec->cfp->pc = pc;
}

#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook_prep(objspace, event, data, prep) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        prep; \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
    } \
} while (0)

#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
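/* [Editor's note] The `prep` argument lets a caller defer side effects until
 * the hook is actually known to fire. newobj_slowpath() below uses it to
 * zero-fill a fresh object only when a NEWOBJ hook is installed:
 *
 *   gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj,
 *                      newobj_fill(obj, 0, 0, 0));
 */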
static inline VALUE
newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
{
#if !__has_feature(memory_sanitizer)
    GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
#endif
    RVALUE *p = RANY(obj);
    p->as.basic.flags = flags;
    *((VALUE *)&p->as.basic.klass) = klass;

#if RACTOR_CHECK_MODE
    rb_ractor_setup_belonging(obj);
#endif

#if RGENGC_CHECK_MODE
    p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;

    RB_VM_LOCK_ENTER_NO_BARRIER();
    {
        check_rvalue_consistency(obj);

        GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
        GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
        GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
        GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);

        if (flags & FL_PROMOTED1) {
            if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
        }
        else {
            if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
        }
        if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();
#endif

    if (UNLIKELY(wb_protected == FALSE)) {
        ASSERT_vm_locking();
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
    }

    // TODO: make it atomic, or ractor local
    objspace->total_allocated_objects++;

#if RGENGC_PROFILE
    if (wb_protected) {
        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
    else {
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
#endif

#if GC_DEBUG
    RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
    GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif

    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));

#if RGENGC_OLD_NEWOBJ_CHECK > 0
    {
        static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;

        if (!is_incremental_marking(objspace) &&
            flags & FL_WB_PROTECTED &&   /* do not promote WB unprotected objects */
            ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
            if (--newobj_cnt == 0) {
                newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;

                gc_mark_set(objspace, obj);
                RVALUE_AGE_SET_OLD(objspace, obj);

                rb_gc_writebarrier_remember(obj);
            }
        }
    }
#endif
    // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
    return obj;
}

static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
static struct heap_page *heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx);
size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return GET_HEAP_PAGE(obj)->slot_size;
}

static inline size_t
size_pool_slot_size(unsigned char pool_id)
{
    GC_ASSERT(pool_id < SIZE_POOL_COUNT);

    size_t slot_size = (1 << pool_id) * sizeof(RVALUE);

#if RGENGC_CHECK_MODE
    rb_objspace_t *objspace = &rb_objspace;
    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
#endif

    return slot_size;
}

bool
rb_gc_size_allocatable_p(size_t size)
{
    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
}
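/* [Editor's note] A worked example, assuming a 64-bit build where
 * sizeof(RVALUE) == 40 and SIZE_POOL_COUNT == 5: the pool slot sizes are
 * (1 << pool_id) * sizeof(RVALUE) = 40, 80, 160, 320 and 640 bytes, so
 * rb_gc_size_allocatable_p() accepts any request up to 640 bytes.
 */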
static inline VALUE
ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
    RVALUE *p = cache->freelist;

    if (p) {
        VALUE obj = (VALUE)p;
        cache->freelist = p->as.free.next;
        asan_unpoison_object(obj, true);
#if RGENGC_CHECK_MODE
        // zero clear
        MEMZERO((char *)obj, char, size_pool_slot_size(size_pool_idx));
#endif
        return obj;
    }
    else {
        return Qfalse;
    }
}

static struct heap_page *
heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    ASSERT_vm_locking();

    struct heap_page *page;

    while (heap->free_pages == NULL) {
        heap_prepare(objspace, size_pool, heap);
    }
    page = heap->free_pages;
    heap->free_pages = page->free_next;

    GC_ASSERT(page->free_slots != 0);
    RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);

    return page;
}

static inline void
ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx)
{
    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));

    rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];

    cache->using_page = page;
    cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;

    asan_unpoison_object((VALUE)cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)cache->freelist, T_NONE));
    asan_poison_object((VALUE)cache->freelist);
}

static void
ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    ASSERT_vm_locking();

    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
    struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));

    ractor_set_cache(cr, page, size_pool_idx);
}

static inline VALUE
newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
{
    RVALUE *p = (RVALUE *)obj;
    p->as.values.v1 = v1;
    p->as.values.v2 = v2;
    p->as.values.v3 = v3;
    return obj;
}
static inline size_t
size_pool_idx_for_size(size_t size)
{
#if USE_RVARGC
    size_t slot_count = CEILDIV(size, sizeof(RVALUE));

    /* size_pool_idx is ceil(log2(slot_count)) */
    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
    if (size_pool_idx >= SIZE_POOL_COUNT) {
        rb_bug("size_pool_idx_for_size: allocation size too large");
    }

    return size_pool_idx;
#else
    GC_ASSERT(size <= sizeof(RVALUE));
    return 0;
#endif
}
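/* [Editor's note] A worked example of the index computation, assuming
 * sizeof(RVALUE) == 40: for size == 100, slot_count = CEILDIV(100, 40) = 3,
 * and size_pool_idx = 64 - nlz_int64(3 - 1) = 64 - 62 = 2 = ceil(log2(3)),
 * i.e. the object is placed in the pool whose slots hold 4 RVALUEs
 * (160 bytes), the smallest power-of-two pool that fits the request.
 */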
ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));

static VALUE
newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
{
    VALUE obj;
    unsigned int lev;

    RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
    {
        if (UNLIKELY(during_gc || ruby_gc_stressful)) {
            if (during_gc) {
                dont_gc_on();
                during_gc = 0;
                rb_bug("object allocation during garbage collection phase");
            }

            if (ruby_gc_stressful) {
                if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
                    rb_memerror();
                }
            }
        }

        // allocate new slot
        while ((obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) == Qfalse) {
            ractor_cache_slots(objspace, cr, size_pool_idx);
        }
        GC_ASSERT(obj != 0);
        newobj_init(klass, flags, wb_protected, objspace, obj);

        gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));
    }
    RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);

    return obj;
}

NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
                                                   rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
                                                     rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));

static VALUE
newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
}

static VALUE
newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
}

static inline VALUE
newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
{
    VALUE obj;
    rb_objspace_t *objspace = &rb_objspace;

    RB_DEBUG_COUNTER_INC(obj_newobj);
    (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);

#if GC_DEBUG_STRESS_TO_CLASS
    if (UNLIKELY(stress_to_class)) {
        long i, cnt = RARRAY_LEN(stress_to_class);
        for (i = 0; i < cnt; ++i) {
            if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
        }
    }
#endif

    size_t size_pool_idx = size_pool_idx_for_size(alloc_size);

    if ((!UNLIKELY(during_gc ||
                   ruby_gc_stressful ||
                   gc_event_hook_available_p(objspace)) &&
         wb_protected &&
         (obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) != Qfalse)) {

        newobj_init(klass, flags, wb_protected, objspace, obj);
    }
    else {
        RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);

        obj = wb_protected ?
          newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
          newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
    }

    return obj;
}
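/* [Editor's note] Summary of the fast path above: an allocation avoids the
 * slow path (and therefore the VM lock) only when no GC is running, GC
 * stress is off, no internal objspace event hook is installed, the object
 * is write-barrier protected, and the ractor-local freelist for the chosen
 * size pool is non-empty. Everything else funnels into newobj_slowpath().
 */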
static VALUE
newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
{
    VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
    return newobj_fill(obj, v1, v2, v3);
}

static VALUE
newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
{
    VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
    return newobj_fill(obj, v1, v2, v3);
}

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
}

VALUE
rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
}

/* for compatibility */

VALUE
rb_newobj(void)
{
    return newobj_of(0, T_NONE, 0, 0, 0, FALSE, sizeof(RVALUE));
}

VALUE
rb_newobj_of(VALUE klass, VALUE flags)
{
    if ((flags & RUBY_T_MASK) == T_OBJECT) {
        st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);

        VALUE obj = newobj_of(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED, Qundef, Qundef, Qundef, flags & FL_WB_PROTECTED, sizeof(RVALUE));

        if (index_tbl && index_tbl->num_entries > ROBJECT_EMBED_LEN_MAX) {
            rb_init_iv_list(obj);
        }
        return obj;
    }
    else {
        return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, sizeof(RVALUE));
    }
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
const char *
rb_imemo_name(enum imemo_type type)
{
    // put no default case to get a warning if an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
        IMEMO_NAME(env);
        IMEMO_NAME(cref);
        IMEMO_NAME(svar);
        IMEMO_NAME(throw_data);
        IMEMO_NAME(ifunc);
        IMEMO_NAME(memo);
        IMEMO_NAME(ment);
        IMEMO_NAME(iseq);
        IMEMO_NAME(tmpbuf);
        IMEMO_NAME(ast);
        IMEMO_NAME(parser_strterm);
        IMEMO_NAME(callinfo);
        IMEMO_NAME(callcache);
        IMEMO_NAME(constcache);
#undef IMEMO_NAME
    }
    return "unknown";
}

VALUE
rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
{
    size_t size = sizeof(RVALUE);
    VALUE flags = T_IMEMO | (type << FL_USHIFT);
    return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
}

static VALUE
rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
{
    size_t size = sizeof(RVALUE);
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
}

VALUE
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
{
    return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
}

rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
{
    return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
}

static size_t
imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_ment:
        size += sizeof(RANY(obj)->as.imemo.ment.def);
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);
        break;
      case imemo_env:
        size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
        break;
      case imemo_tmpbuf:
        size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
        break;
      case imemo_ast:
        size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
        break;
      case imemo_cref:
      case imemo_svar:
      case imemo_throw_data:
      case imemo_ifunc:
      case imemo_memo:
      case imemo_parser_strterm:
        break;
      default:
        /* unreachable */
        break;
    }
    return size;
}

#if IMEMO_DEBUG
VALUE
rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
{
    VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
    return memo;
}
#endif
VALUE
rb_class_allocate_instance(VALUE klass)
{
    st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);

    VALUE flags = T_OBJECT | ROBJECT_EMBED;

    VALUE obj = newobj_of(klass, flags, Qundef, Qundef, Qundef, RGENGC_WB_PROTECTED_OBJECT, sizeof(RVALUE));

    if (index_tbl && index_tbl->num_entries > ROBJECT_EMBED_LEN_MAX) {
        rb_init_iv_list(obj);
    }

    return obj;
}

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
    if (klass) rb_data_object_check(klass);
    return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE, sizeof(RVALUE));
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED, sizeof(RVALUE));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
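/* [Editor's note] A minimal sketch of how a C extension would use the typed
 * wrap API above; `my_thing_type`, `struct my_thing`, and `my_thing_alloc`
 * are hypothetical names, not part of this file:
 *
 *   static const rb_data_type_t my_thing_type = {
 *       .wrap_struct_name = "my_thing",
 *       .function = {
 *           .dmark = NULL,                     // no VALUEs to mark
 *           .dfree = RUBY_DEFAULT_FREE,        // just xfree() the struct
 *           .dsize = NULL,
 *       },
 *       .flags = RUBY_TYPED_FREE_IMMEDIATELY,  // safe to free during sweep
 *   };
 *
 *   static VALUE
 *   my_thing_alloc(VALUE klass)
 *   {
 *       struct my_thing *p;
 *       return TypedData_Make_Struct(klass, struct my_thing, &my_thing_type, p);
 *   }
 *
 * TypedData_Make_Struct() eventually reaches rb_data_typed_object_wrap(),
 * and RUBY_TYPED_FREE_IMMEDIATELY is the flag obj_free() checks when
 * deciding between an immediate dfree call and a deferred (zombie) free.
 */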
size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_DATA(obj);
        if (ptr && type->function.dsize) {
            return type->function.dsize(ptr);
        }
    }
    return 0;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct heap_page *page;
    register size_t hi, lo, mid;

    RB_DEBUG_COUNTER_INC(gc_isptr_trial);

    if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
    RB_DEBUG_COUNTER_INC(gc_isptr_range);

    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
    RB_DEBUG_COUNTER_INC(gc_isptr_align);

    /* check if p looks like a pointer using bsearch */
    lo = 0;
    hi = heap_allocated_pages;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        page = heap_pages_sorted[mid];
        if (page->start <= p) {
            if ((uintptr_t)p < ((uintptr_t)page->start + (page->total_slots * page->slot_size))) {
                RB_DEBUG_COUNTER_INC(gc_isptr_maybe);

                if (page->flags.in_tomb) {
                    return FALSE;
                }
                else {
                    if ((NUM_IN_PAGE(p) * sizeof(RVALUE)) % page->slot_size != 0) return FALSE;

                    return TRUE;
                }
            }
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return FALSE;
}
static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

static int
free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
{
    xfree((void *)value);
    return ST_CONTINUE;
}

static void
iv_index_tbl_free(struct st_table *tbl)
{
    st_foreach(tbl, free_iv_index_tbl_free_i, 0);
    st_free_table(tbl);
}

// alive: if false, target pointers can be freed already.
//        To check it, we need objspace parameter.
static void
vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
{
    if (ccs->entries) {
        for (int i=0; i<ccs->len; i++) {
            const struct rb_callcache *cc = ccs->entries[i].cc;
            if (!alive) {
                void *ptr = asan_poisoned_object_p((VALUE)cc);
                asan_unpoison_object((VALUE)cc, false);
                // ccs can be free'ed.
                if (is_pointer_to_heap(objspace, (void *)cc) &&
                    IMEMO_TYPE_P(cc, imemo_callcache) &&
                    cc->klass == klass) {
                    // OK. maybe target cc.
                }
                else {
                    if (ptr) {
                        asan_poison_object((VALUE)cc);
                    }
                    continue;
                }
                if (ptr) {
                    asan_poison_object((VALUE)cc);
                }
            }
            vm_cc_invalidate(cc);
        }
        ruby_xfree(ccs->entries);
    }
    ruby_xfree(ccs);
}

void
rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
{
    RB_DEBUG_COUNTER_INC(ccs_free);
    vm_ccs_free(ccs, TRUE, NULL, Qundef);
}

struct cc_tbl_i_data {
    rb_objspace_t *objspace;
    VALUE klass;
    bool alive;
};

static enum rb_id_table_iterator_result
cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
{
    struct cc_tbl_i_data *data = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));
    VM_ASSERT(id == ccs->cme->called_id);

    if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
        rb_vm_ccs_free(ccs);
        return ID_TABLE_DELETE;
    }
    else {
        gc_mark(data->objspace, (VALUE)ccs->cme);

        for (int i=0; i<ccs->len; i++) {
            VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
            VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));

            gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
            gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
        }
        return ID_TABLE_CONTINUE;
    }
}

static void
cc_table_mark(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
    if (cc_tbl) {
        struct cc_tbl_i_data data = {
            .objspace = objspace,
            .klass = klass,
        };
        rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
    }
}

static enum rb_id_table_iterator_result
cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
{
    struct cc_tbl_i_data *data = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));
    vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
    return ID_TABLE_CONTINUE;
}

static void
cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
{
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);

    if (cc_tbl) {
        struct cc_tbl_i_data data = {
            .objspace = objspace,
            .klass = klass,
            .alive = alive,
        };
        rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
        rb_id_table_free(cc_tbl);
    }
}

static enum rb_id_table_iterator_result
cvar_table_free_i(VALUE value, void *ctx)
{
    xfree((void *)value);
    return ID_TABLE_CONTINUE;
}

void
rb_cc_table_free(VALUE klass)
{
    cc_table_free(&rb_objspace, klass, TRUE);
}
static void
make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
{
    struct RZombie *zombie = RZOMBIE(obj);
    zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
    zombie->dfree = dfree;
    zombie->data = data;
    zombie->next = heap_pages_deferred_final;
    heap_pages_deferred_final = (VALUE)zombie;

    struct heap_page *page = GET_HEAP_PAGE(obj);
    page->final_slots++;
    heap_pages_final_slots++;
}

static void
make_io_zombie(rb_objspace_t *objspace, VALUE obj)
{
    rb_io_t *fptr = RANY(obj)->as.file.fptr;
    make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
}

static void
obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
{
    ASSERT_vm_locking();
    st_data_t o = (st_data_t)obj, id;

    GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
    FL_UNSET(obj, FL_SEEN_OBJ_ID);

    if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
        GC_ASSERT(id);
        st_delete(objspace->id_to_obj_tbl, &id, NULL);
    }
    else {
        rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
    }
}
static int
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_free);
    // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
        obj_free_object_id(objspace, obj);
    }

    if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);

#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
    CHECK(RVALUE_WB_UNPROTECTED);
    CHECK(RVALUE_MARKED);
    CHECK(RVALUE_MARKING);
    CHECK(RVALUE_UNCOLLECTIBLE);
#undef CHECK
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        else if (ROBJ_TRANSIENT_P(obj)) {
            RB_DEBUG_COUNTER_INC(obj_obj_transient);
        }
        else {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
            RB_DEBUG_COUNTER_INC(obj_obj_ptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_id_table_free(RCLASS_M_TBL(obj));
        cc_table_free(objspace, obj, FALSE);
        if (RCLASS_IV_TBL(obj)) {
            st_free_table(RCLASS_IV_TBL(obj));
        }
        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_IV_INDEX_TBL(obj)) {
            iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
        }
        if (RCLASS_CVC_TBL(obj)) {
            rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
            rb_id_table_free(RCLASS_CVC_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        if (RCLASS_EXT(obj))
            xfree(RCLASS_EXT(obj));

        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif
        if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
            struct ar_table_struct *tab = RHASH(obj)->as.ar;

            if (tab) {
                if (RHASH_TRANSIENT_P(obj)) {
                    RB_DEBUG_COUNTER_INC(obj_hash_transient);
                }
                else {
                    ruby_xfree(tab);
                }
            }
        }
        else {
            GC_ASSERT(RHASH_ST_TABLE_P(obj));
            st_free_table(RHASH(obj)->as.st);
        }
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (DATA_PTR(obj)) {
            int free_immediately = FALSE;
            void (*dfree)(void *);
            void *data = DATA_PTR(obj);

            if (RTYPEDDATA_P(obj)) {
                free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
                dfree = RANY(obj)->as.typeddata.type->function.dfree;
                if (0 && free_immediately == 0) {
                    /* to expose non-free-immediate T_DATA */
                    fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
                }
            }
            else {
                dfree = RANY(obj)->as.data.dfree;
            }

            if (dfree) {
                if (dfree == RUBY_DEFAULT_FREE) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
                else if (free_immediately) {
                    (*dfree)(data);
                    RB_DEBUG_COUNTER_INC(obj_data_imm_free);
                }
                else {
                    make_zombie(objspace, obj, dfree, data);
                    RB_DEBUG_COUNTER_INC(obj_data_zombie);
                    return FALSE;
                }
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_data_empty);
            }
        }
        break;
      case T_MATCH:
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            if (rm->char_offset)
                xfree(rm->char_offset);
            xfree(rm);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        /* Basically , T_ICLASS shares table with the module */
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            /* Method table is not shared for origin iclasses of classes */
            rb_id_table_free(RCLASS_M_TBL(obj));
        }
        if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
            rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        cc_table_free(objspace, obj, FALSE);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);

        xfree(RCLASS_EXT(obj));

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else if (RSTRUCT_TRANSIENT_P(obj)) {
            RB_DEBUG_COUNTER_INC(obj_struct_transient);
        }
        else {
            xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        rb_gc_free_dsymbol(obj);
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        switch (imemo_type(obj)) {
          case imemo_ment:
            rb_free_method_entry(&RANY(obj)->as.imemo.ment);
            RB_DEBUG_COUNTER_INC(obj_imemo_ment);
            break;
          case imemo_iseq:
            rb_iseq_free(&RANY(obj)->as.imemo.iseq);
            RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
            break;
          case imemo_env:
            GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
            xfree((VALUE *)RANY(obj)->as.imemo.env.env);
            RB_DEBUG_COUNTER_INC(obj_imemo_env);
            break;
          case imemo_tmpbuf:
            xfree(RANY(obj)->as.imemo.alloc.ptr);
            RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
            break;
          case imemo_ast:
            rb_ast_free(&RANY(obj)->as.imemo.ast);
            RB_DEBUG_COUNTER_INC(obj_imemo_ast);
            break;
          case imemo_cref:
            RB_DEBUG_COUNTER_INC(obj_imemo_cref);
            break;
          case imemo_svar:
            RB_DEBUG_COUNTER_INC(obj_imemo_svar);
            break;
          case imemo_throw_data:
            RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
            break;
          case imemo_ifunc:
            RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
            break;
          case imemo_memo:
            RB_DEBUG_COUNTER_INC(obj_imemo_memo);
            break;
          case imemo_parser_strterm:
            RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
            break;
          case imemo_callinfo:
            RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
            break;
          case imemo_callcache:
            RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
            break;
          case imemo_constcache:
            RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
            break;
        }
        return TRUE;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST(obj, FL_FINALIZE)) {
        make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}
#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
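/* [Editor's note] A worked example, assuming sizeof(RVALUE) == 40 on a
 * 64-bit build: OBJ_ID_INCREMENT is 20 and OBJ_ID_INITIAL is 40, so
 * cached_object_id() below hands out 40, 60, 80, ... These ids are pure
 * counters taken from objspace->next_object_id and do not encode heap
 * addresses.
 */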
static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_BIGNUM_TYPE_P(x)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    if (RB_BIGNUM_TYPE_P(n)) {
        return FIX2LONG(rb_big_hash(n));
    }
    else {
        return st_numhash(n);
    }
}

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};

void
Init_heap(void)
{
    rb_objspace_t *objspace = &rb_objspace;

#if defined(HAVE_MMAP) && !HAVE_CONST_PAGE_SIZE && !defined(PAGE_MAX_SIZE)
    /* Need to determine if we can use mmap at runtime. */
# ifdef PAGE_SIZE
    /* If the PAGE_SIZE macro can be used. */
    use_mmap_aligned_alloc = PAGE_SIZE <= HEAP_PAGE_SIZE;
# elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
    /* If we can use sysconf to determine the page size. */
    use_mmap_aligned_alloc = sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE;
# else
    /* Otherwise we can't determine the system page size, so don't use mmap. */
    use_mmap_aligned_alloc = FALSE;
# endif
#endif

    objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
    objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
    objspace->obj_to_id_tbl = st_init_numtable();

#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
#endif

    heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);

    /* Give other size pools allocatable pages. */
    for (int i = 1; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        int multiple = size_pool->slot_size / sizeof(RVALUE);
        size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
    }
    heap_pages_expand_sorted(objspace);

    init_mark_stack(&objspace->mark_stack);

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
}
void
Init_gc_stress(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_stress_set(objspace, ruby_initial_gc_stress);
}

typedef int each_obj_callback(void *, void *, size_t, void *);

static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);

struct each_obj_data {
    rb_objspace_t *objspace;
    bool reenable_incremental;

    each_obj_callback *callback;
    void *data;

    struct heap_page **pages[SIZE_POOL_COUNT];
    size_t pages_counts[SIZE_POOL_COUNT];
};
static VALUE
objspace_each_objects_ensure(VALUE arg)
{
    struct each_obj_data *data = (struct each_obj_data *)arg;
    rb_objspace_t *objspace = data->objspace;

    /* Reenable incremental GC */
    if (data->reenable_incremental) {
        objspace->flags.dont_incremental = FALSE;
    }

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        struct heap_page **pages = data->pages[i];
        /* pages could be NULL if an error was raised during setup (e.g.
         * malloc failed due to out of memory). */
        if (pages) {
            free(pages);
        }
    }

    return Qnil;
}

static VALUE
objspace_each_objects_try(VALUE arg)
{
    struct each_obj_data *data = (struct each_obj_data *)arg;
    rb_objspace_t *objspace = data->objspace;

    /* Copy pages from all size_pools to their respective buffers. */
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);

        struct heap_page **pages = malloc(size);
        if (!pages) rb_memerror();

        /* Set up pages buffer by iterating over all pages in the current eden
         * heap. This will be a snapshot of the state of the heap before we
         * call the callback over each page that exists in this buffer. Thus it
         * is safe for the callback to allocate objects without possibly entering
         * an infinite loop. */
        struct heap_page *page = 0;
        size_t pages_count = 0;
        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            pages[pages_count] = page;
            pages_count++;
        }
        data->pages[i] = pages;
        data->pages_counts[i] = pages_count;
        GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
    }

    /* Run the callback over the snapshot of each size pool. */
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        size_t pages_count = data->pages_counts[i];
        struct heap_page **pages = data->pages[i];

        struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
        for (size_t i = 0; i < pages_count; i++) {
            /* If we have reached the end of the linked list then there are no
             * more pages, so break. */
            if (page == NULL) break;

            /* If this page does not match the one in the buffer, then move to
             * the next page in the buffer. */
            if (pages[i] != page) continue;

            uintptr_t pstart = (uintptr_t)page->start;
            uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);

            if ((*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
                break;
            }

            page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
        }
    }

    return Qnil;
}
/*
 * rb_objspace_each_objects() is a special C API to walk through the
 * Ruby object space. This C API is difficult to use safely; frankly,
 * you should not use it unless you have read the source code of this
 * function and understand what it does.
 *
 * 'callback' will be called several times (the number of heap pages,
 * in the current implementation) with:
 *   vstart: a pointer to the first living object of the heap_page.
 *   vend: a pointer just past the end of the valid heap_page area.
 *   stride: the distance to the next VALUE.
 *
 * If callback() returns non-zero, the iteration is stopped.
 *
 * This is a sample callback code to iterate over live objects:
 *
 *   static int
 *   sample_callback(void *vstart, void *vend, int stride, void *data)
 *   {
 *       VALUE v = (VALUE)vstart;
 *       for (; v != (VALUE)vend; v += stride) {
 *           if (RBASIC(v)->flags) { // liveness check
 *               // do something with live object 'v'
 *           }
 *       }
 *       return 0; // continue the iteration
 *   }
 *
 * Note: 'vstart' is not the top of the heap_page. It points at the first
 * living object, so that at least one object is grasped, to avoid GC
 * issues. This means that you can not walk through all Ruby object pages,
 * including freed object pages.
 *
 * Note: in this implementation, 'stride' is the same as sizeof(RVALUE).
 * However, variable values may be passed as 'stride' in the future, so
 * you must use the given stride instead of a constant value in the
 * iteration.
 */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    objspace_each_objects(&rb_objspace, callback, data, TRUE);
}
static void
objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
{
    /* Disable incremental GC */
    bool reenable_incremental = FALSE;
    if (protected) {
        reenable_incremental = !objspace->flags.dont_incremental;

        gc_rest(objspace);
        objspace->flags.dont_incremental = TRUE;
    }

    struct each_obj_data each_obj_data = {
        .objspace = objspace,
        .reenable_incremental = reenable_incremental,

        .callback = callback,
        .data = data,

        .pages = {NULL},
        .pages_counts = {0},
    };
    rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
              objspace_each_objects_ensure, (VALUE)&each_obj_data);
}

void
rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
{
    objspace_each_objects(&rb_objspace, callback, data, FALSE);
}
struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
internal_object_p(VALUE obj)
{
    RVALUE *p = (RVALUE *)obj;
    void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
    asan_unpoison_object(obj, false);
    bool used_p = p->as.basic.flags;

    if (used_p) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (!p->as.basic.klass) break;
            if (FL_TEST(obj, FL_SINGLETON)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!p->as.basic.klass) break;
            return 0;
        }
    }
    if (ptr || ! used_p) {
        asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}
/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *     a = 102.7
 *     b = 95       # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     12345678987654321
 *     102.7
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
 *     Total count: 5
 */
static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}
/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */
static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    st_data_t data = obj;
    rb_check_frozen(obj);
    st_delete(finalizer_table, &data, 0);
    FL_UNSET(obj, FL_FINALIZE);
    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}
/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  has been destroyed. The object ID of <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finaliser proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    should_be_finalizable(obj);
    if (argc == 1) {
        block = rb_block_proc();
    }
    else {
        should_be_callable(block);
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return define_final0(obj, block);
}
static VALUE
define_final0(VALUE obj, VALUE block)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    block = recv;
                    goto end;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC_CLEAR_CLASS(table);
        st_add_direct(finalizer_table, obj, table);
    }
  end:
    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;
    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        st_insert(finalizer_table, dest, table);
    }
    FL_SET(dest, FL_FINALIZE);
}
static VALUE
run_single_final(VALUE cmd, VALUE objid)
{
    return rb_check_funcall(cmd, idCall, 1, &objid);
}

static void
warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
{
    if (final != Qundef && !NIL_P(ruby_verbose)) {
        VALUE errinfo = ec->errinfo;
        rb_warn("Exception in finalizer %+"PRIsVALUE, final);
        rb_ec_error_print(ec, errinfo);
    }
}

static void
run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
{
    long i;
    enum ruby_tag_type state;
    volatile struct {
        VALUE errinfo;
        VALUE objid;
        VALUE final;
        rb_control_frame_t *cfp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.objid = rb_obj_id(obj);
    saved.cfp = ec->cfp;
    saved.finished = 0;
    saved.final = Qundef;

    EC_PUSH_TAG(ec);
    state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */
        warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
    }
    for (i = saved.finished;
         RESTORE_FINALIZER(), i<RARRAY_LEN(table);
         saved.finished = ++i) {
        run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
    }
    EC_POP_TAG();
#undef RESTORE_FINALIZER
}
static void
run_final(rb_objspace_t *objspace, VALUE zombie)
{
    st_data_t key, table;

    if (RZOMBIE(zombie)->dfree) {
        RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
    }

    key = (st_data_t)zombie;
    if (st_delete(finalizer_table, &key, &table)) {
        run_finalizer(objspace, zombie, (VALUE)table);
    }
}

static void
finalize_list(rb_objspace_t *objspace, VALUE zombie)
{
    while (zombie) {
        VALUE next_zombie;
        struct heap_page *page;
        asan_unpoison_object(zombie, false);
        next_zombie = RZOMBIE(zombie)->next;
        page = GET_HEAP_PAGE(zombie);

        run_final(objspace, zombie);

        RB_VM_LOCK_ENTER();
        {
            GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
            if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
                obj_free_object_id(objspace, zombie);
            }

            GC_ASSERT(heap_pages_final_slots > 0);
            GC_ASSERT(page->final_slots > 0);

            heap_pages_final_slots--;
            page->final_slots--;
            page->free_slots++;
            heap_page_add_freeobj(objspace, page, zombie);
            objspace->profile.total_freed_objects++;
        }
        RB_VM_LOCK_LEAVE();

        zombie = next_zombie;
    }
}
static void
finalize_deferred(rb_objspace_t *objspace)
{
    VALUE zombie;
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;

    while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
        finalize_list(objspace, zombie);
    }

    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

static void
gc_finalize_deferred(void *dmy)
{
    rb_objspace_t *objspace = dmy;
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;

    finalize_deferred(objspace);
    ATOMIC_SET(finalizing, 0);
}

static void
gc_finalize_deferred_register(rb_objspace_t *objspace)
{
    if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
        rb_bug("gc_finalize_deferred_register: can't register finalizer.");
    }
}

struct force_finalize_list {
    VALUE obj;
    VALUE table;
    struct force_finalize_list *next;
};

static int
force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
{
    struct force_finalize_list **prev = (struct force_finalize_list **)arg;
    struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
    curr->obj = key;
    curr->table = val;
    curr->next = *prev;
    *prev = curr;
    return ST_CONTINUE;
}
bool rb_obj_is_main_ractor(VALUE gv);

void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
    size_t i;

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
    gc_rest(objspace);

    if (ATOMIC_EXCHANGE(finalizing, 1)) return;

    /* run finalizers */
    finalize_deferred(objspace);
    GC_ASSERT(heap_pages_deferred_final == 0);

    gc_rest(objspace);
    /* prohibit incremental GC */
    objspace->flags.dont_incremental = 1;

    /* force to run finalizer */
    while (finalizer_table->num_entries) {
        struct force_finalize_list *list = 0;
        st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
        while (list) {
            struct force_finalize_list *curr = list;
            st_data_t obj = (st_data_t)curr->obj;
            run_finalizer(objspace, curr->obj, curr->table);
            st_delete(finalizer_table, &obj, 0);
            list = curr->next;
            xfree(curr);
        }
    }

    /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
    dont_gc_on();

    /* running data/file finalizers are part of garbage collection */
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);

    /* run data/file object's finalizers */
    for (i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (; p < pend; p += stride) {
            VALUE vp = (VALUE)p;
            void *poisoned = asan_poisoned_object_p(vp);
            asan_unpoison_object(vp, false);
            switch (BUILTIN_TYPE(vp)) {
              case T_DATA:
                if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
                if (rb_obj_is_thread(vp)) break;
                if (rb_obj_is_mutex(vp)) break;
                if (rb_obj_is_fiber(vp)) break;
                if (rb_obj_is_main_ractor(vp)) break;
                if (RTYPEDDATA_P(vp)) {
                    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
                }
                RANY(p)->as.free.flags = 0;
                if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
                    xfree(DATA_PTR(p));
                }
                else if (RANY(p)->as.data.dfree) {
                    make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
                }
                break;
              case T_FILE:
                if (RANY(p)->as.file.fptr) {
                    make_io_zombie(objspace, vp);
                }
                break;
              default:
                break;
            }
            if (poisoned) {
                GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
                asan_poison_object(vp);
            }
        }
    }

    gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);

    if (heap_pages_deferred_final) {
        finalize_list(objspace, heap_pages_deferred_final);
    }

    st_free_table(finalizer_table);
    finalizer_table = 0;
    ATOMIC_SET(finalizing, 0);
}
static inline int
is_swept_object(rb_objspace_t *objspace, VALUE ptr)
{
    struct heap_page *page = GET_HEAP_PAGE(ptr);
    return page->flags.before_sweep ? FALSE : TRUE;
}

/* garbage objects will be collected soon. */
static inline int
is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
{
    if (!is_lazy_sweeping(objspace) ||
        is_swept_object(objspace, ptr) ||
        MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {

        return FALSE;
    }
    else {
        return TRUE;
    }
}

static inline int
is_live_object(rb_objspace_t *objspace, VALUE ptr)
{
    switch (BUILTIN_TYPE(ptr)) {
      case T_NONE:
      case T_MOVED:
      case T_ZOMBIE:
        return FALSE;
      default:
        break;
    }

    if (!is_garbage_object(objspace, ptr)) {
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline int
is_markable_object(rb_objspace_t *objspace, VALUE obj)
{
    if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
    check_rvalue_consistency(obj);
    return TRUE;
}

int
rb_objspace_markable_object_p(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    return is_garbage_object(objspace, obj);
}

static VALUE
id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
{
    VALUE orig;
    if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
        return orig;
    }
    else {
        return Qundef;
    }
}
/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 *
 *  On multi-ractor mode, if the object is not shareable, it raises
 *  RangeError.
 */
static VALUE
id2ref(VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;
    VALUE ptr;
    VALUE orig;
    void *p0;

    objid = rb_to_int(objid);
    if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
        ptr = NUM2PTR(objid);
        if (ptr == Qtrue) return Qtrue;
        if (ptr == Qfalse) return Qfalse;
        if (NIL_P(ptr)) return Qnil;
        if (FIXNUM_P(ptr)) return (VALUE)ptr;
        if (FLONUM_P(ptr)) return (VALUE)ptr;

        ptr = obj_id_to_ref(objid);
        if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
            ID symid = ptr / sizeof(RVALUE);
            p0 = (void *)ptr;
            if (rb_id2str(symid) == 0)
                rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
            return ID2SYM(symid);
        }
    }

    if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
        is_live_object(objspace, orig)) {

        if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
            return orig;
        }
        else {
            rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
        }
    }

    if (rb_int_ge(objid, objspace->next_object_id)) {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
    }
}

/* :nodoc: */
static VALUE
os_id2ref(VALUE os, VALUE objid)
{
    return id2ref(objid);
}
static VALUE
rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
{
    if (STATIC_SYM_P(obj)) {
        return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
    }
    else if (FLONUM_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
        return LONG2NUM((SIGNED_VALUE)obj);
#else
        return LL2NUM((SIGNED_VALUE)obj);
#endif
    }
    else if (SPECIAL_CONST_P(obj)) {
        return LONG2NUM((SIGNED_VALUE)obj);
    }

    return get_heap_object_id(obj);
}
)
4378 rb_objspace_t
*objspace
= &rb_objspace
;
4381 if (st_lookup(objspace
->obj_to_id_tbl
, (st_data_t
)obj
, &id
)) {
4382 GC_ASSERT(FL_TEST(obj
, FL_SEEN_OBJ_ID
));
4385 GC_ASSERT(!FL_TEST(obj
, FL_SEEN_OBJ_ID
));
4387 id
= objspace
->next_object_id
;
4388 objspace
->next_object_id
= rb_int_plus(id
, INT2FIX(OBJ_ID_INCREMENT
));
4390 VALUE already_disabled
= rb_gc_disable_no_rest();
4391 st_insert(objspace
->obj_to_id_tbl
, (st_data_t
)obj
, (st_data_t
)id
);
4392 st_insert(objspace
->id_to_obj_tbl
, (st_data_t
)id
, (st_data_t
)obj
);
4393 if (already_disabled
== Qfalse
) rb_objspace_gc_enable(objspace
);
4394 FL_SET(obj
, FL_SEEN_OBJ_ID
);
4402 nonspecial_obj_id_(VALUE obj
)
4404 return nonspecial_obj_id(obj
);
4409 rb_memory_id(VALUE obj
)
4411 return rb_find_object_id(obj
, nonspecial_obj_id_
);
/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note that some objects of builtin classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  BasicObject implements +__id__+, Kernel implements +object_id+.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *      Object.new.object_id  == Object.new.object_id  # => false
 *      (21 * 2).object_id    == (21 * 2).object_id    # => true
 *      "hello".object_id     == "hello".object_id     # => false
 *      "hi".freeze.object_id == "hi".freeze.object_id # => true
 */
VALUE
rb_obj_id(VALUE obj)
{
    /*
     *                32-bit VALUE space
     *          MSB ------------------------ LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol  ssssssssssssssssssssssss00001110
     *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
     *  fixnum  fffffffffffffffffffffffffffffff1
     *
     *                    object_id space
     *                                       LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
     *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
     *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
     *
     *  where A = sizeof(RVALUE)/4
     *
     *  sizeof(RVALUE) is
     *  20 if 32-bit, double is 4-byte aligned
     *  24 if 32-bit, double is 8-byte aligned
     *  40 if 64-bit
     */

    return rb_find_object_id(obj, cached_object_id);
}
static enum rb_id_table_iterator_result
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
{
    size_t *total_size = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
    return ID_TABLE_CONTINUE;
}

static size_t
cc_table_memsize(struct rb_id_table *cc_table)
{
    size_t total = rb_id_table_memsize(cc_table);
    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
    return total;
}
static size_t
obj_memsize_of(VALUE obj, int use_all_types)
{
    size_t size = 0;

    if (SPECIAL_CONST_P(obj)) {
        return 0;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        size += rb_generic_ivar_memsize(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
            size += ROBJECT_NUMIV(obj) * sizeof(VALUE);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (RCLASS_EXT(obj)) {
            if (RCLASS_M_TBL(obj)) {
                size += rb_id_table_memsize(RCLASS_M_TBL(obj));
            }
            if (RCLASS_IV_TBL(obj)) {
                size += st_memsize(RCLASS_IV_TBL(obj));
            }
            if (RCLASS_CVC_TBL(obj)) {
                size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
            }
            if (RCLASS_IV_INDEX_TBL(obj)) {
                // TODO: more correct value
                size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
            }
            if (RCLASS_EXT(obj)->iv_tbl) {
                size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
            }
            if (RCLASS_EXT(obj)->const_tbl) {
                size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
            }
            if (RCLASS_CC_TBL(obj)) {
                size += cc_table_memsize(RCLASS_CC_TBL(obj));
            }
            size += sizeof(rb_classext_t);
        }
        break;
      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            if (RCLASS_M_TBL(obj)) {
                size += rb_id_table_memsize(RCLASS_M_TBL(obj));
            }
        }
        if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
            size += cc_table_memsize(RCLASS_CC_TBL(obj));
        }
        break;
      case T_STRING:
        size += rb_str_memsize(obj);
        break;
      case T_ARRAY:
        size += rb_ary_memsize(obj);
        break;
      case T_HASH:
        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) != NULL) {
                size_t rb_hash_ar_table_size(void);
                size += rb_hash_ar_table_size();
            }
        }
        else {
            VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
            size += st_memsize(RHASH_ST_TABLE(obj));
        }
        break;
      case T_REGEXP:
        if (RREGEXP_PTR(obj)) {
            size += onig_memsize(RREGEXP_PTR(obj));
        }
        break;
      case T_DATA:
        if (use_all_types) size += rb_objspace_data_type_memsize(obj);
        break;
      case T_MATCH:
        if (RMATCH(obj)->rmatch) {
            struct rmatch *rm = RMATCH(obj)->rmatch;
            size += onig_region_memsize(&rm->regs);
            size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
            size += sizeof(struct rmatch);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            size += rb_io_memsize(RFILE(obj)->fptr);
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_IMEMO:
        size += imemo_memsize(obj);
        break;

      case T_FLOAT:
      case T_SYMBOL:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
            size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_memsize_of);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RSTRUCT(obj)->as.heap.ptr) {
            size += sizeof(VALUE) * RSTRUCT_LEN(obj);
        }
        break;

      case T_ZOMBIE:
      case T_MOVED:
        break;

      default:
        rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
               BUILTIN_TYPE(obj), (void*)obj);
    }

    return size + GET_HEAP_PAGE(obj)->slot_size;
}

size_t
rb_obj_memsize_of(VALUE obj)
{
    return obj_memsize_of(obj, TRUE);
}
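/* Usage note: ObjectSpace.memsize_of in ext/objspace is built on
 * rb_obj_memsize_of().  The result includes the slot itself
 * (GET_HEAP_PAGE(obj)->slot_size) plus any out-of-line memory, so an
 * embedded string and a heap-allocated string of the same content can
 * report different sizes. */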
static int
set_zero(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE k = (VALUE)key;
    VALUE hash = (VALUE)arg;
    rb_hash_aset(hash, k, INT2FIX(0));
    return ST_CONTINUE;
}

static VALUE
type_sym(size_t type)
{
    switch ((enum ruby_value_type)type) {
#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
        COUNT_TYPE(T_NONE);
        COUNT_TYPE(T_OBJECT);
        COUNT_TYPE(T_CLASS);
        COUNT_TYPE(T_MODULE);
        COUNT_TYPE(T_FLOAT);
        COUNT_TYPE(T_STRING);
        COUNT_TYPE(T_REGEXP);
        COUNT_TYPE(T_ARRAY);
        COUNT_TYPE(T_HASH);
        COUNT_TYPE(T_STRUCT);
        COUNT_TYPE(T_BIGNUM);
        COUNT_TYPE(T_FILE);
        COUNT_TYPE(T_DATA);
        COUNT_TYPE(T_MATCH);
        COUNT_TYPE(T_COMPLEX);
        COUNT_TYPE(T_RATIONAL);
        COUNT_TYPE(T_NIL);
        COUNT_TYPE(T_TRUE);
        COUNT_TYPE(T_FALSE);
        COUNT_TYPE(T_SYMBOL);
        COUNT_TYPE(T_FIXNUM);
        COUNT_TYPE(T_IMEMO);
        COUNT_TYPE(T_UNDEF);
        COUNT_TYPE(T_NODE);
        COUNT_TYPE(T_ICLASS);
        COUNT_TYPE(T_ZOMBIE);
        COUNT_TYPE(T_MOVED);
#undef COUNT_TYPE
      default:              return SIZET2NUM(type); break;
    }
}
/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts all objects grouped by type.
 *
 *  It returns a hash, such as:
 *	{
 *	  :TOTAL=>10000,
 *	  :FREE=>3011,
 *	  :T_OBJECT=>6,
 *	  :T_CLASS=>404,
 *	  # ...
 *	}
 *
 *  The contents of the returned hash are implementation specific.
 *  It may be changed in the future.
 *
 *  The keys starting with +:T_+ mean live objects.
 *  For example, +:T_ARRAY+ is the number of arrays.
 *  +:FREE+ means object slots which are not used now.
 *  +:TOTAL+ means the sum of the above.
 *
 *  If the optional argument +result_hash+ is given,
 *  it is overwritten and returned. This is intended to avoid the probe effect.
 *
 *      h = {}
 *      ObjectSpace.count_objects(h)
 *      h
 *      # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
 *
 *  This method is only expected to work on C Ruby.
 */
static VALUE
count_objects(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t counts[T_MASK+1];
    size_t freed = 0;
    size_t total = 0;
    size_t i;
    VALUE hash = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        hash = argv[0];
        if (!RB_TYPE_P(hash, T_HASH))
            rb_raise(rb_eTypeError, "non-hash given");
    }

    for (i = 0; i <= T_MASK; i++) {
        counts[i] = 0;
    }

    for (i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (;p < pend; p += stride) {
            VALUE vp = (VALUE)p;
            GC_ASSERT((NUM_IN_PAGE(vp) * sizeof(RVALUE)) % page->slot_size == 0);

            void *poisoned = asan_poisoned_object_p(vp);
            asan_unpoison_object(vp, false);
            if (RANY(p)->as.basic.flags) {
                counts[BUILTIN_TYPE(vp)]++;
            }
            else {
                freed++;
            }

            if (poisoned) {
                GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
                asan_poison_object(vp);
            }
        }
        total += page->total_slots;
    }

    if (hash == Qnil) {
        hash = rb_hash_new();
    }
    else if (!RHASH_EMPTY_P(hash)) {
        rb_hash_stlike_foreach(hash, set_zero, hash);
    }
    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
    rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));

    for (i = 0; i <= T_MASK; i++) {
        VALUE type = type_sym(i);
        if (counts[i])
            rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
    }

    return hash;
}
/*
  ------------------------ Garbage Collection ------------------------
*/

static size_t
objspace_available_slots(rb_objspace_t *objspace)
{
    size_t total_slots = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
        total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
    }
    return total_slots;
}

static size_t
objspace_live_slots(rb_objspace_t *objspace)
{
    return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
}

static size_t
objspace_free_slots(rb_objspace_t *objspace)
{
    return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
}
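/* Note: objspace_live_slots() excludes slots whose finalizers have not run
 * yet (heap_pages_final_slots); those slots are neither live nor reusable,
 * so objspace_free_slots() subtracts them from the available count as well. */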
static void
gc_setup_mark_bits(struct heap_page *page)
{
    /* copy oldgen bitmap to mark bitmap */
    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
}

static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size);

static void
lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
#if defined(_WIN32)
    DWORD old_protect;

    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
#else
    if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
#endif
        rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
    }
    else {
        gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
    }
}

static void
unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
#if defined(_WIN32)
    DWORD old_protect;

    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
#else
    if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
#endif
        rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
    }
    else {
        gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
    }
}
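/* Compaction moves objects with two cursors, in the spirit of the classic
 * two-finger algorithm: the sweep cursor advances from the head of each heap
 * while the compact cursor retreats from the tail, and compaction stops when
 * they meet.  Pages the compact cursor has finished with may contain T_MOVED
 * forwarding objects, so their bodies are protected with the calls above;
 * any stray access from the mutator faults into read_barrier_handler(),
 * which unprotects the page and invalidates the moved objects on it. */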
static bool
try_move_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page, uintptr_t p, bits_t bits, VALUE dest)
{
    if (bits) {
        do {
            if (bits & 1) {
                /* We're trying to move "p" */
                objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)p)]++;

                if (gc_is_moveable_obj(objspace, (VALUE)p)) {
                    /* We were able to move "p" */
                    objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)p)]++;
                    objspace->rcompactor.total_moved++;

                    bool from_freelist = false;

                    if (BUILTIN_TYPE(dest) == T_NONE) {
                        from_freelist = true;
                    }

                    gc_move(objspace, (VALUE)p, dest, page->slot_size);
                    gc_pin(objspace, (VALUE)p);
                    heap->compact_cursor_index = (RVALUE *)p;
                    if (from_freelist) {
                        FL_SET((VALUE)p, FL_FROM_FREELIST);
                    }

                    return true;
                }
            }
            p += sizeof(RVALUE);
            bits >>= 1;
        } while (bits);
    }

    return false;
}

static short
try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE dest)
{
    struct heap_page * cursor = heap->compact_cursor;

    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));

    /* T_NONE objects came from the free list.  If the object is *not* a
     * T_NONE, it is an object that just got freed but hasn't been
     * added to the freelist yet */

    while (1) {
        size_t index;

        bits_t *mark_bits = cursor->mark_bits;
        bits_t *pin_bits = cursor->pinned_bits;
        RVALUE * p;

        if (heap->compact_cursor_index) {
            index = BITMAP_INDEX(heap->compact_cursor_index);
            p = heap->compact_cursor_index;
            GC_ASSERT(cursor == GET_HEAP_PAGE(p));
        }
        else {
            index = 0;
            p = cursor->start;
        }

        bits_t bits = mark_bits[index] & ~pin_bits[index];

        bits >>= NUM_IN_PAGE(p);
        if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;

        if (index == 0) {
            p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
        }
        else {
            p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start)) + (BITS_BITLENGTH * index);
        }

        /* Find an object to move and move it. Movable objects must be
         * marked, so we iterate using the marking bitmap */
        for (size_t i = index + 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
            bits_t bits = mark_bits[i] & ~pin_bits[i];
            if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
            p += BITS_BITLENGTH;
        }

        /* We couldn't find a movable object on the compact cursor, so lets
         * move to the next page (previous page since we are traveling in the
         * opposite direction of the sweep cursor) and look there. */

        struct heap_page * next;

        next = list_prev(&heap->pages, cursor, page_node);

        /* Protect the current cursor since it probably has T_MOVED slots. */
        lock_page_body(objspace, GET_PAGE_BODY(cursor->start));

        heap->compact_cursor = next;
        heap->compact_cursor_index = 0;
        cursor = next;

        // Cursors have met, lets quit.  We set `heap->compact_cursor` equal
        // to `heap->sweeping_page` so we know how far to iterate through
        // the heap when unprotecting pages.
        if (next == sweep_page) {
            break;
        }
    }

    return 0;
}
static void
gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *cursor = heap->compact_cursor;

    while (cursor) {
        unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
        cursor = list_next(&heap->pages, cursor, page_node);
    }
}

static void gc_update_references(rb_objspace_t * objspace);
static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);

static void
read_barrier_handler(uintptr_t address)
{
    VALUE obj;
    rb_objspace_t * objspace = &rb_objspace;

    address -= address % sizeof(RVALUE);

    obj = (VALUE)address;

    unlock_page_body(objspace, GET_PAGE_BODY(obj));

    objspace->profile.read_barrier_faults++;

    invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
}
#if defined(_WIN32)
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
typedef void (*signal_handler)(int);
static signal_handler old_sigsegv_handler;

static LONG WINAPI
read_barrier_signal(EXCEPTION_POINTERS * info)
{
    /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
    if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        /* > The second array element specifies the virtual address of the inaccessible data.
         * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
         *
         * Use this address to invalidate the page */
        read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
        return EXCEPTION_CONTINUE_EXECUTION;
    }
    else {
        return EXCEPTION_CONTINUE_SEARCH;
    }
}

static void
uninstall_handlers(void)
{
    signal(SIGSEGV, old_sigsegv_handler);
    SetUnhandledExceptionFilter(old_handler);
}

static void
install_handlers(void)
{
    /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
    old_sigsegv_handler = signal(SIGSEGV, NULL);
    /* Unhandled Exception Filter has access to the violation address similar
     * to si_addr from sigaction */
    old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
}
#else
static struct sigaction old_sigbus_handler;
static struct sigaction old_sigsegv_handler;

static void
read_barrier_signal(int sig, siginfo_t * info, void * data)
{
    // setup SEGV/BUS handlers for errors
    struct sigaction prev_sigbus, prev_sigsegv;
    sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
    sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);

    // enable SIGBUS/SEGV
    sigset_t set, prev_set;
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    sigaddset(&set, SIGSEGV);
    sigprocmask(SIG_UNBLOCK, &set, &prev_set);

    // run handler
    read_barrier_handler((uintptr_t)info->si_addr);

    // reset SEGV/BUS handlers
    sigaction(SIGBUS, &prev_sigbus, NULL);
    sigaction(SIGSEGV, &prev_sigsegv, NULL);
    sigprocmask(SIG_SETMASK, &prev_set, NULL);
}

static void
uninstall_handlers(void)
{
    sigaction(SIGBUS, &old_sigbus_handler, NULL);
    sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
}

static void
install_handlers(void)
{
    struct sigaction action;
    memset(&action, 0, sizeof(struct sigaction));
    sigemptyset(&action.sa_mask);
    action.sa_sigaction = read_barrier_signal;
    action.sa_flags = SA_SIGINFO | SA_ONSTACK;

    sigaction(SIGBUS, &action, &old_sigbus_handler);
    sigaction(SIGSEGV, &action, &old_sigsegv_handler);
}
#endif
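/* Note: SA_ONSTACK matters here.  The faulting thread may have very little
 * ordinary stack left, so the handler must be able to run on the alternate
 * signal stack the VM registers with sigaltstack(). */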
static void
revert_stack_objects(VALUE stack_obj, void *ctx)
{
    rb_objspace_t * objspace = (rb_objspace_t*)ctx;

    if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
        /* For now we'll revert the whole page if the object made it to the
         * stack.  I think we can change this to move just the one object
         * back though */
        invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
    }
}

static void
revert_machine_stack_references(rb_objspace_t *objspace, VALUE v)
{
    if (is_pointer_to_heap(objspace, (void *)v)) {
        if (BUILTIN_TYPE(v) == T_MOVED) {
            /* For now we'll revert the whole page if the object made it to the
             * stack.  I think we can change this to move just the one object
             * back though */
            invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
        }
    }
}

static void each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE));

static void
check_stack_for_moved(rb_objspace_t *objspace)
{
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
    each_machine_stack_value(ec, revert_machine_stack_references);
}
static void
gc_compact_finish(rb_objspace_t *objspace, rb_size_pool_t *pool, rb_heap_t *heap)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        gc_unprotect_pages(objspace, heap);
    }

    uninstall_handlers();

    /* The mutator is allowed to run during incremental sweeping. T_MOVED
     * objects can get pushed on the stack and when the compaction process
     * finishes up, it may remove the read barrier before anything has a
     * chance to read from the T_MOVED address. To fix this, we scan the stack
     * then revert any moved objects that made it to the stack. */
    check_stack_for_moved(objspace);

    gc_update_references(objspace);
    objspace->profile.compact_count++;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        heap->compact_cursor = NULL;
        heap->compact_cursor_index = 0;
    }

    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
    }
    objspace->flags.during_compacting = FALSE;
}
struct gc_sweep_context {
    struct heap_page *page;
    int final_slots;
    int freed_slots;
    int empty_slots;
};
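/* Counters accumulated by one gc_sweep_page() call: final_slots counts
 * zombie slots waiting on their finalizers, freed_slots counts slots freed
 * by this sweep, and empty_slots counts slots that were already free.
 * freed_slots + empty_slots is what the page contributes back to its
 * freelist. */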
static bool
gc_fill_swept_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, bool *finished_compacting, struct gc_sweep_context *ctx)
{
    struct heap_page * sweep_page = ctx->page;

    if (bitset) {
        short slot_size = sweep_page->slot_size;
        short slot_bits = slot_size / sizeof(RVALUE);

        do {
            if (bitset & 1) {
                VALUE dest = (VALUE)p;

                GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest));
                GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));

                CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest);

                if (*finished_compacting) {
                    if (BUILTIN_TYPE(dest) == T_NONE) {
                        ctx->empty_slots++;
                    }
                    else {
                        ctx->freed_slots++;
                    }
                    (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)dest, sizeof(RVALUE));
                    heap_page_add_freeobj(objspace, sweep_page, dest);
                }
                else {
                    /* Zombie slots don't get marked, but we can't reuse
                     * their memory until they have their finalizers run.*/
                    if (BUILTIN_TYPE(dest) != T_ZOMBIE) {
                        if (!try_move(objspace, heap, sweep_page, dest)) {
                            *finished_compacting = true;
                            (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                            gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
                            if (BUILTIN_TYPE(dest) == T_NONE) {
                                ctx->empty_slots++;
                            }
                            else {
                                ctx->freed_slots++;
                            }
                            heap_page_add_freeobj(objspace, sweep_page, dest);
                            gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(dest));
                        }
                    }
                }
            }
            p += slot_size;
            bitset >>= slot_bits;
        } while (bitset);
    }

    return *finished_compacting;
}

static bool
gc_fill_swept_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, struct gc_sweep_context *ctx)
{
    /* Find any pinned but not marked objects and try to fill those slots */
    bool finished_compacting = false;
    bits_t *mark_bits, *pin_bits;
    bits_t bitset;
    uintptr_t p;

    mark_bits = sweep_page->mark_bits;
    pin_bits = sweep_page->pinned_bits;

    p = (uintptr_t)sweep_page->start;

    struct heap_page * cursor = heap->compact_cursor;

    unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));

    /* *Want to move* objects are pinned but not marked. */
    bitset = pin_bits[0] & ~mark_bits[0];
    bitset >>= NUM_IN_PAGE(p); // Skip header / dead space bits
    gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
    p += ((BITS_BITLENGTH - NUM_IN_PAGE(p)) * sizeof(RVALUE));

    for (int i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
        /* *Want to move* objects are pinned but not marked. */
        bitset = pin_bits[i] & ~mark_bits[i];
        gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
        p += ((BITS_BITLENGTH) * sizeof(RVALUE));
    }

    lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));

    return finished_compacting;
}
static inline void
gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
{
    struct heap_page * sweep_page = ctx->page;
    short slot_size = sweep_page->slot_size;
    short slot_bits = slot_size / sizeof(RVALUE);
    GC_ASSERT(slot_bits > 0);

    do {
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % sizeof(RVALUE) == 0);

        asan_unpoison_object(vp, false);
        if (bitset & 1) {
            switch (BUILTIN_TYPE(vp)) {
              default: /* majority case */
                gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if RGENGC_CHECK_MODE
                if (!is_full_marking(objspace)) {
                    if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
                    if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
                }
#endif
                if (obj_free(objspace, vp)) {
                    if (heap->compact_cursor) {
                        /* We *want* to fill this slot */
                        MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
                    }
                    else {
                        (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                        heap_page_add_freeobj(objspace, sweep_page, vp);
                        gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
                        ctx->freed_slots++;
                    }
                }
                else {
                    ctx->final_slots++;
                }
                break;

              case T_MOVED:
                if (objspace->flags.during_compacting) {
                    /* The sweep cursor shouldn't have made it to any
                     * T_MOVED slots while the compact flag is enabled.
                     * The sweep cursor and compact cursor move in
                     * opposite directions, and when they meet references will
                     * get updated and "during_compacting" should get disabled */
                    rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
                }
                gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
                if (FL_TEST(vp, FL_FROM_FREELIST)) {
                    ctx->empty_slots++;
                }
                else {
                    ctx->freed_slots++;
                }
                heap_page_add_freeobj(objspace, sweep_page, vp);
                break;
              case T_ZOMBIE:
                /* already counted */
                break;
              case T_NONE:
                if (heap->compact_cursor) {
                    /* We *want* to fill this slot */
                    MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
                }
                else {
                    ctx->empty_slots++; /* already freed */
                }
                break;
            }
        }
        p += slot_size;
        bitset >>= slot_bits;
    } while (bitset);
}
static inline void
gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct gc_sweep_context *ctx)
{
    struct heap_page *sweep_page = ctx->page;

    int i;

    RVALUE *p;
    bits_t *bits, bitset;

    gc_report(2, objspace, "page_sweep: start.\n");

    if (heap->compact_cursor) {
        if (sweep_page == heap->compact_cursor) {
            /* The compaction cursor and sweep page met, so we need to quit compacting */
            gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
            gc_compact_finish(objspace, size_pool, heap);
        }
        else {
            /* We anticipate filling the page, so NULL out the freelist. */
            asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
            sweep_page->freelist = NULL;
            asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
        }
    }

    sweep_page->flags.before_sweep = FALSE;
    sweep_page->free_slots = 0;

    p = sweep_page->start;
    bits = sweep_page->mark_bits;

    int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / sizeof(RVALUE));
    int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
    if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
        bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
    }

    // Skip out of range slots at the head of the page
    bitset = ~bits[0];
    bitset >>= NUM_IN_PAGE(p);
    if (bitset) {
        gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
    }
    p += (BITS_BITLENGTH - NUM_IN_PAGE(p));

    for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
        bitset = ~bits[i];
        if (bitset) {
            gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
        }
        p += BITS_BITLENGTH;
    }

    if (heap->compact_cursor) {
        if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
            gc_compact_finish(objspace, size_pool, heap);
        }
    }

    if (!heap->compact_cursor) {
        gc_setup_mark_bits(sweep_page);
    }

#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->removing_objects += ctx->final_slots + ctx->freed_slots;
        record->empty_objects += ctx->empty_slots;
    }
#endif
    if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
                   rb_gc_count(),
                   sweep_page->total_slots,
                   ctx->freed_slots, ctx->empty_slots, ctx->final_slots);

    sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
    objspace->profile.total_freed_objects += ctx->freed_slots;

    if (heap_pages_deferred_final && !finalizing) {
        rb_thread_t *th = GET_THREAD();
        if (th) {
            gc_finalize_deferred_register(objspace);
        }
    }

#if RGENGC_CHECK_MODE
    short freelist_len = 0;
    RVALUE *ptr = sweep_page->freelist;
    while (ptr) {
        freelist_len++;
        ptr = ptr->as.free.next;
    }
    if (freelist_len != sweep_page->free_slots) {
        rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
    }
#endif

    gc_report(2, objspace, "page_sweep: end.\n");
}
/* allocate additional minimum page to work */
static void
gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
            /* there is no free after page_sweep() */
            size_pool_allocatable_pages_set(objspace, size_pool, 1);
            if (!heap_increment(objspace, size_pool, heap)) { /* can't allocate additional free objects */
                rb_memerror();
            }
        }
    }
}
static const char *
gc_mode_name(enum gc_mode mode)
{
    switch (mode) {
      case gc_mode_none: return "none";
      case gc_mode_marking: return "marking";
      case gc_mode_sweeping: return "sweeping";
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
    }
}

static void
gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
{
#if RGENGC_CHECK_MODE
    enum gc_mode prev_mode = gc_mode(objspace);
    switch (prev_mode) {
      case gc_mode_none:     GC_ASSERT(mode == gc_mode_marking); break;
      case gc_mode_marking:  GC_ASSERT(mode == gc_mode_sweeping); break;
      case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
    }
#endif
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
    gc_mode_set(objspace, mode);
}
static void
heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
{
    if (freelist) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        if (page->freelist) {
            RVALUE *p = page->freelist;
            asan_unpoison_object((VALUE)p, false);
            while (p->as.free.next) {
                RVALUE *prev = p;
                p = p->as.free.next;
                asan_poison_object((VALUE)prev);
                asan_unpoison_object((VALUE)p, false);
            }
            p->as.free.next = freelist;
            asan_poison_object((VALUE)p);
        }
        else {
            page->freelist = freelist;
        }
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    }
}
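/* Note the ASAN discipline above: free slots and the freelist head stay
 * poisoned so stray reads of dead memory are reported, so every traversal
 * unpoisons a node just before touching it and re-poisons it as soon as it
 * moves on. */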
static void
gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
{
    heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
    heap->free_pages = NULL;
#if GC_ENABLE_INCREMENTAL_MARK
    heap->pooled_pages = NULL;
#endif
}

#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
__attribute__((noinline))
#endif
static void
gc_sweep_start(rb_objspace_t *objspace)
{
    gc_mode_transition(objspace, gc_mode_sweeping);

#if GC_ENABLE_INCREMENTAL_MARK
    objspace->rincgc.pooled_slots = 0;
#endif

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        gc_sweep_start_heap(objspace, SIZE_POOL_EDEN_HEAP(size_pool));
    }

    rb_ractor_t *r = NULL;
    list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
        rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
    }
}
static void
gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
    size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
    size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
    size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;

    size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);

    if (swept_slots < min_free_slots) {
        bool grow_heap = is_full_marking(objspace);

        if (!is_full_marking(objspace)) {
            /* The heap is a growth heap if it freed more slots than had empty slots. */
            bool is_growth_heap = size_pool->empty_slots == 0 ||
                                      size_pool->freed_slots > size_pool->empty_slots;

            if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
                grow_heap = TRUE;
            }
            else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
                objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
                size_pool->force_major_gc_count++;
            }
        }

        if (grow_heap) {
            size_t extend_page_count = heap_extend_pages(objspace, swept_slots, total_slots, total_pages);

            if (extend_page_count > size_pool->allocatable_pages) {
                size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
            }

            heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
    }
}
static void
gc_sweep_finish(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_sweep_finish\n");

    gc_prof_set_heap_info(objspace);
    heap_pages_free_unused_pages(objspace);

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        /* if heap_pages has unused pages, then assign them to increment */
        size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
        if (size_pool->allocatable_pages < tomb_pages) {
            size_pool->allocatable_pages = tomb_pages;
        }

        size_pool->freed_slots = 0;
        size_pool->empty_slots = 0;

#if GC_ENABLE_INCREMENTAL_MARK
        if (!will_be_incremental_marking(objspace)) {
            rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
            struct heap_page *end_page = eden_heap->free_pages;
            if (end_page) {
                while (end_page->free_next) end_page = end_page->free_next;
                end_page->free_next = eden_heap->pooled_pages;
            }
            else {
                eden_heap->free_pages = eden_heap->pooled_pages;
            }
            eden_heap->pooled_pages = NULL;
            objspace->rincgc.pooled_slots = 0;
        }
#endif
    }
    heap_pages_expand_sorted(objspace);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
    gc_mode_transition(objspace, gc_mode_none);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
}
static int
gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    struct heap_page *sweep_page = heap->sweeping_page;
    int unlink_limit = 3;

#if GC_ENABLE_INCREMENTAL_MARK
    int swept_slots = 0;
#if USE_RVARGC
    bool need_pool = TRUE;
#else
    int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
#endif

    gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
#else
    gc_report(2, objspace, "gc_sweep_step\n");
#endif

    if (sweep_page == NULL) return FALSE;

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_start(objspace);
#endif

    do {
        RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);

        struct gc_sweep_context ctx = {
            .page = sweep_page,
            .final_slots = 0,
            .freed_slots = 0,
            .empty_slots = 0,
        };
        gc_sweep_page(objspace, size_pool, heap, &ctx);
        int free_slots = ctx.freed_slots + ctx.empty_slots;

        heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);

        if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
            heap_pages_freeable_pages > 0 &&
            unlink_limit > 0) {
            heap_pages_freeable_pages--;
            unlink_limit--;
            /* there are no living objects -> move this page to tomb heap */
            heap_unlink_page(objspace, heap, sweep_page);
            heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
        }
        else if (free_slots > 0) {
#if USE_RVARGC
            size_pool->freed_slots += ctx.freed_slots;
            size_pool->empty_slots += ctx.empty_slots;
#endif

#if GC_ENABLE_INCREMENTAL_MARK
            if (need_pool) {
                heap_add_poolpage(objspace, heap, sweep_page);
                need_pool = FALSE;
            }
            else {
                heap_add_freepage(heap, sweep_page);
                swept_slots += free_slots;
                if (swept_slots > 2048) {
                    break;
                }
            }
#else
            heap_add_freepage(heap, sweep_page);
            break;
#endif
        }
        else {
            sweep_page->free_next = NULL;
        }
    } while ((sweep_page = heap->sweeping_page));

    if (!heap->sweeping_page) {
        gc_sweep_finish_size_pool(objspace, size_pool);

        if (!has_sweeping_pages(objspace)) {
            gc_sweep_finish(objspace);
        }
    }

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_stop(objspace);
#endif

    return heap->free_pages != NULL;
}
static void
gc_sweep_rest(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
            gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
    }
}

static void
gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
{
    GC_ASSERT(dont_gc_val() == FALSE);
    if (!GC_ENABLE_LAZY_SWEEP) return;

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
            /* sweep_size_pool requires a free slot but sweeping did not yield any. */
            if (size_pool == sweep_size_pool) {
                if (size_pool->allocatable_pages > 0) {
                    heap_increment(objspace, size_pool, heap);
                }
                else {
                    /* Not allowed to create a new page so finish sweeping. */
                    gc_sweep_rest(objspace);
                    break;
                }
            }
        }
    }

    gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
}
static void
invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
{
    if (bitset) {
        do {
            if (bitset & 1) {
                VALUE forwarding_object = (VALUE)p;
                VALUE object;

                if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
                    GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
                    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));

                    CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);

                    bool from_freelist = FL_TEST_RAW(forwarding_object, FL_FROM_FREELIST);
                    object = rb_gc_location(forwarding_object);

                    gc_move(objspace, object, forwarding_object, page->slot_size);
                    /* forwarding_object is now our actual object, and "object"
                     * is the free slot for the original page */
                    struct heap_page *orig_page = GET_HEAP_PAGE(object);
                    orig_page->free_slots++;
                    if (!from_freelist) {
                        objspace->profile.total_freed_objects++;
                    }
                    heap_page_add_freeobj(objspace, orig_page, object);

                    GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
                    GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
                    GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
                }
            }
            p += sizeof(RVALUE);
            bitset >>= 1;
        } while (bitset);
    }
}

static void
invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
{
    int i;
    bits_t *mark_bits, *pin_bits;
    bits_t bitset;
    RVALUE *p;

    mark_bits = page->mark_bits;
    pin_bits = page->pinned_bits;

    p = page->start;

    // Skip out of range slots at the head of the page
    bitset = pin_bits[0] & ~mark_bits[0];
    bitset >>= NUM_IN_PAGE(p);
    invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
    p += (BITS_BITLENGTH - NUM_IN_PAGE(p));

    for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
        /* Moved objects are pinned but never marked. We reuse the pin bits
         * to indicate there is a moved object in this slot. */
        bitset = pin_bits[i] & ~mark_bits[i];

        invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
        p += BITS_BITLENGTH;
    }
}
static void
gc_compact_start(rb_objspace_t *objspace)
{
    struct heap_page *page = NULL;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
        list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = TRUE;
        }

        heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
        heap->compact_cursor_index = 0;
    }

    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->moved_objects = objspace->rcompactor.total_moved;
    }

    memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));

    /* Set up read barrier for pages containing MOVED objects */
    install_handlers();
}
static void
gc_sweep(rb_objspace_t *objspace)
{
    const unsigned int immediate_sweep = objspace->flags.immediate_sweep;

    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);

    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);
#endif
        gc_sweep_start(objspace);
        if (objspace->flags.during_compacting) {
            gc_compact_start(objspace);
        }

        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);
#endif
    }
    else {
        struct heap_page *page = NULL;
        gc_sweep_start(objspace);

        if (ruby_enable_autocompact && is_full_marking(objspace)) {
            gc_compact_start(objspace);
        }

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
                page->flags.before_sweep = TRUE;
            }
        }

        /* Sweep every size pool. */
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
    }

    rb_size_pool_t *size_pool = &size_pools[0];
    gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
}
/* Marking - Marking stack */

static stack_chunk_t *
stack_chunk_alloc(void)
{
    stack_chunk_t *res;

    res = malloc(sizeof(stack_chunk_t));
    if (!res)
        rb_memerror();

    return res;
}

static inline int
is_mark_stack_empty(mark_stack_t *stack)
{
    return stack->chunk == NULL;
}

static size_t
mark_stack_size(mark_stack_t *stack)
{
    size_t size = stack->index;
    stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;

    while (chunk) {
        size += stack->limit;
        chunk = chunk->next;
    }
    return size;
}

static void
add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
{
    chunk->next = stack->cache;
    stack->cache = chunk;
    stack->cache_size++;
}

static void
shrink_stack_chunk_cache(mark_stack_t *stack)
{
    stack_chunk_t *chunk;

    if (stack->unused_cache_size > (stack->cache_size/2)) {
        chunk = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        free(chunk);
    }
    stack->unused_cache_size = stack->cache_size;
}

static void
push_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *next;

    GC_ASSERT(stack->index == stack->limit);

    if (stack->cache_size > 0) {
        next = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        if (stack->unused_cache_size > stack->cache_size)
            stack->unused_cache_size = stack->cache_size;
    }
    else {
        next = stack_chunk_alloc();
    }
    next->next = stack->chunk;
    stack->chunk = next;
    stack->index = 0;
}

static void
pop_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *prev;

    prev = stack->chunk->next;
    GC_ASSERT(stack->index == 0);
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    stack->index = stack->limit;
}

static void
free_stack_chunks(mark_stack_t *stack)
{
    stack_chunk_t *chunk = stack->chunk;
    stack_chunk_t *next = NULL;

    while (chunk != NULL) {
        next = chunk->next;
        free(chunk);
        chunk = next;
    }
}

static void
push_mark_stack(mark_stack_t *stack, VALUE data)
{
    VALUE obj = data;
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
      case T_CLASS:
      case T_MODULE:
      case T_FLOAT:
      case T_STRING:
      case T_REGEXP:
      case T_ARRAY:
      case T_HASH:
      case T_STRUCT:
      case T_BIGNUM:
      case T_FILE:
      case T_DATA:
      case T_MATCH:
      case T_COMPLEX:
      case T_RATIONAL:
      case T_TRUE:
      case T_FALSE:
      case T_SYMBOL:
      case T_IMEMO:
      case T_ICLASS:
        if (stack->index == stack->limit) {
            push_mark_stack_chunk(stack);
        }
        stack->chunk->data[stack->index++] = data;
        return;

      case T_NONE:
      case T_NIL:
      case T_FIXNUM:
      case T_MOVED:
      case T_ZOMBIE:
      case T_UNDEF:
        rb_bug("push_mark_stack() called for broken object");
        break;

      case T_NODE:
        UNEXPECTED_NODE(push_mark_stack);
        break;
    }

    rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
           BUILTIN_TYPE(obj), (void *)data,
           is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
}

static int
pop_mark_stack(mark_stack_t *stack, VALUE *data)
{
    if (is_mark_stack_empty(stack)) {
        return FALSE;
    }
    if (stack->index == 1) {
        *data = stack->chunk->data[--stack->index];
        pop_mark_stack_chunk(stack);
    }
    else {
        *data = stack->chunk->data[--stack->index];
    }
    return TRUE;
}

static void
init_mark_stack(mark_stack_t *stack)
{
    int i;

    MEMZERO(stack, mark_stack_t, 1);
    stack->index = stack->limit = STACK_CHUNK_SIZE;

    for (i=0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
    }
    stack->unused_cache_size = stack->cache_size;
}
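/* The mark stack is a linked list of fixed-size chunks (STACK_CHUNK_SIZE
 * slots each).  Exhausted chunks are returned to a small cache rather than
 * freed, so steady-state marking pushes and pops without a malloc()/free()
 * per chunk; shrink_stack_chunk_cache() trims the cache when half of it went
 * unused during a cycle. */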
#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)

#define STACK_START (ec->machine.stack_start)
#define STACK_END (ec->machine.stack_end)
#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))

#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                                                  : (size_t)(STACK_END - STACK_START + 1))
#endif

#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
int
ruby_get_stack_grow_direction(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
#endif

size_t
ruby_stack_length(VALUE **p)
{
    rb_execution_context_t *ec = GET_EC();
    SET_STACK_END;
    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    return STACK_LENGTH;
}

#define PREVENT_STACK_OVERFLOW 1
#ifndef PREVENT_STACK_OVERFLOW
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
# define PREVENT_STACK_OVERFLOW 1
#else
# define PREVENT_STACK_OVERFLOW 0
#endif
#endif
#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
static int
stack_check(rb_execution_context_t *ec, int water_mark)
{
    SET_STACK_END;

    size_t length = STACK_LENGTH;
    size_t maximum_length = STACK_LEVEL_MAX - water_mark;

    return length > maximum_length;
}
#else
#define stack_check(ec, water_mark) FALSE
#endif

#define STACKFRAME_FOR_CALL_CFUNC 2048

MJIT_FUNC_EXPORTED int
rb_ec_stack_check(rb_execution_context_t *ec)
{
    return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
}

int
ruby_stack_check(void)
{
    return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
}
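/* stack_check() measures the machine stack against the thread's limit minus
 * a safety margin (the "water mark"); STACKFRAME_FOR_CALL_CFUNC leaves room
 * for one more C function frame before reporting imminent overflow. */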
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
static void
each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
{
    VALUE v;
    while (n--) {
        v = *x;
        cb(objspace, v);
        x++;
    }
}

static void
gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
{
    long n;

    if (end <= start) return;
    n = end - start;
    each_location(objspace, start, n, cb);
}

void
rb_gc_mark_locations(const VALUE *start, const VALUE *end)
{
    gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
}

static void
gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
{
    long i;

    for (i=0; i<n; i++) {
        gc_mark(objspace, values[i]);
    }
}

void
rb_gc_mark_values(long n, const VALUE *values)
{
    long i;
    rb_objspace_t *objspace = &rb_objspace;

    for (i=0; i<n; i++) {
        gc_mark_and_pin(objspace, values[i]);
    }
}
static void
gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
{
    long i;

    for (i=0; i<n; i++) {
        if (is_markable_object(objspace, values[i])) {
            gc_mark_and_pin(objspace, values[i]);
        }
    }
}

void
rb_gc_mark_vm_stack_values(long n, const VALUE *values)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_mark_stack_values(objspace, n, values);
}

static int
mark_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static int
mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark_and_pin(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static void
mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;
    st_foreach(tbl, mark_value, (st_data_t)objspace);
}

static void
mark_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;
    st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
}
static int
mark_key(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark_and_pin(objspace, (VALUE)key);
    return ST_CONTINUE;
}

static void
mark_set(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl) return;
    st_foreach(tbl, mark_key, (st_data_t)objspace);
}

static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark_and_pin(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static void
mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl) return;
    st_foreach(tbl, pin_value, (st_data_t)objspace);
}

void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl);
}

static int
mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static int
pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark_and_pin(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static int
pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}
static void
mark_hash(rb_objspace_t *objspace, VALUE hash)
{
    if (rb_hash_compare_by_id_p(hash)) {
        rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
    }
    else {
        rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
    }

    if (RHASH_AR_TABLE_P(hash)) {
        if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
            rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
        }
    }
    else {
        VM_ASSERT(!RHASH_TRANSIENT_P(hash));
    }
    gc_mark(objspace, RHASH(hash)->ifnone);
}

static void
mark_st(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl) return;
    st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
}

void
rb_mark_hash(st_table *tbl)
{
    mark_st(&rb_objspace, tbl);
}
static void
mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
{
    const rb_method_definition_t *def = me->def;

    gc_mark(objspace, me->owner);
    gc_mark(objspace, me->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
            gc_mark(objspace, (VALUE)def->body.iseq.cref);

            if (def->iseq_overload && me->defined_class) {
                // it can be a key of "overloaded_cme" table
                // so it should be pinned.
                gc_mark_and_pin(objspace, (VALUE)me);
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            gc_mark(objspace, def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            gc_mark(objspace, def->body.bmethod.proc);
            if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
            break;
          case VM_METHOD_TYPE_ALIAS:
            gc_mark(objspace, (VALUE)def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            gc_mark(objspace, (VALUE)def->body.refined.orig_me);
            gc_mark(objspace, (VALUE)def->body.refined.owner);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

static enum rb_id_table_iterator_result
mark_method_entry_i(VALUE me, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, me);
    return ID_TABLE_CONTINUE;
}

static void
mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (tbl) {
        rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
    }
}

static enum rb_id_table_iterator_result
mark_const_entry_i(VALUE value, void *data)
{
    const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
    rb_objspace_t *objspace = data;

    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);
    return ID_TABLE_CONTINUE;
}

static void
mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (!tbl) return;
    rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
}
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif

static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
                                const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));

#ifndef __EMSCRIPTEN__
static void
mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    union {
        rb_jmp_buf j;
        VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    FLUSH_REGISTER_WINDOWS;
    memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
    /* This assumes that all registers are saved into the jmp_buf (and stack) */
    rb_setjmp(save_regs_gc_mark.j);

    /* SET_STACK_END must be called in this function because
     * the stack frame of this function may contain
     * callee save registers and they should be marked. */
    SET_STACK_END;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);

    each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);

    each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
}
#else
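/* The rb_setjmp() above doubles as a portable register spill: callee-saved
 * registers are written into save_regs_gc_mark, so a VALUE held only in a
 * register is still found and conservatively marked via gc_mark_maybe(). */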
static VALUE *rb_emscripten_stack_range_tmp[2];

static void
rb_emscripten_mark_locations(void *begin, void *end)
{
    rb_emscripten_stack_range_tmp[0] = begin;
    rb_emscripten_stack_range_tmp[1] = end;
}

static void
mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    emscripten_scan_stack(rb_emscripten_mark_locations);
    each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);

    emscripten_scan_registers(rb_emscripten_mark_locations);
    each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
}
#endif

static void
each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE *stack_start, *stack_end;

    GET_STACK_BOUNDS(stack_start, stack_end, 0);
    each_stack_location(objspace, ec, stack_start, stack_end, cb);
}

void
rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
{
    each_machine_stack_value(ec, gc_mark_maybe);
}
static void
each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
                    const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
{
    gc_mark_locations(objspace, stack_start, stack_end, cb);

#if defined(__mc68000__)
    gc_mark_locations(objspace,
                      (VALUE*)((char*)stack_start + 2),
                      (VALUE*)((char*)stack_end - 2), cb);
#endif
}

void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl);
}

void
rb_mark_tbl_no_pin(st_table *tbl)
{
    mark_tbl_no_pin(&rb_objspace, tbl);
}
static void
gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
{
    (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
        asan_unpoison_object(obj, false);

        /* Garbage can live on the stack, so do not mark or pin */
        switch (BUILTIN_TYPE(obj)) {
          case T_ZOMBIE:
          case T_NONE:
            break;
          default:
            gc_mark_and_pin(objspace, obj);
            break;
        }

        if (ptr) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
    }
}

void
rb_gc_mark_maybe(VALUE obj)
{
    gc_mark_maybe(&rb_objspace, obj);
}

static inline int
gc_mark_set(rb_objspace_t *objspace, VALUE obj)
{
    ASSERT_vm_locking();
    if (RVALUE_MARKED(obj)) return 0;
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
    return 1;
}
static int
gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *uncollectible_bits = &page->uncollectible_bits[0];

    if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
        page->flags.has_uncollectible_shady_objects = TRUE;
        MARK_IN_BITMAP(uncollectible_bits, obj);
        objspace->rgengc.uncollectible_wb_unprotected_objects++;

#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static void
rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
{
    const VALUE old_parent = objspace->rgengc.parent_object;

    if (old_parent) { /* parent object is old */
        if (RVALUE_WB_UNPROTECTED(obj)) {
            if (gc_remember_unprotected(objspace, obj)) {
                gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
            }
        }
        else {
            if (!RVALUE_OLD_P(obj)) {
                if (RVALUE_MARKED(obj)) {
                    /* An object pointed from an OLD object should be OLD. */
                    gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_OLD(objspace, obj);
                    if (is_incremental_marking(objspace)) {
                        if (!RVALUE_MARKING(obj)) {
                            gc_grey(objspace, obj);
                        }
                    }
                    else {
                        rgengc_remember(objspace, obj);
                    }
                }
                else {
                    gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_CANDIDATE(objspace, obj);
                }
            }
        }
    }

    GC_ASSERT(old_parent == objspace->rgengc.parent_object);
}
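/* Generational invariant maintenance: while marking from an old parent
 * (objspace->rgengc.parent_object), a WB-unprotected child is recorded as
 * uncollectible, and a marked young child is promoted to old and then
 * greyed or remembered, so an old-to-young edge can never be missed by a
 * later minor GC. */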
static void
gc_grey(rb_objspace_t *objspace, VALUE obj)
{
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#endif

#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }
#endif

    push_mark_stack(&objspace->mark_stack, obj);
}

static void
gc_aging(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);

    GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
    check_rvalue_consistency(obj);

    if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
        if (!RVALUE_OLD_P(obj)) {
            gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
            RVALUE_AGE_INC(objspace, obj);
        }
        else if (is_full_marking(objspace)) {
            GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
            RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
        }
    }
    check_rvalue_consistency(obj);

    objspace->marked_slots++;
}
NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
static void reachable_objects_from_callback(VALUE obj);

static void
gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
{
    if (LIKELY(during_gc)) {
        rgengc_check_relation(objspace, obj);
        if (!gc_mark_set(objspace, obj)) return; /* already marked */

        if (0) { // for debug GC marking miss
            if (objspace->rgengc.parent_object) {
                RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
                               (void *)obj, obj_type_name(obj),
                               (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
            }
            else {
                RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
            }
        }

        if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
            rb_bug("try to mark T_NONE object"); /* check here will help debugging */
        }
        gc_aging(objspace, obj);
        gc_grey(objspace, obj);
    }
    else {
        reachable_objects_from_callback(obj);
    }
}
static inline void
gc_pin(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(is_markable_object(objspace, obj));
    if (UNLIKELY(objspace->flags.during_compacting)) {
        if (LIKELY(during_gc)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
        }
    }
}

static inline void
gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
{
    if (!is_markable_object(objspace, obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);
}

static inline void
gc_mark(rb_objspace_t *objspace, VALUE obj)
{
    if (!is_markable_object(objspace, obj)) return;
    gc_mark_ptr(objspace, obj);
}

void
rb_gc_mark_movable(VALUE ptr)
{
    gc_mark(&rb_objspace, ptr);
}

void
rb_gc_mark(VALUE ptr)
{
    gc_mark_and_pin(&rb_objspace, ptr);
}
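/* API note: rb_gc_mark() both marks and pins, for callers that hold raw
 * VALUE references the compactor cannot update.  rb_gc_mark_movable() marks
 * without pinning and must be paired with rb_gc_location() in the type's
 * update-references callback so pointers are fixed up after objects move. */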
/* CAUTION: THIS FUNCTION IS ENABLED *ONLY BEFORE* SWEEPING.
 * This function is only for GC_END_MARK timing.
 */

int
rb_objspace_marked_object_p(VALUE obj)
{
    return RVALUE_MARKED(obj) ? TRUE : FALSE;
}

static inline void
gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
{
    if (RVALUE_OLD_P(obj)) {
        objspace->rgengc.parent_object = obj;
    }
    else {
        objspace->rgengc.parent_object = Qfalse;
    }
}
static void
gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            const rb_env_t *env = (const rb_env_t *)obj;

            if (LIKELY(env->ep)) {
                // just after newobj() can be NULL here.
                GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
                GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
                gc_mark_values(objspace, (long)env->env_size, env->env);
                VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
                gc_mark(objspace, (VALUE)env->iseq);
            }
        }
        return;
      case imemo_cref:
        gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
        gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
        return;
      case imemo_svar:
        gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
        return;
      case imemo_throw_data:
        gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        return;
      case imemo_ifunc:
        gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
        return;
      case imemo_memo:
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
        gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
        return;
      case imemo_ment:
        mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        return;
      case imemo_iseq:
        rb_iseq_mark((rb_iseq_t *)obj);
        return;
      case imemo_tmpbuf:
        {
            const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
            do {
                rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
            } while ((m = m->next) != NULL);
        }
        return;
      case imemo_ast:
        rb_ast_mark(&RANY(obj)->as.imemo.ast);
        return;
      case imemo_parser_strterm:
        rb_strterm_mark(obj);
        return;
      case imemo_callinfo:
        return;
      case imemo_callcache:
        {
            const struct rb_callcache *cc = (const struct rb_callcache *)obj;
            // should not mark klass here
            gc_mark(objspace, (VALUE)vm_cc_cme(cc));
        }
        return;
      case imemo_constcache:
        {
            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
            gc_mark(objspace, ice->value);
        }
        return;
#if VM_CHECK_MODE > 0
      default:
        VM_UNREACHABLE(gc_mark_imemo);
#endif
    }
}
static void
gc_mark_children(rb_objspace_t *objspace, VALUE obj)
{
    register RVALUE *any = RANY(obj);
    gc_mark_set_parent(objspace, obj);

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_FLOAT:
      case T_BIGNUM:
      case T_SYMBOL:
        /* Not immediates, but they have no references and no singleton
         * class. */
        return;

      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        UNEXPECTED_NODE(rb_gc_mark);
        break;

      case T_IMEMO:
        gc_mark_imemo(objspace, obj);
        return;

      default:
        break;
    }

    gc_mark(objspace, any->as.basic.klass);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }
        if (!RCLASS_EXT(obj)) break;

        mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        cc_table_mark(objspace, obj);
        mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        break;

      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }
        if (!RCLASS_EXT(obj)) break;
        mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        cc_table_mark(objspace, obj);
        break;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            VALUE root = any->as.array.as.heap.aux.shared_root;
            gc_mark(objspace, root);
        }
        else {
            long i, len = RARRAY_LEN(obj);
            const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc)) {
                if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
                    RARRAY_TRANSIENT_P(obj)) {
                    rb_transient_heap_mark(obj, ptr);
                }
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            gc_mark(objspace, any->as.string.as.heap.aux.shared);
        }
        break;

      case T_DATA:
        {
            void *const ptr = DATA_PTR(obj);
            if (ptr) {
                RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
                    any->as.typeddata.type->function.dmark :
                    any->as.data.dmark;
                if (mark_func) (*mark_func)(ptr);
            }
        }
        break;

      case T_OBJECT:
        {
            const VALUE * const ptr = ROBJECT_IVPTR(obj);

            uint32_t i, len = ROBJECT_NUMIV(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc) &&
                ROBJ_TRANSIENT_P(obj)) {
                rb_transient_heap_mark(obj, ptr);
            }
        }
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            gc_mark(objspace, any->as.file.fptr->self);
            gc_mark(objspace, any->as.file.fptr->pathv);
            gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
            gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
            gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            gc_mark(objspace, any->as.file.fptr->encs.ecopts);
            gc_mark(objspace, any->as.file.fptr->write_lock);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, any->as.regexp.src);
        break;

      case T_MATCH:
        gc_mark(objspace, any->as.match.regexp);
        if (any->as.match.str) {
            gc_mark(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, any->as.rational.num);
        gc_mark(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        gc_mark(objspace, any->as.complex.real);
        gc_mark(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i;
            const long len = RSTRUCT_LEN(obj);
            const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);

            for (i=0; i<len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc) &&
                RSTRUCT_TRANSIENT_P(obj)) {
                rb_transient_heap_mark(obj, ptr);
            }
        }
        break;

      default:
#if GC_DEBUG
        rb_gcdebug_print_obj_condition((VALUE)obj);
#endif
        if (BUILTIN_TYPE(obj) == T_MOVED)  rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_NONE)   rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)any,
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
    }
}
/*
 * incremental: 0 -> not incremental (do all)
 * incremental: n -> mark at most `n' objects
 */
static inline int
gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;
#endif

    while (pop_mark_stack(mstack, &obj)) {
        if (obj == Qundef) continue; /* skip */

        if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        }
        gc_mark_children(objspace, obj);

#if GC_ENABLE_INCREMENTAL_MARK
        if (incremental) {
            if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            }
            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
            popped_count++;

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
                break;
            }
        }
        else {
            /* just ignore marking bits */
        }
#endif
    }

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
{
    return gc_mark_stacked_objects(objspace, TRUE, count);
}

static int
gc_mark_stacked_objects_all(rb_objspace_t *objspace)
{
    return gc_mark_stacked_objects(objspace, FALSE, 0);
}
#if PRINT_ROOT_TICKS
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];

static void
show_mark_ticks(void)
{
    int i;
    fprintf(stderr, "mark ticks result:\n");
    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        if (category) {
            fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
        }
        else {
            break;
        }
    }
}
#endif /* PRINT_ROOT_TICKS */
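/* Mark the root set: the VM, the finalizer table, the machine context of
 * the current execution context, registered global addresses
 * (global_list), end procs, the global and object-id tables. When
 * `categoryp` is non-NULL it is updated at every MARK_CHECKPOINT so a
 * crash during root marking can report which category was in progress. */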
static void
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
{
    struct gc_list *list;
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

#if PRINT_ROOT_TICKS
    tick_t start_tick = tick();
    int tick_count = 0;
    const char *prev_category = 0;

    if (mark_ticks_categories[0] == 0) {
        atexit(show_mark_ticks);
    }
#endif

    if (categoryp) *categoryp = "xxx";

    objspace->rgengc.parent_object = Qfalse;

#if PRINT_ROOT_TICKS
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
        tick_count++; \
    } \
    prev_category = category; \
    start_tick = tick(); \
} while (0)
#else /* PRINT_ROOT_TICKS */
#define MARK_CHECKPOINT_PRINT_TICK(category)
#endif

#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
} while (0)

    MARK_CHECKPOINT("vm");
    SET_STACK_END;
    rb_vm_mark(vm);
    if (vm->self) gc_mark(objspace, vm->self);

    MARK_CHECKPOINT("finalizers");
    mark_finalizer_tbl(objspace, finalizer_table);

    MARK_CHECKPOINT("machine_context");
    mark_current_machine_context(objspace, ec);

    /* mark protected global variables */
    MARK_CHECKPOINT("global_list");
    for (list = global_list; list; list = list->next) {
        gc_mark_maybe(objspace, *list->varptr);
    }

    MARK_CHECKPOINT("end_proc");
    rb_mark_end_proc();

    MARK_CHECKPOINT("global_tbl");
    rb_gc_mark_global_tbl();

    MARK_CHECKPOINT("object_id");
    rb_gc_mark(objspace->next_object_id);
    mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */

    if (stress_to_class) rb_gc_mark(stress_to_class);

    MARK_CHECKPOINT("finish");
#undef MARK_CHECKPOINT
}
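/* The RGENGC_CHECK_MODE >= 4 machinery below builds a reverse-reference
 * table (object -> reflist of referrers, with roots encoded through
 * MAKE_ROOTSIG) so marking inconsistencies can be reported together with
 * every referrer that should have kept the object alive. */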
#if RGENGC_CHECK_MODE >= 4

#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj)   ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj)  ((const char *)((VALUE)(obj) >> 1))

struct reflist {
    VALUE *list;
    int pos;
    int size;
};

static struct reflist *
reflist_create(VALUE obj)
{
    struct reflist *refs = xmalloc(sizeof(struct reflist));
    refs->size = 1;
    refs->list = ALLOC_N(VALUE, refs->size);
    refs->list[0] = obj;
    refs->pos = 1;
    return refs;
}

static void
reflist_destruct(struct reflist *refs)
{
    xfree(refs->list);
    xfree(refs);
}

static void
reflist_add(struct reflist *refs, VALUE obj)
{
    if (refs->pos == refs->size) {
        refs->size *= 2;
        SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
    }
    refs->list[refs->pos++] = obj;
}

static void
reflist_dump(struct reflist *refs)
{
    int i;
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj)) { /* root */
            fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
        }
        else {
            fprintf(stderr, "<%s>", obj_info(obj));
        }
        if (i+1 < refs->pos) fprintf(stderr, ", ");
    }
}

static int
reflist_referred_from_machine_context(struct reflist *refs)
{
    int i;
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
    }
    return 0;
}

struct allrefs {
    rb_objspace_t *objspace;
    /* a -> obj1
     * b -> obj1
     * c -> obj1
     * c -> obj2
     * d -> obj2
     * #=> {obj1 => [a, b, c], obj2 => [c, d]}
     */
    struct st_table *references;
    const char *category;
    VALUE root_obj;
    mark_stack_t mark_stack;
};

static int
allrefs_add(struct allrefs *data, VALUE obj)
{
    struct reflist *refs;
    st_data_t r;

    if (st_lookup(data->references, obj, &r)) {
        refs = (struct reflist *)r;
        reflist_add(refs, data->root_obj);
        return 0;
    }
    else {
        refs = reflist_create(data->root_obj);
        st_insert(data->references, obj, (st_data_t)refs);
        return 1;
    }
}

static void
allrefs_i(VALUE obj, void *ptr)
{
    struct allrefs *data = (struct allrefs *)ptr;

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    }
}

static void
allrefs_roots_i(VALUE obj, void *ptr)
{
    struct allrefs *data = (struct allrefs *)ptr;
    if (strlen(data->category) == 0) rb_bug("!!!");
    data->root_obj = MAKE_ROOTSIG(data->category);

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    }
}
#define PUSH_MARK_FUNC_DATA(v) do { \
    struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
    GET_RACTOR()->mfd = (v);

#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)

static st_table *
objspace_allrefs(rb_objspace_t *objspace)
{
    struct allrefs data;
    struct gc_mark_func_data_struct mfd;
    VALUE obj;
    int prev_dont_gc = dont_gc_val();
    dont_gc_on();

    data.objspace = objspace;
    data.references = st_init_numtable();
    init_mark_stack(&data.mark_stack);

    mfd.mark_func = allrefs_roots_i;
    mfd.data = &data;

    /* traverse root objects */
    PUSH_MARK_FUNC_DATA(&mfd);
    GET_RACTOR()->mfd = &mfd;
    gc_mark_roots(objspace, &data.category);
    POP_MARK_FUNC_DATA();

    /* traverse rest objects reachable from root objects */
    while (pop_mark_stack(&data.mark_stack, &obj)) {
        rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
    }
    free_stack_chunks(&data.mark_stack);

    dont_gc_set(prev_dont_gc);
    return data.references;
}

static int
objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
{
    struct reflist *refs = (struct reflist *)value;
    reflist_destruct(refs);
    return ST_CONTINUE;
}

static void
objspace_allrefs_destruct(struct st_table *refs)
{
    st_foreach(refs, objspace_allrefs_destruct_i, 0);
    st_free_table(refs);
}

#if RGENGC_CHECK_MODE >= 5
static int
allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = (VALUE)k;
    struct reflist *refs = (struct reflist *)v;
    fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
    reflist_dump(refs);
    fprintf(stderr, "\n");
    return ST_CONTINUE;
}

static void
allrefs_dump(rb_objspace_t *objspace)
{
    VALUE size = objspace->rgengc.allrefs_table->num_entries;
    fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
    st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
}
#endif

static int
gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = k;
    struct reflist *refs = (struct reflist *)v;
    rb_objspace_t *objspace = (rb_objspace_t *)ptr;

    /* object should be marked or oldgen */
    if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        reflist_dump(refs);

        if (reflist_referred_from_machine_context(refs)) {
            fprintf(stderr, " (marked from machine stack).\n");
            /* marked from machine context can be false positive */
        }
        else {
            objspace->rgengc.error_count++;
            fprintf(stderr, "\n");
        }
    }
    return ST_CONTINUE;
}

static void
gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
{
    size_t saved_malloc_increase = objspace->malloc_params.increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
#endif
    VALUE already_disabled = rb_objspace_gc_disable(objspace);

    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);

    if (checker_func) {
        st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
    }

    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);
#endif
        if (checker_name) rb_bug("%s: GC has problem.", checker_name);
    }

    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;

    if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
    objspace->malloc_params.increase = saved_malloc_increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
#endif
}
#endif /* RGENGC_CHECK_MODE >= 4 */
struct verify_internal_consistency_struct {
    rb_objspace_t *objspace;
    int err_count;
    size_t live_object_count;
    size_t zombie_object_count;

    VALUE parent;
    size_t old_object_count;
    size_t remembered_shady_count;
};

static void
check_generation_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    const VALUE parent = data->parent;

    if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));

    if (!RVALUE_OLD_P(child)) {
        if (!RVALUE_REMEMBERED(parent) &&
            !RVALUE_REMEMBERED(child) &&
            !RVALUE_UNCOLLECTIBLE(child)) {
            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
            data->err_count++;
        }
    }
}

static void
check_color_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    const VALUE parent = data->parent;

    if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
                obj_info(parent), obj_info(child));
        data->err_count++;
    }
}

static void
check_children_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    if (check_rvalue_consistency_force(child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
                obj_info(child), obj_info(data->parent));
        rb_print_backtrace(); /* C backtrace will help to debug */

        data->err_count++;
    }
}
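/* Walk every slot in [page_start, page_end) with the given stride,
 * accumulating live/old/shady/zombie counts into *data and re-checking
 * each live object's children with the callbacks above. ASAN poisoning
 * is lifted per slot and restored afterwards so free slots can be read. */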
static int
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
                              struct verify_internal_consistency_struct *data)
{
    VALUE obj;
    rb_objspace_t *objspace = data->objspace;

    for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

        if (is_live_object(objspace, obj)) {
            /* count objects */
            data->live_object_count++;
            data->parent = obj;

            /* Normally, we don't expect T_MOVED objects to be in the heap.
             * But they can stay alive on the stack, */
            if (!gc_object_moved_p(objspace, obj)) {
                /* moved slots don't have children */
                rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
            }

            /* check health of children */
            if (RVALUE_OLD_P(obj)) data->old_object_count++;
            if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;

            if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
                /* reachable objects from an oldgen object should be old or (young with remember) */
                data->parent = obj;
                rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
            }

            if (is_incremental_marking(objspace)) {
                if (RVALUE_BLACK_P(obj)) {
                    /* reachable objects from black objects should be black or grey objects */
                    data->parent = obj;
                    rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
                }
            }
        }
        else {
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
                data->zombie_object_count++;
            }
        }
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
    }

    return 0;
}
static int
gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    int i;
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;
    int stride = page->slot_size / sizeof(RVALUE);

    for (i=0; i<page->total_slots; i+=stride) {
        VALUE val = (VALUE)&page->start[i];
        void *poisoned = asan_poisoned_object_p(val);
        asan_unpoison_object(val, false);

        if (RBASIC(val) == 0) free_objects++;
        if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
        if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
            has_remembered_shady = TRUE;
        }
        if (RVALUE_PAGE_MARKING(page, val)) {
            has_remembered_old = TRUE;
            remembered_old_objects++;
        }

        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
            asan_poison_object(val);
        }
    }

    if (!is_incremental_marking(objspace) &&
        page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {

        for (i=0; i<page->total_slots; i++) {
            VALUE val = (VALUE)&page->start[i];
            if (RVALUE_PAGE_MARKING(page, val)) {
                fprintf(stderr, "marking -> %s\n", obj_info(val));
            }
        }
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
    }

    if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");
    }

    if (0) {
        /* free_slots may not equal to free_objects */
        if (page->free_slots != free_objects) {
            rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
        }
    }
    if (page->final_slots != zombie_objects) {
        rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
    }

    return remembered_old_objects;
}

static int
gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
{
    int remembered_old_objects = 0;
    struct heap_page *page = 0;

    list_for_each(head, page, page_node) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        RVALUE *p = page->freelist;
        while (p) {
            VALUE vp = (VALUE)p;
            VALUE prev = vp;
            asan_unpoison_object(vp, false);
            if (BUILTIN_TYPE(vp) != T_NONE) {
                fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
            }
            p = p->as.free.next;
            asan_poison_object(prev);
        }
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

        if (page->flags.has_remembered_objects == FALSE) {
            remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
        }
    }

    return remembered_old_objects;
}

static int
gc_verify_heap_pages(rb_objspace_t *objspace)
{
    int remembered_old_objects = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
        remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
    }
    return remembered_old_objects;
}
/*
 *  call-seq:
 *     GC.verify_internal_consistency -> nil
 *
 *  Verify internal consistency.
 *
 *  This method is implementation specific.
 *  Currently this method checks generational consistency
 *  if RGenGC is supported.
 */
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
    gc_verify_internal_consistency(&rb_objspace);
    return Qnil;
}
static void
gc_verify_internal_consistency_(rb_objspace_t *objspace)
{
    struct verify_internal_consistency_struct data = {0};

    data.objspace = objspace;
    gc_report(5, objspace, "gc_verify_internal_consistency: start\n");

    /* check relations */
    for (size_t i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short slot_size = page->slot_size;

        uintptr_t start = (uintptr_t)page->start;
        uintptr_t end = start + page->total_slots * slot_size;

        verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
    }

    if (data.err_count != 0) {
#if RGENGC_CHECK_MODE >= 5
        objspace->rgengc.error_count = data.err_count;
        gc_marks_check(objspace, NULL, NULL);
        allrefs_dump(objspace);
#endif
        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
    }

    /* check heap_page status */
    gc_verify_heap_pages(objspace);

    /* check counters */

    if (!is_lazy_sweeping(objspace) &&
        !finalizing &&
        ruby_single_main_ractor != NULL) {
        if (objspace_live_slots(objspace) != data.live_object_count) {
            fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
                    "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
                    heap_pages_final_slots, objspace->profile.total_freed_objects);
            rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace_live_slots(objspace), data.live_object_count);
        }
    }

    if (!is_marking(objspace)) {
        if (objspace->rgengc.old_objects != data.old_object_count) {
            rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace->rgengc.old_objects, data.old_object_count);
        }
        if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
            rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
        }
    }

    if (!finalizing) {
        size_t list_count = 0;

        {
            VALUE z = heap_pages_deferred_final;
            while (z) {
                list_count++;
                z = RZOMBIE(z)->next;
            }
        }

        if (heap_pages_final_slots != data.zombie_object_count ||
            heap_pages_final_slots != list_count) {

            rb_bug("inconsistent finalizing object count:\n"
                   "  expect %"PRIuSIZE"\n"
                   "  but    %"PRIuSIZE" zombies\n"
                   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
                   heap_pages_final_slots,
                   data.zombie_object_count,
                   list_count);
        }
    }

    gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
}

static void
gc_verify_internal_consistency(rb_objspace_t *objspace)
{
    RB_VM_LOCK_ENTER();
    {
        rb_vm_barrier(); // stop other ractors

        unsigned int prev_during_gc = during_gc;
        during_gc = FALSE; // stop gc here
        {
            gc_verify_internal_consistency_(objspace);
        }
        during_gc = prev_during_gc;
    }
    RB_VM_LOCK_LEAVE();
}
void
rb_gc_verify_internal_consistency(void)
{
    gc_verify_internal_consistency(&rb_objspace);
}

static VALUE
gc_verify_transient_heap_internal_consistency(VALUE dmy)
{
    rb_transient_heap_verify();
    return Qnil;
}
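/* Start the marking phase. A full mark resets the generational counters
 * and clears all mark/remember bitmaps; a minor mark instead seeds
 * marking from the remembered set and counts the already-marked
 * uncollectible objects as marked. */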
static void
gc_marks_start(rb_objspace_t *objspace, int full_mark)
{
    /* start marking */
    gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
    gc_mode_transition(objspace, gc_mode_marking);

    if (full_mark) {
#if GC_ENABLE_INCREMENTAL_MARK
        objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);

        if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
                       "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
                       "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
                       objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
#endif
        objspace->flags.during_minor_gc = FALSE;
        if (ruby_enable_autocompact) {
            objspace->flags.during_compacting |= TRUE;
        }
        objspace->profile.major_gc_count++;
        objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
        objspace->rgengc.old_objects = 0;
        objspace->rgengc.last_major_gc = objspace->profile.count;
        objspace->marked_slots = 0;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rgengc_mark_and_rememberset_clear(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
        }
    }
    else {
        objspace->flags.during_minor_gc = TRUE;
        objspace->marked_slots =
          objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
        objspace->profile.minor_gc_count++;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
        }
    }

    gc_mark_roots(objspace, NULL);

    gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
              full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
}
#if GC_ENABLE_INCREMENTAL_MARK
static inline void
gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
{
    if (bits) {
        do {
            if (bits & 1) {
                gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
                GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
                GC_ASSERT(RVALUE_MARKED((VALUE)p));
                gc_mark_children(objspace, (VALUE)p);
            }
            p += sizeof(RVALUE);
            bits >>= 1;
        } while (bits);
    }
}

static void
gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;

    list_for_each(&heap->pages, page, page_node) {
        bits_t *mark_bits = page->mark_bits;
        bits_t *wbun_bits = page->wb_unprotected_bits;
        RVALUE *p = page->start;
        size_t j;

        bits_t bits = mark_bits[0] & wbun_bits[0];
        bits >>= NUM_IN_PAGE(p);
        gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
        p += (BITS_BITLENGTH - NUM_IN_PAGE(p));

        for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
            bits_t bits = mark_bits[j] & wbun_bits[j];

            gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
            p += BITS_BITLENGTH;
        }
    }

    gc_mark_stacked_objects_all(objspace);
}
#endif

static struct heap_page *
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    struct heap_page *page = heap->pooled_pages;

    if (page) {
        heap->pooled_pages = page->free_next;
        heap_add_freepage(heap, page);
    }

    return page;
}
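/* Finish the marking phase. Under incremental marking this returns FALSE
 * when more work remains (pooled pages still exist, or re-marking the
 * roots grew the mark stack); on success it recomputes the old-object
 * limits and decides whether the next GC must be a major one. */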
static int
gc_marks_finish(rb_objspace_t *objspace)
{
#if GC_ENABLE_INCREMENTAL_MARK
    /* finish incremental GC */
    if (is_incremental_marking(objspace)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
            if (heap->pooled_pages) {
                heap_move_pooled_pages_to_free_pages(heap);
                gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
                return FALSE; /* continue marking phase */
            }
        }

        if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
            rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
                   mark_stack_size(&objspace->mark_stack));
        }

        gc_mark_roots(objspace, 0);

        if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
            gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
                      mark_stack_size(&objspace->mark_stack));
            return FALSE;
        }

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
        }
#endif

        objspace->flags.during_incremental_marking = FALSE;
        /* check children of all marked wb-unprotected objects */
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
        }
    }
#endif /* GC_ENABLE_INCREMENTAL_MARK */

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    if (is_full_marking(objspace)) {
        /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
        const double r = gc_params.oldobject_limit_factor;
        objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
        objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
    }

#if RGENGC_CHECK_MODE >= 4
    during_gc = FALSE;
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
    during_gc = TRUE;
#endif

    {
        /* decide full GC is needed or not */
        size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
        size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
        size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
        size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
        int full_marking = is_full_marking(objspace);
        const int r_cnt = GET_VM()->ractor.cnt;
        const int r_mul = r_cnt > 8 ? 8 : r_cnt; // upto 8

        GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);

        /* setup free-able page counts */
        if (max_free_slots < gc_params.heap_init_slots * r_mul) {
            max_free_slots = gc_params.heap_init_slots * r_mul;
        }

        if (sweep_slots > max_free_slots) {
            heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
        }
        else {
            heap_pages_freeable_pages = 0;
        }

        /* check free_min */
        if (min_free_slots < gc_params.heap_free_slots * r_mul) {
            min_free_slots = gc_params.heap_free_slots * r_mul;
        }

        if (sweep_slots < min_free_slots) {
            if (!full_marking) {
                if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
                    full_marking = TRUE;
                    /* do not update last_major_gc, because full marking is not done. */
                    /* goto increment; */
                }
                else {
                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
                    objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
                }
            }

            if (full_marking) {
                /* increment: */
                gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
                rb_size_pool_t *size_pool = &size_pools[0];
                size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));

                heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
            }
        }

        if (full_marking) {
            /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
            const double r = gc_params.oldobject_limit_factor;
            objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
            objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
        }

        if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
        }
        if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
        }
        if (RGENGC_FORCE_MAJOR_GC) {
            objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
        }

        gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
                  "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
                  "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
                  objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
                  objspace->rgengc.need_major_gc ? "major" : "minor");
    }

    rb_transient_heap_finish_marking();
    rb_ractor_finish_marking();

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);

    return TRUE;
}
#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_marks_step(rb_objspace_t *objspace, size_t slots)
{
    GC_ASSERT(is_marking(objspace));

    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        if (gc_marks_finish(objspace)) {
            /* finish */
            gc_sweep(objspace);
        }
    }
    if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
}
#endif

static void
gc_marks_rest(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_marks_rest\n");

#if GC_ENABLE_INCREMENTAL_MARK
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
    }
#endif

    if (is_incremental_marking(objspace)) {
        do {
            while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
        } while (gc_marks_finish(objspace) == FALSE);
    }
    else {
        gc_mark_stacked_objects_all(objspace);
        gc_marks_finish(objspace);
    }

    /* move to sweep */
    gc_sweep(objspace);
}
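/* Continue incremental marking from the allocation path: take free slots
 * from pooled or newly added pages and run one marking step with the
 * objspace->rincgc.step_slots budget; when no page can be provided,
 * finish all remaining marking at once with gc_marks_rest(). */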
static void
gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    GC_ASSERT(dont_gc_val() == FALSE);
#if GC_ENABLE_INCREMENTAL_MARK

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);

    {
        int slots = 0;
        const char *from;

        if (heap->pooled_pages) {
            while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
                struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
                slots += page->free_slots;
            }
            from = "pooled-pages";
        }
        else if (heap_increment(objspace, size_pool, heap)) {
            slots = heap->free_pages->free_slots;
            from = "incremented-pages";
        }

        if (slots > 0) {
            gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
                      slots, from);
            gc_marks_step(objspace, objspace->rincgc.step_slots);
        }
        else {
            gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
                      mark_stack_size(&objspace->mark_stack));
            gc_marks_rest(objspace);
        }
    }

    gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
#endif
}

static void
gc_marks(rb_objspace_t *objspace, int full_mark)
{
    gc_prof_mark_timer_start(objspace);

    /* setup marking */

    gc_marks_start(objspace, full_mark);
    if (!is_incremental_marking(objspace)) {
        gc_marks_rest(objspace);
    }

#if RGENGC_PROFILE > 0
    if (gc_prof_record(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->old_objects = objspace->rgengc.old_objects;
    }
#endif
    gc_prof_mark_timer_stop(objspace);
}
static void
gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
{
    if (level <= RGENGC_DEBUG) {
        char buf[1024];
        FILE *out = stderr;
        va_list args;
        const char *status = " ";

        if (during_gc) {
            status = is_full_marking(objspace) ? "+" : "-";
        }
        else {
            if (is_lazy_sweeping(objspace)) {
                status = "S";
            }
            if (is_incremental_marking(objspace)) {
                status = "M";
            }
        }

        va_start(args, fmt);
        vsnprintf(buf, 1024, fmt, args);
        va_end(args);

        fprintf(out, "%s|", status);
        fputs(buf, out);
    }
}
/* bit operations */

static int
rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
{
    return RVALUE_REMEMBERED(obj);
}

static int
rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *bits = &page->marking_bits[0];

    GC_ASSERT(!is_incremental_marking(objspace));

    if (MARKED_IN_BITMAP(bits, obj)) {
        return FALSE;
    }
    else {
        page->flags.has_remembered_objects = TRUE;
        MARK_IN_BITMAP(bits, obj);
        return TRUE;
    }
}

/* return FALSE if already remembered */
static int
rgengc_remember(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
              rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");

    check_rvalue_consistency(obj);

    if (RGENGC_CHECK_MODE) {
        if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
    }

#if RGENGC_PROFILE > 0
    if (!rgengc_remembered(objspace, obj)) {
        if (RVALUE_WB_UNPROTECTED(obj) == 0) {
            objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
        }
    }
#endif /* RGENGC_PROFILE > 0 */

    return rgengc_remembersetbits_set(objspace, obj);
}

static int
rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
{
    int result = rgengc_remembersetbits_get(objspace, obj);
    check_rvalue_consistency(obj);
    return result;
}

static int
rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
    return rgengc_remembered_sweep(objspace, obj);
}
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0
#endif

static inline void
rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
{
    if (bitset) {
        do {
            if (bitset & 1) {
                VALUE obj = (VALUE)p;
                gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
                GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
                GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));

                gc_mark_children(objspace, obj);
            }
            p += sizeof(RVALUE);
            bitset >>= 1;
        } while (bitset);
    }
}
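/* Scan the remembered set of a heap: for every page flagged as holding
 * remembered or uncollectible shady objects, combine the marking bits
 * with the (uncollectible & wb-unprotected) bits and re-mark each set
 * slot through rgengc_rememberset_mark_plane(). */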
static void
rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
{
    size_t j;
    struct heap_page *page = 0;
#if PROFILE_REMEMBERSET_MARK
    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");

    list_for_each(&heap->pages, page, page_node) {
        if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
            RVALUE *p = page->start;
            bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
            bits_t *marking_bits = page->marking_bits;
            bits_t *uncollectible_bits = page->uncollectible_bits;
            bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
#if PROFILE_REMEMBERSET_MARK
            if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
            else if (page->flags.has_remembered_objects) has_old++;
            else if (page->flags.has_uncollectible_shady_objects) has_shady++;
#endif
            for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
                bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
                marking_bits[j] = 0;
            }
            page->flags.has_remembered_objects = FALSE;

            bitset = bits[0];
            bitset >>= NUM_IN_PAGE(p);
            rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
            p += (BITS_BITLENGTH - NUM_IN_PAGE(p));

            for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
                bitset = bits[j];
                rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
                p += BITS_BITLENGTH;
            }
        }
#if PROFILE_REMEMBERSET_MARK
        else {
            skip++;
        }
#endif
    }

#if PROFILE_REMEMBERSET_MARK
    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
}
static void
rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;

    list_for_each(&heap->pages, page, page_node) {
        memset(&page->mark_bits[0],          0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->marking_bits[0],       0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->pinned_bits[0],        0, HEAP_PAGE_BITMAP_SIZE);
        page->flags.has_uncollectible_shady_objects = FALSE;
        page->flags.has_remembered_objects = FALSE;
    }
}
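/* Generational write barrier, used outside incremental marking when an
 * old object `a` gains a reference to a young object `b`: remember `a`
 * so that `b` is traced during the next minor mark. */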
NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));

static void
gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
        if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
    }

#if 1
    /* mark `a' and remember (default behavior) */
    if (!rgengc_remembered(objspace, a)) {
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            rgengc_remember(objspace, a);
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
    }
#else
    /* mark `b' and remember */
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
    if (RVALUE_WB_UNPROTECTED(b)) {
        gc_remember_unprotected(objspace, b);
    }
    else {
        RVALUE_AGE_SET_OLD(objspace, b);
        rgengc_remember(objspace, b);
    }

    gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
#endif

    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
}
#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}
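/* Incremental-marking write barrier: preserve the tri-color invariant
 * that a black object never references a white one, by greying the newly
 * referenced object (or remembering it when it is WB-unprotected). */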
NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));

static void
gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));

    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);
            }
        }
        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(b)) {
                gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
                RVALUE_AGE_SET_OLD(objspace, b);

                if (RVALUE_BLACK_P(b)) {
                    gc_grey(objspace, b);
                }
            }
            else {
                gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
                gc_remember_unprotected(objspace, b);
            }
        }

        if (UNLIKELY(objspace->flags.during_compacting)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
        }
    }
}
#else
#define gc_writebarrier_incremental(a, b, objspace)
#endif
void
rb_gc_writebarrier(VALUE a, VALUE b)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
    if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");

  retry:
    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
            // do nothing
        }
        else {
            gc_writebarrier_generational(a, b, objspace);
        }
    }
    else {
        bool retry = false;
        /* slow path */
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            if (is_incremental_marking(objspace)) {
                gc_writebarrier_incremental(a, b, objspace);
            }
            else {
                retry = true;
            }
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();

        if (retry) goto retry;
    }
    return;
}
void
rb_gc_writebarrier_unprotect(VALUE obj)
{
    if (RVALUE_WB_UNPROTECTED(obj)) {
        return;
    }
    else {
        rb_objspace_t *objspace = &rb_objspace;

        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
                  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");

        if (RVALUE_OLD_P(obj)) {
            gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
            RVALUE_DEMOTE(objspace, obj);
            gc_mark_set(objspace, obj);
            gc_remember_unprotected(objspace, obj);

#if RGENGC_PROFILE
            objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif /* RGENGC_PROFILE >= 2 */
#endif /* RGENGC_PROFILE */
        }
        else {
            RVALUE_AGE_RESET(obj);
        }

        RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
    }
}
/*
 * remember `obj' if needed.
 */
MJIT_FUNC_EXPORTED void
rb_gc_writebarrier_remember(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);
        }
    }
    else {
        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
        }
    }
}
static st_table *rgengc_unprotect_logging_table;

static int
rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
{
    fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
    return ST_CONTINUE;
}

static void
rgengc_unprotect_logging_exit_func(void)
{
    st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
}

void
rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
{
    VALUE obj = (VALUE)objptr;

    if (rgengc_unprotect_logging_table == 0) {
        rgengc_unprotect_logging_table = st_init_strtable();
        atexit(rgengc_unprotect_logging_exit_func);
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0) {
        char buff[0x100];
        st_data_t cnt = 1;
        char *ptr = buff;

        snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);

        if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
            cnt++;
        }
        else {
            ptr = (strdup)(buff);
            if (!ptr) rb_memerror();
        }
        st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
    }
}
void
rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
        if (!RVALUE_OLD_P(dest)) {
            MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
            RVALUE_AGE_RESET_RAW(dest);
        }
        else {
            RVALUE_DEMOTE(objspace, dest);
        }
    }

    check_rvalue_consistency(dest);
}

/* RGENGC analysis information */

VALUE
rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
{
    return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
}

VALUE
rb_obj_rgengc_promoted_p(VALUE obj)
{
    return RBOOL(OBJ_PROMOTED(obj));
}
size_t
rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
{
    size_t n = 0;
    static ID ID_marked;
    static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;

    if (!ID_marked) {
#define I(s) ID_##s = rb_intern(#s);
        I(marked);
        I(wb_protected);
        I(old);
        I(marking);
        I(uncollectible);
        I(pinned);
#undef I
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max)                   flags[n++] = ID_wb_protected;
    if (RVALUE_OLD_P(obj) && n<max)                                 flags[n++] = ID_old;
    if (RVALUE_UNCOLLECTIBLE(obj) && n<max)                         flags[n++] = ID_uncollectible;
    if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
    if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max)    flags[n++] = ID_marked;
    if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max)  flags[n++] = ID_pinned;
    return n;
}
void
rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
{
    for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
        rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];

        struct heap_page *page = cache->using_page;
        RVALUE *freelist = cache->freelist;
        RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);

        heap_page_freelist_append(page, freelist);

        cache->using_page = NULL;
        cache->freelist = NULL;
    }
}

void
rb_gc_force_recycle(VALUE obj)
{
    /* no-op */
}
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
#endif

void
rb_gc_register_mark_object(VALUE obj)
{
    if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
        return;

    RB_VM_LOCK_ENTER();
    {
        VALUE ary_ary = GET_VM()->mark_object_ary;
        VALUE ary = rb_ary_last(0, 0, ary_ary);

        if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
            ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
            rb_ary_push(ary_ary, ary);
        }

        rb_ary_push(ary, obj);
    }
    RB_VM_LOCK_LEAVE();
}

void
rb_gc_register_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp;

    tmp = ALLOC(struct gc_list);
    tmp->next = global_list;
    tmp->varptr = addr;
    global_list = tmp;
}

void
rb_gc_unregister_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp = global_list;

    if (tmp->varptr == addr) {
        global_list = tmp->next;
        xfree(tmp);
        return;
    }
    while (tmp->next) {
        if (tmp->next->varptr == addr) {
            struct gc_list *t = tmp->next;

            tmp->next = tmp->next->next;
            xfree(t);
            break;
        }
        tmp = tmp->next;
    }
}

void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}
enum {
    gc_stress_no_major,
    gc_stress_no_immediate_sweep,
    gc_stress_full_mark_after_malloc,
    gc_stress_max
};

#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))

static void
heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    if (!heap->free_pages) {
        if (!heap_increment(objspace, size_pool, heap)) {
            size_pool_allocatable_pages_set(objspace, size_pool, 1);
            heap_increment(objspace, size_pool, heap);
        }
    }
}
static int
ready_to_gc(rb_objspace_t *objspace)
{
    if (dont_gc_val() || during_gc || ruby_disable_gc) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
        return FALSE;
    }
    else {
        return TRUE;
    }
}
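/* Recompute malloc_limit after a GC: grow it when the bytes allocated
 * since the last GC exceeded the limit, otherwise shrink it slightly;
 * with RGENGC_ESTIMATE_OLDMALLOC, also maintain the oldmalloc counters
 * that can force the next GC to be a major one. */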
static void
gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
{
    gc_prof_set_malloc_info(objspace);
    {
        size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
        size_t old_limit = malloc_limit;

        if (inc > malloc_limit) {
            malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
            if (malloc_limit > gc_params.malloc_limit_max) {
                malloc_limit = gc_params.malloc_limit_max;
            }
        }
        else {
            malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
            if (malloc_limit < gc_params.malloc_limit_min) {
                malloc_limit = gc_params.malloc_limit_min;
            }
        }

        if (0) {
            if (old_limit != malloc_limit) {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
                        rb_gc_count(), old_limit, malloc_limit);
            }
            else {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
                        rb_gc_count(), malloc_limit);
            }
        }
    }

    /* reset oldmalloc info */
#if RGENGC_ESTIMATE_OLDMALLOC
    if (!full_mark) {
        if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
            objspace->rgengc.oldmalloc_increase_limit =
              (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);

            if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
            }
        }

        if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
                       rb_gc_count(),
                       objspace->rgengc.need_major_gc,
                       objspace->rgengc.oldmalloc_increase,
                       objspace->rgengc.oldmalloc_increase_limit,
                       gc_params.oldmalloc_limit_max);
    }
    else {
        /* major GC */
        objspace->rgengc.oldmalloc_increase = 0;

        if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
            objspace->rgengc.oldmalloc_increase_limit =
              (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
            if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
            }
        }
    }
#endif
}
static int
garbage_collect(rb_objspace_t *objspace, unsigned int reason)
{
    int ret;

    RB_VM_LOCK_ENTER();
    {
#if GC_PROFILE_MORE_DETAIL
        objspace->profile.prepare_time = getrusage_time();
#endif

        gc_rest(objspace);

#if GC_PROFILE_MORE_DETAIL
        objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif

        ret = gc_start(objspace, reason);
    }
    RB_VM_LOCK_LEAVE();

    return ret;
}
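/* The main GC driver. Chooses major vs. minor and incremental vs.
 * immediate behavior from `reason`, GC.stress settings, and the
 * accumulated need_major_gc flags, then runs the marking phase; the
 * sweeping phase is entered from gc_marks_rest()/gc_marks_step(). */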
static int
gc_start(rb_objspace_t *objspace, unsigned int reason)
{
    unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
#if GC_ENABLE_INCREMENTAL_MARK
    unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
#endif

    /* reason may be clobbered, later, so keep set immediate_sweep here */
    objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);

    /* Explicitly enable compaction (GC.compact) */
    objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);

    if (!heap_allocated_pages) return FALSE; /* heap is not ready */
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */

    GC_ASSERT(gc_mode(objspace) == gc_mode_none);
    GC_ASSERT(!is_lazy_sweeping(objspace));
    GC_ASSERT(!is_incremental_marking(objspace));

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_start, &lock_lev);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    if (ruby_gc_stressful) {
        int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;

        if ((flag & (1<<gc_stress_no_major)) == 0) {
            do_full_mark = TRUE;
        }

        objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
    }

    if (objspace->rgengc.need_major_gc) {
        reason |= objspace->rgengc.need_major_gc;
        do_full_mark = TRUE;
    }
    else if (RGENGC_FORCE_MAJOR_GC) {
        reason = GPR_FLAG_MAJOR_BY_FORCE;
        do_full_mark = TRUE;
    }

    objspace->rgengc.need_major_gc = GPR_FLAG_NONE;

    if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
        reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
    }

#if GC_ENABLE_INCREMENTAL_MARK
    if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
        objspace->flags.during_incremental_marking = FALSE;
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }
#endif

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;
    }

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
              reason,
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

#if USE_DEBUG_COUNTER
    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady,  reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force,  reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
#endif
    }
    else {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi,   reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
    }
#endif

    objspace->profile.count++;
    objspace->profile.latest_gc_info = reason;
    objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
    objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace, do_full_mark);
    rb_transient_heap_start_marking(do_full_mark);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
    GC_ASSERT(during_gc);

    gc_prof_timer_start(objspace);
    {
        gc_marks(objspace, do_full_mark);
    }
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, gc_enter_event_start, &lock_lev);
    return TRUE;
}
static void
gc_rest(rb_objspace_t *objspace)
{
    int marking = is_incremental_marking(objspace);
    int sweeping = is_lazy_sweeping(objspace);

    if (marking || sweeping) {
        unsigned int lock_lev;
        gc_enter(objspace, gc_enter_event_rest, &lock_lev);

        if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);

        if (is_incremental_marking(objspace)) {
            gc_marks_rest(objspace);
        }
        if (is_lazy_sweeping(objspace)) {
            gc_sweep_rest(objspace);
        }
        gc_exit(objspace, gc_enter_event_rest, &lock_lev);
    }
}

struct objspace_and_reason {
    rb_objspace_t *objspace;
    unsigned int reason;
};
static void
gc_current_status_fill(rb_objspace_t *objspace, char *buff)
{
    int i = 0;
    if (is_marking(objspace)) {
        buff[i++] = 'M';
        if (is_full_marking(objspace))        buff[i++] = 'F';
#if GC_ENABLE_INCREMENTAL_MARK
        if (is_incremental_marking(objspace)) buff[i++] = 'I';
#endif
    }
    else if (is_sweeping(objspace)) {
        buff[i++] = 'S';
        if (is_lazy_sweeping(objspace))       buff[i++] = 'L';
    }
    else {
        buff[i++] = 'N';
    }
    buff[i] = '\0';
}

static const char *
gc_current_status(rb_objspace_t *objspace)
{
    static char buff[0x10];
    gc_current_status_fill(objspace, buff);
    return buff;
}
#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];

static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_count++;
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);

#if 1
        /* [last mutator time] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;
#else
        /* [enter_tick] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
#endif
    }
}
#else /* PRINT_ENTER_EXIT_TICK */
static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    /* null */
}
#endif /* PRINT_ENTER_EXIT_TICK */
static const char *
gc_enter_event_cstr(enum gc_enter_event event)
{
    switch (event) {
      case gc_enter_event_start:          return "start";
      case gc_enter_event_mark_continue:  return "mark_continue";
      case gc_enter_event_sweep_continue: return "sweep_continue";
      case gc_enter_event_rest:           return "rest";
      case gc_enter_event_finalizer:      return "finalizer";
      case gc_enter_event_rb_memerror:    return "rb_memerror";
    }
    return NULL;
}

static void
gc_enter_count(enum gc_enter_event event)
{
    switch (event) {
      case gc_enter_event_start:          RB_DEBUG_COUNTER_INC(gc_enter_start); break;
      case gc_enter_event_mark_continue:  RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
      case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
      case gc_enter_event_rest:           RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
      case gc_enter_event_finalizer:      RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
      case gc_enter_event_rb_memerror:    /* nothing */ break;
    }
}
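
/* The clock helpers below only sample process CPU time when total-time
 * measurement is enabled (MEASURE_GC), and only for the enter events that
 * represent actual collection work; finalizers and rb_memerror are not
 * timed. */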
#ifndef MEASURE_GC
#define MEASURE_GC (objspace->flags.measure_gc)
#endif

static bool
gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
{
    if (!MEASURE_GC) return false;

    switch (event) {
      case gc_enter_event_start:
      case gc_enter_event_mark_continue:
      case gc_enter_event_sweep_continue:
      case gc_enter_event_rest:
        return true;

      default:
        // case gc_enter_event_finalizer:
        // case gc_enter_event_rb_memerror:
        return false;
    }
}
static bool current_process_time(struct timespec *ts);

static void
gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
{
    if (gc_enter_event_measure_p(objspace, event)) {
        if (!current_process_time(&objspace->profile.start_time)) {
            objspace->profile.start_time.tv_sec = 0;
            objspace->profile.start_time.tv_nsec = 0;
        }
    }
}

static void
gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
{
    if (gc_enter_event_measure_p(objspace, event)) {
        struct timespec end_time;

        if ((objspace->profile.start_time.tv_sec > 0 ||
             objspace->profile.start_time.tv_nsec > 0) &&
            current_process_time(&end_time)) {

            if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
                /* ignore measurements that went backwards */
            }
            else {
                uint64_t ns =
                    (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
                    (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
                objspace->profile.total_time_ns += ns;
            }
        }
    }
}
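
/* gc_enter() and gc_exit() bracket every piece of GC work.  gc_enter() takes
 * the VM lock (recording the lock level in *lock_lev so gc_exit() can release
 * to the same depth), stops other ractors where needed, flips during_gc on,
 * and fires the GC_ENTER event hook; gc_exit() undoes all of this in reverse
 * order. */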
static void
gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    RB_VM_LOCK_ENTER_LEV(lock_lev);

    gc_enter_clock(objspace, event);

    switch (event) {
      case gc_enter_event_rest:
        if (!is_marking(objspace)) break;
        // fall through
      case gc_enter_event_start:
      case gc_enter_event_mark_continue:
        // stop other ractors
        rb_vm_barrier();
        break;
      default:
        break;
    }

    gc_enter_count(event);
    if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    mjit_gc_start_hook();

    during_gc = TRUE;
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}

static void
gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    GC_ASSERT(during_gc != 0);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    during_gc = FALSE;

    mjit_gc_exit_hook();
    gc_exit_clock(objspace, event);
    RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
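
/* garbage_collect_with_gvl() below lets malloc-triggered GC run from a native
 * thread that has released the GVL: if the caller already holds the GVL it
 * collects directly, otherwise it re-acquires the GVL via
 * rb_thread_call_with_gvl(), using the objspace_and_reason struct to pass the
 * arguments through the void* callback interface. */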
static void *
gc_with_gvl(void *ptr)
{
    struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}

static int
garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
{
    if (dont_gc_val()) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace, reason);
    }
    else {
        if (ruby_native_thread_p()) {
            struct objspace_and_reason oar;
            oar.objspace = objspace;
            oar.reason = reason;
            return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}
static VALUE
gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_METHOD);

    /* For now, compact implies full mark / sweep, so ignore other flags */
    if (RTEST(compact)) {
        /* If this is not MinGW or Windows and we don't have mmap, we cannot
         * use mprotect for the read barrier, so we must disable compaction. */
#if !defined(__MINGW32__) && !defined(_WIN32)
        if (!USE_MMAP_ALIGNED_ALLOC) {
            rb_raise(rb_eNotImpError, "Compaction isn't available on this platform");
        }
#endif

        reason |= GPR_FLAG_COMPACT;
    }
    else {
        if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
        if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
static int
gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(!SPECIAL_CONST_P(obj));

    switch (BUILTIN_TYPE(obj)) {
      case T_NONE:
      case T_NIL:
      case T_MOVED:
      case T_ZOMBIE:
        return FALSE;
      case T_SYMBOL:
        if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
            return FALSE;
        }
        /* fall through */
      case T_STRING:
      case T_OBJECT:
      case T_FLOAT:
      case T_IMEMO:
      case T_ARRAY:
      case T_BIGNUM:
      case T_ICLASS:
      case T_MODULE:
      case T_REGEXP:
      case T_DATA:
      case T_MATCH:
      case T_STRUCT:
      case T_HASH:
      case T_FILE:
      case T_COMPLEX:
      case T_RATIONAL:
      case T_NODE:
      case T_CLASS:
        if (FL_TEST(obj, FL_FINALIZE)) {
            /* The finalizer table is a numtable. It looks up objects by address.
             * We can't mark the keys in the finalizer table because that would
             * prevent the objects from being collected.  This check prevents
             * objects that are keys in the finalizer table from being moved
             * without directly pinning them. */
            if (st_is_member(finalizer_table, obj)) {
                return FALSE;
            }
        }
        GC_ASSERT(RVALUE_MARKED(obj));
        GC_ASSERT(!RVALUE_PINNED(obj));

        return TRUE;

      default:
        rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
        break;
    }

    return FALSE;
}
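
/* gc_move() below relocates one object during compaction in four steps: save
 * the mark/WB-unprotected/uncollectible/marking bits of the source slot,
 * clear them, memcpy the payload into the destination slot and re-apply the
 * saved bits there, then turn the source slot into a T_MOVED forwarding cell
 * whose destination field lets rb_gc_location() resolve stale references
 * later. */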
static VALUE
gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size)
{
    int marked;
    int wb_unprotected;
    int uncollectible;
    int marking;
    RVALUE *dest = (RVALUE *)free;
    RVALUE *src = (RVALUE *)scan;

    gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);

    GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));

    /* Save off bits for current object. */
    marked = rb_objspace_marked_object_p((VALUE)src);
    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    marking = RVALUE_MARKING((VALUE)src);

    /* Clear bits for eventual T_MOVED */
    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);

    if (FL_TEST((VALUE)src, FL_EXIVAR)) {
        /* Same deal as below.  Generic ivars are held in st tables.
         * Resizing the table could cause a GC to happen and we can't allow it. */
        VALUE already_disabled = rb_gc_disable_no_rest();
        rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
    }

    st_data_t srcid = (st_data_t)src, id;

    /* If the source object's object_id has been seen, we need to update
     * the object to object id mapping. */
    if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
        /* Inserting in the st table can cause the GC to run.  We need to
         * prevent re-entry into the GC since `gc_move` is running in the GC,
         * so temporarily disable the GC around the st table mutation. */
        VALUE already_disabled = rb_gc_disable_no_rest();
        st_delete(objspace->obj_to_id_tbl, &srcid, 0);
        st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
    }

    /* Move the object */
    memcpy(dest, src, slot_size);
    memset(src, 0, slot_size);

    /* Set bits for object in new location */
    if (marking) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
    }

    if (marked) {
        MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }

    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }

    /* Assign forwarding address */
    src->as.moved.flags = T_MOVED;
    src->as.moved.dummy = Qundef;
    src->as.moved.destination = (VALUE)dest;
    GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);

    return (VALUE)src;
}
static int
compare_free_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;
}
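
/* compare_free_slots() sorts ascending by free_slots, so fully occupied pages
 * sort first; gc_sort_heap_by_empty_slots() below relies on that ordering
 * when it rebuilds the page list so the emptiest pages are drained first by
 * the compactor. */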
static void
gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
{
    for (int j = 0; j < SIZE_POOL_COUNT; j++) {
        rb_size_pool_t *size_pool = &size_pools[j];

        size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
        struct heap_page *page = 0, **page_list = malloc(size);
        size_t i = 0;

        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            page_list[i++] = page;
        }

        GC_ASSERT((size_t)i == total_pages);

        /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
         * head of the list, so empty pages will end up at the start of the heap */
        ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);

        /* Reset the eden heap */
        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);

        for (i = 0; i < total_pages; i++) {
            list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
            if (page_list[i]->free_slots != 0) {
                heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
            }
        }

        free(page_list);
    }
}
static void
gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
{
    long i, len;

    if (FL_TEST(v, ELTS_SHARED))
        return;

    len = RARRAY_LEN(v);
    if (len > 0) {
        VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
        for (i = 0; i < len; i++) {
            UPDATE_IF_MOVED(objspace, ptr[i]);
        }
    }
}

static void
gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
{
    VALUE *ptr = ROBJECT_IVPTR(v);

    uint32_t i, len = ROBJECT_NUMIV(v);
    for (i = 0; i < len; i++) {
        UPDATE_IF_MOVED(objspace, ptr[i]);
    }
}
static int
hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*key)) {
        *key = rb_gc_location((VALUE)*key);
    }

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace;

    objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)key)) {
        return ST_REPLACE;
    }

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CONTINUE;
}

static int
hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace;

    objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CONTINUE;
}

static void
gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;

    if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
        rb_raise(rb_eRuntimeError, "hash modified during iteration");
    }
}

static void
gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;

    if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
        rb_raise(rb_eRuntimeError, "hash modified during iteration");
    }
}

/* Update MOVED references in an st_table */
void
rb_gc_update_tbl_refs(st_table *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_update_table_refs(objspace, ptr);
}

static void
gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
{
    rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
}
static void
gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
{
    rb_method_definition_t *def = me->def;

    UPDATE_IF_MOVED(objspace, me->owner);
    UPDATE_IF_MOVED(objspace, me->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
            }
            TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            UPDATE_IF_MOVED(objspace, def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
            break;
          case VM_METHOD_TYPE_ALIAS:
            TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
            break;
          case VM_METHOD_TYPE_REFINED:
            TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
            UPDATE_IF_MOVED(objspace, def->body.refined.owner);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}
static void
gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
{
    long i;

    for (i=0; i<n; i++) {
        UPDATE_IF_MOVED(objspace, values[i]);
    }
}
static void
gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            rb_env_t *env = (rb_env_t *)obj;
            if (LIKELY(env->ep)) {
                // just after newobj() can be NULL here.
                TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
                UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
                gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
            }
        }
        break;
      case imemo_cref:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
        TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
        break;
      case imemo_svar:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
        break;
      case imemo_throw_data:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        break;
      case imemo_ifunc:
        break;
      case imemo_memo:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
        break;
      case imemo_ment:
        gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        break;
      case imemo_iseq:
        rb_iseq_update_references((rb_iseq_t *)obj);
        break;
      case imemo_ast:
        rb_ast_update_references((rb_ast_t *)obj);
        break;
      case imemo_callcache:
        {
            const struct rb_callcache *cc = (const struct rb_callcache *)obj;
            if (cc->klass) {
                UPDATE_IF_MOVED(objspace, cc->klass);
                if (!is_live_object(objspace, cc->klass)) {
                    *((VALUE *)(&cc->klass)) = (VALUE)0;
                }
            }

            if (cc->cme_) {
                TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
                if (!is_live_object(objspace, (VALUE)cc->cme_)) {
                    *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
                }
            }
        }
        break;
      case imemo_constcache:
        {
            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
            UPDATE_IF_MOVED(objspace, ice->value);
        }
        break;
      case imemo_parser_strterm:
      case imemo_tmpbuf:
      case imemo_callinfo:
        break;
      default:
        rb_bug("not reachable %d", imemo_type(obj));
        break;
    }
}
static enum rb_id_table_iterator_result
check_id_table_move(ID id, VALUE value, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ID_TABLE_REPLACE;
    }

    return ID_TABLE_CONTINUE;
}
/* Returns the new location of an object, if it moved.  Otherwise returns
 * the existing location. */
VALUE
rb_gc_location(VALUE value)
{
    VALUE destination;

    if (!SPECIAL_CONST_P(value)) {
        void *poisoned = asan_poisoned_object_p(value);
        asan_unpoison_object(value, false);

        if (BUILTIN_TYPE(value) == T_MOVED) {
            destination = (VALUE)RMOVED(value)->destination;
            GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
        }
        else {
            destination = value;
        }

        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
            asan_poison_object(value);
        }
    }
    else {
        destination = value;
    }

    return destination;
}
static enum rb_id_table_iterator_result
update_id_table(ID *key, VALUE * value, void *data, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ID_TABLE_CONTINUE;
}

static void
update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (tbl) {
        rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
    }
}
static enum rb_id_table_iterator_result
update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));

    if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
        ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
    }

    for (int i=0; i<ccs->len; i++) {
        if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
            ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
        }
        if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
            ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
        }
    }

    // do not replace
    return ID_TABLE_CONTINUE;
}

static void
update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
    }
}
static enum rb_id_table_iterator_result
update_cvc_tbl_i(ID id, VALUE cvc_entry, void *data)
{
    struct rb_cvar_class_tbl_entry *entry;

    entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;

    entry->class_value = rb_gc_location(entry->class_value);

    return ID_TABLE_CONTINUE;
}

static void
update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_with_replace(tbl, update_cvc_tbl_i, 0, objspace);
    }
}
static enum rb_id_table_iterator_result
update_const_table(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    rb_objspace_t * objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, ce->value)) {
        ce->value = rb_gc_location(ce->value);
    }

    if (gc_object_moved_p(objspace, ce->file)) {
        ce->file = rb_gc_location(ce->file);
    }

    return ID_TABLE_CONTINUE;
}

static void
update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (!tbl) return;
    rb_id_table_foreach_values(tbl, update_const_table, objspace);
}
static void
update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
{
    while (entry) {
        UPDATE_IF_MOVED(objspace, entry->klass);
        entry = entry->next;
    }
}

static int
update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
    UPDATE_IF_MOVED(objspace, ent->class_value);
    return ST_CONTINUE;
}

static void
update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
{
    UPDATE_IF_MOVED(objspace, ext->origin_);
    UPDATE_IF_MOVED(objspace, ext->refined_class);
    update_subclass_entries(objspace, ext->subclasses);

    // ext->iv_index_tbl
    if (ext->iv_index_tbl) {
        st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
    }
}
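
/* gc_update_object_references() below is the per-object half of compaction's
 * reference-updating pass: for each Ruby type it rewrites every VALUE field
 * that may point at a T_MOVED slot, using UPDATE_IF_MOVED to swap in the
 * forwarding address recorded by gc_move(). */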
static void
gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
{
    RVALUE *any = RANY(obj);

    gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        if (!RCLASS_EXT(obj)) break;
        update_m_tbl(objspace, RCLASS_M_TBL(obj));
        update_cc_tbl(objspace, obj);
        update_cvc_tbl(objspace, obj);

        gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));

        update_class_ext(objspace, RCLASS_EXT(obj));
        update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        break;

      case T_ICLASS:
        if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
                !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
            update_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        if (!RCLASS_EXT(obj)) break;
        if (RCLASS_IV_TBL(obj)) {
            gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
        }
        update_class_ext(objspace, RCLASS_EXT(obj));
        update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        update_cc_tbl(objspace, obj);
        break;

      case T_IMEMO:
        gc_ref_update_imemo(objspace, obj);
        return;

      case T_NIL:
      case T_FIXNUM:
      case T_NODE:
      case T_MOVED:
      case T_NONE:
        /* These can't move */
        return;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
        }
        else {
            gc_ref_update_array(objspace, obj);
        }
        break;

      case T_HASH:
        gc_ref_update_hash(objspace, obj);
        UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
#if USE_RVARGC
            VALUE orig_shared = any->as.string.as.heap.aux.shared;
#endif
            UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
#if USE_RVARGC
            VALUE shared = any->as.string.as.heap.aux.shared;
            if (STR_EMBED_P(shared)) {
                size_t offset = (size_t)any->as.string.as.heap.ptr - (size_t)RSTRING(orig_shared)->as.embed.ary;
                GC_ASSERT(any->as.string.as.heap.ptr >= RSTRING(orig_shared)->as.embed.ary);
                GC_ASSERT(offset <= (size_t)RSTRING(shared)->as.embed.len);
                any->as.string.as.heap.ptr = RSTRING(shared)->as.embed.ary + offset;
            }
#endif
        }
        break;

      case T_DATA:
        /* Call the compaction callback, if it exists */
        {
            void *const ptr = DATA_PTR(obj);
            if (ptr) {
                if (RTYPEDDATA_P(obj)) {
                    RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
                    if (compact_func) (*compact_func)(ptr);
                }
            }
        }
        break;

      case T_OBJECT:
        gc_ref_update_object(objspace, obj);
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
        }
        break;
      case T_REGEXP:
        UPDATE_IF_MOVED(objspace, any->as.regexp.src);
        break;

      case T_SYMBOL:
        if (DYNAMIC_SYM_P((VALUE)any)) {
            UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
        }
        break;

      case T_FLOAT:
      case T_BIGNUM:
        break;

      case T_MATCH:
        UPDATE_IF_MOVED(objspace, any->as.match.regexp);

        if (any->as.match.str) {
            UPDATE_IF_MOVED(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        UPDATE_IF_MOVED(objspace, any->as.rational.num);
        UPDATE_IF_MOVED(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        UPDATE_IF_MOVED(objspace, any->as.complex.real);
        UPDATE_IF_MOVED(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i, len = RSTRUCT_LEN(obj);
            VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);

            for (i = 0; i < len; i++) {
                UPDATE_IF_MOVED(objspace, ptr[i]);
            }
        }
        break;
      default:
#if GC_DEBUG
        rb_gcdebug_print_obj_condition((VALUE)obj);
        rb_obj_info_dump(obj);
        rb_bug("unreachable");
#endif
        break;
    }

    UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);

    gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
}
static int
gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
{
    VALUE v = (VALUE)vstart;
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    page->flags.has_uncollectible_shady_objects = FALSE;
    page->flags.has_remembered_objects = FALSE;

    /* For each object on the page */
    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        switch (BUILTIN_TYPE(v)) {
          case T_NONE:
          case T_MOVED:
          case T_ZOMBIE:
            break;
          default:
            if (RVALUE_WB_UNPROTECTED(v)) {
                page->flags.has_uncollectible_shady_objects = TRUE;
            }
            if (RVALUE_PAGE_MARKING(page, v)) {
                page->flags.has_remembered_objects = TRUE;
            }
            if (page->flags.before_sweep) {
                if (RVALUE_MARKED(v)) {
                    gc_update_object_references(objspace, v);
                }
            }
            else {
                gc_update_object_references(objspace, v);
            }
        }

        if (poisoned) {
            asan_poison_object(v);
        }
    }

    return 0;
}
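
/* gc_update_references() below runs the whole update pass: every slot of
 * every eden page is visited via gc_ref_update(), then the VM-global tables
 * (VM roots, transient heap, global symbols, object-id maps, finalizer
 * table) are rewritten, since they can also hold references to moved
 * objects. */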
extern rb_symbols_t ruby_global_symbols;
#define global_symbols ruby_global_symbols

static void
gc_update_references(rb_objspace_t *objspace)
{
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    struct heap_page *page = NULL;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        bool should_set_mark_bits = TRUE;
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

        list_for_each(&heap->pages, page, page_node) {
            uintptr_t start = (uintptr_t)page->start;
            uintptr_t end = start + (page->total_slots * size_pool->slot_size);

            gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
            if (page == heap->sweeping_page) {
                should_set_mark_bits = FALSE;
            }
            if (should_set_mark_bits) {
                gc_setup_mark_bits(page);
            }
        }
    }
    rb_vm_update_references(vm);
    rb_transient_heap_update_references();
    rb_gc_update_global_tbl();
    global_symbols.ids = rb_gc_location(global_symbols.ids);
    global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
    gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
    gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
    gc_update_table_refs(objspace, global_symbols.str_sym);
    gc_update_table_refs(objspace, finalizer_table);
}
static VALUE
gc_compact_stats(rb_execution_context_t *ec, VALUE self)
{
    size_t i;
    rb_objspace_t *objspace = &rb_objspace;
    VALUE h = rb_hash_new();
    VALUE considered = rb_hash_new();
    VALUE moved = rb_hash_new();

    for (i=0; i<T_MASK; i++) {
        if (objspace->rcompactor.considered_count_table[i]) {
            rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
        }

        if (objspace->rcompactor.moved_count_table[i]) {
            rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
        }
    }

    rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
    rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);

    return h;
}
static void
root_obj_check_moved_i(const char *category, VALUE obj, void *data)
{
    if (gc_object_moved_p(&rb_objspace, obj)) {
        rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
    }
}

static void
reachable_object_check_moved_i(VALUE ref, void *data)
{
    VALUE parent = (VALUE)data;
    if (gc_object_moved_p(&rb_objspace, ref)) {
        rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
    }
}

static int
heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
{
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(&rb_objspace, v)) {
            /* Moved object still on the heap, something may have a reference. */
        }
        else {
            void *poisoned = asan_poisoned_object_p(v);
            asan_unpoison_object(v, false);

            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
              case T_ZOMBIE:
                break;
              default:
                if (!rb_objspace_garbage_object_p(v)) {
                    rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
                }
            }

            if (poisoned) {
                GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
                asan_poison_object(v);
            }
        }
    }

    return 0;
}
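
/* gc_compact() backs GC.compact: a full, immediate, compacting GC followed
 * by the :considered/:moved statistics.  gc_verify_compaction_references()
 * is the stress-test variant; its two knobs (double_heap, toward_empty) grow
 * the heap and bias movement toward empty pages so that any reference the
 * updating pass missed is likely to surface in the moved-object checks. */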
static VALUE
gc_compact(rb_execution_context_t *ec, VALUE self)
{
    /* Run GC with compaction enabled */
    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);

    return gc_compact_stats(ec, self);
}

static VALUE
gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
{
    rb_objspace_t *objspace = &rb_objspace;

    /* Clear the heap. */
    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);

    RB_VM_LOCK_ENTER();
    {
        gc_rest(objspace);

        if (RTEST(double_heap)) {
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
                heap_add_pages(objspace, size_pool, heap, heap->total_pages);
            }
        }

        if (RTEST(toward_empty)) {
            gc_sort_heap_by_empty_slots(objspace);
        }
    }
    RB_VM_LOCK_LEAVE();

    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);

    objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
    objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);

    return gc_compact_stats(ec, self);
}
void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = GPR_DEFAULT_REASON;
    garbage_collect(objspace, reason);
}

int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}
#if RGENGC_PROFILE >= 2

static const char *type_name(int type, VALUE obj);

static void
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    VALUE result = rb_hash_new_with_size(T_MASK);
    int i;
    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);
        rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
    }
    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
}
#endif

size_t
rb_gc_count(void)
{
    return rb_objspace.profile.count;
}

static VALUE
gc_count(rb_execution_context_t *ec, VALUE self)
{
    return SIZET2NUM(rb_gc_count());
}
static VALUE
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    VALUE hash = Qnil, key = Qnil;
    unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    if (NIL_P(sym_major_by)) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(major_by);
        S(gc_by);
        S(immediate_sweep);
        S(have_finalizer);
        S(state);

        S(stress);
        S(nofree);
        S(oldgen);
        S(shady);
        S(force);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc);
#endif
        S(newobj);
        S(malloc);
        S(method);
        S(capi);

        S(none);
        S(marking);
        S(sweeping);
#undef S
    }

#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

    VALUE major_by =
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
        Qnil;
    SET(major_by, major_by);

    SET(gc_by,
        (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
        (flags & GPR_FLAG_MALLOC) ? sym_malloc :
        (flags & GPR_FLAG_METHOD) ? sym_method :
        (flags & GPR_FLAG_CAPI)   ? sym_capi :
        (flags & GPR_FLAG_STRESS) ? sym_stress :
        Qnil
    );

    SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
    SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));

    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
    }
#undef SET

    if (!NIL_P(key)) {/* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return hash;
}

VALUE
rb_gc_latest_gc_info(VALUE key)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_info_decode(objspace, key, 0);
}

static VALUE
gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    return gc_info_decode(objspace, arg, 0);
}
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_heap_allocated_pages,
    gc_stat_sym_heap_sorted_length,
    gc_stat_sym_heap_allocatable_pages,
    gc_stat_sym_heap_available_slots,
    gc_stat_sym_heap_live_slots,
    gc_stat_sym_heap_free_slots,
    gc_stat_sym_heap_final_slots,
    gc_stat_sym_heap_marked_slots,
    gc_stat_sym_heap_eden_pages,
    gc_stat_sym_heap_tomb_pages,
    gc_stat_sym_total_allocated_pages,
    gc_stat_sym_total_freed_pages,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_freed_objects,
    gc_stat_sym_malloc_increase_bytes,
    gc_stat_sym_malloc_increase_bytes_limit,
    gc_stat_sym_minor_gc_count,
    gc_stat_sym_major_gc_count,
    gc_stat_sym_compact_count,
    gc_stat_sym_read_barrier_faults,
    gc_stat_sym_total_moved_objects,
    gc_stat_sym_remembered_wb_unprotected_objects,
    gc_stat_sym_remembered_wb_unprotected_objects_limit,
    gc_stat_sym_old_objects,
    gc_stat_sym_old_objects_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,
#endif
#if RGENGC_PROFILE
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
#endif
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];
static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(heap_allocated_pages);
        S(heap_sorted_length);
        S(heap_allocatable_pages);
        S(heap_available_slots);
        S(heap_live_slots);
        S(heap_free_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(heap_eden_pages);
        S(heap_tomb_pages);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
        S(minor_gc_count);
        S(major_gc_count);
        S(compact_count);
        S(read_barrier_faults);
        S(total_moved_objects);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
#endif
#if RGENGC_PROFILE
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef S
    }
}
static size_t
gc_stat_internal(VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->profile.count);
    SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) /* ns -> ms */)); // TODO: UINT64T2NUM

    /* implementation dependent counters */
    SET(heap_allocated_pages, heap_allocated_pages);
    SET(heap_sorted_length, heap_pages_sorted_length);
    SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    SET(heap_final_slots, heap_pages_final_slots);
    SET(heap_marked_slots, objspace->marked_slots);
    SET(heap_eden_pages, heap_eden_total_pages(objspace));
    SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
    SET(total_allocated_pages, objspace->profile.total_allocated_pages);
    SET(total_freed_pages, objspace->profile.total_freed_pages);
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_freed_objects, objspace->profile.total_freed_objects);
    SET(malloc_increase_bytes, malloc_increase);
    SET(malloc_increase_bytes_limit, malloc_limit);
    SET(minor_gc_count, objspace->profile.minor_gc_count);
    SET(major_gc_count, objspace->profile.major_gc_count);
    SET(compact_count, objspace->profile.compact_count);
    SET(read_barrier_faults, objspace->profile.read_barrier_faults);
    SET(total_moved_objects, objspace->rcompactor.total_moved);
    SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
    SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
    SET(old_objects, objspace->rgengc.old_objects);
    SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif

#if RGENGC_PROFILE
    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
    if (hash != Qnil) {
        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
    }
#endif

    return 0;
}
static VALUE
gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
{
    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (SYMBOL_P(arg)) {
        size_t value = gc_stat_internal(arg);
        return SIZET2NUM(value);
    }
    else if (RB_TYPE_P(arg, T_HASH)) {
        // ok
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    gc_stat_internal(arg);
    return arg;
}

size_t
rb_gc_stat(VALUE key)
{
    if (SYMBOL_P(key)) {
        size_t value = gc_stat_internal(key);
        return value;
    }
    else {
        gc_stat_internal(key);
        return 0;
    }
}
static VALUE
gc_stress_get(rb_execution_context_t *ec, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress_mode;
}

static void
gc_stress_set(rb_objspace_t *objspace, VALUE flag)
{
    objspace->flags.gc_stressful = RTEST(flag);
    objspace->gc_stress_mode = flag;
}

static VALUE
gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_stress_set(objspace, flag);
    return flag;
}

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_enable(objspace);
}

VALUE
rb_objspace_gc_enable(rb_objspace_t *objspace)
{
    int old = dont_gc_val();

    dont_gc_off();
    return RBOOL(old);
}

static VALUE
gc_enable(rb_execution_context_t *ec, VALUE _)
{
    return rb_gc_enable();
}

VALUE
rb_gc_disable_no_rest(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_disable_no_rest(objspace);
}

static VALUE
gc_disable_no_rest(rb_objspace_t *objspace)
{
    int old = dont_gc_val();
    dont_gc_on();
    return RBOOL(old);
}

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_disable(objspace);
}

VALUE
rb_objspace_gc_disable(rb_objspace_t *objspace)
{
    gc_rest(objspace);
    return gc_disable_no_rest(objspace);
}

static VALUE
gc_disable(rb_execution_context_t *ec, VALUE _)
{
    return rb_gc_disable();
}
static VALUE
gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
{
    /* If this is not MinGW or Windows and we don't have mmap, we cannot use
     * mprotect for the read barrier, so we must disable automatic compaction. */
#if !defined(__MINGW32__) && !defined(_WIN32)
    if (!USE_MMAP_ALIGNED_ALLOC) {
        rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
    }
#endif
    ruby_enable_autocompact = RTEST(v);
    return v;
}

static VALUE
gc_get_auto_compact(rb_execution_context_t *ec, VALUE _)
{
    return RBOOL(ruby_enable_autocompact);
}
static int
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    const char *ptr = getenv(name);
    ssize_t val;

    if (ptr != NULL && *ptr) {
        size_t unit = 0;
        char *end;
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
        switch (*end) {
          case 'k': case 'K':
            unit = 1024;
            ++end;
            break;
          case 'm': case 'M':
            unit = 1024*1024;
            ++end;
            break;
          case 'g': case 'G':
            unit = 1024*1024*1024;
            ++end;
            break;
        }
        while (*end && isspace((unsigned char)*end)) end++;
        if (*end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }
        if (unit > 0) {
            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
                return 0;
            }
            val *= unit;
        }
        if (val > 0 && (size_t)val > lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            }
            *default_value = (size_t)val;
            return 1;
        }
        else {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);
            }
            return 0;
        }
    }
    return 0;
}
static int
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    const char *ptr = getenv(name);
    double val;

    if (ptr != NULL && *ptr) {
        char *end;
        val = strtod(ptr, &end);
        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }

        if (accept_zero && val == 0.0) {
            goto accept;
        }
        else if (val <= lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);
            }
        }
        else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
                 val > upper_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);
            }
        }
        else {
          accept:
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
            *default_value = val;
            return 1;
        }
    }
    return 0;
}
static void
gc_set_initial_pages(void)
{
    size_t min_pages;
    rb_objspace_t *objspace = &rb_objspace;

    gc_rest(objspace);

    min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;

    size_t pages_per_class = (min_pages - heap_eden_total_pages(objspace)) / SIZE_POOL_COUNT;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        heap_add_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool), pages_per_class);
    }

    heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), min_pages - heap_eden_total_pages(objspace));
}
/*
 * GC tuning environment variables
 *
 * * RUBY_GC_HEAP_INIT_SLOTS
 *   - Initial number of allocation slots.
 * * RUBY_GC_HEAP_FREE_SLOTS
 *   - Prepare at least this number of free slots after GC.
 *   - Allocate additional slots if there are not enough slots.
 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
 *   - Grow the heap by this factor:
 *     (next slots number) = (current slots number) * (this factor)
 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
 *   - The allocation rate is limited to this number of slots.
 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
 *   - Allocate additional pages when the number of free slots is
 *     lower than (total_slots * (this ratio)).
 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
 *   - Allocate slots to satisfy this formula:
 *       free_slots = total_slots * goal_ratio
 *   - In other words, prepare (total_slots * goal_ratio) free slots.
 *   - If this value is 0.0, use RUBY_GC_HEAP_GROWTH_FACTOR directly.
 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
 *   - Allow pages to be freed when the number of free slots is
 *     greater than (total_slots * (this ratio)).
 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
 *   - Do a full GC when the number of old objects exceeds R * N,
 *     where R is this factor and
 *     N is the number of old objects just after the last full GC.
 *
 * * Obsolete names:
 *   * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
 *   * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
 *
 * * RUBY_GC_MALLOC_LIMIT
 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
 *
 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
 */
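
/* Example usage (the values here are illustrative, not recommendations):
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.25 \
 *   RUBY_GC_MALLOC_LIMIT=67108864 ruby app.rb
 *
 * Each variable is parsed by get_envparam_size()/get_envparam_double() above,
 * which ignore out-of-range values and report what was applied when running
 * in verbose mode. */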
void
ruby_gc_set_params(void)
{
    /* RUBY_GC_HEAP_FREE_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
        /* ok */
    }

    /* RUBY_GC_HEAP_INIT_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
        gc_set_initial_pages();
    }

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
                        0.0, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
                        gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
        gc_params.malloc_limit_max = SIZE_MAX;
    }
    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        rb_objspace_t *objspace = &rb_objspace;
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
#endif
}
static void
reachable_objects_from_callback(VALUE obj)
{
    rb_ractor_t *cr = GET_RACTOR();
    cr->mfd->mark_func(obj, cr->mfd->data);
}

void
rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");

    if (is_markable_object(objspace, obj)) {
        rb_ractor_t *cr = GET_RACTOR();
        struct gc_mark_func_data_struct mfd = {
            .mark_func = func,
            .data = data,
        }, *prev_mfd = cr->mfd;

        cr->mfd = &mfd;
        gc_mark_children(objspace, obj);
        cr->mfd = prev_mfd;
    }
}

struct root_objects_data {
    const char *category;
    void (*func)(const char *category, VALUE, void *);
    void *data;
};

static void
root_objects_from(VALUE obj, void *ptr)
{
    const struct root_objects_data *data = (struct root_objects_data *)ptr;
    (*data->func)(data->category, obj, data->data);
}

void
rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace_reachable_objects_from_root(objspace, func, passing_data);
}

static void
objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
{
    if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");

    rb_ractor_t *cr = GET_RACTOR();
    struct root_objects_data data = {
        .func = func,
        .data = passing_data,
    };
    struct gc_mark_func_data_struct mfd = {
        .mark_func = root_objects_from,
        .data = &data,
    }, *prev_mfd = cr->mfd;

    cr->mfd = &mfd;
    gc_mark_roots(objspace, &data.category);
    cr->mfd = prev_mfd;
}
/*
  ------------------------ Extended allocator ------------------------
*/

struct gc_raise_tag {
    VALUE exc;
    const char *fmt;
    va_list *ap;
};

static void *
gc_vraise(void *ptr)
{
    struct gc_raise_tag *argv = ptr;
    rb_vraise(argv->exc, argv->fmt, *argv->ap);
    UNREACHABLE_RETURN(NULL);
}

static void
gc_raise(VALUE exc, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    struct gc_raise_tag argv = {
        exc, fmt, &ap,
    };

    if (ruby_thread_has_gvl_p()) {
        gc_vraise(&argv);
        UNREACHABLE;
    }
    else if (ruby_native_thread_p()) {
        rb_thread_call_with_gvl(gc_vraise, &argv);
        UNREACHABLE;
    }
    else {
        /* Not in a ruby thread */
        fprintf(stderr, "%s", "[FATAL] ");
        vfprintf(stderr, fmt, ap);
    }

    va_end(ap);
    abort();
}
static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);

static void
negative_size_allocation_error(const char *msg)
{
    gc_raise(rb_eNoMemError, "%s", msg);
}

static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}

NORETURN(static void ruby_memerror(void));
RBIMPL_ATTR_MAYBE_UNUSED()
static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
        }
    }
    exit(EXIT_FAILURE);
}

void
rb_memerror(void)
{
    rb_execution_context_t *ec = GET_EC();
    rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
    VALUE exc;

    if (0) {
        // Print out pid, sleep, so you can attach debugger to see what went wrong:
        fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
        sleep(60);
    }

    if (during_gc) {
        // TODO: OMG!! How to implement it?
        gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
    }

    exc = nomem_error;
    if (!exc ||
        rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        rb_ec_raised_clear(ec);
    }
    else {
        rb_ec_raised_set(ec, RAISED_NOMEMORY);
        exc = ruby_vm_special_exception_copy(exc);
    }
    ec->errinfo = exc;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
static void *
rb_aligned_malloc(size_t alignment, size_t size)
{
    void *res;

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#else
    if (USE_MMAP_ALIGNED_ALLOC) {
        GC_ASSERT(alignment % sysconf(_SC_PAGE_SIZE) == 0);

        char *ptr = mmap(NULL, alignment + size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            return NULL;
        }

        char *aligned = ptr + alignment;
        aligned -= ((VALUE)aligned & (alignment - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + alignment);

        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("rb_aligned_malloc: munmap failed for start");
            }
        }

        size_t end_out_of_range_size = alignment - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + size, end_out_of_range_size)) {
                rb_bug("rb_aligned_malloc: munmap failed for end");
            }
        }

        res = (void *)aligned;
    }
    else {
# if defined(HAVE_POSIX_MEMALIGN)
        if (posix_memalign(&res, alignment, size) != 0) {
            return NULL;
        }
# elif defined(HAVE_MEMALIGN)
        res = memalign(alignment, size);
# else
        char *aligned;
        res = malloc(alignment + size + sizeof(void*));
        aligned = (char*)res + alignment + sizeof(void*);
        aligned -= ((VALUE)aligned & (alignment - 1));
        ((void**)aligned)[-1] = res;
        res = (void*)aligned;
# endif
    }
#endif

    /* alignment must be a power of 2 */
    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);
    return res;
}

static void
rb_aligned_free(void *ptr, size_t size)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#else
    if (USE_MMAP_ALIGNED_ALLOC) {
        GC_ASSERT(size % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(ptr, size)) {
            rb_bug("rb_aligned_free: munmap failed");
        }
    }
    else {
# if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
        free(ptr);
# else
        free(((void**)ptr)[-1]);
# endif
    }
#endif
}
static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}

enum memop_type {
    MEMOP_TYPE_MALLOC  = 0,
    MEMOP_TYPE_FREE,
    MEMOP_TYPE_REALLOC
};
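
/* atomic_sub_nounderflow() below is a lock-free saturating subtraction: it
 * retries a compare-and-swap loop, clamping the subtrahend to the current
 * value, so a concurrent decrement can never wrap the size_t counter below
 * zero. */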
static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
    if (ruby_gc_stressful && ruby_native_thread_p()) {
        unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                               GPR_FLAG_STRESS | GPR_FLAG_MALLOC);

        if (gc_stress_full_mark_after_malloc_p()) {
            reason |= GPR_FLAG_FULL_MARK;
        }
        garbage_collect_with_gvl(objspace, reason);
    }
}
static inline bool
objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   mem,
                   type == MEMOP_TYPE_MALLOC  ? "malloc" :
                   type == MEMOP_TYPE_FREE    ? "free  " :
                   type == MEMOP_TYPE_REALLOC ? "realloc": "error",
                   new_size, old_size);
    return false;
}
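/* Note (editorial): objspace_malloc_increase_body is the accounting core
 * behind every xmalloc/xrealloc/xfree.  It maintains the global
 * malloc_increase counter (and, under RGENGC_ESTIMATE_OLDMALLOC, a
 * per-objspace old-malloc estimate), and once malloc_increase exceeds
 * malloc_limit it triggers a GC from the allocating thread. */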
static bool
objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (new_size > old_size) {
        ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
#endif
    }
    else {
        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
#endif
    }

    if (type == MEMOP_TYPE_MALLOC) {
      retry:
        if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
            if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
                gc_rest(objspace); /* gc_rest can reduce malloc_increase */
                goto retry;
            }
            garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
        }
    }

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {
        ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
    }
    else {
        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
#endif
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
    }

    switch (type) {
      case MEMOP_TYPE_MALLOC:
        ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
        break;
      case MEMOP_TYPE_FREE:
        {
            size_t allocations = objspace->malloc_params.allocations;
            if (allocations > 0) {
                atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
            }
#if MALLOC_ALLOCATED_SIZE_CHECK
            else {
                GC_ASSERT(objspace->malloc_params.allocations > 0);
            }
#endif
        }
        break;
      case MEMOP_TYPE_REALLOC: /* ignore */ break;
    }
#endif
    return true;
}
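/* Note (editorial): the objspace_malloc_increase() macro below is a
 * for-statement trick: the "report" half runs first (and returns false),
 * any block attached by the caller then runs exactly once, and the "body"
 * half runs as the loop increment, after the block.  This lets callers
 * wrap the actual free()/bookkeeping between the two halves; used as a
 * bare statement, the attached block is simply empty. */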
#define objspace_malloc_increase(...) \
    for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
         !malloc_increase_done; \
         malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
struct malloc_obj_info { /* 4 words */
    size_t size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    size_t gen;
    const char *file;
    size_t line;
#endif
};

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
#endif
static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif

    return size;
}
static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = objspace->profile.count;
        info->file = ruby_malloc_info_file;
        info->line = info->file ? ruby_malloc_info_line : 0;
#endif
        mem = info + 1;
    }
#endif

    return mem;
}
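/* Note (editorial): TRY_WITH_GC implements the standard retry protocol for
 * raw allocations: attempt the allocation; on failure force a full,
 * immediate GC and try exactly once more; only then give up (rb_bug in
 * debug builds, ruby_memerror otherwise). */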
#if defined(__GNUC__) && RUBY_DEBUG
#define RB_BUG_INSTEAD_OF_RB_MEMERROR
#endif

#ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
#define TRY_WITH_GC(siz, expr) do {                          \
        const gc_profile_record_flag gpr =                   \
            GPR_FLAG_FULL_MARK       |                       \
            GPR_FLAG_IMMEDIATE_MARK  |                       \
            GPR_FLAG_IMMEDIATE_SWEEP |                       \
            GPR_FLAG_MALLOC;                                 \
        objspace_malloc_gc_stress(objspace);                 \
                                                             \
        if (LIKELY((expr))) {                                \
            /* Success on 1st try */                         \
        }                                                    \
        else if (!garbage_collect_with_gvl(objspace, gpr)) { \
            /* @shyouhei thinks this doesn't happen */       \
            rb_bug("TRY_WITH_GC: could not GC");             \
        }                                                    \
        else if ((expr)) {                                   \
            /* Success on 2nd try */                         \
        }                                                    \
        else {                                               \
            rb_bug("TRY_WITH_GC: could not allocate:"        \
                   "%"PRIdSIZE" bytes for %s",               \
                   siz, # expr);                             \
        }                                                    \
    } while (0)
#else
#define TRY_WITH_GC(siz, alloc) do { \
        objspace_malloc_gc_stress(objspace); \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                GPR_FLAG_MALLOC) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
#endif
/* these shouldn't be called directly.
 * objspace_* functions do not check allocation size.
 */
static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    return objspace_malloc_fixup(objspace, mem, size);
}
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    return size_mul_or_raise(count, elsize, rb_eArgError);
}
static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    void *mem;

    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    /*
     * The behavior of realloc(ptr, 0) is implementation defined.
     * Therefore we don't use realloc(ptr, 0), for portability reasons.
     * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
     */
    if (new_size == 0) {
        if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
            /*
             * - OpenBSD's malloc(3) man page says that when 0 is passed, it
             *   returns a non-NULL pointer to an access-protected memory page.
             *   The returned pointer cannot be read / written at all, but
             *   is still a valid argument of free().
             *
             *   https://man.openbsd.org/malloc.3
             *
             * - Linux's malloc(3) man page says that it _might_ perhaps return
             *   a non-NULL pointer when its argument is 0.  That return value
             *   is safe (and is expected) to be passed to free().
             *
             *   http://man7.org/linux/man-pages/man3/malloc.3.html
             *
             * - As I read the implementation, jemalloc's malloc() returns a
             *   fully normal 16-byte memory region when its argument is 0.
             *
             * - As I read the implementation, musl libc's malloc() returns a
             *   fully normal 32-byte memory region when its argument is 0.
             *
             * - Other malloc implementations can also return non-NULL.
             */
            objspace_xfree(objspace, ptr, old_size);
            return mem;
        }
        else {
            /*
             * It is dangerous to return NULL here, because that could lead to
             * RCE.  Fallback to 1 byte instead of zero.
             *
             * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
             */
            new_size = 1;
        }
    }

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
        new_size += sizeof(struct malloc_obj_info);
        ptr = info;
        old_size = info->size;
    }
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);
    TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = new_size;
        mem = info + 1;
    }
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);
    return mem;
}
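/* Note (editorial): the CALC_EXACT_MALLOC_SIZE statistics below bucket
 * freed allocations by size in powers of two starting at 16 bytes
 * (16 << i), by GC generation at free time, and by allocation site, and
 * dump the histograms to stderr from a destructor at process exit. */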
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS

#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;

static int
mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
{
    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);

    return ST_CONTINUE;
}

__attribute__((destructor))
void
rb_malloc_info_show_results(void)
{
    int i;

    fprintf(stderr, "* malloc_info gen statistics\n");
    for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
        if (i == MALLOC_INFO_GEN_SIZE-1) {
            fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
        else {
            fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
    }

    fprintf(stderr, "* malloc_info size statistics\n");
    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
        int s = 16 << i;
        fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
    }
    fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);

    if (malloc_info_file_table) {
        fprintf(stderr, "* malloc_info file statistics\n");
        st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
    }
}
#else
void
rb_malloc_info_show_results(void)
{
}
#endif
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
    if (!ptr) {
        /*
         * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
         * its first version.  We had better follow suit.
         */
        return;
    }
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    {
        int gen = (int)(objspace->profile.count - info->gen);
        int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
        int i;

        malloc_info_gen_cnt[gen_index]++;
        malloc_info_gen_size[gen_index] += info->size;

        for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
            size_t s = 16 << i;
            if (info->size <= s) {
                malloc_info_size[i]++;
                goto found;
            }
        }
        malloc_info_size[i]++;
      found:;

        {
            st_data_t key = (st_data_t)info->file, d;
            size_t *data;

            if (malloc_info_file_table == NULL) {
                malloc_info_file_table = st_init_numtable_with_size(1024);
            }
            if (st_lookup(malloc_info_file_table, key, &d)) {
                /* hit */
                data = (size_t *)d;
            }
            else {
                data = malloc(xmalloc2_size(2, sizeof(size_t)));
                if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
                data[0] = data[1] = 0;
                st_insert(malloc_info_file_table, key, (st_data_t)data);
            }
            data[0] ++;
            data[1] += info->size;
        }
        if (0 && gen >= 2) { /* verbose output */
            if (info->file) {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
                        info->size, gen, info->file, info->line);
            }
            else {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
                        info->size, gen);
            }
        }
    }
#endif
#endif
    old_size = objspace_malloc_size(objspace, ptr, old_size);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
        free(ptr);
        RB_DEBUG_COUNTER_INC(heap_xfree);
    }
}
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return ruby_xmalloc0(size);
}

void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}
static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}

void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}
#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (x) {
        objspace_xfree(&rb_objspace, x, size);
    }
}

void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}
void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
/* Mimic ruby_xmalloc, but need not rb_objspace.
 * should return pointer suitable for ruby_xfree
 */
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
#if CALC_EXACT_MALLOC_SIZE
    if (!mem) {
        return NULL;
    }
    else
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = 0;
        info->file = NULL;
        info->line = 0;
#endif
        mem = info + 1;
    }
#endif
    return mem;
}

void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
#endif
    free(ptr);
}
void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    VALUE imemo;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep the order; allocate an empty imemo first then xmalloc, to
     * get rid of potential memory leak */
    imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
    *store = imemo;
    ptr = ruby_xmalloc0(size);
    tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;
    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
#if MALLOC_ALLOCATED_SIZE
/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 *
 *  Returns the size of memory allocated by malloc().
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */

static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}

/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 *
 *  Returns the number of malloc() allocations.
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */

static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
#endif

void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
/*
  ------------------------------ WeakMap ------------------------------
*/

struct weakmap {
    st_table *obj2wmap;		/* obj -> [ref,...] */
    st_table *wmap2obj;		/* ref -> obj */
    VALUE final;
};
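/* Note (editorial): a WeakMap keeps two identity tables.  wmap2obj maps
 * each key to its (weakly held) value; obj2wmap maps an object back to
 * the array of keys that reference it (slot 0 of that array holds its
 * length), so the shared finalizer lambda (`final`) can purge every
 * relevant entry when an object is collected. */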
#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0

#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
static int
wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (!is_live_object(objspace, obj)) return ST_DELETE;
    return ST_CONTINUE;
}
#endif
static void
wmap_compact(void *ptr)
{
    struct weakmap *w = ptr;
    if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
    if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
    w->final = rb_gc_location(w->final);
}

static void
wmap_mark(void *ptr)
{
    struct weakmap *w = ptr;
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
    if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
#endif
    rb_gc_mark_movable(w->final);
}
static int
wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
    return ST_CONTINUE;
}

static void
wmap_free(void *ptr)
{
    struct weakmap *w = ptr;
    st_foreach(w->obj2wmap, wmap_free_map, 0);
    st_free_table(w->obj2wmap);
    st_free_table(w->wmap2obj);
    xfree(w);
}
static int
wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
    return ST_CONTINUE;
}

static size_t
wmap_memsize(const void *ptr)
{
    size_t size;
    const struct weakmap *w = ptr;
    size = sizeof(*w);
    size += st_memsize(w->obj2wmap);
    size += st_memsize(w->wmap2obj);
    st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
    return size;
}
static const rb_data_type_t weakmap_type = {
    "weakmap",
    {
        wmap_mark,
        wmap_free,
        wmap_memsize,
        wmap_compact,
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
static VALUE
wmap_allocate(VALUE klass)
{
    struct weakmap *w;
    VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
    w->obj2wmap = rb_init_identtable();
    w->wmap2obj = rb_init_identtable();
    w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
    return obj;
}
static int
wmap_live_p(rb_objspace_t *objspace, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) return TRUE;
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        void *poisoned = asan_unpoison_object_temporary(obj);

        enum ruby_value_type t = BUILTIN_TYPE(obj);
        int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
                   is_live_object(objspace, obj));

        if (poisoned) {
            asan_poison_object(obj);
        }

        return ret;
    }
    return FALSE;
}
static int
wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    VALUE wmap, *ptr, size, i, j;
    if (!existing) return ST_STOP;
    wmap = (VALUE)arg, ptr = (VALUE *)*value;
    for (i = j = 1, size = ptr[0]; i <= size; ++i) {
        if (ptr[i] != wmap) {
            ptr[j++] = ptr[i];
        }
    }
    if (j == 1) {
        ruby_sized_xfree(ptr, i * sizeof(VALUE));
        return ST_DELETE;
    }
    if (j < i) {
        SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
        ptr[0] = j;
        *value = (st_data_t)ptr;
    }
    return ST_CONTINUE;
}
/* :nodoc: */
static VALUE
wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
{
    st_data_t orig, wmap, data;
    VALUE obj, *rids, i, size;
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    /* Get reference from object id. */
    if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
        rb_bug("wmap_finalize: objid is not found.");
    }

    /* obj is original referenced object and/or weak reference. */
    orig = (st_data_t)obj;
    if (st_delete(w->obj2wmap, &orig, &data)) {
        rids = (VALUE *)data;
        size = *rids++;
        for (i = 0; i < size; ++i) {
            wmap = (st_data_t)rids[i];
            st_delete(w->wmap2obj, &wmap, NULL);
        }
        ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
    }

    wmap = (st_data_t)obj;
    if (st_delete(w->wmap2obj, &wmap, &orig)) {
        wmap = (st_data_t)obj;
        st_update(w->obj2wmap, orig, wmap_final_func, wmap);
    }
    return self;
}
struct wmap_iter_arg {
    rb_objspace_t *objspace;
    VALUE value;
};
static VALUE
wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) {
        return rb_str_append(str, rb_inspect(obj));
    }
    else if (wmap_live_p(objspace, obj)) {
        return rb_str_append(str, rb_any_to_s(obj));
    }
    else {
        return rb_str_catf(str, "#<collected:%p>", (void*)obj);
    }
}
static int
wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE str = argp->value;
    VALUE k = (VALUE)key, v = (VALUE)val;

    if (RSTRING_PTR(str)[0] == '#') {
        rb_str_cat2(str, ", ");
    }
    else {
        rb_str_cat2(str, ": ");
        RSTRING_PTR(str)[0] = '#';
    }
    wmap_inspect_append(objspace, str, k);
    rb_str_cat2(str, " => ");
    wmap_inspect_append(objspace, str, v);

    return ST_CONTINUE;
}
static VALUE
wmap_inspect(VALUE self)
{
    VALUE str;
    VALUE c = rb_class_name(CLASS_OF(self));
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);

    args.objspace = &rb_objspace;
    args.value = str;
    st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);

    RSTRING_PTR(str)[0] = '#';
    rb_str_cat2(str, ">");

    return str;
}
static int
wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield_values(2, (VALUE)key, obj);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
    return self;
}
static int
wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield((VALUE)key);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each_key(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
    return self;
}
static int
wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield(obj);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each_value(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
    return self;
}
static int
wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE ary = argp->value;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_ary_push(ary, (VALUE)key);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_keys(VALUE self)
{
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    args.objspace = &rb_objspace;
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
    return args.value;
}
static int
wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE ary = argp->value;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_ary_push(ary, obj);
    }
    return ST_CONTINUE;
}

/* Iterates over values and objects in a weakly referenced object */
static VALUE
wmap_values(VALUE self)
{
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    args.objspace = &rb_objspace;
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
    return args.value;
}
static int
wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
{
    VALUE size, *ptr, *optr;
    if (existing) {
        size = (ptr = optr = (VALUE *)*val)[0];
        ++size;
        SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
    }
    else {
        optr = 0;
        size = 1;
        ptr = ruby_xmalloc0(2 * sizeof(VALUE));
    }
    ptr[0] = size;
    ptr[size] = (VALUE)arg;
    if (ptr == optr) return ST_STOP;
    *val = (st_data_t)ptr;
    return ST_CONTINUE;
}
/* Creates a weak reference from the given key to the given value */
static VALUE
wmap_aset(VALUE self, VALUE key, VALUE value)
{
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    if (FL_ABLE(value)) {
        define_final0(value, w->final);
    }
    if (FL_ABLE(key)) {
        define_final0(key, w->final);
    }

    st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
    st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
    return nonspecial_obj_id(value);
}
/* Retrieves a weakly referenced object with the given key */
static VALUE
wmap_lookup(VALUE self, VALUE key)
{
    st_data_t data;
    VALUE obj;
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
    obj = (VALUE)data;
    if (!wmap_live_p(objspace, obj)) return Qundef;
    return obj;
}

/* Retrieves a weakly referenced object with the given key */
static VALUE
wmap_aref(VALUE self, VALUE key)
{
    VALUE obj = wmap_lookup(self, key);
    return obj != Qundef ? obj : Qnil;
}
/* Returns +true+ if +key+ is registered */
static VALUE
wmap_has_key(VALUE self, VALUE key)
{
    return RBOOL(wmap_lookup(self, key) != Qundef);
}

/* Returns the number of referenced objects */
static VALUE
wmap_size(VALUE self)
{
    st_index_t n;
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    n = w->wmap2obj->num_entries;
#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
    return ULONG2NUM(n);
#else
    return ULL2NUM(n);
#endif
}
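/* Note (editorial): current_process_time below prefers
 * clock_gettime(CLOCK_PROCESS_CPUTIME_ID) where available, permanently
 * disabling that path after its first failure, and falls back to
 * getrusage() user time and, on Windows, GetProcessTimes(). */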
/*
  ------------------------------ GC profiler ------------------------------
*/

#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

static bool
current_process_time(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        static int try_clock_gettime = 1;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
            return true;
        }
        else {
            try_clock_gettime = 0;
        }
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            ts->tv_sec = time.tv_sec;
            ts->tv_nsec = (int32_t)time.tv_usec * 1000;
            return true;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
            ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
            ts->tv_sec  = (time_t)(ui.QuadPart / PER100NSEC);
            return true;
        }
    }
#endif

    return false;
}

static double
getrusage_time(void)
{
    struct timespec ts;
    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
    else {
        return 0.0;
    }
}
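/* Note (editorial): gc_prof_setup_new_record grows the profile records
 * array with plain malloc/realloc rather than xmalloc, presumably so the
 * profiler's own bookkeeping never re-enters the GC accounting it is
 * measuring; the array grows in chunks of 1000 once the default size is
 * exhausted. */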
static inline void
gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
{
    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;
        gc_profile_record *record;

        /* create new record */
        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
        }
        if (index >= objspace->profile.size) {
            void *ptr;
            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
            if (!ptr) rb_memerror();
            objspace->profile.records = ptr;
        }
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        }
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        MEMZERO(record, gc_profile_record, 1);

        /* setup before-GC parameter */
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
#ifdef RUSAGE_SELF
        {
            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;
            }
        }
#endif
#endif
    }
}
static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}

static double
elapsed_time_from(double time)
{
    double now = getrusage_time();
    if (now > time) {
        return now - time;
    }
    else {
        return 0;
    }
}

static inline void
gc_prof_timer_stop(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;
    }
}
#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}

static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}

static inline void
gc_prof_sweep_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();
        }
    }
}

static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {
        double sweep_time;
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            /* need to accumulate GC time for lazy sweep after gc() */
            record->gc_time += sweep_time;
        }
        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        }

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
#endif
        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
    }
}
static inline void
gc_prof_set_malloc_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;
    }
#endif
}

static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
#endif

        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
    }
}
/*
 *  call-seq:
 *     GC::Profiler.clear -> nil
 *
 *  Clears the GC profiler data.
 *
 */
static VALUE
gc_profile_clear(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    void *p = objspace->profile.records;
    objspace->profile.records = NULL;
    objspace->profile.size = 0;
    objspace->profile.next_index = 0;
    objspace->profile.current_record = 0;
    free(p);
    return Qnil;
}
/*
 *  call-seq:
 *     GC::Profiler.raw_data -> [Hash, ...]
 *
 *  Returns an Array of individual raw profile data Hashes ordered
 *  from earliest to latest by +:GC_INVOKE_TIME+.
 *
 *  For example:
 *
 *    [
 *      {
 *         :GC_TIME=>1.3000000000000858e-05,
 *         :GC_INVOKE_TIME=>0.010634999999999999,
 *         :HEAP_USE_SIZE=>289640,
 *         :HEAP_TOTAL_SIZE=>588960,
 *         :HEAP_TOTAL_OBJECTS=>14724,
 *         :GC_IS_MARKED=>false
 *      },
 *      # ...
 *    ]
 *
 *  The keys mean:
 *
 *  +:GC_TIME+::
 *      Time elapsed in seconds for this GC run
 *  +:GC_INVOKE_TIME+::
 *      Time elapsed in seconds from startup to when the GC was invoked
 *  +:HEAP_USE_SIZE+::
 *      Total bytes of heap used
 *  +:HEAP_TOTAL_SIZE+::
 *      Total size of heap in bytes
 *  +:HEAP_TOTAL_OBJECTS+::
 *      Total number of objects
 *  +:GC_IS_MARKED+::
 *      Returns +true+ if the GC is in mark phase
 *
 *  If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
 *  to the following hash keys:
 *
 *  +:GC_MARK_TIME+::
 *  +:GC_SWEEP_TIME+::
 *  +:ALLOCATE_INCREASE+::
 *  +:ALLOCATE_LIMIT+::
 *  +:HEAP_USE_PAGES+::
 *  +:HEAP_LIVE_OBJECTS+::
 *  +:HEAP_FREE_OBJECTS+::
 *  +:HAVE_FINALIZE+::
 *
 */
static VALUE
gc_profile_record_get(VALUE _)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i =0; i < objspace->profile.next_index; i++) {
        gc_profile_record *record = &objspace->profile.records[i];

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
#endif

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

static char *
gc_profile_dump_major_reason(unsigned int flags, char *buff)
{
    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
    int i = 0;

    if (reason == GPR_FLAG_NONE) {
        buff[0] = '-';
        buff[1] = 0;
    }
    else {
#define C(x, s) \
  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
      buff[i++] = #x[0]; \
      if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
      buff[i] = 0; \
  }
        C(NOFREE, N);
        C(OLDGEN, O);
        C(SHADY,  S);
#if RGENGC_ESTIMATE_OLDMALLOC
        C(OLDMALLOC, M);
#endif
#undef C
    }
    return buff;
}
#endif
static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count /* > 1 */) {
        size_t i;
        const gc_profile_record *record;

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
                    "More detail.\n" \
                    "Prepare Time = Previously GC's rest sweep time\n"
                    "Index Flags          Allocate Inc.  Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
                    "  Allocated Size"
#endif
                    "  Use Page     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
#if RGENGC_PROFILE
                    " OldgenObj RemNormObj RemShadObj"
#endif
#if GC_PROFILE_DETAIL_MEMORY
                    " MaxRSS(KB) MinorFLT MajorFLT"
#endif
                    "\n";
        append(out, rb_str_new_cstr(str));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %15"PRIuSIZE
#endif
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if RGENGC_PROFILE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   "%11ld %8ld %8ld"
#endif
                                   "\n",
                                   i+1,
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,

                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
#if RGENGC_PROFILE
                                   ,
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   ,
                                   record->maxrss / 1024,
                                   record->minflt,
                                   record->majflt
#endif
                       ));
        }
#endif
    }
}
/*
 *  call-seq:
 *     GC::Profiler.result  -> String
 *
 *  Returns a profile data report such as:
 *
 *    GC 1 invokes.
 *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC time(ms)
 *        1               0.012               159240               212940                10647         0.00000000000001530000
 */
static VALUE
gc_profile_result(VALUE _)
{
    VALUE str = rb_str_buf_new(0);
    gc_profile_dump_on(str, rb_str_buf_append);
    return str;
}
/*
 *  call-seq:
 *     GC::Profiler.report
 *     GC::Profiler.report(io)
 *
 *  Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
 *
 */
static VALUE
gc_profile_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out;

    out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
    gc_profile_dump_on(out, rb_io_write);

    return Qnil;
}
/*
 *  call-seq:
 *     GC::Profiler.total_time -> float
 *
 *  The total time used for garbage collection in seconds
 */
static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t i;
        size_t count = objspace->profile.next_index;

        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;
        }
    }
    return DBL2NUM(time);
}
/*
 *  call-seq:
 *     GC::Profiler.enabled? -> true or false
 *
 *  The current status of GC profile mode.
 */
static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return RBOOL(objspace->profile.run);
}
/*
 *  call-seq:
 *     GC::Profiler.enable -> nil
 *
 *  Starts the GC profiler.
 *
 */
static VALUE
gc_profile_enable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;
    return Qnil;
}
/*
 *  call-seq:
 *     GC::Profiler.disable -> nil
 *
 *  Stops the GC profiler.
 *
 */
static VALUE
gc_profile_disable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
    return Qnil;
}
/*
  ------------------------------ DEBUG ------------------------------
*/

static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
#define TYPE_NAME(t) case (t): return #t;
        TYPE_NAME(T_NONE);
        TYPE_NAME(T_OBJECT);
        TYPE_NAME(T_CLASS);
        TYPE_NAME(T_MODULE);
        TYPE_NAME(T_FLOAT);
        TYPE_NAME(T_STRING);
        TYPE_NAME(T_REGEXP);
        TYPE_NAME(T_ARRAY);
        TYPE_NAME(T_HASH);
        TYPE_NAME(T_STRUCT);
        TYPE_NAME(T_BIGNUM);
        TYPE_NAME(T_FILE);
        TYPE_NAME(T_MATCH);
        TYPE_NAME(T_COMPLEX);
        TYPE_NAME(T_RATIONAL);
        TYPE_NAME(T_NIL);
        TYPE_NAME(T_TRUE);
        TYPE_NAME(T_FALSE);
        TYPE_NAME(T_SYMBOL);
        TYPE_NAME(T_FIXNUM);
        TYPE_NAME(T_UNDEF);
        TYPE_NAME(T_IMEMO);
        TYPE_NAME(T_ICLASS);
        TYPE_NAME(T_MOVED);
        TYPE_NAME(T_ZOMBIE);
      case T_DATA:
        if (obj && rb_objspace_data_type_name(obj)) {
            return rb_objspace_data_type_name(obj);
        }
        return "T_DATA";
#undef TYPE_NAME
    }
    return "unknown";
}

static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}
const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrset";
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}
# define ARY_SHARED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary),ELTS_SHARED)!=0)
# define ARY_EMBED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
static void
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
        VALUE path = rb_iseq_path(iseq);
        VALUE n = iseq->body->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(iseq->body->location.label),
                 RSTRING_PTR(path),
                 n ? FIX2INT(n) : 0);
    }
}
static int
str_len_no_raise(VALUE str)
{
    long len = RSTRING_LEN(str);
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;
    return (int)len;
}
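/* Note (editorial): rb_raw_obj_info formats a one-line diagnostic for an
 * object into `buff`.  For heap objects it prints the object's age and a
 * flag legend in brackets -- L (uncollectible), M (marked), P (pinned),
 * R (marking), U (WB-unprotected), G (garbage) -- followed by the class
 * and type-specific details. */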
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
    int pos = 0;
    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
    if (SPECIAL_CONST_P(obj)) {
        APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));

        if (FIXNUM_P(obj)) {
            APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
        }
        else if (SYMBOL_P(obj)) {
            APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
        }
    }
    else {
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
        const int type = BUILTIN_TYPE(obj);
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s%s] %s ",
                     (void *)obj, age,
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),  "L"),
                     C(RVALUE_MARK_BITMAP(obj),           "M"),
                     C(RVALUE_PIN_BITMAP(obj),            "P"),
                     C(RVALUE_MARKING_BITMAP(obj),        "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     C(rb_objspace_garbage_object_p(obj), "G"),
                     obj_type_name(obj)));
        }
        else {
            /* fake */
            APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
                     (void *)obj, age,
                     obj_type_name(obj)));
        }

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPENDF((BUFF_ARGS, "(temporary internal)"));
        }
        else {
            if (RTEST(RBASIC(obj)->klass)) {
                VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
                }
            }
        }

#if GC_DEBUG
        APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
#endif

        switch (type) {
          case T_NODE:
            UNEXPECTED_NODE(rb_raw_obj_info);
            break;
          case T_ARRAY:
            if (FL_TEST(obj, ELTS_SHARED)) {
                APPENDF((BUFF_ARGS, "shared -> %s",
                         rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
            }
            else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
                APPENDF((BUFF_ARGS, "[%s%s] len: %ld (embed)",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj)));
            }
            else {
                APPENDF((BUFF_ARGS, "[%s%s%s] len: %ld, capa:%ld ptr:%p",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         C(RARRAY_TRANSIENT_P(obj), "T"),
                         RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
            }
            break;
          case T_STRING: {
            if (STR_SHARED_P(obj)) APPENDF((BUFF_ARGS, " [shared] "));
            APPENDF((BUFF_ARGS, "%.*s", str_len_no_raise(obj), RSTRING_PTR(obj)));
            break;
          }
          case T_SYMBOL: {
            VALUE fstr = RSYMBOL(obj)->fstr;
            ID id = RSYMBOL(obj)->id;
            if (RB_TYPE_P(fstr, T_STRING)) {
                APPENDF((BUFF_ARGS, ":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id));
            }
            else {
                APPENDF((BUFF_ARGS, "(%p) id:%d", (void *)fstr, (unsigned int)id));
            }
            break;
          }
          case T_MOVED: {
            APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
            break;
          }
          case T_HASH: {
            APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
                     RHASH_SIZE(obj)));
            break;
          }
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
                }
                else {
                    APPENDF((BUFF_ARGS, "(anon)"));
                }
                break;
            }
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
                }
                break;
            }
          case T_OBJECT:
            {
                uint32_t len = ROBJECT_NUMIV(obj);

                if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
                    APPENDF((BUFF_ARGS, "(embed) len:%d", len));
                }
                else {
                    VALUE *ptr = ROBJECT_IVPTR(obj);
                    APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
                }
            }
            break;
          case T_DATA: {
            const struct rb_block *block;
            const rb_iseq_t *iseq;
            if (rb_obj_is_proc(obj) &&
                (block = vm_proc_block(obj)) != NULL &&
                (vm_block_type(block) == block_type_iseq) &&
                (iseq = vm_block_iseq(block)) != NULL) {
                rb_raw_iseq_info(BUFF_ARGS, iseq);
            }
            else if (rb_ractor_p(obj)) {
                rb_ractor_t *r = (void *)DATA_PTR(obj);
                if (r) {
                    APPENDF((BUFF_ARGS, "r:%d", r->pub.id));
                }
            }
            else {
                const char * const type_name = rb_objspace_data_type_name(obj);
                if (type_name) {
                    APPENDF((BUFF_ARGS, "%s", type_name));
                }
            }
            break;
          }
          case T_IMEMO: {
            APPENDF((BUFF_ARGS, "<%s> ", rb_imemo_name(imemo_type(obj))));

            switch (imemo_type(obj)) {
              case imemo_ment:
                {
                    const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;

                    APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
                             rb_id2name(me->called_id),
                             METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ?  "pub" :
                             METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                             METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                             METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                             METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                             me->def ? rb_method_type_name(me->def->type) : "NULL",
                             me->def ? me->def->alias_count : -1,
                             (void *)me->owner, // obj_info(me->owner),
                             (void *)me->defined_class)); //obj_info(me->defined_class)));

                    if (me->def) {
                        switch (me->def->type) {
                          case VM_METHOD_TYPE_ISEQ:
                            APPENDF((BUFF_ARGS, " (iseq:%s)", obj_info((VALUE)me->def->body.iseq.iseqptr)));
                            break;
                          default:
                            break;
                        }
                    }
                    break;
                }
              case imemo_iseq: {
                const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
                rb_raw_iseq_info(BUFF_ARGS, iseq);
                break;
              }
              case imemo_callinfo:
                {
                    const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
                    APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
                             rb_id2name(vm_ci_mid(ci)),
                             vm_ci_flag(ci),
                             vm_ci_argc(ci),
                             vm_ci_kwarg(ci) ? "available" : "NULL"));
                    break;
                }
              case imemo_callcache:
                {
                    const struct rb_callcache *cc = (const struct rb_callcache *)obj;
                    VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
                    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

                    APPENDF((BUFF_ARGS, "(klass:%s cme:%s%s (%p) call:%p",
                             NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                             cme ? rb_id2name(cme->called_id) : "<NULL>",
                             cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                             (void *)cme,
                             (void *)vm_cc_call(cc)));
                    break;
                }
              default:
                break;
            }
            break;
          }
          default:
            break;
        }
#undef TF
#undef C
    }
  end:
    if (poisoned) {
        asan_poison_object(obj);
    }

    return buff;
#undef APPENDF
#undef BUFF_ARGS
}
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

static const char *
obj_info(VALUE obj)
{
    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0;
    }

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
#else
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
#endif

MJIT_FUNC_EXPORTED const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}
void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

MJIT_FUNC_EXPORTED void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}
#if GC_DEBUG

void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);

    if (BUILTIN_TYPE(obj) == T_MOVED) {
        fprintf(stderr, "moved?: true\n");
    }
    else {
        fprintf(stderr, "moved?: false\n");
    }
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }

    fprintf(stderr, "marked?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age?         : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old?         : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}

static VALUE
gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
{
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}

void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}

#endif /* GC_DEBUG */
#if GC_DEBUG_STRESS_TO_CLASS
/*
 *  call-seq:
 *    GC.add_stress_to_class(class[, ...])
 *
 *  Raises NoMemoryError when allocating an instance of the given classes.
 *
 */
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        stress_to_class = rb_ary_tmp_new(argc);
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}

/*
 *  call-seq:
 *    GC.remove_stress_to_class(class[, ...])
 *
 *  No longer raises NoMemoryError when allocating an instance of the
 *  given classes.
 *
 */
static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        if (RARRAY_LEN(stress_to_class) == 0) {
            stress_to_class = 0;
        }
    }
    return Qnil;
}
#endif
/*
 * Document-module: ObjectSpace
 *
 *  The ObjectSpace module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  ObjectSpace also provides support for object finalizers, procs that will be
 *  called when a specific object is about to be destroyed by garbage
 *  collection. See the documentation for
 *  <code>ObjectSpace.define_finalizer</code> for important information on
 *  how to use this method correctly.
 *
 *     a = "A"
 *     b = "B"
 *
 *     ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
 *
 *     a = nil
 *     b = nil
 *
 *  _produces:_
 *
 *     Finalizer two on 537763470
 *     Finalizer one on 537763480
 */

/*
 *  Document-class: ObjectSpace::WeakMap
 *
 *  An ObjectSpace::WeakMap object holds references to
 *  any objects, but those objects can get garbage collected.
 *
 *  This class is mostly used internally by WeakRef, please use
 *  +lib/weakref.rb+ for the public interface.
 */

/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including time,
 *  length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *
 *    require 'rdoc/rdoc'
 *
 *    GC::Profiler.report
 *
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */

#include "gc.rbinc"
13531 VALUE rb_mObjSpace
;
13532 VALUE rb_mProfiler
;
13533 VALUE gc_constants
;
13535 rb_mGC
= rb_define_module("GC");
13537 gc_constants
= rb_hash_new();
13538 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG
));
13539 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE
)));
13540 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT
));
13541 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE
));
13542 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE
));
13543 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT
));
13544 rb_hash_aset(gc_constants
, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT
- 1)));
13545 OBJ_FREEZE(gc_constants
);
13546 /* internal constants */
13547 rb_define_const(rb_mGC
, "INTERNAL_CONSTANTS", gc_constants
);
13549 rb_mProfiler
= rb_define_module_under(rb_mGC
, "Profiler");
13550 rb_define_singleton_method(rb_mProfiler
, "enabled?", gc_profile_enable_get
, 0);
13551 rb_define_singleton_method(rb_mProfiler
, "enable", gc_profile_enable
, 0);
13552 rb_define_singleton_method(rb_mProfiler
, "raw_data", gc_profile_record_get
, 0);
13553 rb_define_singleton_method(rb_mProfiler
, "disable", gc_profile_disable
, 0);
13554 rb_define_singleton_method(rb_mProfiler
, "clear", gc_profile_clear
, 0);
13555 rb_define_singleton_method(rb_mProfiler
, "result", gc_profile_result
, 0);
13556 rb_define_singleton_method(rb_mProfiler
, "report", gc_profile_report
, -1);
13557 rb_define_singleton_method(rb_mProfiler
, "total_time", gc_profile_total_time
, 0);

    rb_mObjSpace = rb_define_module("ObjectSpace");

    rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);

    rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
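
    /* NoMemoryError is created eagerly here and stored in the VM's
     * special-exception table, so it can still be raised after an allocation
     * failure, when building a fresh exception object could itself fail. */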
    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);

    {
        VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
        rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
        rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
        rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
        rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
        rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
        rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
        rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
        rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
        rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
        rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
        rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
        rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
        rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
        rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
        rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
        rb_include_module(rb_cWeakMap, rb_mEnumerable);
    }

    /* internal methods */
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
    rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
#if MALLOC_ALLOCATED_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif

#if GC_DEBUG_STRESS_TO_CLASS
    rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
    rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
#endif

    {
        VALUE opts;
        /* GC build options */
        rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
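        /* OPT(o) stringizes its argument with #o and pushes that name onto
         * GC::OPTS only when the option macro expands to a nonzero value,
         * so the array lists the GC options compiled into this binary. */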
        OPT(RGENGC_CHECK_MODE);
        OPT(RGENGC_PROFILE);
        OPT(RGENGC_ESTIMATE_OLDMALLOC);
        OPT(GC_PROFILE_MORE_DETAIL);
        OPT(GC_ENABLE_LAZY_SWEEP);
        OPT(CALC_EXACT_MALLOC_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE_CHECK);
        OPT(GC_PROFILE_DETAIL_MEMORY);
#undef OPT
        OBJ_FREEZE(opts);
    }
}

#ifdef ruby_xmalloc
#undef ruby_xmalloc
#endif
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif
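
/* Out-of-line definitions of the ruby_x* allocator entry points. When
 * USE_GC_MALLOC_OBJ_INFO_DETAILS is enabled, each wrapper records a
 * file/line pair in the ruby_malloc_info_* globals before delegating to
 * its *_body implementation; because these are the function (not macro)
 * forms, the location recorded is this spot in gc.c. */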
void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);