/**********************************************************************

  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/
#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#endif
/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

#ifdef HAVE_MALLOC_TRIM
# include <malloc.h>

# ifdef __EMSCRIPTEN__
/* malloc_trim is defined in emscripten/emmalloc.h on emscripten. */
#  include <emscripten/emmalloc.h>
# endif
#endif

#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
/* LIST_HEAD conflicts with sys/queue.h on macOS */
# include <sys/user.h>
#endif
/* MALLOC_HEADERS_END */
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
# include <mach/task.h>
# include <mach/mach_init.h>
# include <mach/mach_port.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
104 #include "constant.h"
106 #include "debug_counter.h"
107 #include "eval_intern.h"
108 #include "id_table.h"
109 #include "internal.h"
110 #include "internal/class.h"
111 #include "internal/compile.h"
112 #include "internal/complex.h"
113 #include "internal/cont.h"
114 #include "internal/error.h"
115 #include "internal/eval.h"
116 #include "internal/gc.h"
117 #include "internal/hash.h"
118 #include "internal/imemo.h"
119 #include "internal/io.h"
120 #include "internal/numeric.h"
121 #include "internal/object.h"
122 #include "internal/proc.h"
123 #include "internal/rational.h"
124 #include "internal/sanitizers.h"
125 #include "internal/struct.h"
126 #include "internal/symbol.h"
127 #include "internal/thread.h"
128 #include "internal/variable.h"
129 #include "internal/warnings.h"
133 #include "ruby/debug.h"
137 #include "ruby/thread.h"
138 #include "ruby/util.h"
139 #include "ruby_assert.h"
140 #include "ruby_atomic.h"
144 #include "vm_callinfo.h"
145 #include "ractor_core.h"
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
    // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
    // no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }

    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif
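/* Illustrative sketch of the probing above, assuming a hypothetical glibc-like
 * allocator with 8 bytes of per-chunk metadata (sizes here are examples, not
 * guarantees):
 *
 *     malloc(64) -> usable size 72 -> wasted == 8, try the next offset
 *     malloc(56) -> usable size 56 -> wasted == 0, malloc_offset = 8
 *
 * Callers can then request (2^n - malloc_offset) bytes so that the chunk the
 * allocator hands back fits exactly in a power-of-two sized bucket.
 */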
size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
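/* Worked example (hypothetical numbers): growing a buffer of 100 8-byte
 * elements with malloc_offset == 0:
 *
 *     current_capacity = 100 * 8 = 800 bytes
 *     doubled          = 1600 bytes
 *     rounded up       = 2048 bytes (next power of 2)
 *     new capacity     = 2048 / 8 = 256 elements
 *
 * With a non-zero malloc_offset the byte count is reduced first, so the
 * resulting allocation still lands exactly on an allocator bucket boundary.
 */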
static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
{
    size_t z;
    bool p;
#if 0

#elif defined(ckd_add)
    p = ckd_add(&z, x, y);

#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);

#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
    p = dz > SIZE_MAX;
    z = (size_t)dz;

#else
    z = x + y;
    p = z < y;

#endif
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };
}
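/* Example of how the tag is consumed (mirrors size_mul_or_raise below): the
 * `left` member carries the overflow flag, `right` the (possibly wrapped) sum:
 *
 *     struct rbimpl_size_mul_overflow_tag t = size_add_overflow(SIZE_MAX, 1);
 *     // t.left == true, t.right has wrapped around to 0
 */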
static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}
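/* Typical call-site sketch (hypothetical names): computing a buffer size for
 * `len` elements of `size` bytes without risking silent wraparound:
 *
 *     size_t bytes = rb_size_mul_or_raise(len, size, rb_eArgError);
 *     void *buf = ruby_xmalloc(bytes);
 *
 * On overflow this raises (or calls rb_memerror() when inside GC) instead of
 * returning a too-small allocation.
 */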
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#endif
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#endif
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#endif
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 is disable */
#endif
#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
#endif
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#endif

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO  0.20
#endif
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#endif
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO  0.65
#endif

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#endif
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#endif

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#endif
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
#endif
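/* All of the defaults above are compile-time fallbacks; each can be
 * overridden when building, e.g.:
 *
 *     cc -DGC_HEAP_INIT_SLOTS=20000 -DGC_MALLOC_LIMIT_MIN='(32*1024*1024)' ...
 *
 * and most have runtime counterparts read from the environment at boot
 * (RUBY_GC_HEAP_INIT_SLOTS, RUBY_GC_MALLOC_LIMIT, RUBY_GC_MALLOC_LIMIT_MAX,
 * and so on), which take precedence over the compiled-in values.
 */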
#ifndef GC_CAN_COMPILE_COMPACTION
#if defined(__wasi__) /* WebAssembly doesn't support signals */
# define GC_CAN_COMPILE_COMPACTION 0
#else
# define GC_CAN_COMPILE_COMPACTION 1
#endif
#endif

#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#endif
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#endif
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#endif

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
typedef struct {
    size_t size_pool_init_slots[SIZE_POOL_COUNT];
    size_t heap_free_slots;
    double growth_factor;
    size_t growth_max_slots;

    double heap_free_slots_min_ratio;
    double heap_free_slots_goal_ratio;
    double heap_free_slots_max_ratio;
    double uncollectible_wb_unprotected_objects_limit_ratio;
    double oldobject_limit_factor;

    size_t malloc_limit_min;
    size_t malloc_limit_max;
    double malloc_limit_growth_factor;

    size_t oldmalloc_limit_min;
    size_t oldmalloc_limit_max;
    double oldmalloc_limit_growth_factor;
} ruby_gc_params_t;

static ruby_gc_params_t gc_params = {
    { 0 },
    GC_HEAP_FREE_SLOTS,
    GC_HEAP_GROWTH_FACTOR,
    GC_HEAP_GROWTH_MAX_SLOTS,

    GC_HEAP_FREE_SLOTS_MIN_RATIO,
    GC_HEAP_FREE_SLOTS_GOAL_RATIO,
    GC_HEAP_FREE_SLOTS_MAX_RATIO,
    GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
    GC_HEAP_OLDOBJECT_LIMIT_FACTOR,

    GC_MALLOC_LIMIT_MIN,
    GC_MALLOC_LIMIT_MAX,
    GC_MALLOC_LIMIT_GROWTH_FACTOR,

    GC_OLDMALLOC_LIMIT_MIN,
    GC_OLDMALLOC_LIMIT_MAX,
    GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
};
/* GC_DEBUG:
 *  enable to embed GC debugging information.
 */
#ifndef GC_DEBUG
#define GC_DEBUG 0
#endif

/* RGENGC_DEBUG:
 * 1: basic information
 * 2: remember set operation
 */
#ifndef RGENGC_DEBUG
#ifdef RUBY_DEVEL
#define RGENGC_DEBUG -1
#else
#define RGENGC_DEBUG 0
#endif
#endif
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
#elif defined(HAVE_VA_ARGS_MACRO)
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
#else
# define RGENGC_DEBUG_ENABLED(level) 0
#endif
int ruby_rgengc_debug;
/* RGENGC_CHECK_MODE
 * 0: disable all assertions
 * 1: enable assertions (to debug RGenGC)
 * 2: enable internal consistency check at each GC (for debugging)
 * 3: enable internal consistency check at each GC step (for debugging)
 * 4: enable liveness check
 * 5: show all references
 */
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#endif

// Note: using RUBY_ASSERT_WHEN() would expand the macro in expr (info by nobu).
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)

/* RGENGC_PROFILE
 * 0: disable RGenGC profiling
 * 1: enable profiling for basic information
 * 2: enable profiling for each type
 */
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#endif

/* RGENGC_ESTIMATE_OLDMALLOC
 * Enable/disable the estimation of how much malloc'ed memory is retained by
 * old objects. If the estimate exceeds a threshold, a full GC is invoked.
 * 0: disable estimation.
 * 1: enable estimation.
 */
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#endif

/* RGENGC_FORCE_MAJOR_GC
 * Force major/full GC if this macro is not 0.
 */
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#endif

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#endif
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#endif
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#endif
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#endif
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#endif
#else
#define MALLOC_ALLOCATED_SIZE 0
#endif
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#endif

#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
#endif

#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#endif
typedef enum {
    GPR_FLAG_NONE               = 0x000,
    /* major reason */
    GPR_FLAG_MAJOR_BY_NOFREE    = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN    = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY     = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE     = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
#endif
    GPR_FLAG_MAJOR_MASK         = 0x0ff,

    /* gc reason */
    GPR_FLAG_NEWOBJ             = 0x100,
    GPR_FLAG_MALLOC             = 0x200,
    GPR_FLAG_METHOD             = 0x400,
    GPR_FLAG_CAPI               = 0x800,
    GPR_FLAG_STRESS             = 0x1000,

    /* others */
    GPR_FLAG_IMMEDIATE_SWEEP    = 0x2000,
    GPR_FLAG_HAVE_FINALIZE      = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK     = 0x8000,
    GPR_FLAG_FULL_MARK          = 0x10000,
    GPR_FLAG_COMPACT            = 0x20000,

    GPR_DEFAULT_REASON =
        (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
         GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
} gc_profile_record_flag;
typedef struct gc_profile_record {
    unsigned int flags;

    double gc_time;
    double gc_invoke_time;

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;
    size_t moved_objects;

#if GC_PROFILE_MORE_DETAIL
    double gc_mark_time;
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    double prepare_time;
    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
    long maxrss;
    long minflt;
    long majflt;
#endif
#endif
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#endif

#if RGENGC_PROFILE > 0
    size_t old_objects;
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#endif
} gc_profile_record;
struct RMoved {
    VALUE flags;
    VALUE dummy;
    VALUE destination;
    shape_id_t original_shape_id;
};

#define RMOVED(obj) ((struct RMoved *)(obj))
typedef struct RVALUE {
    union {
        struct {
            VALUE flags; /* always 0 for freed obj */
            struct RVALUE *next;
        } free;
        struct RMoved  moved;
        struct RBasic  basic;
        struct RObject object;
        struct RClass  klass;
        struct RFloat  flonum;
        struct RString string;
        struct RArray  array;
        struct RRegexp regexp;
        struct RHash   hash;
        struct RData   data;
        struct RTypedData typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile   file;
        struct RMatch  match;
        struct RRational rational;
        struct RComplex complex;
        struct RSymbol symbol;
        union {
            rb_cref_t cref;
            struct vm_svar svar;
            struct vm_throw_data throw_data;
            struct vm_ifunc ifunc;
            struct MEMO memo;
            struct rb_method_entry_struct ment;
            const rb_iseq_t iseq;
            rb_env_t env;
            struct rb_imemo_tmpbuf_struct alloc;
            rb_ast_t ast;
        } imemo;
        struct {
            struct RBasic basic;
            VALUE v1;
            VALUE v2;
            VALUE v3;
        } values;
    } as;
} RVALUE;
/* These members are located at the end of the slot that the object is in. */
#if RACTOR_CHECK_MODE || GC_DEBUG
struct rvalue_overhead {
# if RACTOR_CHECK_MODE
    uint32_t _ractor_belonging_id;
# endif
# if GC_DEBUG
    const char *file;
    int line;
# endif
};

// Make sure that RVALUE_OVERHEAD aligns to sizeof(VALUE)
# define RVALUE_OVERHEAD (sizeof(struct { \
    union { \
        struct rvalue_overhead overhead; \
        VALUE value; \
    }; \
}))
# define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_obj_slot_size(obj)))
#else
# define RVALUE_OVERHEAD 0
#endif
STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5));
STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
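/* Concrete numbers implied by the assertions above on a typical 64-bit build:
 * SIZEOF_VALUE == 8, so every RVALUE occupies exactly 5 * 8 = 40 bytes and is
 * 8-byte aligned. The size-pool slot sizes are then multiples of this
 * BASE_SLOT_SIZE (40, 80, 160, ... bytes when RVALUE_OVERHEAD is 0).
 */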
typedef uintptr_t bits_t;
enum {
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};

struct heap_page_header {
    struct heap_page *page;
};

struct heap_page_body {
    struct heap_page_header header;
    /* char gap[];      */
    /* RVALUE values[]; */
};
#define STACK_CHUNK_SIZE 500

typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

typedef struct mark_stack {
    stack_chunk_t *chunk;
    stack_chunk_t *cache;
    int index;
    int limit;
    size_t cache_size;
    size_t unused_cache_size;
} mark_stack_t;
#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)

typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
typedef struct rb_heap_struct {
    struct heap_page *free_pages;
    struct ccan_list_head pages;
    struct heap_page *sweeping_page; /* iterator for .pages */
    struct heap_page *compact_cursor;
    uintptr_t compact_cursor_index;
    struct heap_page *pooled_pages;
    size_t total_pages;      /* total page count in a heap */
    size_t total_slots;      /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
} rb_heap_t;

typedef struct rb_size_pool_struct {
    short slot_size;

    size_t allocatable_pages;

    /* Basic statistics */
    size_t total_allocated_pages;
    size_t total_freed_pages;
    size_t force_major_gc_count;
    size_t force_incremental_marking_finish_count;
    size_t total_allocated_objects;
    size_t total_freed_objects;

    /* Sweeping statistics */
    size_t freed_slots;
    size_t empty_slots;

    rb_heap_t eden_heap;
    rb_heap_t tomb_heap;
} rb_size_pool_t;
enum gc_mode {
    gc_mode_none,
    gc_mode_marking,
    gc_mode_sweeping,
    gc_mode_compacting,
};

typedef struct rb_objspace {
    struct {
        size_t limit;
        size_t increase;
#if MALLOC_ALLOCATED_SIZE
        size_t allocated_size;
        size_t allocations;
#endif
    } malloc_params;

    struct {
        unsigned int mode : 2;
        unsigned int immediate_sweep : 1;
        unsigned int dont_gc : 1;
        unsigned int dont_incremental : 1;
        unsigned int during_gc : 1;
        unsigned int during_compacting : 1;
        unsigned int during_reference_updating : 1;
        unsigned int gc_stressful : 1;
        unsigned int has_newobj_hook : 1;
        unsigned int during_minor_gc : 1;
        unsigned int during_incremental_marking : 1;
        unsigned int measure_gc : 1;
    } flags;

    rb_event_flag_t hook_events;
    unsigned long long next_object_id;

    rb_size_pool_t size_pools[SIZE_POOL_COUNT];

    struct {
        rb_atomic_t finalizing;
    } atomic_flags;

    mark_stack_t mark_stack;
    size_t marked_slots;

    struct {
        struct heap_page **sorted;
        size_t allocated_pages;
        size_t allocatable_pages;
        size_t sorted_length;
        uintptr_t range[2];
        size_t freeable_pages;

        /* final */
        size_t final_slots;
        VALUE deferred_final;
    } heap_pages;

    st_table *finalizer_table;

    struct {
        int run;
        unsigned int latest_gc_info;
        gc_profile_record *records;
        gc_profile_record *current_record;
        size_t next_index;
        size_t size;

#if GC_PROFILE_MORE_DETAIL
        double prepare_time;
#endif
        double invoke_time;

        size_t minor_gc_count;
        size_t major_gc_count;
        size_t compact_count;
        size_t read_barrier_faults;
#if RGENGC_PROFILE > 0
        size_t total_generated_normal_object_count;
        size_t total_generated_shady_object_count;
        size_t total_shade_operation_count;
        size_t total_promoted_count;
        size_t total_remembered_normal_object_count;
        size_t total_remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
        size_t generated_normal_object_count_types[RUBY_T_MASK];
        size_t generated_shady_object_count_types[RUBY_T_MASK];
        size_t shade_operation_count_types[RUBY_T_MASK];
        size_t promoted_types[RUBY_T_MASK];
        size_t remembered_normal_object_count_types[RUBY_T_MASK];
        size_t remembered_shady_object_count_types[RUBY_T_MASK];
#endif
#endif /* RGENGC_PROFILE */

        /* temporary profiling space */
        double gc_sweep_start_time;
        size_t total_allocated_objects_at_gc_start;
        size_t heap_used_at_gc_start;

        /* basic statistics */
        size_t count;
        uint64_t marking_time_ns;
        struct timespec marking_start_time;
        uint64_t sweeping_time_ns;
        struct timespec sweeping_start_time;

        /* Weak references */
        size_t weak_references_count;
        size_t retained_weak_references_count;
    } profile;

    VALUE gc_stress_mode;

    struct {
        VALUE parent_object;
        int need_major_gc;
        size_t last_major_gc;
        size_t uncollectible_wb_unprotected_objects;
        size_t uncollectible_wb_unprotected_objects_limit;
        size_t old_objects;
        size_t old_objects_limit;

#if RGENGC_ESTIMATE_OLDMALLOC
        size_t oldmalloc_increase;
        size_t oldmalloc_increase_limit;
#endif

#if RGENGC_CHECK_MODE >= 2
        struct st_table *allrefs_table;
        size_t error_count;
#endif
    } rgengc;

    struct {
        size_t considered_count_table[T_MASK];
        size_t moved_count_table[T_MASK];
        size_t moved_up_count_table[T_MASK];
        size_t moved_down_count_table[T_MASK];
        size_t total_moved;

        /* This function will be used, if set, to sort the heap prior to compaction */
        gc_compact_compare_func compare_func;
    } rcompactor;

    struct {
        size_t pooled_slots;
        size_t step_slots;
    } rincgc;

    st_table *id_to_obj_tbl;
    st_table *obj_to_id_tbl;

#if GC_DEBUG_STRESS_TO_CLASS
    VALUE stress_to_class;
#endif

    rb_darray(VALUE *) weak_references;
    rb_postponed_job_handle_t finalize_deferred_pjob;

#ifdef RUBY_ASAN_ENABLED
    const rb_execution_context_t *marking_machine_context_ec;
#endif
} rb_objspace_t;
#ifndef HEAP_PAGE_ALIGN_LOG
/* default tiny heap size: 64KiB */
#define HEAP_PAGE_ALIGN_LOG 16
#endif

#define BASE_SLOT_SIZE (sizeof(RVALUE) + RVALUE_OVERHEAD)

#define CEILDIV(i, mod) roomof(i, mod)
enum {
    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
};
#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
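/* Worked example with the default HEAP_PAGE_ALIGN_LOG of 16, a 40-byte
 * BASE_SLOT_SIZE (64-bit, RVALUE_OVERHEAD == 0), and assuming an 8-byte
 * heap_page_header:
 *
 *     HEAP_PAGE_SIZE         = 65536 bytes
 *     HEAP_PAGE_OBJ_LIMIT    = (65536 - 8) / 40 = 1638 slots
 *     HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(65536, 40), 64) = 26 words
 *     HEAP_PAGE_BITMAP_SIZE  = 8 * 26 = 208 bytes per bitmap
 */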
#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
#endif

#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
/* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
 * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */

#ifndef HAVE_MMAP
/* We can't use mmap of course, if it is not available. */
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;

#elif defined(__wasm__)
/* wasmtime does not have proper support for mmap.
 * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
 */
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;

#elif HAVE_CONST_PAGE_SIZE
/* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);

#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
/* If we can use the maximum page size. */
static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;

#elif defined(PAGE_SIZE)
/* If the PAGE_SIZE macro can be used dynamically. */
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)

#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
/* If we can use sysconf to determine the page size. */
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)

#else
/* Otherwise we can't determine the system page size, so don't use mmap. */
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#endif

#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
/* We can determine the system page size at runtime. */
# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)

static bool heap_page_alloc_use_mmap;
#endif
#define RVALUE_AGE_BIT_COUNT 2
#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)

struct heap_page {
    short slot_size;
    short total_slots;
    short free_slots;
    short final_slots;
    short pinned_slots;
    struct {
        unsigned int before_sweep : 1;
        unsigned int has_remembered_objects : 1;
        unsigned int has_uncollectible_wb_unprotected_objects : 1;
        unsigned int in_tomb : 1;
    } flags;

    rb_size_pool_t *size_pool;

    struct heap_page *free_next;
    uintptr_t start;
    RVALUE *freelist;
    struct ccan_list_node page_node;

    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
    /* the following three bitmaps are cleared at the beginning of full GC */
    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];

    bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];

    /* If set, the object is not movable */
    bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
};
/*
 * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
 */
static void
asan_lock_freelist(struct heap_page *page)
{
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
}

/*
 * When asan is enabled, this will enable the ability to write to the freelist
 */
static void
asan_unlock_freelist(struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
}
#define GET_PAGE_BODY(x)   ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x)   (GET_PAGE_HEADER(x)->page)

#define NUM_IN_PAGE(p)   (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
#define BITMAP_INDEX(p)  (NUM_IN_PAGE(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p)    ((bits_t)1 << BITMAP_OFFSET(p))

/* Bitmap Operations */
#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p)   ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p)  ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))

/* getting bitmap */
#define GET_HEAP_MARK_BITS(x)           (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x)         (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)  (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x)        (&GET_HEAP_PAGE(x)->marking_bits[0])

#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3

#define RVALUE_AGE_BITMAP_INDEX(n)  (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
#define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
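/* Worked example (hypothetical addresses): for an object at 0x56780140 inside
 * a page body aligned at 0x56780000, with BASE_SLOT_SIZE == 40:
 *
 *     NUM_IN_PAGE(p)   = 0x140 / 40 = 8     (9th slot in the page)
 *     BITMAP_INDEX(p)  = 8 / 64     = 0     (first bits_t word)
 *     BITMAP_OFFSET(p) = 8 % 64     = 8
 *     BITMAP_BIT(p)    = (bits_t)1 << 8
 *
 * so MARKED_IN_BITMAP(bits, p) tests bit 8 of word 0.
 */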
#define RVALUE_OLD_AGE   3

static int
RVALUE_AGE_GET(VALUE obj)
{
    bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
    return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
}

static void
RVALUE_AGE_SET(VALUE obj, int age)
{
    RUBY_ASSERT(age <= RVALUE_OLD_AGE);
    bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
    // clear the bits
    age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
    // shift the correct value in
    age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
    if (age == RVALUE_OLD_AGE) {
        RB_FL_SET_RAW(obj, RUBY_FL_PROMOTED);
    }
    else {
        RB_FL_UNSET_RAW(obj, RUBY_FL_PROMOTED);
    }
}
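/* The age bitmap packs RVALUE_AGE_BIT_COUNT (2) bits per slot, so one bits_t
 * word holds the ages of 32 slots on 64-bit. Illustrative transition for a
 * slot whose age goes from 2 (binary 10) to 3 (binary 11):
 *
 *     word &= ~(0x3 << offset);        // clear the two age bits
 *     word |=  ((bits_t)3 << offset);  // store the new age
 *
 * Reaching RVALUE_OLD_AGE also sets RUBY_FL_PROMOTED on the object itself, so
 * the write barrier can test promotion without touching the page bitmap.
 */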
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)
#define unless_objspace(objspace) \
    rb_objspace_t *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->objspace; \
    else /* return; or objspace will be warned uninitialized */
#define malloc_limit              objspace->malloc_params.limit
#define malloc_increase           objspace->malloc_params.increase
#define malloc_allocated_size     objspace->malloc_params.allocated_size
#define heap_pages_sorted         objspace->heap_pages.sorted
#define heap_allocated_pages      objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length  objspace->heap_pages.sorted_length
#define heap_pages_lomem          objspace->heap_pages.range[0]
#define heap_pages_himem          objspace->heap_pages.range[1]
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots    objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define size_pools                objspace->size_pools
#define during_gc                 objspace->flags.during_gc
#define finalizing                objspace->atomic_flags.finalizing
#define finalizer_table           objspace->finalizer_table
#define ruby_gc_stressful         objspace->flags.gc_stressful
#define ruby_gc_stress_mode       objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class           objspace->stress_to_class
#define set_stress_to_class(c)    (stress_to_class = (c))
#else
#define stress_to_class           (objspace, 0)
#define set_stress_to_class(c)    (objspace, (c))
#endif
#if 0
#define dont_gc_on()   (fprintf(stderr, "dont_gc_on@%s:%d\n",      __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off()  (fprintf(stderr, "dont_gc_off@%s:%d\n",     __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
#define dont_gc_val()  (objspace->flags.dont_gc)
#else
#define dont_gc_on()   (objspace->flags.dont_gc = 1)
#define dont_gc_off()  (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (((int)(b)), objspace->flags.dont_gc = (b))
#define dont_gc_val()  (objspace->flags.dont_gc)
#endif
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
#if RGENGC_CHECK_MODE > 0
    switch (mode) {
      case gc_mode_none:
      case gc_mode_marking:
      case gc_mode_sweeping:
      case gc_mode_compacting:
        break;
      default:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    }
#endif
    return mode;
}
static inline bool
has_sweeping_pages(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
            return TRUE;
        }
    }
    return FALSE;
}
static inline size_t
heap_eden_total_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
    }
    return count;
}

static inline size_t
heap_eden_total_slots(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
    }
    return count;
}

static inline size_t
heap_tomb_total_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
    }
    return count;
}

static inline size_t
heap_allocatable_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pools[i].allocatable_pages;
    }
    return count;
}

static inline size_t
heap_allocatable_slots(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
    }
    return count;
}

static inline size_t
total_allocated_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        count += size_pool->total_allocated_pages;
    }
    return count;
}

static inline size_t
total_freed_pages(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        count += size_pool->total_freed_pages;
    }
    return count;
}

static inline size_t
total_allocated_objects(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        count += size_pool->total_allocated_objects;
    }
    return count;
}

static inline size_t
total_freed_objects(rb_objspace_t *objspace)
{
    size_t count = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        count += size_pool->total_freed_objects;
    }
    return count;
}
#define gc_mode(objspace)        gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
#define gc_needs_major_flags     objspace->rgengc.need_major_gc

#define is_marking(objspace)             (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace)            (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace)        ((objspace)->flags.during_minor_gc == FALSE)
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
#define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
#define is_lazy_sweeping(objspace)       (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
#if SIZEOF_LONG == SIZEOF_VOIDP
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif
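/* Example of the round trip when SIZEOF_LONG == SIZEOF_VOIDP: an
 * address-derived object id, as a Fixnum VALUE, is the (even, aligned) heap
 * pointer with FIXNUM_FLAG (0x1) set, so XOR-ing the flag back out recovers
 * the pointer:
 *
 *     objid = ref | FIXNUM_FLAG;      // e.g. 0x7f9a12345679
 *     ref   = obj_id_to_ref(objid);   //      0x7f9a12345678
 */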
#define RANY(o) ((RVALUE*)(o))

struct RZombie {
    struct RBasic basic;
    VALUE next;
    void (*dfree)(void *);
    void *data;
};

#define RZOMBIE(o) ((struct RZombie *)(o))

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

int ruby_disable_gc = 0;
int ruby_enable_autocompact = 0;
#if RGENGC_CHECK_MODE
gc_compact_compare_func ruby_autocompact_compare_func;
#endif

void rb_vm_update_references(void *ptr);

void rb_gcdebug_print_obj_condition(VALUE obj);

NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));

static void init_mark_stack(mark_stack_t *stack);
static int garbage_collect(rb_objspace_t *, unsigned int reason);

static int  gc_start(rb_objspace_t *objspace, unsigned int reason);
static void gc_rest(rb_objspace_t *objspace);
enum gc_enter_event {
    gc_enter_event_start,
    gc_enter_event_continue,
    gc_enter_event_rest,
    gc_enter_event_finalizer,
    gc_enter_event_rb_memerror,
};
static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static void gc_marking_enter(rb_objspace_t *objspace);
static void gc_marking_exit(rb_objspace_t *objspace);
static void gc_sweeping_enter(rb_objspace_t *objspace);
static void gc_sweeping_exit(rb_objspace_t *objspace);
static bool gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

static void gc_sweep(rb_objspace_t *objspace);
static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool);
static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));

static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr));

static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);

static VALUE gc_disable_no_rest(rb_objspace_t *);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_timer_start(rb_objspace_t *);
static inline void gc_prof_timer_stop(rb_objspace_t *);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
static inline void gc_prof_set_heap_info(rb_objspace_t *);

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
#else
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
#endif
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_info_basic(VALUE obj);
static const char *obj_type_name(VALUE obj);

static void gc_finalize_deferred(void *dmy);
#if USE_TICK_T

/* the following code is only for internal tuning. */

/* Source code to use RDTSC is quoted and modified from
 * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
 */

#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static inline tick_t
tick(void)
{
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
    return x;
}

#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
}

#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long long val = __builtin_ppc_get_timebase();
    return val;
}

/* Implementation for macOS PPC by @nobu
 * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
 */
#elif defined(__POWERPC__) && defined(__APPLE__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long int upper, lower, tmp;
    # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
    # define mftb(r)  __asm__ volatile("mftb  %0" : "=r"(r))
    do {
        mftbu(upper);
        mftb(lower);
        mftbu(tmp);
    } while (tmp != upper);
    return ((tick_t)upper << 32) | lower;
}

#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
#define PRItick "lu"

static __inline__ tick_t
tick(void)
{
    unsigned long val;
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
    return val;
}

#elif defined(_WIN32) && defined(_MSC_VER)
#include <intrin.h>
typedef unsigned __int64 tick_t;
#define PRItick "llu"

static inline tick_t
tick(void)
{
    return __rdtsc();
}

#else /* use clock */
typedef clock_t tick_t;
#define PRItick "llu"

static inline tick_t
tick(void)
{
    return clock();
}
#endif /* TSC */
#else /* USE_TICK_T */
#define MEASURE_LINE(expr) expr
#endif /* USE_TICK_T */
#define asan_unpoisoning_object(obj) \
    for (void *poisoned = asan_unpoison_object_temporary(obj), \
              *unpoisoning = &poisoned; /* flag to loop just once */ \
         unpoisoning; \
         unpoisoning = asan_poison_object_restore(obj, poisoned))
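/* Usage sketch: the for-header above runs its body exactly once with `obj`
 * unpoisoned, then re-poisons it on the way out:
 *
 *     asan_unpoisoning_object(obj) {
 *         type = BUILTIN_TYPE(obj);   // safe to read the slot here
 *     }
 */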
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f)  FL_CHECK2("FL_TEST2",  x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f)   FL_CHECK2("FL_SET2",   x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))

#define RVALUE_MARK_BITMAP(obj)       MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))

#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)  MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))

#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)  MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj)        MARKED_IN_BITMAP((page)->marking_bits, (obj))

static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
static int
check_rvalue_consistency_force(const VALUE obj, int terminate)
{
    int err = 0;
    rb_objspace_t *objspace = &rb_objspace;

    RB_VM_LOCK_ENTER_NO_BARRIER();
    {
        if (SPECIAL_CONST_P(obj)) {
            fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
            err++;
        }
        else if (!is_pointer_to_heap(objspace, (void *)obj)) {
            /* check if it is in tomb_pages */
            struct heap_page *page = NULL;
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                    if (page->start <= (uintptr_t)obj &&
                            (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                        fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                                (void *)obj, (void *)page);
                        err++;
                        goto skip;
                    }
                }
            }
            fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
            err++;
          skip:
            ;
        }
        else {
            const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
            const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
            const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
            const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
            const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
            const int age = RVALUE_AGE_GET((VALUE)obj);

            if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
                fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
                err++;
            }
            if (BUILTIN_TYPE(obj) == T_NONE) {
                fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
                err++;
            }
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
                err++;
            }

            obj_memsize_of((VALUE)obj, FALSE);

            /* check generation
             *
             * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
             */
            if (age > 0 && wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
                err++;
            }

            if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
                err++;
            }

            if (!is_full_marking(objspace)) {
                if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                    fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                            obj_info(obj), age);
                    err++;
                }
                if (remembered_bit && age != RVALUE_OLD_AGE) {
                    fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                            obj_info(obj), age);
                    err++;
                }
            }

            /*
             * check coloring
             *
             *               marking:false marking:true
             * marked:false  white         *invalid*
             * marked:true   black         grey
             */
            if (is_incremental_marking(objspace) && marking_bit) {
                if (!is_marking(objspace) && !mark_bit) {
                    fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
                    err++;
                }
            }
        }
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
    }
    return err;
}
#if RGENGC_CHECK_MODE == 0
static inline VALUE
check_rvalue_consistency(const VALUE obj)
{
    return obj;
}
#else
static VALUE
check_rvalue_consistency(const VALUE obj)
{
    check_rvalue_consistency_force(obj, TRUE);
    return obj;
}
#endif
static inline bool
gc_object_moved_p(rb_objspace_t *objspace, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        return FALSE;
    }
    else {
        void *poisoned = asan_unpoison_object_temporary(obj);

        int ret = BUILTIN_TYPE(obj) == T_MOVED;
        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
        return ret;
    }
}
static inline int
RVALUE_MARKED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;
}

static inline int
RVALUE_PINNED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;
}

static inline int
RVALUE_WB_UNPROTECTED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
}

static inline int
RVALUE_MARKING(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
}

static inline int
RVALUE_REMEMBERED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
}

static inline int
RVALUE_UNCOLLECTIBLE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
}
static inline int
RVALUE_OLD_P(VALUE obj)
{
    GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
    check_rvalue_consistency(obj);
    // Because this will only ever be called on GC controlled objects,
    // we can use the faster _RAW function here
    return RB_OBJ_PROMOTED_RAW(obj);
}
static inline void
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
    objspace->rgengc.old_objects++;

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
#endif
}

static inline void
RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
}
/* set age to age+1 */
static inline void
RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
{
    int age = RVALUE_AGE_GET((VALUE)obj);

    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    }

    age++;
    RVALUE_AGE_SET(obj, age);

    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    }

    check_rvalue_consistency(obj);
}
static inline void
RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));
    RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
    check_rvalue_consistency(obj);
}

static inline void
RVALUE_AGE_RESET(VALUE obj)
{
    RVALUE_AGE_SET(obj, 0);
}
static inline void
RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
    }

    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
    RVALUE_AGE_RESET(obj);

    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;
    }

    check_rvalue_consistency(obj);
}
static inline int
RVALUE_BLACK_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
}

#if 0
static inline int
RVALUE_GREY_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
}
#endif

static inline int
RVALUE_WHITE_P(VALUE obj)
{
    return RVALUE_MARKED(obj) == FALSE;
}
/*
  --------------------------- ObjectSpace -----------------------------
*/

static inline void *
calloc1(size_t n)
{
    return calloc(1, n);
}

static VALUE initial_stress = Qfalse;

void
rb_gc_initial_stress_set(VALUE flag)
{
    initial_stress = flag;
}
static void *rb_gc_impl_objspace_alloc(void);

#if USE_SHARED_GC
# define RUBY_GC_LIBRARY_PATH "RUBY_GC_LIBRARY_PATH"

static void
ruby_external_gc_init(void)
{
    char *gc_so_path = getenv(RUBY_GC_LIBRARY_PATH);
    void *handle = NULL;
    if (gc_so_path && dln_supported_p()) {
        char error[1024];
        handle = dln_open(gc_so_path, error, sizeof(error));
        if (!handle) {
            fprintf(stderr, "%s", error);
            rb_bug("ruby_external_gc_init: Shared library %s cannot be opened", gc_so_path);
        }
    }

# define load_external_gc_func(name) do { \
    if (handle) { \
        rb_gc_functions->name = dln_symbol(handle, "rb_gc_impl_" #name); \
        if (!rb_gc_functions->name) { \
            rb_bug("ruby_external_gc_init: " #name " func not exported by library %s", gc_so_path); \
        } \
    } \
    else { \
        rb_gc_functions->name = rb_gc_impl_##name; \
    } \
} while (0)

    load_external_gc_func(objspace_alloc);

# undef load_external_gc_func
}

# define rb_gc_impl_objspace_alloc rb_gc_functions->objspace_alloc
#endif

rb_objspace_t *
rb_objspace_alloc(void)
{
#if USE_SHARED_GC
    ruby_external_gc_init();
#endif

    return (rb_objspace_t *)rb_gc_impl_objspace_alloc();
}

#if USE_SHARED_GC
# undef rb_gc_impl_objspace_alloc
#endif
static void free_stack_chunks(mark_stack_t *);
static void mark_stack_free_cache(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);

void
rb_objspace_free(rb_objspace_t *objspace)
{
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");

    free(objspace->profile.records);
    objspace->profile.records = NULL;

    if (heap_pages_sorted) {
        size_t i;
        size_t total_heap_pages = heap_allocated_pages;
        for (i = 0; i < total_heap_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        }
        free(heap_pages_sorted);
        heap_allocated_pages = 0;
        heap_pages_sorted_length = 0;
        heap_pages_lomem = 0;
        heap_pages_himem = 0;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
            SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
        }
    }

    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);

    free_stack_chunks(&objspace->mark_stack);
    mark_stack_free_cache(&objspace->mark_stack);

    rb_darray_free(objspace->weak_references);

    free(objspace);
}
static void
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
{
    struct heap_page **sorted;
    size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);

    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
              next_length, size);

    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
    }
    else {
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
    }

    if (sorted == 0) {
        rb_memerror();
    }

    heap_pages_sorted_length = next_length;
}
static void
heap_pages_expand_sorted(rb_objspace_t *objspace)
{
    /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length
     * because heap_allocatable_pages contains heap_tomb->total_pages (recycle heap_tomb pages).
     * however, if there are pages which do not have empty slots, then try to create new pages
     * so that the additional allocatable_pages counts (heap_tomb->total_pages) are added.
     */
    size_t next_length = heap_allocatable_pages(objspace);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
    }

    if (next_length > heap_pages_sorted_length) {
        heap_pages_expand_sorted_to(objspace, next_length);
    }

    GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
}
static void
size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
{
    size_pool->allocatable_pages = s;
    heap_pages_expand_sorted(objspace);
}
static void
heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    ASSERT_vm_locking();

    RVALUE *p = (RVALUE *)obj;

    asan_unpoison_object(obj, false);

    asan_unlock_freelist(page);

    p->as.free.flags = 0;
    p->as.free.next = page->freelist;
    page->freelist = p;
    asan_lock_freelist(page);

    RVALUE_AGE_RESET(obj);

    if (RGENGC_CHECK_MODE &&
        /* obj should belong to page */
        !(page->start <= (uintptr_t)obj &&
          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
          obj % BASE_SLOT_SIZE == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    }

    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
}
static void
heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
{
    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->free_pages;
    heap->free_pages = page;

    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);

    asan_lock_freelist(page);
}
static void
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->pooled_pages;
    heap->pooled_pages = page;
    objspace->rincgc.pooled_slots += page->free_slots;

    asan_lock_freelist(page);
}
static void
heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    ccan_list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;
}
static void rb_aligned_free(void *ptr, size_t size);

static void
heap_page_body_free(struct heap_page_body *page_body)
{
    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);

    if (HEAP_PAGE_ALLOC_USE_MMAP) {
#ifdef HAVE_MMAP
        GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(page_body, HEAP_PAGE_SIZE)) {
            rb_bug("heap_page_body_free: munmap failed");
        }
#endif
    }
    else {
        rb_aligned_free(page_body, HEAP_PAGE_SIZE);
    }
}
static void
heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
    heap_allocated_pages--;
    page->size_pool->total_freed_pages++;
    heap_page_body_free(GET_PAGE_BODY(page->start));
    free(page);
}
static void *
rb_aligned_malloc(size_t alignment, size_t size)
{
    /* alignment must be a power of 2 */
    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);

    void *res;

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&res, alignment, size) != 0) {
        return NULL;
    }
#elif defined(HAVE_MEMALIGN)
    res = memalign(alignment, size);
#else
    char* aligned;
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;
#endif

    GC_ASSERT((uintptr_t)res % alignment == 0);

    return res;
}
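/* Worked example of the generic fallback above (hypothetical addresses),
 * with alignment == 0x10000 and sizeof(void*) == 8:
 *
 *     res     = malloc(0x10000 + size + 8)   -> 0x5555a008
 *     aligned = 0x5555a008 + 0x10000 + 8     -> 0x5656a010
 *     aligned -= aligned & 0xffff            -> 0x56560000
 *     ((void**)aligned)[-1] = res;   // stash the raw pointer just below
 *
 * rb_aligned_free() can then read the word at aligned[-1] to recover the
 * original malloc() result.
 */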
static void
heap_pages_free_unused_pages(rb_objspace_t *objspace)
{
    size_t i, j;

    bool has_pages_in_tomb_heap = FALSE;
    for (i = 0; i < SIZE_POOL_COUNT; i++) {
        if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
            has_pages_in_tomb_heap = TRUE;
            break;
        }
    }

    if (has_pages_in_tomb_heap) {
        for (i = j = 0; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];

            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
                heap_page_free(objspace, page);
            }
            else {
                if (i != j) {
                    heap_pages_sorted[j] = page;
                }
                j++;
            }
        }

        struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
        uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
        GC_ASSERT(himem <= heap_pages_himem);
        heap_pages_himem = himem;

        struct heap_page *lopage = heap_pages_sorted[0];
        uintptr_t lomem = (uintptr_t)lopage->start;
        GC_ASSERT(lomem >= heap_pages_lomem);
        heap_pages_lomem = lomem;

        GC_ASSERT(j == heap_allocated_pages);
    }
}
static struct heap_page_body *
heap_page_body_allocate(void)
{
    struct heap_page_body *page_body;

    if (HEAP_PAGE_ALLOC_USE_MMAP) {
#ifdef HAVE_MMAP
        GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);

        char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            return NULL;
        }

        char *aligned = ptr + HEAP_PAGE_ALIGN;
        aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);

        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for start");
            }
        }

        size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for end");
            }
        }

        page_body = (struct heap_page_body *)aligned;
#endif
    }
    else {
        page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
    }

    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);

    return page_body;
}
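/* mmap cannot be asked for an aligned mapping directly, so the branch above
 * over-maps HEAP_PAGE_ALIGN extra bytes, picks the first HEAP_PAGE_ALIGN-
 * aligned address inside the mapping, and munmap()s the unused head and
 * tail. Both trims are page-multiple sized (asserted against
 * sysconf(_SC_PAGE_SIZE)), so the kernel can reclaim that address space
 * immediately. */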
static struct heap_page *
heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    uintptr_t start, end, p;
    struct heap_page *page;
    uintptr_t hi, lo, mid;
    size_t stride = size_pool->slot_size;
    unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;

    /* assign heap_page body (contains heap_page_header and RVALUEs) */
    struct heap_page_body *page_body = heap_page_body_allocate();
    if (page_body == 0) {
        rb_memerror();
    }

    /* assign heap_page entry */
    page = calloc1(sizeof(struct heap_page));
    if (page == 0) {
        heap_page_body_free(page_body);
        rb_memerror();
    }

    /* adjust obj_limit (object number available in this page) */
    start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));

    if (start % BASE_SLOT_SIZE != 0) {
        int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
        start = start + delta;
        GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);

        /* Find a num in page that is evenly divisible by `stride`.
         * This is to ensure that objects are aligned with bit planes.
         * In other words, ensure there are an even number of objects
         * per bit plane. */
        if (NUM_IN_PAGE(start) == 1) {
            start += stride - BASE_SLOT_SIZE;
        }

        GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);

        limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
    }
    end = start + (limit * (int)stride);

    /* setup heap_pages_sorted */
    lo = 0;
    hi = (uintptr_t)heap_allocated_pages;
    while (lo < hi) {
        struct heap_page *mid_page;

        mid = (lo + hi) / 2;
        mid_page = heap_pages_sorted[mid];
        if ((uintptr_t)mid_page->start < start) {
            lo = mid + 1;
        }
        else if ((uintptr_t)mid_page->start > start) {
            hi = mid;
        }
        else {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
        }
    }

    if (hi < (uintptr_t)heap_allocated_pages) {
        MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
    }

    heap_pages_sorted[hi] = page;

    heap_allocated_pages++;

    GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

    size_pool->total_allocated_pages++;

    if (heap_allocated_pages > heap_pages_sorted_length) {
        rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
               heap_allocated_pages, heap_pages_sorted_length);
    }

    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
    if (heap_pages_himem < end) heap_pages_himem = end;

    page->start = start;
    page->total_slots = limit;
    page->slot_size = size_pool->slot_size;
    page->size_pool = size_pool;
    page_body->header.page = page;

    for (p = start; p != end; p += stride) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    }
    page->free_slots = limit;

    asan_lock_freelist(page);
    return page;
}
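/* heap_pages_sorted is kept ordered by page start address; the binary search
 * above finds the insertion index in O(log n) and MEMMOVE shifts the tail up
 * by one slot. Hitting an equal start address is impossible for a freshly
 * mapped page, hence the rb_bug() in that branch. */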
static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    struct heap_page *page = 0, *next;

    ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
        asan_unlock_freelist(page);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, &size_pool->tomb_heap, page);
            asan_lock_freelist(page);
            return page;
        }
    }

    return NULL;
}

static struct heap_page *
heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    struct heap_page *page;
    const char *method = "recycle";

    size_pool->allocatable_pages--;

    page = heap_page_resurrect(objspace, size_pool);

    if (page == NULL) {
        page = heap_page_allocate(objspace, size_pool);
        method = "allocate";
    }
    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                   "heap_pages_sorted_length: %"PRIdSIZE", "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "tomb->total_pages: %"PRIdSIZE"\n",
                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
    return page;
}
static void
heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
{
    /* Adding to eden heap during incremental sweeping is forbidden */
    GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
    page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
    ccan_list_add_tail(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
}

static void
heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    struct heap_page *page = heap_page_create(objspace, size_pool);
    heap_add_page(objspace, size_pool, heap, page);
    heap_add_freepage(heap, page);
}

#if GC_CAN_COMPILE_COMPACTION
static void
heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
{
    size_t i;

    size_pool_allocatable_pages_set(objspace, size_pool, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, size_pool, heap);
    }

    GC_ASSERT(size_pool->allocatable_pages == 0);
}
#endif
static size_t
slots_to_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t slots)
{
    size_t multiple = size_pool->slot_size / BASE_SLOT_SIZE;
    /* Due to alignment, heap pages may have one less slot. We should
     * ensure there are enough pages to guarantee that we will have at
     * least the required number of slots after allocating all the pages. */
    size_t slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1;
    return CEILDIV(slots, slots_per_page);
}

static size_t
minimum_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    size_t size_pool_idx = size_pool - size_pools;
    size_t init_slots = gc_params.size_pool_init_slots[size_pool_idx];
    return slots_to_pages_for_size_pool(objspace, size_pool, init_slots);
}
static size_t
heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots, size_t used)
{
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    size_t next_used;

    if (goal_ratio == 0.0) {
        next_used = (size_t)(used * gc_params.growth_factor);
    }
    else if (total_slots == 0) {
        next_used = minimum_pages_for_size_pool(objspace, size_pool);
    }
    else {
        /* Find `f' where free_slots = f * total_slots * goal_ratio
         * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
         */
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);

        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;

        next_used = (size_t)(f * used);

        if (0) fprintf(stderr,
                       "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                       " G(%1.2f), f(%1.2f),"
                       " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                       free_slots, total_slots, free_slots/(double)total_slots,
                       goal_ratio, f, used, next_used);
    }

    if (gc_params.growth_max_slots > 0) {
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;
    }

    size_t extend_page_count = next_used - used;
    /* Extend by at least 1 page. */
    if (extend_page_count == 0) extend_page_count = 1;

    return extend_page_count;
}
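/* Example with the formula above: total_slots = 1000, free_slots = 200 and
 * goal_ratio = 0.40 give f = (1000 - 200) / (0.6 * 1000) = 1.33, so the pool
 * grows by roughly a third. f is clamped to growth_factor from above and
 * bumped to 1.1 if it comes out below 1.0; a goal_ratio of 0.0 disables the
 * feedback loop entirely and falls back to plain exponential growth. */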
static int
heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    if (size_pool->allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
                  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);

        GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
        GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

        heap_assign_page(objspace, size_pool, heap);
        return TRUE;
    }
    return FALSE;
}
static void
gc_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_continue, &lock_lev);

    /* Continue marking if in incremental marking. */
    if (is_incremental_marking(objspace)) {
        if (gc_marks_continue(objspace, size_pool, heap)) {
            gc_sweep(objspace);
        }
    }

    /* Continue sweeping if in lazy sweeping or the previous incremental
     * marking finished and did not yield a free page. */
    if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
        gc_sweep_continue(objspace, size_pool, heap);
    }

    gc_exit(objspace, gc_enter_event_continue, &lock_lev);
}
static void
heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    GC_ASSERT(heap->free_pages == NULL);

    /* Continue incremental marking or lazy sweeping, if in any of those steps. */
    gc_continue(objspace, size_pool, heap);

    /* If we still don't have a free page and not allowed to create a new page,
     * we should start a new GC cycle. */
    if (heap->free_pages == NULL &&
            (will_be_incremental_marking(objspace) ||
                (heap_increment(objspace, size_pool, heap) == FALSE))) {
        if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
            rb_memerror();
        }

        /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
        gc_continue(objspace, size_pool, heap);

        /* If we're not incremental marking (e.g. a minor GC) or finished
         * sweeping and still don't have a free page, then
         * gc_sweep_finish_size_pool should allow us to create a new page. */
        if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
            if (gc_needs_major_flags == GPR_FLAG_NONE) {
                rb_bug("cannot create a new page after GC");
            }
            else { // Major GC is required, which will allow us to create new page
                if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
                    rb_memerror();
                }
                else {
                    /* Do steps of incremental marking or lazy sweeping. */
                    gc_continue(objspace, size_pool, heap);

                    if (heap->free_pages == NULL &&
                            !heap_increment(objspace, size_pool, heap)) {
                        rb_bug("cannot create a new page after major GC");
                    }
                }
            }
        }
    }

    GC_ASSERT(heap->free_pages != NULL);
}
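/* heap_prepare() is the last stop of the allocation fallback ladder: finish
 * any in-flight incremental/lazy GC work, then try to grow the heap, then
 * run a minor GC, then a major GC, and only rb_bug() when even a major GC
 * cannot produce a page with free slots. */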
void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
    objspace->flags.has_newobj_hook = !!(objspace->hook_events & RUBY_INTERNAL_EVENT_NEWOBJ);
}

static void
gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
{
    if (UNLIKELY(!ec->cfp)) return;
    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
}

#define gc_event_newobj_hook_needed_p(objspace) ((objspace)->flags.has_newobj_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook_prep(objspace, event, data, prep) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        prep; \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
    } \
} while (0)

#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
static inline VALUE
newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
{
#if !__has_feature(memory_sanitizer)
    GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
#endif
    RVALUE *p = RANY(obj);
    p->as.basic.flags = flags;
    *((VALUE *)&p->as.basic.klass) = klass;

    int t = flags & RUBY_T_MASK;
    if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
        RVALUE_AGE_SET_CANDIDATE(objspace, obj);
    }

#if RACTOR_CHECK_MODE
    rb_ractor_setup_belonging(obj);
#endif

#if RGENGC_CHECK_MODE
    p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;

    RB_VM_LOCK_ENTER_NO_BARRIER();
    {
        check_rvalue_consistency(obj);

        GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
        GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
        GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
        GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);

        if (RVALUE_REMEMBERED((VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();
#endif

    if (UNLIKELY(wb_protected == FALSE)) {
        ASSERT_vm_locking();
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
    }

#if RGENGC_PROFILE
    if (wb_protected) {
        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
    else {
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
#endif

#if GC_DEBUG
    GET_RVALUE_OVERHEAD(obj)->file = rb_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
    GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif

    gc_report(5, objspace, "newobj: %s\n", obj_info_basic(obj));

    // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
    return obj;
}
size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
}

static inline size_t
size_pool_slot_size(unsigned char pool_id)
{
    GC_ASSERT(pool_id < SIZE_POOL_COUNT);

    size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;

#if RGENGC_CHECK_MODE
    rb_objspace_t *objspace = &rb_objspace;
    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
#endif

    slot_size -= RVALUE_OVERHEAD;

    return slot_size;
}

bool
rb_gc_size_allocatable_p(size_t size)
{
    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
}
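/* Size pool i serves slots of (1 << i) * BASE_SLOT_SIZE bytes, minus
 * RVALUE_OVERHEAD. On a common 64-bit build with BASE_SLOT_SIZE == 40 and
 * SIZE_POOL_COUNT == 5 that yields usable slot sizes of 40, 80, 160, 320
 * and 640 bytes; the exact values depend on the build configuration. */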
static size_t size_pool_sizes[SIZE_POOL_COUNT + 1] = { 0 };

size_t *
rb_gc_size_pool_sizes(void)
{
    if (size_pool_sizes[0] == 0) {
        for (unsigned char i = 0; i < SIZE_POOL_COUNT; i++) {
            size_pool_sizes[i] = size_pool_slot_size(i);
        }
    }

    return size_pool_sizes;
}
size_t
rb_gc_size_pool_id_for_size(size_t size)
{
    size += RVALUE_OVERHEAD;

    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);

    /* size_pool_idx is ceil(log2(slot_count)) */
    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);

    if (size_pool_idx >= SIZE_POOL_COUNT) {
        rb_bug("rb_gc_size_pool_id_for_size: allocation size too large "
               "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
    }

#if RGENGC_CHECK_MODE
    rb_objspace_t *objspace = &rb_objspace;
    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
#endif

    return size_pool_idx;
}
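/* Example (assuming BASE_SLOT_SIZE == 40 and RVALUE_OVERHEAD == 0): a
 * request for 100 bytes gives slot_count = CEILDIV(100, 40) = 3, and
 * 64 - nlz_int64(3 - 1) = 2, i.e. pool 2, the smallest pool whose
 * power-of-two slot count (4 base slots, 160 bytes) covers the request. */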
static inline VALUE
ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
                           size_t size_pool_idx)
{
    rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
    RVALUE *p = size_pool_cache->freelist;

    if (is_incremental_marking(objspace)) {
        // Not allowed to allocate without running an incremental marking step
        if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
            return Qfalse;
        }

        if (p) {
            cache->incremental_mark_step_allocated_slots++;
        }
    }

    if (p) {
        VALUE obj = (VALUE)p;
        MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
        size_pool_cache->freelist = p->as.free.next;
        asan_unpoison_memory_region(p, stride, true);
#if RGENGC_CHECK_MODE
        GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
        // zero clear
        MEMZERO((char *)obj, char, stride);
#endif
        return obj;
    }
    else {
        return Qfalse;
    }
}
static struct heap_page *
heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    ASSERT_vm_locking();

    struct heap_page *page;

    if (heap->free_pages == NULL) {
        heap_prepare(objspace, size_pool, heap);
    }

    page = heap->free_pages;
    heap->free_pages = page->free_next;

    GC_ASSERT(page->free_slots != 0);
    RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);

    asan_unlock_freelist(page);

    return page;
}
static void
ractor_cache_set_page(rb_ractor_newobj_cache_t *cache, size_t size_pool_idx,
                      struct heap_page *page)
{
    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));

    rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];

    GC_ASSERT(size_pool_cache->freelist == NULL);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    size_pool_cache->using_page = page;
    size_pool_cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;

    asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
    asan_poison_object((VALUE)size_pool_cache->freelist);
}
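/* Ownership handoff: the page's whole freelist is moved into the per-ractor
 * cache in one step and the page is left looking full (free_slots = 0,
 * freelist = NULL), so no other ractor will try to allocate from it. The
 * unpoison/poison dance only peeks at the freelist head to assert that it is
 * a T_NONE slot when ASAN is enabled. */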
static inline VALUE
newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
{
    RVALUE *p = (RVALUE *)obj;
    p->as.values.v1 = v1;
    p->as.values.v2 = v2;
    p->as.values.v3 = v3;
    return obj;
}
static VALUE
newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked)
{
    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

    VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);

    if (UNLIKELY(obj == Qfalse)) {
        unsigned int lev;
        bool unlock_vm = false;

        if (!vm_locked) {
            RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
            vm_locked = true;
            unlock_vm = true;
        }

        {
            ASSERT_vm_locking();

            if (is_incremental_marking(objspace)) {
                gc_continue(objspace, size_pool, heap);
                cache->incremental_mark_step_allocated_slots = 0;

                // Retry allocation after resetting incremental_mark_step_allocated_slots
                obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
            }

            if (obj == Qfalse) {
                // Get next free page (possibly running GC)
                struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
                ractor_cache_set_page(cache, size_pool_idx, page);

                // Retry allocation after moving to new page
                obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);

                GC_ASSERT(obj != Qfalse);
            }
        }

        if (unlock_vm) {
            RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
        }
    }

    size_pool->total_allocated_objects++;

    return obj;
}
static void
newobj_zero_slot(VALUE obj)
{
    memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
}

ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t size_pool_idx));

static VALUE
newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t size_pool_idx)
{
    VALUE obj;
    unsigned int lev;

    RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
    {
        if (UNLIKELY(during_gc || ruby_gc_stressful)) {
            if (during_gc) {
                rb_bug("object allocation during garbage collection phase");
            }

            if (ruby_gc_stressful) {
                if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
                    rb_memerror();
                }
            }
        }

        obj = newobj_alloc(objspace, cache, size_pool_idx, true);
        newobj_init(klass, flags, wb_protected, objspace, obj);

        gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_zero_slot(obj));
    }
    RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);

    return obj;
}

NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
                                                   rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx));
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
                                                     rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx));

static VALUE
newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx)
{
    return newobj_slowpath(klass, flags, objspace, cache, TRUE, size_pool_idx);
}

static VALUE
newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx)
{
    return newobj_slowpath(klass, flags, objspace, cache, FALSE, size_pool_idx);
}
static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
{
    VALUE obj;
    rb_objspace_t *objspace = &rb_objspace;

    RB_DEBUG_COUNTER_INC(obj_newobj);
    (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);

    if (UNLIKELY(stress_to_class)) {
        long i, cnt = RARRAY_LEN(stress_to_class);
        for (i = 0; i < cnt; ++i) {
            if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
        }
    }

    size_t size_pool_idx = rb_gc_size_pool_id_for_size(alloc_size);

    rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;

    if (!UNLIKELY(during_gc ||
                  ruby_gc_stressful ||
                  gc_event_newobj_hook_needed_p(objspace)) &&
            wb_protected) {
        obj = newobj_alloc(objspace, cache, size_pool_idx, false);
        newobj_init(klass, flags, wb_protected, objspace, obj);
    }
    else {
        RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);

        obj = wb_protected ?
          newobj_slowpath_wb_protected(klass, flags, objspace, cache, size_pool_idx) :
          newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, size_pool_idx);
    }

    return newobj_fill(obj, v1, v2, v3);
}
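/* Fast path criteria above: no GC in progress, no stress mode, no NEWOBJ
 * event hook registered, and a write-barrier-protected object. Everything
 * else is routed through the slow path, which takes the VM lock and
 * re-checks those conditions before allocating. */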
VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
    if (klass) rb_data_object_check(klass);
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}
static int
ptr_in_page_body_p(const void *ptr, const void *memb)
{
    struct heap_page *page = *(struct heap_page **)memb;
    uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);

    if ((uintptr_t)ptr >= p_body) {
        return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
    }
    else {
        return -1;
    }
}

PUREFUNC(static inline struct heap_page *heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
static inline struct heap_page *
heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
{
    struct heap_page **res;

    if (ptr < (uintptr_t)heap_pages_lomem ||
            ptr > (uintptr_t)heap_pages_himem) {
        return NULL;
    }

    res = bsearch((void *)ptr, heap_pages_sorted,
                  (size_t)heap_allocated_pages, sizeof(struct heap_page *),
                  ptr_in_page_body_p);

    if (res) {
        return *res;
    }
    else {
        return NULL;
    }
}
PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr);)
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr)
{
    register uintptr_t p = (uintptr_t)ptr;
    register struct heap_page *page;

    RB_DEBUG_COUNTER_INC(gc_isptr_trial);

    if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
    RB_DEBUG_COUNTER_INC(gc_isptr_range);

    if (p % BASE_SLOT_SIZE != 0) return FALSE;
    RB_DEBUG_COUNTER_INC(gc_isptr_align);

    page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
    if (page) {
        RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
        if (page->flags.in_tomb) {
            return FALSE;
        }
        else {
            if (p < page->start) return FALSE;
            if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
            if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;

            return TRUE;
        }
    }
    return FALSE;
}
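/* This is the filter used for conservative (stack/register) scanning: a
 * candidate pointer must fall inside the global heap range, be
 * BASE_SLOT_SIZE aligned, land in a live (non-tomb) page found by binary
 * search, and sit exactly on a slot boundary for that page's slot size.
 * Anything else is rejected as a non-object. */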
static enum rb_id_table_iterator_result
cvar_table_free_i(VALUE value, void *ctx)
{
    xfree((void *)value);
    return ID_TABLE_CONTINUE;
}

#define ZOMBIE_OBJ_KEPT_FLAGS (FL_SEEN_OBJ_ID | FL_FINALIZE)

static inline void
make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
{
    struct RZombie *zombie = RZOMBIE(obj);
    zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & ZOMBIE_OBJ_KEPT_FLAGS);
    zombie->dfree = dfree;
    zombie->data = data;
    VALUE prev, next = heap_pages_deferred_final;
    do {
        zombie->next = prev = next;
        next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
    } while (next != prev);

    struct heap_page *page = GET_HEAP_PAGE(obj);
    page->final_slots++;
    heap_pages_final_slots++;
}
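/* The do/while above is a lock-free push onto the global
 * heap_pages_deferred_final list: the CAS retries until no other thread has
 * swapped in a different head between reading `next` and publishing `obj`.
 * finalize_deferred_heap_pages() later drains the whole list with an atomic
 * exchange of the head against 0. */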
static inline void
make_io_zombie(rb_objspace_t *objspace, VALUE obj)
{
    rb_io_t *fptr = RANY(obj)->as.file.fptr;
    make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
}
static void
obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
{
    ASSERT_vm_locking();
    st_data_t o = (st_data_t)obj, id;

    GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
    FL_UNSET(obj, FL_SEEN_OBJ_ID);

    if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
        GC_ASSERT(id);
        st_delete(objspace->id_to_obj_tbl, &id, NULL);
    }
    else {
        rb_bug("Object ID seen, but not in mapping table: %s", obj_info(obj));
    }
}
static bool
rb_data_free(rb_objspace_t *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RANY(obj)->as.typeddata.type->function.dfree;
        }
        else {
            dfree = RANY(obj)->as.data.dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                make_zombie(objspace, obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}
static bool
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_free);
    // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
        obj_free_object_id(objspace, obj);
    }

    if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);

#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
    CHECK(RVALUE_WB_UNPROTECTED);
    CHECK(RVALUE_MARKED);
    CHECK(RVALUE_MARKING);
    CHECK(RVALUE_UNCOLLECTIBLE);
#undef CHECK
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (rb_shape_obj_too_complex(obj)) {
            RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
            st_free_table(ROBJECT_IV_HASH(obj));
        }
        else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        else {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
            RB_DEBUG_COUNTER_INC(obj_obj_ptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_id_table_free(RCLASS_M_TBL(obj));
        rb_cc_table_free(obj);
        if (rb_shape_obj_too_complex(obj)) {
            st_free_table((st_table *)RCLASS_IVPTR(obj));
        }
        else {
            xfree(RCLASS_IVPTR(obj));
        }

        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_CVC_TBL(obj)) {
            rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
            rb_id_table_free(RCLASS_CVC_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
            xfree(RCLASS_SUPERCLASSES(obj));
        }

        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif
        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        /* Basically, T_ICLASS shares the method table with the module */
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            /* Method table is not shared for origin iclasses of classes */
            rb_id_table_free(RCLASS_M_TBL(obj));
        }
        if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
            rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_cc_table_free(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        rb_gc_free_dsymbol(obj);
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST(obj, FL_FINALIZE)) {
        make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        RBASIC(obj)->flags = 0;
        return TRUE;
    }
}
#define OBJ_ID_INCREMENT (BASE_SLOT_SIZE)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)

static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_BIGNUM_TYPE_P(x)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    if (RB_BIGNUM_TYPE_P(n)) {
        return FIX2LONG(rb_big_hash(n));
    }
    else {
        return st_numhash(n);
    }
}

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};
rb_objspace_t *
rb_gc_impl_objspace_alloc(void)
{
    rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
    ruby_current_vm_ptr->objspace = objspace;

    objspace->flags.gc_stressful = RTEST(initial_stress);
    objspace->gc_stress_mode = initial_stress;

    objspace->flags.measure_gc = 1;
    malloc_limit = gc_params.malloc_limit_min;
    objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
    if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
        rb_bug("Could not preregister postponed job for GC");
    }

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;

        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
        ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
    }

    rb_darray_make(&objspace->weak_references, 0);

    // TODO: debug why on Windows Ruby crashes on boot when GC is on.
#ifdef _WIN32
    dont_gc_on();
#endif

#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
    /* Need to determine if we can use mmap at runtime. */
    heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
#endif

    objspace->next_object_id = OBJ_ID_INITIAL;
    objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
    objspace->obj_to_id_tbl = st_init_numtable();

#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
#endif

    /* Set size pools allocatable pages. */
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        /* Set the default value of size_pool_init_slots. */
        gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;

        size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
    }
    heap_pages_expand_sorted(objspace);

    init_mark_stack(&objspace->mark_stack);

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();

    return objspace;
}
typedef int each_obj_callback(void *, void *, size_t, void *);
typedef int each_page_callback(struct heap_page *, void *);

static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);

struct each_obj_data {
    rb_objspace_t *objspace;
    bool reenable_incremental;

    each_obj_callback *each_obj_callback;
    each_page_callback *each_page_callback;
    void *data;

    struct heap_page **pages[SIZE_POOL_COUNT];
    size_t pages_counts[SIZE_POOL_COUNT];
};

static VALUE
objspace_each_objects_ensure(VALUE arg)
{
    struct each_obj_data *data = (struct each_obj_data *)arg;
    rb_objspace_t *objspace = data->objspace;

    /* Reenable incremental GC */
    if (data->reenable_incremental) {
        objspace->flags.dont_incremental = FALSE;
    }

    /* Free the pages buffers */
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        struct heap_page **pages = data->pages[i];
        free(pages);
    }

    return Qnil;
}
static VALUE
objspace_each_objects_try(VALUE arg)
{
    struct each_obj_data *data = (struct each_obj_data *)arg;
    rb_objspace_t *objspace = data->objspace;

    /* Copy pages from all size_pools to their respective buffers. */
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);

        struct heap_page **pages = malloc(size);
        if (!pages) rb_memerror();

        /* Set up pages buffer by iterating over all pages in the current eden
         * heap. This will be a snapshot of the state of the heap before we
         * call the callback over each page that exists in this buffer. Thus it
         * is safe for the callback to allocate objects without possibly entering
         * an infinite loop. */
        struct heap_page *page = 0;
        size_t pages_count = 0;
        ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            pages[pages_count] = page;
            pages_count++;
        }
        data->pages[i] = pages;
        data->pages_counts[i] = pages_count;
        GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
    }

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        size_t pages_count = data->pages_counts[i];
        struct heap_page **pages = data->pages[i];

        struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
        for (size_t i = 0; i < pages_count; i++) {
            /* If we have reached the end of the linked list then there are no
             * more pages, so break. */
            if (page == NULL) break;

            /* If this page does not match the one in the buffer, then move to
             * the next page in the buffer. */
            if (pages[i] != page) continue;

            uintptr_t pstart = (uintptr_t)page->start;
            uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);

            if (data->each_obj_callback &&
                (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
                break;
            }
            if (data->each_page_callback &&
                (*data->each_page_callback)(page, data->data)) {
                break;
            }

            page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
        }
    }

    return Qnil;
}
/*
 * rb_objspace_each_objects() is a special C API for walking through the
 * Ruby object space. This C API is difficult to use safely; frankly, you
 * should not use it unless you have read the source code of this function
 * and understand exactly what it does.
 *
 * 'callback' will be called several times (the number of heap pages,
 * in the current implementation) with:
 *   vstart: a pointer to the first living object of the heap_page.
 *   vend: a pointer to just past the valid heap_page area.
 *   stride: the distance to the next VALUE.
 *
 * If callback() returns non-zero, the iteration is stopped.
 *
 * This is sample callback code to iterate over live objects:
 *
 *   static int
 *   sample_callback(void *vstart, void *vend, int stride, void *data)
 *   {
 *       VALUE v = (VALUE)vstart;
 *       for (; v != (VALUE)vend; v += stride) {
 *           if (!rb_objspace_internal_object_p(v)) { // liveness check
 *               // do something with live object 'v'
 *           }
 *       }
 *       return 0; // continue the iteration
 *   }
 *
 * Note: 'vstart' is not the top of the heap_page. It points at the first
 * living object so that the callback grasps at least one object (to avoid
 * a GC issue). This means that you cannot walk through all Ruby object
 * pages, including pages of freed objects.
 *
 * Note: in this implementation, 'stride' is the same as sizeof(RVALUE).
 * However, variable values may be passed as 'stride' for various reasons.
 * You must use the stride argument instead of a constant value in the
 * iteration.
 */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    objspace_each_objects(&rb_objspace, callback, data, TRUE);
}
static void
objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
{
    /* Disable incremental GC */
    rb_objspace_t *objspace = each_obj_data->objspace;
    bool reenable_incremental = FALSE;
    if (protected) {
        reenable_incremental = !objspace->flags.dont_incremental;

        gc_rest(objspace);
        objspace->flags.dont_incremental = TRUE;
    }

    each_obj_data->reenable_incremental = reenable_incremental;
    memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
    memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
    rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
              objspace_each_objects_ensure, (VALUE)each_obj_data);
}

static void
objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
{
    struct each_obj_data each_obj_data = {
        .objspace = objspace,
        .each_obj_callback = callback,
        .each_page_callback = NULL,
        .data = data,
    };
    objspace_each_exec(protected, &each_obj_data);
}

#if GC_CAN_COMPILE_COMPACTION
static void
objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
{
    struct each_obj_data each_obj_data = {
        .objspace = objspace,
        .each_obj_callback = NULL,
        .each_page_callback = callback,
        .data = data,
    };
    objspace_each_exec(protected, &each_obj_data);
}
#endif
struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
internal_object_p(VALUE obj)
{
    RVALUE *p = (RVALUE *)obj;
    void *ptr = asan_unpoison_object_temporary(obj);
    bool used_p = p->as.basic.flags;

    if (used_p) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (!p->as.basic.klass) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!p->as.basic.klass) break;
            return 0;
        }
    }
    if (ptr || !used_p) {
        asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}
static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}
/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *     a = 102.7
 *     b = 95       # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     12345678987654321
 *     102.7
 *     2.71828182845905
 *     3.14159265358979
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
 *     Total count: 7
 *
 */
static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}
/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */
static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    st_data_t data = obj;
    rb_check_frozen(obj);
    st_delete(finalizer_table, &data, 0);
    FL_UNSET(obj, FL_FINALIZE);
    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}
static VALUE
rb_define_finalizer_no_check(VALUE obj, VALUE block)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    RBASIC(obj)->flags |= FL_FINALIZE;

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;

        /* avoid duplicate block, table is usually small */
        {
            long len = RARRAY_LEN(table);
            long i;

            for (i = 0; i < len; i++) {
                VALUE recv = RARRAY_AREF(table, i);
                if (rb_equal(recv, block)) {
                    block = recv;
                    goto end;
                }
            }
        }

        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC_CLEAR_CLASS(table);
        st_add_direct(finalizer_table, obj, table);
    }
  end:
    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}
/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    should_be_finalizable(obj);
    if (argc == 1) {
        block = rb_block_proc();
    }
    else {
        should_be_callable(block);
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer_no_check(obj, block);
}
VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);
    return rb_define_finalizer_no_check(obj, block);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;

    if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {
        table = (VALUE)data;
        st_insert(finalizer_table, dest, table);
        FL_SET(dest, FL_FINALIZE);
    }
    else {
        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", obj_info(obj));
    }
}

static VALUE
run_single_final(VALUE cmd, VALUE objid)
{
    return rb_check_funcall(cmd, idCall, 1, &objid);
}

static void
warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
{
    if (!UNDEF_P(final) && !NIL_P(ruby_verbose)) {
        VALUE errinfo = ec->errinfo;
        rb_warn("Exception in finalizer %+"PRIsVALUE, final);
        rb_ec_error_print(ec, errinfo);
    }
}
static void
run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
{
    long i;
    enum ruby_tag_type state;
    volatile struct {
        VALUE errinfo;
        VALUE objid;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.objid = rb_obj_id(obj);
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    EC_PUSH_TAG(ec);
    state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished;      /* skip failed finalizer */
        warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
    }
    for (i = saved.finished;
         RESTORE_FINALIZER(), i<RARRAY_LEN(table);
         saved.finished = ++i) {
        run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
    }
    EC_POP_TAG();
#undef RESTORE_FINALIZER
}
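/* The EC tag above acts as a rescue clause: when a finalizer raises,
 * EC_EXEC_TAG() returns non-zero, the offending entry is skipped by bumping
 * saved.finished, a warning is printed, and the loop resumes with the next
 * finalizer after restoring the saved control frame, stack pointer and
 * errinfo via RESTORE_FINALIZER(). */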
static void
run_final(rb_objspace_t *objspace, VALUE zombie)
{
    if (RZOMBIE(zombie)->dfree) {
        RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
    }

    st_data_t key = (st_data_t)zombie;
    if (FL_TEST_RAW(zombie, FL_FINALIZE)) {
        FL_UNSET(zombie, FL_FINALIZE);
        st_data_t table;
        if (st_delete(finalizer_table, &key, &table)) {
            run_finalizer(objspace, zombie, (VALUE)table);
        }
        else {
            rb_bug("FL_FINALIZE flag is set, but finalizers are not found");
        }
    }
    else {
        GC_ASSERT(!st_lookup(finalizer_table, key, NULL));
    }
}
static void
finalize_list(rb_objspace_t *objspace, VALUE zombie)
{
    while (zombie) {
        VALUE next_zombie;
        struct heap_page *page;
        asan_unpoison_object(zombie, false);
        next_zombie = RZOMBIE(zombie)->next;
        page = GET_HEAP_PAGE(zombie);

        run_final(objspace, zombie);

        RB_VM_LOCK_ENTER();
        {
            GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
            if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
                obj_free_object_id(objspace, zombie);
            }

            GC_ASSERT(heap_pages_final_slots > 0);
            GC_ASSERT(page->final_slots > 0);

            heap_pages_final_slots--;
            page->final_slots--;

            heap_page_add_freeobj(objspace, page, zombie);
            page->size_pool->total_freed_objects++;
        }
        RB_VM_LOCK_LEAVE();

        zombie = next_zombie;
    }
}
static void
finalize_deferred_heap_pages(rb_objspace_t *objspace)
{
    VALUE zombie;
    while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
        finalize_list(objspace, zombie);
    }
}

static void
finalize_deferred(rb_objspace_t *objspace)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
    finalize_deferred_heap_pages(objspace);
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

static void
gc_finalize_deferred(void *dmy)
{
    rb_objspace_t *objspace = dmy;
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;

    finalize_deferred(objspace);
    ATOMIC_SET(finalizing, 0);
}

static void
gc_finalize_deferred_register(rb_objspace_t *objspace)
{
    /* will enqueue a call to gc_finalize_deferred */
    rb_postponed_job_trigger(objspace->finalize_deferred_pjob);
}

static int pop_mark_stack(mark_stack_t *stack, VALUE *data);
static void
gc_abort(rb_objspace_t *objspace)
{
    if (is_incremental_marking(objspace)) {
        /* Remove all objects from the mark stack. */
        VALUE obj;
        while (pop_mark_stack(&objspace->mark_stack, &obj));

        objspace->flags.during_incremental_marking = FALSE;
    }

    if (is_lazy_sweeping(objspace)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

            heap->sweeping_page = NULL;
            struct heap_page *page = NULL;

            ccan_list_for_each(&heap->pages, page, page_node) {
                page->flags.before_sweep = false;
            }
        }
    }

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        rgengc_mark_and_rememberset_clear(objspace, heap);
    }

    gc_mode_set(objspace, gc_mode_none);
}
struct force_finalize_list {
    VALUE obj;
    VALUE table;
    struct force_finalize_list *next;
};

static int
force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
{
    struct force_finalize_list **prev = (struct force_finalize_list **)arg;
    struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
    curr->obj = key;
    curr->table = val;
    curr->next = *prev;
    *prev = curr;
    return ST_CONTINUE;
}
static void
gc_each_object(rb_objspace_t *objspace, void (*func)(VALUE obj, void *data), void *data)
{
    for (size_t i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (; p < pend; p += stride) {
            VALUE obj = (VALUE)p;

            void *poisoned = asan_unpoison_object_temporary(obj);

            func(obj, data);

            if (poisoned) {
                GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
                asan_poison_object(obj);
            }
        }
    }
}
bool rb_obj_is_main_ractor(VALUE gv);

static void
rb_objspace_free_objects_i(VALUE obj, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    switch (BUILTIN_TYPE(obj)) {
      case T_NONE:
      case T_SYMBOL:
        break;
      default:
        obj_free(objspace, obj);
        break;
    }
}

void
rb_objspace_free_objects(rb_objspace_t *objspace)
{
    gc_each_object(objspace, rb_objspace_free_objects_i, objspace);
}
static void
rb_objspace_call_finalizer_i(VALUE obj, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!rb_free_at_exit && (!DATA_PTR(obj) || !RANY(obj)->as.data.dfree)) break;
        if (rb_obj_is_thread(obj)) break;
        if (rb_obj_is_mutex(obj)) break;
        if (rb_obj_is_fiber(obj)) break;
        if (rb_obj_is_main_ractor(obj)) break;

        obj_free(objspace, obj);
        break;
      case T_FILE:
        obj_free(objspace, obj);
        break;
      case T_SYMBOL:
      case T_NONE:
        break;
      default:
        if (rb_free_at_exit) {
            obj_free(objspace, obj);
        }
        break;
    }
}
void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;

    /* run finalizers */
    finalize_deferred(objspace);
    GC_ASSERT(heap_pages_deferred_final == 0);

    /* prohibit incremental GC */
    objspace->flags.dont_incremental = 1;

    /* force to run finalizer */
    while (finalizer_table->num_entries) {
        struct force_finalize_list *list = 0;
        st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
        while (list) {
            struct force_finalize_list *curr = list;

            st_data_t obj = (st_data_t)curr->obj;
            st_delete(finalizer_table, &obj, 0);
            FL_UNSET(curr->obj, FL_FINALIZE);

            run_finalizer(objspace, curr->obj, curr->table);

            list = curr->next;
            xfree(curr);
        }
    }

    /* Abort incremental marking and lazy sweeping to speed up shutdown. */
    gc_abort(objspace);

    /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
    dont_gc_on();

    /* running data/file finalizers are part of garbage collection */
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);

    gc_each_object(objspace, rb_objspace_call_finalizer_i, objspace);

    gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);

    finalize_deferred_heap_pages(objspace);

    st_free_table(finalizer_table);
    finalizer_table = 0;
    ATOMIC_SET(finalizing, 0);
}
/* garbage objects will be collected soon. */
static inline bool
is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
{
    return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&
        !MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr);
}

static inline bool
is_live_object(rb_objspace_t *objspace, VALUE ptr)
{
    switch (BUILTIN_TYPE(ptr)) {
      case T_NONE:
      case T_MOVED:
      case T_ZOMBIE:
        return false;
      default:
        break;
    }

    return !is_garbage_object(objspace, ptr);
}
static inline int
is_markable_object(VALUE obj)
{
    return !RB_SPECIAL_CONST_P(obj);
}

int
rb_objspace_markable_object_p(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    return is_markable_object(obj) && is_live_object(objspace, obj);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    return is_garbage_object(objspace, obj);
}

bool
rb_gc_is_ptr_to_obj(const void *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;
    return is_pointer_to_heap(objspace, ptr);
}
/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 *
 *  On multi-ractor mode, if the object is not shareable, it raises
 *  RangeError.
 */
static VALUE
id2ref(VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;

    objid = rb_to_int(objid);
    if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
        VALUE ptr = NUM2PTR(objid);
        if (SPECIAL_CONST_P(ptr)) {
            if (ptr == Qtrue) return Qtrue;
            if (ptr == Qfalse) return Qfalse;
            if (NIL_P(ptr)) return Qnil;
            if (FIXNUM_P(ptr)) return ptr;
            if (FLONUM_P(ptr)) return ptr;

            if (SYMBOL_P(ptr)) {
                // Check that the symbol is valid
                if (rb_static_id_valid_p(SYM2ID(ptr))) {
                    return ptr;
                }
                else {
                    rb_raise(rb_eRangeError, "%p is not symbol id value", (void *)ptr);
                }
            }

            rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
        }
    }

    VALUE orig;
    if (st_lookup(objspace->id_to_obj_tbl, objid, &orig) &&
        is_live_object(objspace, orig)) {
        if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
            return orig;
        }
        else {
            rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
        }
    }

    if (rb_int_ge(objid, ULL2NUM(objspace->next_object_id))) {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
    }
}
static VALUE
os_id2ref(VALUE os, VALUE objid)
{
    return id2ref(objid);
}

static VALUE
rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
{
    if (SPECIAL_CONST_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
        return LONG2NUM((SIGNED_VALUE)obj);
#else
        return LL2NUM((SIGNED_VALUE)obj);
#endif
    }

    return get_heap_object_id(obj);
}
static VALUE
cached_object_id(VALUE obj)
{
    VALUE id;
    rb_objspace_t *objspace = &rb_objspace;

    RB_VM_LOCK_ENTER();
    if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
        GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
    }
    else {
        GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));

        id = ULL2NUM(objspace->next_object_id);
        objspace->next_object_id += OBJ_ID_INCREMENT;

        VALUE already_disabled = rb_gc_disable_no_rest();
        st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
        st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
        FL_SET(obj, FL_SEEN_OBJ_ID);
    }
    RB_VM_LOCK_LEAVE();

    return id;
}
static VALUE
nonspecial_obj_id(VALUE obj)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
    return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
    return LL2NUM((SIGNED_VALUE)(obj) / 2);
#else
# error not supported
#endif
}

VALUE
rb_memory_id(VALUE obj)
{
    return rb_find_object_id(obj, nonspecial_obj_id);
}
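/*
 * Worked example (annotation, assuming a 64-bit build where
 * SIZEOF_LONG == SIZEOF_VOIDP): for a heap object at address 0x7f0000001000,
 * nonspecial_obj_id() yields 0x7f0000001000 | FIXNUM_FLAG == 0x7f0000001001.
 * Read back as a Fixnum VALUE, that encodes the Ruby integer
 * 0x7f0000001000 >> 1, i.e. half the slot address, so distinct live objects
 * can never share a memory id.
 */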
/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note that some objects of builtin classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  BasicObject implements +__id__+, Kernel implements +object_id+.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *      Object.new.object_id  == Object.new.object_id  # => false
 *      (21 * 2).object_id    == (21 * 2).object_id    # => true
 *      "hello".object_id     == "hello".object_id     # => false
 *      "hi".freeze.object_id == "hi".freeze.object_id # => true
 */
VALUE
rb_obj_id(VALUE obj)
{
    /*
     *                32-bit VALUE space
     *          MSB ------------------------ LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol  ssssssssssssssssssssssss00001110
     *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
     *  fixnum  fffffffffffffffffffffffffffffff1
     *
     *                    object_id space
     *                                       LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
     *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
     *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
     *
     *  where A = sizeof(RVALUE)/4
     *
     *  sizeof(RVALUE) is
     *  20 if 32-bit, double is 4-byte aligned
     *  24 if 32-bit, double is 8-byte aligned
     *  40 if 64-bit
     */

    return rb_find_object_id(obj, cached_object_id);
}
static enum rb_id_table_iterator_result
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
{
    size_t *total_size = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
    return ID_TABLE_CONTINUE;
}

static size_t
cc_table_memsize(struct rb_id_table *cc_table)
{
    size_t total = rb_id_table_memsize(cc_table);
    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
    return total;
}
static size_t
obj_memsize_of(VALUE obj, int use_all_types)
{
    size_t size = 0;

    if (SPECIAL_CONST_P(obj)) {
        return 0;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        size += rb_generic_ivar_memsize(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (rb_shape_obj_too_complex(obj)) {
            size += rb_st_memsize(ROBJECT_IV_HASH(obj));
        }
        else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
            size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (RCLASS_M_TBL(obj)) {
            size += rb_id_table_memsize(RCLASS_M_TBL(obj));
        }
        // class IV sizes are allocated as powers of two
        size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
        if (RCLASS_CVC_TBL(obj)) {
            size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
        }
        if (RCLASS_EXT(obj)->const_tbl) {
            size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
        }
        if (RCLASS_CC_TBL(obj)) {
            size += cc_table_memsize(RCLASS_CC_TBL(obj));
        }
        if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
            size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
        }
        break;
      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            if (RCLASS_M_TBL(obj)) {
                size += rb_id_table_memsize(RCLASS_M_TBL(obj));
            }
        }
        if (RCLASS_CC_TBL(obj)) {
            size += cc_table_memsize(RCLASS_CC_TBL(obj));
        }
        break;
      case T_STRING:
        size += rb_str_memsize(obj);
        break;
      case T_ARRAY:
        size += rb_ary_memsize(obj);
        break;
      case T_HASH:
        if (RHASH_ST_TABLE_P(obj)) {
            VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
            /* st_table is in the slot */
            size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
        }
        break;
      case T_REGEXP:
        if (RREGEXP_PTR(obj)) {
            size += onig_memsize(RREGEXP_PTR(obj));
        }
        break;
      case T_DATA:
        if (use_all_types) size += rb_objspace_data_type_memsize(obj);
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
            size += onig_region_memsize(&rm->regs);
            size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            size += rb_io_memsize(RFILE(obj)->fptr);
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_IMEMO:
        size += rb_imemo_memsize(obj);
        break;
      case T_FLOAT:
      case T_SYMBOL:
        break;
      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
            size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
        }
        break;
      case T_NODE:
        UNEXPECTED_NODE(obj_memsize_of);
        break;
      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RSTRUCT(obj)->as.heap.ptr) {
            size += sizeof(VALUE) * RSTRUCT_LEN(obj);
        }
        break;
      case T_ZOMBIE:
      case T_MOVED:
        break;
      default:
        rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
               BUILTIN_TYPE(obj), (void*)obj);
    }

    return size + rb_gc_obj_slot_size(obj);
}
size_t
rb_obj_memsize_of(VALUE obj)
{
    return obj_memsize_of(obj, TRUE);
}
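/*
 * Usage note (annotation): obj_memsize_of() is what backs
 * ObjectSpace.memsize_of in the objspace extension:
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of("x" * 1000)   # slot size + malloc'ed payload
 *
 * Since the result is size + rb_gc_obj_slot_size(obj), even a fully embedded
 * object reports at least its slot size.
 */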
static int
set_zero(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE k = (VALUE)key;
    VALUE hash = (VALUE)arg;
    rb_hash_aset(hash, k, INT2FIX(0));
    return ST_CONTINUE;
}
static VALUE
type_sym(size_t type)
{
    switch ((enum ruby_value_type)type) {
#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
        COUNT_TYPE(T_NONE);
        COUNT_TYPE(T_OBJECT);
        COUNT_TYPE(T_CLASS);
        COUNT_TYPE(T_MODULE);
        COUNT_TYPE(T_FLOAT);
        COUNT_TYPE(T_STRING);
        COUNT_TYPE(T_REGEXP);
        COUNT_TYPE(T_ARRAY);
        COUNT_TYPE(T_HASH);
        COUNT_TYPE(T_STRUCT);
        COUNT_TYPE(T_BIGNUM);
        COUNT_TYPE(T_FILE);
        COUNT_TYPE(T_DATA);
        COUNT_TYPE(T_MATCH);
        COUNT_TYPE(T_COMPLEX);
        COUNT_TYPE(T_RATIONAL);
        COUNT_TYPE(T_NIL);
        COUNT_TYPE(T_TRUE);
        COUNT_TYPE(T_FALSE);
        COUNT_TYPE(T_SYMBOL);
        COUNT_TYPE(T_FIXNUM);
        COUNT_TYPE(T_IMEMO);
        COUNT_TYPE(T_UNDEF);
        COUNT_TYPE(T_NODE);
        COUNT_TYPE(T_ICLASS);
        COUNT_TYPE(T_ZOMBIE);
        COUNT_TYPE(T_MOVED);
#undef COUNT_TYPE
      default:              return SIZET2NUM(type); break;
    }
}
struct count_objects_data {
    size_t counts[T_MASK+1];
    size_t freed;
    size_t total;
};

static void
count_objects_i(VALUE obj, void *d)
{
    struct count_objects_data *data = (struct count_objects_data *)d;

    if (RANY(obj)->as.basic.flags) {
        data->counts[BUILTIN_TYPE(obj)]++;
    }
    else {
        data->freed++;
    }

    data->total++;
}
/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts all objects grouped by type.
 *
 *  It returns a hash, such as:
 *
 *      {
 *        :TOTAL=>10000,
 *        :FREE=>3011,
 *        :T_OBJECT=>6,
 *        :T_CLASS=>404,
 *        # ...
 *      }
 *
 *  The contents of the returned hash are implementation specific.
 *  It may be changed in future.
 *
 *  The keys starting with +:T_+ mean live objects.
 *  For example, +:T_ARRAY+ is the number of arrays.
 *  +:FREE+ means object slots which are not used now.
 *  +:TOTAL+ means the sum of the above.
 *
 *  If the optional argument +result_hash+ is given,
 *  it is overwritten and returned. This is intended to avoid the probe effect.
 *
 *      h = {}
 *      ObjectSpace.count_objects(h)
 *      h
 *      # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
 *
 *  This method is only expected to work on C Ruby.
 */
static VALUE
count_objects(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct count_objects_data data = { 0 };
    VALUE hash = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        hash = argv[0];
        if (!RB_TYPE_P(hash, T_HASH))
            rb_raise(rb_eTypeError, "non-hash given");
    }

    gc_each_object(objspace, count_objects_i, &data);

    if (NIL_P(hash)) {
        hash = rb_hash_new();
    }
    else if (!RHASH_EMPTY_P(hash)) {
        rb_hash_stlike_foreach(hash, set_zero, hash);
    }
    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
    rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));

    for (size_t i = 0; i <= T_MASK; i++) {
        VALUE type = type_sym(i);
        if (data.counts[i])
            rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
    }

    return hash;
}
/*
  ------------------------ Garbage Collection ------------------------
*/
static size_t
objspace_available_slots(rb_objspace_t *objspace)
{
    size_t total_slots = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
        total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
    }
    return total_slots;
}

static size_t
objspace_live_slots(rb_objspace_t *objspace)
{
    return total_allocated_objects(objspace) - total_freed_objects(objspace) - heap_pages_final_slots;
}

static size_t
objspace_free_slots(rb_objspace_t *objspace)
{
    return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
}
static void
gc_setup_mark_bits(struct heap_page *page)
{
    /* copy oldgen bitmap to mark bitmap */
    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
}

static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
#if defined(_WIN32)
enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};

static BOOL
protect_page_body(struct heap_page_body *body, DWORD protect)
{
    DWORD old_protect;
    return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
}
#else
enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
#endif
static void
lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
    if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
        rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
    }
    else {
        gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
    }
}

static void
unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
    if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
        rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
    }
    else {
        gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
    }
}
static bool
try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
{
    GC_ASSERT(gc_is_moveable_obj(objspace, src));

    struct heap_page *src_page = GET_HEAP_PAGE(src);
    if (!free_page) {
        return false;
    }

    /* We should return true if either src is successfully moved, or src is
     * unmoveable. A false return will cause the sweeping cursor to be
     * incremented to the next page, and src will attempt to move again */
    GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));

    asan_unlock_freelist(free_page);
    VALUE dest = (VALUE)free_page->freelist;
    asan_lock_freelist(free_page);
    asan_unpoison_object(dest, false);
    if (!dest) {
        /* if we can't get something from the freelist then the page must be
         * full */
        return false;
    }
    asan_unlock_freelist(free_page);
    free_page->freelist = RANY(dest)->as.free.next;
    asan_lock_freelist(free_page);

    GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);

    if (src_page->slot_size > free_page->slot_size) {
        objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
    }
    else if (free_page->slot_size > src_page->slot_size) {
        objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
    }
    objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
    objspace->rcompactor.total_moved++;

    gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
    gc_pin(objspace, src);
    free_page->free_slots--;

    return true;
}
static void
gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *cursor = heap->compact_cursor;

    while (cursor) {
        unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
        cursor = ccan_list_next(&heap->pages, cursor, page_node);
    }
}
static void gc_update_references(rb_objspace_t *objspace);
#if GC_CAN_COMPILE_COMPACTION
static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
#endif

#if defined(__MINGW32__) || defined(_WIN32)
# define GC_COMPACTION_SUPPORTED 1
#else
/* If we are not on MinGW or Windows and mmap is not available, we cannot use
 * mprotect for the read barrier, so we must disable compaction. */
# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
#endif
#if GC_CAN_COMPILE_COMPACTION
static void
read_barrier_handler(uintptr_t original_address)
{
    VALUE obj;
    rb_objspace_t *objspace = &rb_objspace;

    /* Calculate address aligned to slots. */
    uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);

    obj = (VALUE)address;

    struct heap_page_body *page_body = GET_PAGE_BODY(obj);

    /* If the page_body is NULL, then mprotect cannot handle it and will crash
     * with "Cannot allocate memory". */
    if (page_body == NULL) {
        rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
    }

    RB_VM_LOCK_ENTER();
    {
        unlock_page_body(objspace, page_body);

        objspace->profile.read_barrier_faults++;

        invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
    }
    RB_VM_LOCK_LEAVE();
}
#endif
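/*
 * Annotation: the read barrier is entirely fault-driven. Every
 * platform-specific handler installed below funnels into
 * read_barrier_handler(faulting_address), which unprotects the page and
 * invalidates the T_MOVED objects on it; the interrupted instruction is then
 * restarted against valid data.
 */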
#if !GC_CAN_COMPILE_COMPACTION
static void
uninstall_handlers(void)
{
    /* no-op */
}

static void
install_handlers(void)
{
    /* no-op */
}
#elif defined(_WIN32)
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
typedef void (*signal_handler)(int);
static signal_handler old_sigsegv_handler;

static LONG WINAPI
read_barrier_signal(EXCEPTION_POINTERS *info)
{
    /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
    if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        /* > The second array element specifies the virtual address of the inaccessible data.
         * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
         *
         * Use this address to invalidate the page */
        read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
        return EXCEPTION_CONTINUE_EXECUTION;
    }
    else {
        return EXCEPTION_CONTINUE_SEARCH;
    }
}

static void
uninstall_handlers(void)
{
    signal(SIGSEGV, old_sigsegv_handler);
    SetUnhandledExceptionFilter(old_handler);
}

static void
install_handlers(void)
{
    /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
    old_sigsegv_handler = signal(SIGSEGV, NULL);
    /* Unhandled Exception Filter has access to the violation address similar
     * to si_addr from sigaction */
    old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
}
#else
static struct sigaction old_sigbus_handler;
static struct sigaction old_sigsegv_handler;

#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
static exception_mask_t old_exception_masks[32];
static mach_port_t old_exception_ports[32];
static exception_behavior_t old_exception_behaviors[32];
static thread_state_flavor_t old_exception_flavors[32];
static mach_msg_type_number_t old_exception_count;

static void
disable_mach_bad_access_exc(void)
{
    old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
    task_swap_exception_ports(
        mach_task_self(), EXC_MASK_BAD_ACCESS,
        MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
        old_exception_masks, &old_exception_count,
        old_exception_ports, old_exception_behaviors, old_exception_flavors
    );
}

static void
restore_mach_bad_access_exc(void)
{
    for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
        task_set_exception_ports(
            mach_task_self(),
            old_exception_masks[i], old_exception_ports[i],
            old_exception_behaviors[i], old_exception_flavors[i]
        );
    }
}
#endif

static void
read_barrier_signal(int sig, siginfo_t *info, void *data)
{
    // setup SEGV/BUS handlers for errors
    struct sigaction prev_sigbus, prev_sigsegv;
    sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
    sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);

    // enable SIGBUS/SEGV
    sigset_t set, prev_set;
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    sigaddset(&set, SIGSEGV);
    sigprocmask(SIG_UNBLOCK, &set, &prev_set);
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    disable_mach_bad_access_exc();
#endif
    // run handler
    read_barrier_handler((uintptr_t)info->si_addr);

    // reset SEGV/BUS handlers
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    restore_mach_bad_access_exc();
#endif
    sigaction(SIGBUS, &prev_sigbus, NULL);
    sigaction(SIGSEGV, &prev_sigsegv, NULL);
    sigprocmask(SIG_SETMASK, &prev_set, NULL);
}

static void
uninstall_handlers(void)
{
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    restore_mach_bad_access_exc();
#endif
    sigaction(SIGBUS, &old_sigbus_handler, NULL);
    sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
}

static void
install_handlers(void)
{
    struct sigaction action;
    memset(&action, 0, sizeof(struct sigaction));
    sigemptyset(&action.sa_mask);
    action.sa_sigaction = read_barrier_signal;
    action.sa_flags = SA_SIGINFO | SA_ONSTACK;

    sigaction(SIGBUS, &action, &old_sigbus_handler);
    sigaction(SIGSEGV, &action, &old_sigsegv_handler);
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    disable_mach_bad_access_exc();
#endif
}
#endif
static void
gc_compact_finish(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        gc_unprotect_pages(objspace, heap);
    }

    uninstall_handlers();

    gc_update_references(objspace);
    objspace->profile.compact_count++;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        heap->compact_cursor = NULL;
        heap->free_pages = NULL;
        heap->compact_cursor_index = 0;
    }

    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
    }
    objspace->flags.during_compacting = FALSE;
}
struct gc_sweep_context {
    struct heap_page *page;
    int final_slots;
    int freed_slots;
    int empty_slots;
};
static inline void
gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
{
    struct heap_page *sweep_page = ctx->page;
    short slot_size = sweep_page->slot_size;
    short slot_bits = slot_size / BASE_SLOT_SIZE;
    GC_ASSERT(slot_bits > 0);

    do {
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % BASE_SLOT_SIZE == 0);

        asan_unpoison_object(vp, false);
        if (bitset & 1) {
            switch (BUILTIN_TYPE(vp)) {
              default: /* majority case */
                gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if RGENGC_CHECK_MODE
                if (!is_full_marking(objspace)) {
                    if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
                    if (RVALUE_REMEMBERED(vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
                }
#endif
                if (obj_free(objspace, vp)) {
                    // always add free slots back to the swept pages freelist,
                    // so that if we're compacting, we can re-use the slots
                    (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
                    heap_page_add_freeobj(objspace, sweep_page, vp);
                    gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
                    ctx->freed_slots++;
                }
                else {
                    ctx->final_slots++;
                }
                break;

              case T_MOVED:
                if (objspace->flags.during_compacting) {
                    /* The sweep cursor shouldn't have made it to any
                     * T_MOVED slots while the compact flag is enabled.
                     * The sweep cursor and compact cursor move in
                     * opposite directions, and when they meet references will
                     * get updated and "during_compacting" should get disabled */
                    rb_bug("T_MOVED shouldn't be seen until compaction is finished");
                }
                gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
                ctx->empty_slots++;
                heap_page_add_freeobj(objspace, sweep_page, vp);
                break;
              case T_ZOMBIE:
                /* already counted */
                break;
              case T_NONE:
                ctx->empty_slots++; /* already freed */
                break;
            }
        }
        bitset >>= slot_bits;
    } while (bitset);
}
static inline void
gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
{
    struct heap_page *sweep_page = ctx->page;
    GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);

    uintptr_t p;
    bits_t *bits, bitset;

    gc_report(2, objspace, "page_sweep: start.\n");

#if RGENGC_CHECK_MODE
    if (!objspace->flags.immediate_sweep) {
        GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
    }
#endif
    sweep_page->flags.before_sweep = FALSE;
    sweep_page->free_slots = 0;

    p = (uintptr_t)sweep_page->start;
    bits = sweep_page->mark_bits;

    int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
    int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
    if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
        bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
    }

    /* The last bitmap plane may not be used if the last plane does not
     * have enough space for the slot_size. In that case, the last plane must
     * be skipped since none of the bits will be set. */
    int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
    GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
              bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);

    // Skip out of range slots at the head of the page
    bitset = ~bits[0];
    bitset >>= NUM_IN_PAGE(p);
    if (bitset) {
        gc_sweep_plane(objspace, heap, p, bitset, ctx);
    }
    p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;

    for (int i = 1; i < bitmap_plane_count; i++) {
        bitset = ~bits[i];
        if (bitset) {
            gc_sweep_plane(objspace, heap, p, bitset, ctx);
        }
        p += BITS_BITLENGTH * BASE_SLOT_SIZE;
    }

    if (!heap->compact_cursor) {
        gc_setup_mark_bits(sweep_page);
    }

#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->removing_objects += ctx->final_slots + ctx->freed_slots;
        record->empty_objects += ctx->empty_slots;
    }
#endif
    if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
                   rb_gc_count(),
                   sweep_page->total_slots,
                   ctx->freed_slots, ctx->empty_slots, ctx->final_slots);

    sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
    sweep_page->size_pool->total_freed_objects += ctx->freed_slots;

    if (heap_pages_deferred_final && !finalizing) {
        gc_finalize_deferred_register(objspace);
    }

#if RGENGC_CHECK_MODE
    short freelist_len = 0;
    asan_unlock_freelist(sweep_page);
    RVALUE *ptr = sweep_page->freelist;
    while (ptr) {
        freelist_len++;
        ptr = ptr->as.free.next;
    }
    asan_lock_freelist(sweep_page);
    if (freelist_len != sweep_page->free_slots) {
        rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
    }
#endif

    gc_report(2, objspace, "page_sweep: end.\n");
}
static const char *
gc_mode_name(enum gc_mode mode)
{
    switch (mode) {
      case gc_mode_none: return "none";
      case gc_mode_marking: return "marking";
      case gc_mode_sweeping: return "sweeping";
      case gc_mode_compacting: return "compacting";
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
    }
}
static void
gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
{
#if RGENGC_CHECK_MODE
    enum gc_mode prev_mode = gc_mode(objspace);
    switch (prev_mode) {
      case gc_mode_none:       GC_ASSERT(mode == gc_mode_marking); break;
      case gc_mode_marking:    GC_ASSERT(mode == gc_mode_sweeping); break;
      case gc_mode_sweeping:   GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
      case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
    }
#endif
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
    gc_mode_set(objspace, mode);
}
static void
heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
{
    if (freelist) {
        asan_unlock_freelist(page);
        if (page->freelist) {
            RVALUE *p = page->freelist;
            asan_unpoison_object((VALUE)p, false);
            while (p->as.free.next) {
                RVALUE *prev = p;
                p = p->as.free.next;
                asan_poison_object((VALUE)prev);
                asan_unpoison_object((VALUE)p, false);
            }
            p->as.free.next = freelist;
            asan_poison_object((VALUE)p);
        }
        else {
            page->freelist = freelist;
        }
        asan_lock_freelist(page);
    }
}
static void
gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
{
    heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
    heap->free_pages = NULL;
    heap->pooled_pages = NULL;
    if (!objspace->flags.immediate_sweep) {
        struct heap_page *page = NULL;

        ccan_list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = TRUE;
        }
    }
}
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
__attribute__((noinline))
#endif
static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool);

#if GC_CAN_COMPILE_COMPACTION
static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
static int compare_pinned_slots(const void *left, const void *right, void *d);
#endif
static void
gc_sweep_start(rb_objspace_t *objspace)
{
    gc_mode_transition(objspace, gc_mode_sweeping);
    objspace->rincgc.pooled_slots = 0;

#if GC_CAN_COMPILE_COMPACTION
    if (objspace->flags.during_compacting) {
        gc_sort_heap_by_compare_func(
            objspace,
            objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
        );
    }
#endif

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

        gc_sweep_start_heap(objspace, heap);

        /* We should call gc_sweep_finish_size_pool for size pools with no pages. */
        if (heap->sweeping_page == NULL) {
            GC_ASSERT(heap->total_pages == 0);
            GC_ASSERT(heap->total_slots == 0);
            gc_sweep_finish_size_pool(objspace, size_pool);
        }
    }

    rb_ractor_t *r = NULL;
    ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
        rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
    }
}
static void
gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
    size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
    size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
    size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;

    size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools];
    size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);

    /* If we don't have enough slots and we have pages on the tomb heap, move
     * pages from the tomb heap to the eden heap. This may prevent page
     * creation thrashing (frequently allocating and deallocating pages) and
     * GC thrashing (running GC more frequently than required). */
    struct heap_page *resurrected_page;
    while (swept_slots < min_free_slots &&
           (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
        swept_slots += resurrected_page->free_slots;

        heap_add_page(objspace, size_pool, heap, resurrected_page);
        heap_add_freepage(heap, resurrected_page);
    }

    if (swept_slots < min_free_slots) {
        bool grow_heap = is_full_marking(objspace);

        /* Consider growing or starting a major GC if we are not currently in a
         * major GC and we can't allocate any more pages. */
        if (!is_full_marking(objspace) && size_pool->allocatable_pages == 0) {
            /* The heap is a growth heap if it freed more slots than had empty slots. */
            bool is_growth_heap = size_pool->empty_slots == 0 || size_pool->freed_slots > size_pool->empty_slots;

            /* Grow this heap if we haven't run at least RVALUE_OLD_AGE minor
             * GC since the last major GC or if this heap is smaller than the
             * configured initial size. */
            if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE ||
                    total_slots < init_slots) {
                grow_heap = TRUE;
            }
            else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
                gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
                size_pool->force_major_gc_count++;
            }
        }

        if (grow_heap) {
            size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);

            if (extend_page_count > size_pool->allocatable_pages) {
                size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
            }
        }
    }
}
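/*
 * Worked example (annotation, hypothetical numbers): with
 * total_slots == 10000, init_slots == 10000 and a heap_free_slots_min_ratio
 * of 0.20, min_free_slots == 2000. If sweeping recovered only 500 slots and
 * no tomb pages could be resurrected, the pool either grows (by
 * extend_page_count pages) or, when it cannot allocate more pages outside a
 * major GC, requests one via GPR_FLAG_MAJOR_BY_NOFREE.
 */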
static void
gc_sweep_finish(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_sweep_finish\n");

    gc_prof_set_heap_info(objspace);
    heap_pages_free_unused_pages(objspace);

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        /* if heap_pages has unused pages, then assign them to increment */
        size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
        if (size_pool->allocatable_pages < tomb_pages) {
            size_pool->allocatable_pages = tomb_pages;
        }

        size_pool->freed_slots = 0;
        size_pool->empty_slots = 0;

        if (!will_be_incremental_marking(objspace)) {
            rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
            struct heap_page *end_page = eden_heap->free_pages;
            if (end_page) {
                while (end_page->free_next) end_page = end_page->free_next;
                end_page->free_next = eden_heap->pooled_pages;
            }
            else {
                eden_heap->free_pages = eden_heap->pooled_pages;
            }
            eden_heap->pooled_pages = NULL;
            objspace->rincgc.pooled_slots = 0;
        }
    }
    heap_pages_expand_sorted(objspace);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
    gc_mode_transition(objspace, gc_mode_none);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
}
static int
gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    struct heap_page *sweep_page = heap->sweeping_page;
    int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
    int swept_slots = 0;
    int pooled_slots = 0;

    if (sweep_page == NULL) return FALSE;

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_start(objspace);
#endif

    do {
        RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);

        struct gc_sweep_context ctx = {
            .page = sweep_page,
            .final_slots = 0,
            .freed_slots = 0,
            .empty_slots = 0,
        };
        gc_sweep_page(objspace, heap, &ctx);
        int free_slots = ctx.freed_slots + ctx.empty_slots;

        heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);

        if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
            heap_pages_freeable_pages > 0 &&
            unlink_limit > 0) {
            heap_pages_freeable_pages--;
            unlink_limit--;
            /* there are no living objects -> move this page to tomb heap */
            heap_unlink_page(objspace, heap, sweep_page);
            heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
        }
        else if (free_slots > 0) {
            size_pool->freed_slots += ctx.freed_slots;
            size_pool->empty_slots += ctx.empty_slots;

            if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
                heap_add_poolpage(objspace, heap, sweep_page);
                pooled_slots += free_slots;
            }
            else {
                heap_add_freepage(heap, sweep_page);
                swept_slots += free_slots;
                if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
                    break;
                }
            }
        }
        else {
            sweep_page->free_next = NULL;
        }
    } while ((sweep_page = heap->sweeping_page));

    if (!heap->sweeping_page) {
        gc_sweep_finish_size_pool(objspace, size_pool);

        if (!has_sweeping_pages(objspace)) {
            gc_sweep_finish(objspace);
        }
    }

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_stop(objspace);
#endif

    return heap->free_pages != NULL;
}
static void
gc_sweep_rest(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
            gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
    }
}
static void
gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
{
    GC_ASSERT(dont_gc_val() == FALSE);
    if (!GC_ENABLE_LAZY_SWEEP) return;

    gc_sweeping_enter(objspace);

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
            /* sweep_size_pool requires a free slot but sweeping did not yield any. */
            if (size_pool == sweep_size_pool) {
                if (size_pool->allocatable_pages > 0) {
                    heap_increment(objspace, size_pool, heap);
                }
                else {
                    /* Not allowed to create a new page so finish sweeping. */
                    gc_sweep_rest(objspace);
                    break;
                }
            }
        }
    }

    gc_sweeping_exit(objspace);
}
#if GC_CAN_COMPILE_COMPACTION
static void
invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
{
    if (bitset) {
        do {
            if (bitset & 1) {
                VALUE forwarding_object = (VALUE)p;
                VALUE object;

                if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
                    GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
                    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));

                    CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);

                    object = rb_gc_location(forwarding_object);

                    shape_id_t original_shape_id = 0;
                    if (RB_TYPE_P(object, T_OBJECT)) {
                        original_shape_id = RMOVED(forwarding_object)->original_shape_id;
                    }

                    gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
                    /* forwarding_object is now our actual object, and "object"
                     * is the free slot for the original page */

                    if (original_shape_id) {
                        ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
                    }

                    struct heap_page *orig_page = GET_HEAP_PAGE(object);
                    orig_page->free_slots++;
                    heap_page_add_freeobj(objspace, orig_page, object);

                    GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
                    GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
                    GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
                }
            }
            p += BASE_SLOT_SIZE;
            bitset >>= 1;
        } while (bitset);
    }
}
static void
invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
{
    int i;
    bits_t *mark_bits, *pin_bits;
    bits_t bitset;

    mark_bits = page->mark_bits;
    pin_bits = page->pinned_bits;

    uintptr_t p = page->start;

    // Skip out of range slots at the head of the page
    bitset = pin_bits[0] & ~mark_bits[0];
    bitset >>= NUM_IN_PAGE(p);
    invalidate_moved_plane(objspace, page, p, bitset);
    p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;

    for (i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
        /* Moved objects are pinned but never marked. We reuse the pin bits
         * to indicate there is a moved object in this slot. */
        bitset = pin_bits[i] & ~mark_bits[i];

        invalidate_moved_plane(objspace, page, p, bitset);
        p += BITS_BITLENGTH * BASE_SLOT_SIZE;
    }
}
#endif
static void
gc_compact_start(rb_objspace_t *objspace)
{
    struct heap_page *page = NULL;
    gc_mode_transition(objspace, gc_mode_compacting);

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
        ccan_list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = TRUE;
        }

        heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
        heap->compact_cursor_index = 0;
    }

    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->moved_objects = objspace->rcompactor.total_moved;
    }

    memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));

    /* Set up read barrier for pages containing MOVED objects */
    install_handlers();
}
static void gc_sweep_compact(rb_objspace_t *objspace);

static void
gc_sweep(rb_objspace_t *objspace)
{
    gc_sweeping_enter(objspace);

    const unsigned int immediate_sweep = objspace->flags.immediate_sweep;

    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);

    gc_sweep_start(objspace);
    if (objspace->flags.during_compacting) {
        gc_sweep_compact(objspace);
    }

    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);
#endif
        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);
#endif
    }
    else {
        /* Sweep every size pool. */
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
    }

    gc_sweeping_exit(objspace);
}
/* Marking - Marking stack */

static stack_chunk_t *
stack_chunk_alloc(void)
{
    stack_chunk_t *res;

    res = malloc(sizeof(stack_chunk_t));
    if (!res)
        rb_memerror();

    return res;
}
static inline int
is_mark_stack_empty(mark_stack_t *stack)
{
    return stack->chunk == NULL;
}

static size_t
mark_stack_size(mark_stack_t *stack)
{
    size_t size = stack->index;
    stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;

    while (chunk) {
        size += stack->limit;
        chunk = chunk->next;
    }
    return size;
}
static void
add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
{
    chunk->next = stack->cache;
    stack->cache = chunk;
    stack->cache_size++;
}

static void
shrink_stack_chunk_cache(mark_stack_t *stack)
{
    stack_chunk_t *chunk;

    if (stack->unused_cache_size > (stack->cache_size/2)) {
        chunk = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        free(chunk);
    }
    stack->unused_cache_size = stack->cache_size;
}
static void
push_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *next;

    GC_ASSERT(stack->index == stack->limit);

    if (stack->cache_size > 0) {
        next = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        if (stack->unused_cache_size > stack->cache_size)
            stack->unused_cache_size = stack->cache_size;
    }
    else {
        next = stack_chunk_alloc();
    }
    next->next = stack->chunk;
    stack->chunk = next;
    stack->index = 0;
}
static void
pop_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *prev;

    prev = stack->chunk->next;
    GC_ASSERT(stack->index == 0);
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    stack->index = stack->limit;
}
static void
mark_stack_chunk_list_free(stack_chunk_t *chunk)
{
    stack_chunk_t *next = NULL;

    while (chunk != NULL) {
        next = chunk->next;
        free(chunk);
        chunk = next;
    }
}

static void
free_stack_chunks(mark_stack_t *stack)
{
    mark_stack_chunk_list_free(stack->chunk);
}

static void
mark_stack_free_cache(mark_stack_t *stack)
{
    mark_stack_chunk_list_free(stack->cache);
    stack->cache_size = 0;
    stack->unused_cache_size = 0;
}
static void
push_mark_stack(mark_stack_t *stack, VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
      case T_CLASS:
      case T_MODULE:
      case T_FLOAT:
      case T_STRING:
      case T_REGEXP:
      case T_ARRAY:
      case T_HASH:
      case T_STRUCT:
      case T_BIGNUM:
      case T_FILE:
      case T_DATA:
      case T_MATCH:
      case T_COMPLEX:
      case T_RATIONAL:
      case T_TRUE:
      case T_FALSE:
      case T_SYMBOL:
      case T_IMEMO:
      case T_ICLASS:
        if (stack->index == stack->limit) {
            push_mark_stack_chunk(stack);
        }
        stack->chunk->data[stack->index++] = obj;
        return;

      case T_NONE:
      case T_NIL:
      case T_FIXNUM:
      case T_MOVED:
      case T_ZOMBIE:
      case T_UNDEF:
      case T_MASK:
        rb_bug("push_mark_stack() called for broken object");
        break;

      case T_NODE:
        UNEXPECTED_NODE(push_mark_stack);
        break;
    }

    rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
           BUILTIN_TYPE(obj), (void *)obj,
           is_pointer_to_heap(&rb_objspace, (void *)obj) ? "corrupted object" : "non object");
}
static int
pop_mark_stack(mark_stack_t *stack, VALUE *data)
{
    if (is_mark_stack_empty(stack)) {
        return FALSE;
    }
    if (stack->index == 1) {
        *data = stack->chunk->data[--stack->index];
        pop_mark_stack_chunk(stack);
    }
    else {
        *data = stack->chunk->data[--stack->index];
    }
    return TRUE;
}
static void
init_mark_stack(mark_stack_t *stack)
{
    int i;

    MEMZERO(stack, mark_stack_t, 1);
    stack->index = stack->limit = STACK_CHUNK_SIZE;

    for (i = 0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
    }
    stack->unused_cache_size = stack->cache_size;
}
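/*
 * Annotation: a minimal sketch of how marking drives this stack (gc_grey
 * pushes; the gc_mark_stacked_objects family pops), assuming an initialized
 * mark_stack_t `stack`:
 *
 *     VALUE obj;
 *     push_mark_stack(&stack, some_root);
 *     while (pop_mark_stack(&stack, &obj)) {
 *         // mark the children of obj, pushing each newly-marked one
 *     }
 */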
#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)

#define STACK_START (ec->machine.stack_start)
#define STACK_END (ec->machine.stack_end)
#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))

#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                        : (size_t)(STACK_END - STACK_START + 1))
#endif
#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
int
ruby_get_stack_grow_direction(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
#endif
size_t
ruby_stack_length(VALUE **p)
{
    rb_execution_context_t *ec = GET_EC();
    SET_STACK_END;
    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    return STACK_LENGTH;
}
#define PREVENT_STACK_OVERFLOW 1
#ifndef PREVENT_STACK_OVERFLOW
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
# define PREVENT_STACK_OVERFLOW 1
#else
# define PREVENT_STACK_OVERFLOW 0
#endif
#endif

#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
static int
stack_check(rb_execution_context_t *ec, int water_mark)
{
    SET_STACK_END;

    size_t length = STACK_LENGTH;
    size_t maximum_length = STACK_LEVEL_MAX - water_mark;

    return length > maximum_length;
}
#else
#define stack_check(ec, water_mark) FALSE
#endif

#define STACKFRAME_FOR_CALL_CFUNC 2048
int
rb_ec_stack_check(rb_execution_context_t *ec)
{
    return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
}

int
ruby_stack_check(void)
{
    return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
}
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
static void
each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
{
    VALUE v;
    while (n--) {
        v = *x;
        cb(objspace, v);
        x++;
    }
}

static void
gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
{
    long n;

    if (end <= start) return;
    n = end - start;
    each_location(objspace, start, n, cb);
}
void
rb_gc_mark_locations(const VALUE *start, const VALUE *end)
{
    gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
}

void
rb_gc_mark_values(long n, const VALUE *values)
{
    long i;
    rb_objspace_t *objspace = &rb_objspace;

    for (i = 0; i < n; i++) {
        gc_mark(objspace, values[i]);
    }
}

void
rb_gc_mark_vm_stack_values(long n, const VALUE *values)
{
    rb_objspace_t *objspace = &rb_objspace;

    for (long i = 0; i < n; i++) {
        gc_mark_and_pin(objspace, values[i]);
    }
}
static int
mark_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static int
mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark_and_pin(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static void
mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;
    st_foreach(tbl, mark_value, (st_data_t)objspace);
}

static void
mark_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;
    st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
}
static int
mark_key(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark_and_pin(objspace, (VALUE)key);
    return ST_CONTINUE;
}

static void
mark_set(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl) return;
    st_foreach(tbl, mark_key, (st_data_t)objspace);
}

static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    gc_mark_and_pin(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static void
mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl) return;
    st_foreach(tbl, pin_value, (st_data_t)objspace);
}

void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl);
}
static int
mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static int
pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark_and_pin(objspace, (VALUE)value);
    return ST_CONTINUE;
}

static int
pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    return ST_CONTINUE;
}
static void
mark_hash(rb_objspace_t *objspace, VALUE hash)
{
    if (rb_hash_compare_by_id_p(hash)) {
        rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
    }
    else {
        rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
    }

    gc_mark(objspace, RHASH(hash)->ifnone);
}

static void
mark_st(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl) return;
    st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
}

void
rb_mark_hash(st_table *tbl)
{
    mark_st(&rb_objspace, tbl);
}
static enum rb_id_table_iterator_result
mark_method_entry_i(VALUE me, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, me);
    return ID_TABLE_CONTINUE;
}

static void
mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (tbl) {
        rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
    }
}

static enum rb_id_table_iterator_result
mark_const_entry_i(VALUE value, void *data)
{
    const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
    rb_objspace_t *objspace = data;

    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);
    return ID_TABLE_CONTINUE;
}

static void
mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (!tbl) return;
    rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
}
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif

static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
                                const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
static void
gc_mark_machine_stack_location_maybe(rb_objspace_t *objspace, VALUE obj)
{
    gc_mark_maybe(objspace, obj);

#ifdef RUBY_ASAN_ENABLED
    const rb_execution_context_t *ec = objspace->marking_machine_context_ec;
    void *fake_frame_start;
    void *fake_frame_end;
    bool is_fake_frame = asan_get_fake_stack_extents(
        ec->machine.asan_fake_stack_handle, obj,
        ec->machine.stack_start, ec->machine.stack_end,
        &fake_frame_start, &fake_frame_end
    );
    if (is_fake_frame) {
        each_stack_location(objspace, ec, fake_frame_start, fake_frame_end, gc_mark_maybe);
    }
#endif
}
#if defined(__wasm__)

static VALUE *rb_stack_range_tmp[2];

static void
rb_mark_locations(void *begin, void *end)
{
    rb_stack_range_tmp[0] = begin;
    rb_stack_range_tmp[1] = end;
}

# if defined(__EMSCRIPTEN__)

static void
mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    emscripten_scan_stack(rb_mark_locations);
    each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);

    emscripten_scan_registers(rb_mark_locations);
    each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
}
# else // use Asyncify version

static void
mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    VALUE *stack_start, *stack_end;
    SET_STACK_END;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);
    each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);

    rb_wasm_scan_locals(rb_mark_locations);
    each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
}

# endif

#else // !defined(__wasm__)
static void
mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    union {
        rb_jmp_buf j;
        VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    FLUSH_REGISTER_WINDOWS;
    memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
    /* This assumes that all registers are saved into the jmp_buf (and stack) */
    rb_setjmp(save_regs_gc_mark.j);

    /* SET_STACK_END must be called in this function because
     * the stack frame of this function may contain
     * callee save registers and they should be marked. */
    SET_STACK_END;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);

#ifdef RUBY_ASAN_ENABLED
    objspace->marking_machine_context_ec = ec;
#endif

    each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_machine_stack_location_maybe);
    each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_machine_stack_location_maybe);

#ifdef RUBY_ASAN_ENABLED
    objspace->marking_machine_context_ec = NULL;
#endif
}
#endif
void
rb_gc_mark_machine_context(const rb_execution_context_t *ec)
{
    rb_objspace_t *objspace = &rb_objspace;
#ifdef RUBY_ASAN_ENABLED
    objspace->marking_machine_context_ec = ec;
#endif

    VALUE *stack_start, *stack_end;

    GET_STACK_BOUNDS(stack_start, stack_end, 0);
    RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);

    each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_machine_stack_location_maybe);
    int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
    each_location(objspace, (VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe);

#ifdef RUBY_ASAN_ENABLED
    objspace->marking_machine_context_ec = NULL;
#endif
}
static void
each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
                    const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
{
    gc_mark_locations(objspace, stack_start, stack_end, cb);

#if defined(__mc68000__)
    gc_mark_locations(objspace,
                      (VALUE*)((char*)stack_start + 2),
                      (VALUE*)((char*)stack_end - 2), cb);
#endif
}
void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl);
}

void
rb_mark_tbl_no_pin(st_table *tbl)
{
    mark_tbl_no_pin(&rb_objspace, tbl);
}
static void
gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
{
    (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        void *ptr = asan_unpoison_object_temporary(obj);

        /* Garbage can live on the stack, so do not mark or pin */
        switch (BUILTIN_TYPE(obj)) {
          case T_ZOMBIE:
          case T_NONE:
            break;
          default:
            gc_mark_and_pin(objspace, obj);
            break;
        }

        if (ptr) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
    }
}

void
rb_gc_mark_maybe(VALUE obj)
{
    gc_mark_maybe(&rb_objspace, obj);
}
static inline int
gc_mark_set(rb_objspace_t *objspace, VALUE obj)
{
    ASSERT_vm_locking();
    if (RVALUE_MARKED(obj)) return 0;
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
    return 1;
}
static void
gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *uncollectible_bits = &page->uncollectible_bits[0];

    if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
        page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
        MARK_IN_BITMAP(uncollectible_bits, obj);
        objspace->rgengc.uncollectible_wb_unprotected_objects++;

#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
    }
}
static inline void
rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
{
    const VALUE old_parent = objspace->rgengc.parent_object;

    if (old_parent) { /* parent object is old */
        if (RVALUE_WB_UNPROTECTED(obj) || !RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, old_parent);
        }
    }

    GC_ASSERT(old_parent == objspace->rgengc.parent_object);
}
static void
gc_grey(rb_objspace_t *objspace, VALUE obj)
{
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#endif

    if (is_incremental_marking(objspace)) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }

    push_mark_stack(&objspace->mark_stack, obj);
}
static void
gc_aging(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);

    GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
    check_rvalue_consistency(obj);

    if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
        if (!RVALUE_OLD_P(obj)) {
            gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
            RVALUE_AGE_INC(objspace, obj);
        }
        else if (is_full_marking(objspace)) {
            GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
            RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
        }
    }
    check_rvalue_consistency(obj);

    objspace->marked_slots++;
}
NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
static void reachable_objects_from_callback(VALUE obj);

static void
gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
{
    if (LIKELY(during_gc)) {
        rgengc_check_relation(objspace, obj);
        if (!gc_mark_set(objspace, obj)) return; /* already marked */

        if (0) { // for debug GC marking miss
            if (objspace->rgengc.parent_object) {
                RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
                               (void *)obj, obj_type_name(obj),
                               (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
            }
            else {
                RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
            }
        }

        if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
            rp(obj);
            rb_bug("try to mark T_NONE object"); /* check here will help debugging */
        }
        gc_aging(objspace, obj);
        gc_grey(objspace, obj);
    }
    else {
        reachable_objects_from_callback(obj);
    }
}
static inline void
gc_pin(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(is_markable_object(obj));
    if (UNLIKELY(objspace->flags.during_compacting)) {
        if (LIKELY(during_gc)) {
            if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
                GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
                GET_HEAP_PAGE(obj)->pinned_slots++;
                MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
            }
        }
    }
}

static inline void
gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
{
    if (!is_markable_object(obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);
}

static inline void
gc_mark(rb_objspace_t *objspace, VALUE obj)
{
    if (!is_markable_object(obj)) return;
    gc_mark_ptr(objspace, obj);
}

void
rb_gc_mark_movable(VALUE ptr)
{
    gc_mark(&rb_objspace, ptr);
}

void
rb_gc_mark(VALUE ptr)
{
    gc_mark_and_pin(&rb_objspace, ptr);
}
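/*
 * Usage sketch for the two marking APIs above (illustrative only: struct
 * foo, foo_mark and foo_compact are hypothetical extension code, not part
 * of this file). A C extension's dmark callback uses rb_gc_mark() for
 * references that must not move, or rb_gc_mark_movable() paired with a
 * dcompact callback that re-reads the new address via rb_gc_location():
 *
 *     static void
 *     foo_mark(void *ptr)
 *     {
 *         struct foo *f = ptr;
 *         rb_gc_mark_movable(f->obj);          // may be moved by compaction
 *     }
 *
 *     static void
 *     foo_compact(void *ptr)
 *     {
 *         struct foo *f = ptr;
 *         f->obj = rb_gc_location(f->obj);     // fetch the new address
 *     }
 */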
void
rb_gc_mark_and_move(VALUE *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    if (UNLIKELY(objspace->flags.during_reference_updating)) {
        GC_ASSERT(objspace->flags.during_compacting);
        GC_ASSERT(during_gc);

        *ptr = rb_gc_location(*ptr);
    }
    else {
        gc_mark_ptr(objspace, *ptr);
    }
}

void
rb_gc_mark_weak(VALUE *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (UNLIKELY(!during_gc)) return;

    VALUE obj = *ptr;
    if (RB_SPECIAL_CONST_P(obj)) return;

    GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));

    if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
        rp(obj);
        rb_bug("try to mark T_NONE object");
    }

    /* If we are in a minor GC and the other object is old, then obj should
     * already be marked and cannot be reclaimed in this GC cycle so we don't
     * need to add it to the weak references list. */
    if (!is_full_marking(objspace) && RVALUE_OLD_P(obj)) {
        GC_ASSERT(RVALUE_MARKED(obj));
        GC_ASSERT(!objspace->flags.during_compacting);

        return;
    }

    rgengc_check_relation(objspace, obj);

    DURING_GC_COULD_MALLOC_REGION_START();
    {
        rb_darray_append(&objspace->weak_references, ptr);
    }
    DURING_GC_COULD_MALLOC_REGION_END();

    objspace->profile.weak_references_count++;
}
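/*
 * Lifetime note for rb_gc_mark_weak(): pointers registered above are
 * revisited in gc_update_weak_references() after marking, and slots whose
 * referent did not survive are overwritten with Qundef, so callers must be
 * prepared to see Qundef in place of a collected object.
 */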
void
rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;

    /* If we're not incremental marking, then the state of the objects can't
     * change so we don't need to do anything. */
    if (!is_incremental_marking(objspace)) return;
    /* If parent_obj has not been marked, then ptr has not yet been marked
     * weak, so we don't need to do anything. */
    if (!RVALUE_MARKED(parent_obj)) return;

    VALUE **ptr_ptr;
    rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
        if (*ptr_ptr == ptr) {
            *ptr_ptr = NULL;
            break;
        }
    }
}

static inline void
gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
{
    if (RVALUE_OLD_P(obj)) {
        objspace->rgengc.parent_object = obj;
    }
    else {
        objspace->rgengc.parent_object = Qfalse;
    }
}

static bool
gc_declarative_marking_p(const rb_data_type_t *type)
{
    return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
}
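/*
 * For types flagged RUBY_TYPED_DECL_MARKING, the dmark field does not hold
 * a function: gc_mark_children() below reinterprets it as a list of member
 * offsets terminated by RUBY_REF_END and marks each referenced VALUE as
 * movable.
 */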
static void mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass);

static void
gc_mark_children(rb_objspace_t *objspace, VALUE obj)
{
    register RVALUE *any = RANY(obj);
    gc_mark_set_parent(objspace, obj);

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_FLOAT:
      case T_BIGNUM:
      case T_SYMBOL:
        /* Not immediates, but does not have references and singleton class.
         *
         * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
         * ("symbol.c: remove rb_gc_mark_symbols()") */
        return;

      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        UNEXPECTED_NODE(rb_gc_mark);
        break;

      case T_IMEMO:
        rb_imemo_mark_and_move(obj, false);
        return;

      default:
        break;
    }

    gc_mark(objspace, any->as.basic.klass);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
        if (FL_TEST(obj, FL_SINGLETON)) {
            gc_mark(objspace, RCLASS_ATTACHED_OBJECT(obj));
        }
        // Continue to the shared T_CLASS/T_MODULE
      case T_MODULE:
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }

        mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        mark_cvc_tbl(objspace, obj);
        rb_cc_table_mark(obj);
        if (rb_shape_obj_too_complex(obj)) {
            mark_tbl_no_pin(objspace, (st_table *)RCLASS_IVPTR(obj));
        }
        else {
            for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
                gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
            }
        }
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));

        gc_mark(objspace, RCLASS_EXT(obj)->classpath);
        break;

      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }

        if (RCLASS_INCLUDER(obj)) {
            gc_mark(objspace, RCLASS_INCLUDER(obj));
        }
        mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        rb_cc_table_mark(obj);
        break;

      case T_ARRAY:
        if (ARY_SHARED_P(obj)) {
            VALUE root = ARY_SHARED_ROOT(obj);
            gc_mark(objspace, root);
        }
        else {
            long i, len = RARRAY_LEN(obj);
            const VALUE *ptr = RARRAY_CONST_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            if (STR_EMBED_P(any->as.string.as.heap.aux.shared)) {
                /* Embedded shared strings cannot be moved because this string
                 * points into the slot of the shared string. There may be code
                 * using the RSTRING_PTR on the stack, which would pin this
                 * string but not pin the shared string, causing it to move. */
                gc_mark_and_pin(objspace, any->as.string.as.heap.aux.shared);
            }
            else {
                gc_mark(objspace, any->as.string.as.heap.aux.shared);
            }
        }
        break;

      case T_DATA:
        {
            void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);

            if (ptr) {
                if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
                    size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;

                    for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
                        rb_gc_mark_movable(*(VALUE *)((char *)ptr + offset));
                    }
                }
                else {
                    RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
                        any->as.typeddata.type->function.dmark :
                        any->as.data.dmark;
                    if (mark_func) (*mark_func)(ptr);
                }
            }
        }
        break;

      case T_OBJECT:
        {
            rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
            if (rb_shape_obj_too_complex(obj)) {
                mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
            }
            else {
                const VALUE * const ptr = ROBJECT_IVPTR(obj);

                uint32_t i, len = ROBJECT_IV_COUNT(obj);
                for (i = 0; i < len; i++) {
                    gc_mark(objspace, ptr[i]);
                }
            }

            if (shape) {
                VALUE klass = RBASIC_CLASS(obj);

                // Increment max_iv_count if applicable, used to determine size pool allocation
                attr_index_t num_of_ivs = shape->next_iv_index;
                if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
                    RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
                }
            }
        }
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            gc_mark(objspace, any->as.file.fptr->self);
            gc_mark(objspace, any->as.file.fptr->pathv);
            gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
            gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
            gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            gc_mark(objspace, any->as.file.fptr->encs.ecopts);
            gc_mark(objspace, any->as.file.fptr->write_lock);
            gc_mark(objspace, any->as.file.fptr->timeout);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, any->as.regexp.src);
        break;

      case T_MATCH:
        gc_mark(objspace, any->as.match.regexp);
        if (any->as.match.str) {
            gc_mark(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, any->as.rational.num);
        gc_mark(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        gc_mark(objspace, any->as.complex.real);
        gc_mark(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i;
            const long len = RSTRUCT_LEN(obj);
            const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);

            for (i=0; i<len; i++) {
                gc_mark(objspace, ptr[i]);
            }
        }
        break;

      default:
#if GC_DEBUG
        rb_gcdebug_print_obj_condition((VALUE)obj);
#endif
        if (BUILTIN_TYPE(obj) == T_MOVED)  rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_NONE)   rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)any,
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
    }
}
/*
 * incremental: 0 -> not incremental (do all)
 * incremental: n -> mark at most `n' objects
 */
static inline int
gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;

    while (pop_mark_stack(mstack, &obj)) {
        if (UNDEF_P(obj)) continue; /* skip */

        if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        }
        gc_mark_children(objspace, obj);

        if (incremental) {
            if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            }
            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
            popped_count++;

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
                break;
            }
        }
        else {
            /* just ignore marking bits */
        }
    }

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
{
    return gc_mark_stacked_objects(objspace, TRUE, count);
}

static int
gc_mark_stacked_objects_all(rb_objspace_t *objspace)
{
    return gc_mark_stacked_objects(objspace, FALSE, 0);
}
#if PRINT_ROOT_TICKS
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];

static void
show_mark_ticks(void)
{
    int i;
    fprintf(stderr, "mark ticks result:\n");
    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        if (category) {
            fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
        }
        else {
            break;
        }
    }
}
#endif /* PRINT_ROOT_TICKS */
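/*
 * gc_mark_roots() walks every root category: VM globals, the finalizer
 * table, the machine context, end procs, the global variable table and the
 * object-id table. The MARK_CHECKPOINT labels feed the optional tick
 * profiler above and, via *categoryp, tell callers such as
 * objspace_allrefs() which root set is currently being scanned.
 */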
static void
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
{
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

#if PRINT_ROOT_TICKS
    tick_t start_tick = tick();
    int tick_count = 0;
    const char *prev_category = 0;

    if (mark_ticks_categories[0] == 0) {
        atexit(show_mark_ticks);
    }
#endif

    if (categoryp) *categoryp = "xxx";

    objspace->rgengc.parent_object = Qfalse;

#if PRINT_ROOT_TICKS
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
        tick_count++; \
    } \
    prev_category = category; \
    start_tick = tick(); \
} while (0)
#else /* PRINT_ROOT_TICKS */
#define MARK_CHECKPOINT_PRINT_TICK(category)
#endif

#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
} while (0)

    MARK_CHECKPOINT("vm");
    SET_STACK_END;
    rb_vm_mark(vm);
    if (vm->self) gc_mark(objspace, vm->self);

    MARK_CHECKPOINT("finalizers");
    mark_finalizer_tbl(objspace, finalizer_table);

    MARK_CHECKPOINT("machine_context");
    mark_current_machine_context(objspace, ec);

    /* mark protected global variables */

    MARK_CHECKPOINT("end_proc");
    rb_mark_end_proc();

    MARK_CHECKPOINT("global_tbl");
    rb_gc_mark_global_tbl();

    MARK_CHECKPOINT("object_id");
    mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */

    if (stress_to_class) rb_gc_mark(stress_to_class);

    MARK_CHECKPOINT("finish");
#undef MARK_CHECKPOINT
}
#if RGENGC_CHECK_MODE >= 4

#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj)   ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj)  ((const char *)((VALUE)(obj) >> 1))

struct reflist {
    VALUE *list;
    int pos;
    int size;
};

static struct reflist *
reflist_create(VALUE obj)
{
    struct reflist *refs = xmalloc(sizeof(struct reflist));
    refs->size = 1;
    refs->list = ALLOC_N(VALUE, refs->size);
    refs->list[0] = obj;
    refs->pos = 1;
    return refs;
}

static void
reflist_destruct(struct reflist *refs)
{
    xfree(refs->list);
    xfree(refs);
}

static void
reflist_add(struct reflist *refs, VALUE obj)
{
    if (refs->pos == refs->size) {
        refs->size *= 2;
        SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
    }

    refs->list[refs->pos++] = obj;
}

static void
reflist_dump(struct reflist *refs)
{
    int i;
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj)) { /* root */
            fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
        }
        else {
            fprintf(stderr, "<%s>", obj_info(obj));
        }
        if (i+1 < refs->pos) fprintf(stderr, ", ");
    }
}

static int
reflist_referred_from_machine_context(struct reflist *refs)
{
    int i;
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
    }
    return 0;
}
struct allrefs {
    rb_objspace_t *objspace;
    /* a -> obj1
     * b -> obj1
     * c -> obj1
     * c -> obj2
     * d -> obj2
     * #=> {obj1 => [a, b, c], obj2 => [c, d]}
     */
    struct st_table *references;
    const char *category;
    VALUE root_obj;
    mark_stack_t mark_stack;
};

static int
allrefs_add(struct allrefs *data, VALUE obj)
{
    struct reflist *refs;
    st_data_t r;

    if (st_lookup(data->references, obj, &r)) {
        refs = (struct reflist *)r;
        reflist_add(refs, data->root_obj);
        return 0;
    }
    else {
        refs = reflist_create(data->root_obj);
        st_insert(data->references, obj, (st_data_t)refs);
        return 1;
    }
}

static void
allrefs_i(VALUE obj, void *ptr)
{
    struct allrefs *data = (struct allrefs *)ptr;

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    }
}

static void
allrefs_roots_i(VALUE obj, void *ptr)
{
    struct allrefs *data = (struct allrefs *)ptr;
    if (strlen(data->category) == 0) rb_bug("!!!");
    data->root_obj = MAKE_ROOTSIG(data->category);

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    }
}
#define PUSH_MARK_FUNC_DATA(v) do { \
    struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
    GET_RACTOR()->mfd = (v);

#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)

static st_table *
objspace_allrefs(rb_objspace_t *objspace)
{
    struct allrefs data;
    struct gc_mark_func_data_struct mfd;
    VALUE obj;
    int prev_dont_gc = dont_gc_val();
    dont_gc_on();

    data.objspace = objspace;
    data.references = st_init_numtable();
    init_mark_stack(&data.mark_stack);

    mfd.mark_func = allrefs_roots_i;
    mfd.data = &data;

    /* traverse root objects */
    PUSH_MARK_FUNC_DATA(&mfd);
    GET_RACTOR()->mfd = &mfd;
    gc_mark_roots(objspace, &data.category);
    POP_MARK_FUNC_DATA();

    /* traverse rest objects reachable from root objects */
    while (pop_mark_stack(&data.mark_stack, &obj)) {
        rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
    }
    free_stack_chunks(&data.mark_stack);

    dont_gc_set(prev_dont_gc);
    return data.references;
}
static int
objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
{
    struct reflist *refs = (struct reflist *)value;
    reflist_destruct(refs);
    return ST_CONTINUE;
}

static void
objspace_allrefs_destruct(struct st_table *refs)
{
    st_foreach(refs, objspace_allrefs_destruct_i, 0);
    st_free_table(refs);
}

#if RGENGC_CHECK_MODE >= 5
static int
allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = (VALUE)k;
    struct reflist *refs = (struct reflist *)v;
    fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
    reflist_dump(refs);
    fprintf(stderr, "\n");
    return ST_CONTINUE;
}

static void
allrefs_dump(rb_objspace_t *objspace)
{
    VALUE size = objspace->rgengc.allrefs_table->num_entries;
    fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
    st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
}
#endif
static int
gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = k;
    struct reflist *refs = (struct reflist *)v;
    rb_objspace_t *objspace = (rb_objspace_t *)ptr;

    /* object should be marked or oldgen */
    if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        reflist_dump(refs);

        if (reflist_referred_from_machine_context(refs)) {
            fprintf(stderr, " (marked from machine stack).\n");
            /* marked from machine context can be false positive */
        }
        else {
            objspace->rgengc.error_count++;
            fprintf(stderr, "\n");
        }
    }
    return ST_CONTINUE;
}

static void
gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
{
    size_t saved_malloc_increase = objspace->malloc_params.increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
#endif
    VALUE already_disabled = rb_objspace_gc_disable(objspace);

    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);

    if (checker_func) {
        st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
    }

    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);
#endif
        if (checker_name) rb_bug("%s: GC has problem.", checker_name);
    }

    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;

    if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
    objspace->malloc_params.increase = saved_malloc_increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
#endif
}
#endif /* RGENGC_CHECK_MODE >= 4 */
struct verify_internal_consistency_struct {
    rb_objspace_t *objspace;
    int err_count;
    size_t live_object_count;
    size_t zombie_object_count;

    VALUE parent;
    size_t old_object_count;
    size_t remembered_shady_count;
};

static void
check_generation_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    const VALUE parent = data->parent;

    if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));

    if (!RVALUE_OLD_P(child)) {
        if (!RVALUE_REMEMBERED(parent) &&
            !RVALUE_REMEMBERED(child) &&
            !RVALUE_UNCOLLECTIBLE(child)) {
            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
            data->err_count++;
        }
    }
}

static void
check_color_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    const VALUE parent = data->parent;

    if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
                obj_info(parent), obj_info(child));
        data->err_count++;
    }
}

static void
check_children_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    if (check_rvalue_consistency_force(child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
                obj_info(child), obj_info(data->parent));
        rb_print_backtrace(stderr); /* C backtrace will help to debug */

        data->err_count++;
    }
}
static int
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
                              struct verify_internal_consistency_struct *data)
{
    VALUE obj;
    rb_objspace_t *objspace = data->objspace;

    for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
        void *poisoned = asan_unpoison_object_temporary(obj);

        if (is_live_object(objspace, obj)) {
            /* count objects */
            data->live_object_count++;
            data->parent = obj;

            /* Normally, we don't expect T_MOVED objects to be in the heap.
             * But they can stay alive on the stack, */
            if (!gc_object_moved_p(objspace, obj)) {
                /* moved slots don't have children */
                rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
            }

            /* check health of children */
            if (RVALUE_OLD_P(obj)) data->old_object_count++;
            if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;

            if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
                /* reachable objects from an oldgen object should be old or (young with remember) */
                data->parent = obj;
                rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
            }

            if (is_incremental_marking(objspace)) {
                if (RVALUE_BLACK_P(obj)) {
                    /* reachable objects from black objects should be black or grey objects */
                    data->parent = obj;
                    rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
                }
            }
        }
        else {
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                data->zombie_object_count++;

                if ((RBASIC(obj)->flags & ~ZOMBIE_OBJ_KEPT_FLAGS) != T_ZOMBIE) {
                    fprintf(stderr, "verify_internal_consistency_i: T_ZOMBIE has extra flags set: %s\n",
                            obj_info(obj));
                    data->err_count++;
                }

                if (!!FL_TEST(obj, FL_FINALIZE) != !!st_is_member(finalizer_table, obj)) {
                    fprintf(stderr, "verify_internal_consistency_i: FL_FINALIZE %s but %s finalizer_table: %s\n",
                            FL_TEST(obj, FL_FINALIZE) ? "set" : "not set", st_is_member(finalizer_table, obj) ? "in" : "not in",
                            obj_info(obj));
                    data->err_count++;
                }
            }
        }
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
    }

    return 0;
}
static int
gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;

    short slot_size = page->slot_size;
    uintptr_t start = (uintptr_t)page->start;
    uintptr_t end = start + page->total_slots * slot_size;

    for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
        VALUE val = (VALUE)ptr;
        void *poisoned = asan_unpoison_object_temporary(val);
        enum ruby_value_type type = BUILTIN_TYPE(val);

        if (type == T_NONE) free_objects++;
        if (type == T_ZOMBIE) zombie_objects++;
        if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
            has_remembered_shady = TRUE;
        }
        if (RVALUE_PAGE_MARKING(page, val)) {
            has_remembered_old = TRUE;
            remembered_old_objects++;
        }

        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
            asan_poison_object(val);
        }
    }

    if (!is_incremental_marking(objspace) &&
        page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {

        for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
            VALUE val = (VALUE)ptr;
            if (RVALUE_PAGE_MARKING(page, val)) {
                fprintf(stderr, "marking -> %s\n", obj_info(val));
            }
        }
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
    }

    if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");
    }

    if (0) {
        /* free_slots may not equal to free_objects */
        if (page->free_slots != free_objects) {
            rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
        }
    }
    if (page->final_slots != zombie_objects) {
        rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
    }

    return remembered_old_objects;
}
static int
gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
{
    int remembered_old_objects = 0;
    struct heap_page *page = 0;

    ccan_list_for_each(head, page, page_node) {
        asan_unlock_freelist(page);
        RVALUE *p = page->freelist;
        while (p) {
            VALUE vp = (VALUE)p;
            VALUE prev = vp;
            asan_unpoison_object(vp, false);
            if (BUILTIN_TYPE(vp) != T_NONE) {
                fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
            }
            p = p->as.free.next;
            asan_poison_object(prev);
        }
        asan_lock_freelist(page);

        if (page->flags.has_remembered_objects == FALSE) {
            remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
        }
    }

    return remembered_old_objects;
}

static int
gc_verify_heap_pages(rb_objspace_t *objspace)
{
    int remembered_old_objects = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
        remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
    }
    return remembered_old_objects;
}
/*
 *  call-seq:
 *     GC.verify_internal_consistency                  -> nil
 *
 *  Verify internal consistency.
 *
 *  This method is implementation specific.
 *  Now this method checks generational consistency
 *  if RGenGC is supported.
 */
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
    gc_verify_internal_consistency(&rb_objspace);
    return Qnil;
}
static void
gc_verify_internal_consistency_(rb_objspace_t *objspace)
{
    struct verify_internal_consistency_struct data = {0};

    data.objspace = objspace;
    gc_report(5, objspace, "gc_verify_internal_consistency: start\n");

    /* check relations */
    for (size_t i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short slot_size = page->slot_size;

        uintptr_t start = (uintptr_t)page->start;
        uintptr_t end = start + page->total_slots * slot_size;

        verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
    }

    if (data.err_count != 0) {
#if RGENGC_CHECK_MODE >= 5
        objspace->rgengc.error_count = data.err_count;
        gc_marks_check(objspace, NULL, NULL);
        allrefs_dump(objspace);
#endif
        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
    }

    /* check heap_page status */
    gc_verify_heap_pages(objspace);

    /* check counters */

    if (!is_lazy_sweeping(objspace) &&
        !finalizing &&
        ruby_single_main_ractor != NULL) {
        if (objspace_live_slots(objspace) != data.live_object_count) {
            fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
                    heap_pages_final_slots, total_freed_objects(objspace));
            rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace_live_slots(objspace), data.live_object_count);
        }
    }

    if (!is_marking(objspace)) {
        if (objspace->rgengc.old_objects != data.old_object_count) {
            rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace->rgengc.old_objects, data.old_object_count);
        }
        if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
            rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
        }
    }

    if (!finalizing) {
        size_t list_count = 0;

        {
            VALUE z = heap_pages_deferred_final;
            while (z) {
                list_count++;
                z = RZOMBIE(z)->next;
            }
        }

        if (heap_pages_final_slots != data.zombie_object_count ||
            heap_pages_final_slots != list_count) {

            rb_bug("inconsistent finalizing object count:\n"
                   "  expect %"PRIuSIZE"\n"
                   "  but    %"PRIuSIZE" zombies\n"
                   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
                   heap_pages_final_slots,
                   data.zombie_object_count,
                   list_count);
        }
    }

    gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
}
static void
gc_verify_internal_consistency(rb_objspace_t *objspace)
{
    RB_VM_LOCK_ENTER();
    {
        rb_vm_barrier(); // stop other ractors

        unsigned int prev_during_gc = during_gc;
        during_gc = FALSE; // stop gc here
        {
            gc_verify_internal_consistency_(objspace);
        }
        during_gc = prev_during_gc;
    }
    RB_VM_LOCK_LEAVE();
}

void
rb_gc_verify_internal_consistency(void)
{
    gc_verify_internal_consistency(&rb_objspace);
}

static void
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    if (heap->pooled_pages) {
        if (heap->free_pages) {
            struct heap_page *free_pages_tail = heap->free_pages;
            while (free_pages_tail->free_next) {
                free_pages_tail = free_pages_tail->free_next;
            }
            free_pages_tail->free_next = heap->pooled_pages;
        }
        else {
            heap->free_pages = heap->pooled_pages;
        }

        heap->pooled_pages = NULL;
    }
}
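/*
 * gc_marks_start() prepares either a full mark (clear all mark and remember
 * bits, reset old-object accounting, and size the incremental step from the
 * previous cycle's marked_slots) or a minor mark (only the remembered sets
 * are re-scanned, and old/uncollectible objects count as already marked).
 */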
static void
gc_marks_start(rb_objspace_t *objspace, int full_mark)
{
    /* start marking */
    gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
    gc_mode_transition(objspace, gc_mode_marking);

    if (full_mark) {
        size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
        objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;

        if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
                       "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
                       "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
                       objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
        objspace->flags.during_minor_gc = FALSE;
        if (ruby_enable_autocompact) {
            objspace->flags.during_compacting |= TRUE;
        }
        objspace->profile.major_gc_count++;
        objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
        objspace->rgengc.old_objects = 0;
        objspace->rgengc.last_major_gc = objspace->profile.count;
        objspace->marked_slots = 0;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
            rgengc_mark_and_rememberset_clear(objspace, heap);
            heap_move_pooled_pages_to_free_pages(heap);

            if (objspace->flags.during_compacting) {
                struct heap_page *page = NULL;

                ccan_list_for_each(&heap->pages, page, page_node) {
                    page->pinned_slots = 0;
                }
            }
        }
    }
    else {
        objspace->flags.during_minor_gc = TRUE;
        objspace->marked_slots =
            objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
        objspace->profile.minor_gc_count++;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
        }
    }

    gc_mark_roots(objspace, NULL);

    gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
              full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
}
static inline void
gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
{
    if (bits) {
        do {
            if (bits & 1) {
                gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
                GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
                GC_ASSERT(RVALUE_MARKED((VALUE)p));
                gc_mark_children(objspace, (VALUE)p);
            }
            p += BASE_SLOT_SIZE;
            bits >>= 1;
        } while (bits);
    }
}

static void
gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;

    ccan_list_for_each(&heap->pages, page, page_node) {
        bits_t *mark_bits = page->mark_bits;
        bits_t *wbun_bits = page->wb_unprotected_bits;
        uintptr_t p = page->start;
        size_t j;

        bits_t bits = mark_bits[0] & wbun_bits[0];
        bits >>= NUM_IN_PAGE(p);
        gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
        p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;

        for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
            bits_t bits = mark_bits[j] & wbun_bits[j];

            gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
            p += BITS_BITLENGTH * BASE_SLOT_SIZE;
        }
    }

    gc_mark_stacked_objects_all(objspace);
}

static void
gc_update_weak_references(rb_objspace_t *objspace)
{
    size_t retained_weak_references_count = 0;
    VALUE **ptr_ptr;
    rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
        if (!*ptr_ptr) continue;

        VALUE obj = **ptr_ptr;

        if (RB_SPECIAL_CONST_P(obj)) continue;

        if (!RVALUE_MARKED(obj)) {
            **ptr_ptr = Qundef;
        }
        else {
            retained_weak_references_count++;
        }
    }

    objspace->profile.retained_weak_references_count = retained_weak_references_count;

    rb_darray_clear(objspace->weak_references);
    DURING_GC_COULD_MALLOC_REGION_START();
    {
        rb_darray_resize_capa(&objspace->weak_references, retained_weak_references_count);
    }
    DURING_GC_COULD_MALLOC_REGION_END();
}
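/*
 * gc_marks_finish() drains any remaining incremental marking work, resolves
 * weak references, and then applies the heuristics that decide whether the
 * next GC must be major: too few reclaimable slots (NOFREE), too many
 * remembered WB-unprotected objects (SHADY), or old-object growth past its
 * limit (OLDGEN).
 */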
static void
gc_marks_finish(rb_objspace_t *objspace)
{
    /* finish incremental GC */
    if (is_incremental_marking(objspace)) {
        if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
            rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
                   mark_stack_size(&objspace->mark_stack));
        }

        gc_mark_roots(objspace, 0);
        while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
        }
#endif

        objspace->flags.during_incremental_marking = FALSE;
        /* check children of all marked wb-unprotected objects */
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
        }
    }

    gc_update_weak_references(objspace);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

#if RGENGC_CHECK_MODE >= 4
    during_gc = FALSE;
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
    during_gc = TRUE;
#endif

    {
        /* decide full GC is needed or not */
        size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
        size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
        size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
        size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
        int full_marking = is_full_marking(objspace);
        const int r_cnt = GET_VM()->ractor.cnt;
        const int r_mul = r_cnt > 8 ? 8 : r_cnt; // up to 8

        GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);

        /* Setup freeable slots. */
        size_t total_init_slots = 0;
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            total_init_slots += gc_params.size_pool_init_slots[i] * r_mul;
        }

        if (max_free_slots < total_init_slots) {
            max_free_slots = total_init_slots;
        }

        if (sweep_slots > max_free_slots) {
            heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
        }
        else {
            heap_pages_freeable_pages = 0;
        }

        /* check free_min */
        if (min_free_slots < gc_params.heap_free_slots * r_mul) {
            min_free_slots = gc_params.heap_free_slots * r_mul;
        }

        if (sweep_slots < min_free_slots) {
            if (!full_marking) {
                if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
                    full_marking = TRUE;
                    /* do not update last_major_gc, because full marking is not done. */
                    /* goto increment; */
                }
                else {
                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
                    gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
                }
            }
        }

        if (full_marking) {
            /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
            const double r = gc_params.oldobject_limit_factor;
            objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
                (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
                (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
            );
            objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
        }

        if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
            gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
        }
        if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
            gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
        }
        if (RGENGC_FORCE_MAJOR_GC) {
            gc_needs_major_flags = GPR_FLAG_MAJOR_BY_FORCE;
        }

        gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
                  "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
                  "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
                  objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
                  gc_needs_major_flags ? "major" : "minor");
    }

    rb_ractor_finish_marking();

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
}
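/*
 * Compaction below uses two cursors per heap: compact_cursor walks pages
 * from the tail of the page list looking for movable objects, while
 * sweeping_page advances from the head providing destination slots; the
 * heap is fully compacted once the cursors meet. This is essentially the
 * classic two-finger algorithm, adapted to multiple size pools (an object
 * may move to a different pool when its required embedded size changes).
 */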
static bool
gc_compact_heap_cursors_met_p(rb_heap_t *heap)
{
    return heap->sweeping_page == heap->compact_cursor;
}

static rb_size_pool_t *
gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, VALUE src)
{
    size_t obj_size;
    size_t idx = 0;

    switch (BUILTIN_TYPE(src)) {
      case T_ARRAY:
        obj_size = rb_ary_size_as_embedded(src);
        break;

      case T_OBJECT:
        if (rb_shape_obj_too_complex(src)) {
            return &size_pools[0];
        }
        else {
            obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
        }
        break;

      case T_STRING:
        obj_size = rb_str_size_as_embedded(src);
        break;

      case T_HASH:
        obj_size = sizeof(struct RHash) + (RHASH_ST_TABLE_P(src) ? sizeof(st_table) : sizeof(ar_table));
        break;

      default:
        return src_pool;
    }

    if (rb_gc_size_allocatable_p(obj_size)){
        idx = rb_gc_size_pool_id_for_size(obj_size);
    }
    return &size_pools[idx];
}
static bool
gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_pool, VALUE src)
{
    GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
    GC_ASSERT(gc_is_moveable_obj(objspace, src));

    rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
    rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
    rb_shape_t *new_shape = NULL;
    rb_shape_t *orig_shape = NULL;

    if (gc_compact_heap_cursors_met_p(dheap)) {
        return dheap != heap;
    }

    if (RB_TYPE_P(src, T_OBJECT)) {
        orig_shape = rb_shape_get_shape(src);
        if (dheap != heap && !rb_shape_obj_too_complex(src)) {
            rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + FIRST_T_OBJECT_SHAPE_ID));
            new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);

            if (!new_shape) {
                dest_pool = size_pool;
                dheap = heap;
            }
        }
    }

    while (!try_move(objspace, dheap, dheap->free_pages, src)) {
        struct gc_sweep_context ctx = {
            .page = dheap->sweeping_page,
            .final_slots = 0,
            .freed_slots = 0,
            .empty_slots = 0,
        };

        /* The page of src could be partially compacted, so it may contain
         * T_MOVED. Sweeping a page may read objects on this page, so we
         * need to lock the page. */
        lock_page_body(objspace, GET_PAGE_BODY(src));
        gc_sweep_page(objspace, dheap, &ctx);
        unlock_page_body(objspace, GET_PAGE_BODY(src));

        if (dheap->sweeping_page->free_slots > 0) {
            heap_add_freepage(dheap, dheap->sweeping_page);
        }

        dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
        if (gc_compact_heap_cursors_met_p(dheap)) {
            return dheap != heap;
        }
    }

    if (orig_shape) {
        if (new_shape) {
            VALUE dest = rb_gc_location(src);
            rb_shape_set_shape(dest, new_shape);
        }
        RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
    }

    return true;
}
static bool
gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
{
    short slot_size = page->slot_size;
    short slot_bits = slot_size / BASE_SLOT_SIZE;
    GC_ASSERT(slot_bits > 0);

    do {
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % BASE_SLOT_SIZE == 0);

        if (bitset & 1) {
            objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;

            if (gc_is_moveable_obj(objspace, vp)) {
                if (!gc_compact_move(objspace, heap, size_pool, vp)) {
                    //the cursors met. bubble up
                    return false;
                }
            }
        }
        p += slot_size;
        bitset >>= slot_bits;
    } while (bitset);

    return true;
}

// Iterate up all the objects in page, moving them to where they want to go
static bool
gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
{
    GC_ASSERT(page == heap->compact_cursor);

    bits_t *mark_bits, *pin_bits;
    bits_t bitset;
    uintptr_t p = page->start;

    mark_bits = page->mark_bits;
    pin_bits = page->pinned_bits;

    // objects that can be moved are marked and not pinned
    bitset = (mark_bits[0] & ~pin_bits[0]);
    bitset >>= NUM_IN_PAGE(p);
    if (bitset) {
        if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
            return false;
    }
    p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;

    for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
        bitset = (mark_bits[j] & ~pin_bits[j]);
        if (bitset) {
            if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
                return false;
        }
        p += BITS_BITLENGTH * BASE_SLOT_SIZE;
    }

    return true;
}
static bool
gc_compact_all_compacted_p(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

        if (heap->total_pages > 0 &&
            !gc_compact_heap_cursors_met_p(heap)) {
            return false;
        }
    }

    return true;
}

static void
gc_sweep_compact(rb_objspace_t *objspace)
{
    gc_compact_start(objspace);
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    while (!gc_compact_all_compacted_p(objspace)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

            if (gc_compact_heap_cursors_met_p(heap)) {
                continue;
            }

            struct heap_page *start_page = heap->compact_cursor;

            if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
                lock_page_body(objspace, GET_PAGE_BODY(start_page->start));

                continue;
            }

            // If we get here, we've finished moving all objects on the compact_cursor page
            // So we can lock it and move the cursor on to the next one.
            lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
            heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
        }
    }

    gc_compact_finish(objspace);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
}
static void
gc_marks_rest(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_marks_rest\n");

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
    }

    if (is_incremental_marking(objspace)) {
        while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
    }
    else {
        gc_mark_stacked_objects_all(objspace);
    }

    gc_marks_finish(objspace);
}

static bool
gc_marks_step(rb_objspace_t *objspace, size_t slots)
{
    bool marking_finished = false;

    GC_ASSERT(is_marking(objspace));
    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        gc_marks_finish(objspace);

        marking_finished = true;
    }

    return marking_finished;
}

static bool
gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    GC_ASSERT(dont_gc_val() == FALSE);
    bool marking_finished = true;

    gc_marking_enter(objspace);

    if (heap->free_pages) {
        gc_report(2, objspace, "gc_marks_continue: has pooled pages");

        marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
    }
    else {
        gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
                  mark_stack_size(&objspace->mark_stack));
        size_pool->force_incremental_marking_finish_count++;
        gc_marks_rest(objspace);
    }

    gc_marking_exit(objspace);

    return marking_finished;
}
static bool
gc_marks(rb_objspace_t *objspace, int full_mark)
{
    gc_prof_mark_timer_start(objspace);
    gc_marking_enter(objspace);

    bool marking_finished = false;

    /* setup marking */

    gc_marks_start(objspace, full_mark);
    if (!is_incremental_marking(objspace)) {
        gc_marks_rest(objspace);
        marking_finished = true;
    }

#if RGENGC_PROFILE > 0
    if (gc_prof_record(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->old_objects = objspace->rgengc.old_objects;
    }
#endif

    gc_marking_exit(objspace);
    gc_prof_mark_timer_stop(objspace);

    return marking_finished;
}
static void
gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
{
    if (level <= RGENGC_DEBUG) {
        char buf[1024];
        FILE *out = stderr;
        va_list args;
        const char *status = " ";

        if (during_gc) {
            status = is_full_marking(objspace) ? "+" : "-";
        }
        else {
            if (is_lazy_sweeping(objspace)) {
                status = "S";
            }
            if (is_incremental_marking(objspace)) {
                status = "M";
            }
        }

        va_start(args, fmt);
        vsnprintf(buf, 1024, fmt, args);
        va_end(args);

        fprintf(out, "%s|", status);
        fputs(buf, out);
    }
}
/* bit operations */

static int
rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *bits = &page->remembered_bits[0];

    if (MARKED_IN_BITMAP(bits, obj)) {
        return FALSE;
    }
    else {
        page->flags.has_remembered_objects = TRUE;
        MARK_IN_BITMAP(bits, obj);
        return TRUE;
    }
}

/* return FALSE if already remembered */
static int
rgengc_remember(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
              RVALUE_REMEMBERED(obj) ? "was already remembered" : "is remembered now");

    check_rvalue_consistency(obj);

    if (RGENGC_CHECK_MODE) {
        if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
    }

#if RGENGC_PROFILE > 0
    if (!RVALUE_REMEMBERED(obj)) {
        if (RVALUE_WB_UNPROTECTED(obj) == 0) {
            objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
        }
    }
#endif /* RGENGC_PROFILE > 0 */

    return rgengc_remembersetbits_set(objspace, obj);
}
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0
#endif

static inline void
rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
{
    if (bitset) {
        do {
            if (bitset & 1) {
                VALUE obj = (VALUE)p;
                gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
                GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
                GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));

                gc_mark_children(objspace, obj);
            }
            p += BASE_SLOT_SIZE;
            bitset >>= 1;
        } while (bitset);
    }
}

static void
rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
{
    size_t j;
    struct heap_page *page = 0;
#if PROFILE_REMEMBERSET_MARK
    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");

    ccan_list_for_each(&heap->pages, page, page_node) {
        if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
            uintptr_t p = page->start;
            bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
            bits_t *remembered_bits = page->remembered_bits;
            bits_t *uncollectible_bits = page->uncollectible_bits;
            bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
#if PROFILE_REMEMBERSET_MARK
            if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
            else if (page->flags.has_remembered_objects) has_old++;
            else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
#endif
            for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
                bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
                remembered_bits[j] = 0;
            }
            page->flags.has_remembered_objects = FALSE;

            bitset = bits[0];
            bitset >>= NUM_IN_PAGE(p);
            rgengc_rememberset_mark_plane(objspace, p, bitset);
            p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;

            for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
                bitset = bits[j];
                rgengc_rememberset_mark_plane(objspace, p, bitset);
                p += BITS_BITLENGTH * BASE_SLOT_SIZE;
            }
        }
#if PROFILE_REMEMBERSET_MARK
        else {
            skip++;
        }
#endif
    }

#if PROFILE_REMEMBERSET_MARK
    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
}
static void
rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;

    ccan_list_for_each(&heap->pages, page, page_node) {
        memset(&page->mark_bits[0],          0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->marking_bits[0],       0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->remembered_bits[0],    0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->pinned_bits[0],        0, HEAP_PAGE_BITMAP_SIZE);
        page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
        page->flags.has_remembered_objects = FALSE;
    }
}
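/*
 * Write-barrier entry points. rb_gc_writebarrier(a, b) runs whenever a
 * reference from `a` to `b` is stored (C code reaches it through the public
 * RB_OBJ_WRITE() macro, e.g. RB_OBJ_WRITE(owner, &owner_data->cache, value)
 * with hypothetical field names). Outside incremental marking only the
 * generational barrier is needed: remember old `a` when it gains a young
 * reference. During incremental marking the barrier must also preserve the
 * tri-color invariant by greying `b` when a black `a` starts pointing at a
 * white `b`.
 */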
NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));

static void
gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
        if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
    }

    /* mark `a' and remember (default behavior) */
    if (!RVALUE_REMEMBERED(a)) {
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            rgengc_remember(objspace, a);
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
    }

    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
}

static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}

NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));

static void
gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));

    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);
            }
        }
        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            rgengc_remember(objspace, a);
        }

        if (UNLIKELY(objspace->flags.during_compacting)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
        }
    }
}
void
rb_gc_writebarrier(VALUE a, VALUE b)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RGENGC_CHECK_MODE) {
        if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
        if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
    }

  retry:
    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
            // do nothing
        }
        else {
            gc_writebarrier_generational(a, b, objspace);
        }
    }
    else {
        bool retry = false;
        /* slow path */
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            if (is_incremental_marking(objspace)) {
                gc_writebarrier_incremental(a, b, objspace);
            }
            else {
                retry = true;
            }
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();

        if (retry) goto retry;
    }
    return;
}

void
rb_gc_writebarrier_unprotect(VALUE obj)
{
    if (RVALUE_WB_UNPROTECTED(obj)) {
        return;
    }
    else {
        rb_objspace_t *objspace = &rb_objspace;

        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
                  RVALUE_REMEMBERED(obj) ? " (already remembered)" : "");

        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            if (RVALUE_OLD_P(obj)) {
                gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
                RVALUE_DEMOTE(objspace, obj);
                gc_mark_set(objspace, obj);
                gc_remember_unprotected(objspace, obj);

#if RGENGC_PROFILE
                objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
                objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif /* RGENGC_PROFILE >= 2 */
#endif /* RGENGC_PROFILE */
            }
            else {
                RVALUE_AGE_RESET(obj);
            }

            RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
            MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();
    }
}
/*
 * remember `obj' if needed.
 */
void
rb_gc_writebarrier_remember(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);
        }
    }
    else {
        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
        }
    }
}

void
rb_gc_copy_attributes(VALUE dest, VALUE obj)
{
    if (RVALUE_WB_UNPROTECTED(obj)) {
        rb_gc_writebarrier_unprotect(dest);
    }
    rb_gc_copy_finalizer(dest, obj);
}

size_t
rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
{
    size_t n = 0;
    static ID ID_marked;
    static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;

    if (!ID_marked) {
#define I(s) ID_##s = rb_intern(#s);
        I(marked);
        I(wb_protected);
        I(old);
        I(marking);
        I(uncollectible);
        I(pinned);
#undef I
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max)                   flags[n++] = ID_wb_protected;
    if (RVALUE_OLD_P(obj) && n<max)                                 flags[n++] = ID_old;
    if (RVALUE_UNCOLLECTIBLE(obj) && n<max)                         flags[n++] = ID_uncollectible;
    if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
    if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max)    flags[n++] = ID_marked;
    if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max)  flags[n++] = ID_pinned;
    return n;
}
void
rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
{
    newobj_cache->incremental_mark_step_allocated_slots = 0;

    for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
        rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];

        struct heap_page *page = cache->using_page;
        RVALUE *freelist = cache->freelist;
        RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);

        heap_page_freelist_append(page, freelist);

        cache->using_page = NULL;
        cache->freelist = NULL;
    }
}

void
rb_gc_register_mark_object(VALUE obj)
{
    if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
        return;

    rb_vm_register_global_object(obj);
}
void
rb_gc_register_address(VALUE *addr)
{
    rb_vm_t *vm = GET_VM();

    VALUE obj = *addr;

    struct global_object_list *tmp = ALLOC(struct global_object_list);
    tmp->next = vm->global_object_list;
    tmp->varptr = addr;
    vm->global_object_list = tmp;

    /*
     * Because some C extensions have assignment-then-register bugs,
     * we guard `obj` here so that it would not get swept defensively.
     */
    RB_GC_GUARD(obj);
    if (0 && !SPECIAL_CONST_P(obj)) {
        rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
                rb_obj_class(obj));
        rb_print_backtrace(stderr);
    }
}

void
rb_gc_unregister_address(VALUE *addr)
{
    rb_vm_t *vm = GET_VM();
    struct global_object_list *tmp = vm->global_object_list;

    if (tmp->varptr == addr) {
        vm->global_object_list = tmp->next;
        xfree(tmp);
        return;
    }
    while (tmp->next) {
        if (tmp->next->varptr == addr) {
            struct global_object_list *t = tmp->next;

            tmp->next = tmp->next->next;
            xfree(t);
            break;
        }
        tmp = tmp->next;
    }
}

void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}
enum {
    gc_stress_no_major,
    gc_stress_no_immediate_sweep,
    gc_stress_full_mark_after_malloc,
    gc_stress_max
};

#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))

static void
heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    if (!heap->free_pages) {
        if (!heap_increment(objspace, size_pool, heap)) {
            size_pool_allocatable_pages_set(objspace, size_pool, 1);
            heap_increment(objspace, size_pool, heap);
        }
    }
}

static int
ready_to_gc(rb_objspace_t *objspace)
{
    if (dont_gc_val() || during_gc || ruby_disable_gc) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
        }
        return FALSE;
    }
    else {
        return TRUE;
    }
}
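/*
 * gc_reset_malloc_info() adapts malloc_limit after each GC. A worked
 * example with the default growth factor of 1.4 (tunable via
 * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR): if a cycle allocated inc = 24 MiB
 * against malloc_limit = 16 MiB, the new limit becomes 24 * 1.4 = 33.6 MiB
 * (capped at malloc_limit_max); if inc stayed at or below the limit, the
 * limit instead decays by 2% (the 0.98 "magic number" below), bounded by
 * malloc_limit_min.
 */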
static void
gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
{
    gc_prof_set_malloc_info(objspace);
    {
        size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
        size_t old_limit = malloc_limit;

        if (inc > malloc_limit) {
            malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
            if (malloc_limit > gc_params.malloc_limit_max) {
                malloc_limit = gc_params.malloc_limit_max;
            }
        }
        else {
            malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
            if (malloc_limit < gc_params.malloc_limit_min) {
                malloc_limit = gc_params.malloc_limit_min;
            }
        }

        if (0) {
            if (old_limit != malloc_limit) {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
                        rb_gc_count(), old_limit, malloc_limit);
            }
            else {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
                        rb_gc_count(), malloc_limit);
            }
        }
    }

    /* reset oldmalloc info */
#if RGENGC_ESTIMATE_OLDMALLOC
    if (!full_mark) {
        if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
            gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
            objspace->rgengc.oldmalloc_increase_limit =
                (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);

            if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
            }
        }

        if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
                       rb_gc_count(),
                       gc_needs_major_flags,
                       objspace->rgengc.oldmalloc_increase,
                       objspace->rgengc.oldmalloc_increase_limit,
                       gc_params.oldmalloc_limit_max);
    }
    else {
        /* major GC */
        objspace->rgengc.oldmalloc_increase = 0;

        if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
            objspace->rgengc.oldmalloc_increase_limit =
                (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
            if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
            }
        }
    }
#endif
}
static int
garbage_collect(rb_objspace_t *objspace, unsigned int reason)
{
    int ret;

    RB_VM_LOCK_ENTER();
    {
#if GC_PROFILE_MORE_DETAIL
        objspace->profile.prepare_time = getrusage_time();
#endif

        gc_rest(objspace);

#if GC_PROFILE_MORE_DETAIL
        objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif

        ret = gc_start(objspace, reason);
    }
    RB_VM_LOCK_LEAVE();

    return ret;
}

static int
gc_start(rb_objspace_t *objspace, unsigned int reason)
{
    unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);

    /* reason may be clobbered, later, so keep set immediate_sweep here */
    objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);

    if (!heap_allocated_pages) return TRUE; /* heap is not ready */
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */

    GC_ASSERT(gc_mode(objspace) == gc_mode_none);
    GC_ASSERT(!is_lazy_sweeping(objspace));
    GC_ASSERT(!is_incremental_marking(objspace));

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_start, &lock_lev);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    if (ruby_gc_stressful) {
        int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;

        if ((flag & (1<<gc_stress_no_major)) == 0) {
            do_full_mark = TRUE;
        }

        objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
    }

    if (gc_needs_major_flags) {
        reason |= gc_needs_major_flags;
        do_full_mark = TRUE;
    }
    else if (RGENGC_FORCE_MAJOR_GC) {
        reason = GPR_FLAG_MAJOR_BY_FORCE;
        do_full_mark = TRUE;
    }

    gc_needs_major_flags = GPR_FLAG_NONE;

    if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
        reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
    }

    if (objspace->flags.dont_incremental ||
        reason & GPR_FLAG_IMMEDIATE_MARK ||
        ruby_gc_stressful) {
        objspace->flags.during_incremental_marking = FALSE;
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }

    /* Explicitly enable compaction (GC.compact) */
    if (do_full_mark && ruby_enable_autocompact) {
        objspace->flags.during_compacting = TRUE;
#if RGENGC_CHECK_MODE
        objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
#endif
    }
    else {
        objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
    }

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;
    }

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
              reason,
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

#if USE_DEBUG_COUNTER
    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady,  reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force,  reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
#endif
    }
    else {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi,   reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
    }
#endif

    objspace->profile.count++;
    objspace->profile.latest_gc_info = reason;
    objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
    objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
    objspace->profile.weak_references_count = 0;
    objspace->profile.retained_weak_references_count = 0;
    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace, do_full_mark);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
    GC_ASSERT(during_gc);

    gc_prof_timer_start(objspace);
    {
        if (gc_marks(objspace, do_full_mark)) {
            gc_sweep(objspace);
        }
    }
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, gc_enter_event_start, &lock_lev);
    return TRUE;
}
static void
gc_rest(rb_objspace_t *objspace)
{
    if (is_incremental_marking(objspace) || is_lazy_sweeping(objspace)) {
        unsigned int lock_lev;
        gc_enter(objspace, gc_enter_event_rest, &lock_lev);

        if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);

        if (is_incremental_marking(objspace)) {
            gc_marking_enter(objspace);
            gc_marks_rest(objspace);
            gc_marking_exit(objspace);
        }

        if (is_lazy_sweeping(objspace)) {
            gc_sweeping_enter(objspace);
            gc_sweep_rest(objspace);
            gc_sweeping_exit(objspace);
        }

        gc_exit(objspace, gc_enter_event_rest, &lock_lev);
    }
}

struct objspace_and_reason {
    rb_objspace_t *objspace;
    unsigned int reason;
};

static void
gc_current_status_fill(rb_objspace_t *objspace, char *buff)
{
    int i = 0;
    if (is_marking(objspace)) {
        buff[i++] = 'M';
        if (is_full_marking(objspace))        buff[i++] = 'F';
        if (is_incremental_marking(objspace)) buff[i++] = 'I';
    }
    else if (is_sweeping(objspace)) {
        buff[i++] = 'S';
        if (is_lazy_sweeping(objspace))       buff[i++] = 'L';
    }
    else {
        buff[i++] = 'N';
    }
    buff[i] = '\0';
}

static const char *
gc_current_status(rb_objspace_t *objspace)
{
    static char buff[0x10];
    gc_current_status_fill(objspace, buff);
    return buff;
}
#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];

static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_count++;
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);
#if 1
        /* [last mutator time] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;
#else
        /* [enter_tick] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
#endif
    }
}
#else /* PRINT_ENTER_EXIT_TICK */
static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    /* null */
}
#endif /* PRINT_ENTER_EXIT_TICK */
static const char *
gc_enter_event_cstr(enum gc_enter_event event)
{
    switch (event) {
      case gc_enter_event_start:       return "start";
      case gc_enter_event_continue:    return "continue";
      case gc_enter_event_rest:        return "rest";
      case gc_enter_event_finalizer:   return "finalizer";
      case gc_enter_event_rb_memerror: return "rb_memerror";
    }
    return NULL;
}

static void
gc_enter_count(enum gc_enter_event event)
{
    switch (event) {
      case gc_enter_event_start:       RB_DEBUG_COUNTER_INC(gc_enter_start); break;
      case gc_enter_event_continue:    RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
      case gc_enter_event_rest:        RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
      case gc_enter_event_finalizer:   RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
      case gc_enter_event_rb_memerror: /* nothing */ break;
    }
}

static bool current_process_time(struct timespec *ts);

static void
gc_clock_start(struct timespec *ts)
{
    if (!current_process_time(ts)) {
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
    }
}

static uint64_t
gc_clock_end(struct timespec *ts)
{
    struct timespec end_time;

    if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
            current_process_time(&end_time) &&
            end_time.tv_sec >= ts->tv_sec) {
        return (uint64_t)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
                    (end_time.tv_nsec - ts->tv_nsec);
    }

    return 0;
}
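/* Worked example for gc_clock_end() (illustrative): with a start time of
 * {tv_sec = 2, tv_nsec = 900000000} and an end time of {tv_sec = 3,
 * tv_nsec = 100000000}, the elapsed time is
 * (3 - 2) * 1000000000 + (100000000 - 900000000) = 200000000 ns = 200 ms.
 * The nanosecond term can be negative; the addition absorbs it before the
 * result is returned as uint64_t. */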
static inline void
gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    RB_VM_LOCK_ENTER_LEV(lock_lev);

    switch (event) {
      case gc_enter_event_rest:
        if (!is_marking(objspace)) break;
        // fall through
      case gc_enter_event_start:
      case gc_enter_event_continue:
        // stop other ractors
        rb_vm_barrier();
        break;
      default:
        break;
    }

    gc_enter_count(event);
    if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
    if (RGENGC_CHECK_MODE >= 3 && (dont_gc_val() == 0)) gc_verify_internal_consistency(objspace);

    during_gc = TRUE;
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}

static inline void
gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    GC_ASSERT(during_gc != 0);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    during_gc = FALSE;

    RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
#define MEASURE_GC (objspace->flags.measure_gc)

static void
gc_marking_enter(rb_objspace_t *objspace)
{
    GC_ASSERT(during_gc != 0);

    if (MEASURE_GC) {
        gc_clock_start(&objspace->profile.marking_start_time);
    }
}

static void
gc_marking_exit(rb_objspace_t *objspace)
{
    GC_ASSERT(during_gc != 0);

    if (MEASURE_GC) {
        objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
    }
}

static void
gc_sweeping_enter(rb_objspace_t *objspace)
{
    GC_ASSERT(during_gc != 0);

    if (MEASURE_GC) {
        gc_clock_start(&objspace->profile.sweeping_start_time);
    }
}

static void
gc_sweeping_exit(rb_objspace_t *objspace)
{
    GC_ASSERT(during_gc != 0);

    if (MEASURE_GC) {
        objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
    }
}
static void *
gc_with_gvl(void *ptr)
{
    struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}

static int
garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
{
    if (dont_gc_val()) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace, reason);
    }
    else {
        if (ruby_native_thread_p()) {
            struct objspace_and_reason oar;
            oar.objspace = objspace;
            oar.reason = reason;
            return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}
static int
gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        asan_unpoisoning_object(v) {
            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
              case T_ZOMBIE:
                break;
              case T_STRING:
                // precompute the string coderange. This both saves time for when it will
                // eventually be needed, and avoids mutating heap pages after a potential fork.
                rb_enc_str_coderange(v);
                // fall through
              default:
                if (!RVALUE_OLD_P(v) && !RVALUE_WB_UNPROTECTED(v)) {
                    RVALUE_AGE_SET_CANDIDATE(objspace, v);
                }
            }
        }
    }

    return 0;
}
static VALUE
gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_METHOD);

    /* For now, compact implies full mark / sweep, so ignore other flags */
    if (RTEST(compact)) {
        GC_ASSERT(GC_COMPACTION_SUPPORTED);

        reason |= GPR_FLAG_COMPACT;
    }
    else {
        if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
        if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
static void
free_empty_pages(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        /* Move all empty pages to the tomb heap for freeing. */
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        rb_heap_t *tomb_heap = SIZE_POOL_TOMB_HEAP(size_pool);

        size_t freed_pages = 0;

        struct heap_page **next_page_ptr = &heap->free_pages;
        struct heap_page *page = heap->free_pages;
        while (page) {
            /* All finalizers should have been run in gc_start_internal, so there
             * should be no objects that require finalization. */
            GC_ASSERT(page->final_slots == 0);

            struct heap_page *next_page = page->free_next;

            if (page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, heap, page);
                heap_add_page(objspace, size_pool, tomb_heap, page);
                freed_pages++;
            }
            else {
                *next_page_ptr = page;
                next_page_ptr = &page->free_next;
            }

            page = next_page;
        }

        *next_page_ptr = NULL;

        size_pool_allocatable_pages_set(objspace, size_pool, size_pool->allocatable_pages + freed_pages);
    }

    heap_pages_free_unused_pages(objspace);
}
void
rb_gc_prepare_heap(void)
{
    rb_objspace_each_objects(gc_set_candidate_object_i, NULL);
    gc_start_internal(NULL, Qtrue, Qtrue, Qtrue, Qtrue, Qtrue);
    free_empty_pages();

#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
    malloc_trim(0);
#endif
}

static int
gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(!SPECIAL_CONST_P(obj));

    switch (BUILTIN_TYPE(obj)) {
      case T_NONE:
      case T_NIL:
      case T_MOVED:
      case T_ZOMBIE:
        return FALSE;
      case T_SYMBOL:
        if (RSYMBOL(obj)->id & ~ID_SCOPE_MASK) {
            return FALSE;
        }
        /* fall through */
      case T_STRING:
      case T_OBJECT:
      case T_FLOAT:
      case T_IMEMO:
      case T_ARRAY:
      case T_BIGNUM:
      case T_ICLASS:
      case T_MODULE:
      case T_REGEXP:
      case T_DATA:
      case T_MATCH:
      case T_STRUCT:
      case T_HASH:
      case T_FILE:
      case T_COMPLEX:
      case T_RATIONAL:
      case T_NODE:
      case T_CLASS:
        if (FL_TEST(obj, FL_FINALIZE)) {
            /* The finalizer table is a numtable. It looks up objects by address.
             * We can't mark the keys in the finalizer table because that would
             * prevent the objects from being collected. This check prevents
             * objects that are keys in the finalizer table from being moved
             * without directly pinning them. */
            GC_ASSERT(st_is_member(finalizer_table, obj));
            return FALSE;
        }
        GC_ASSERT(RVALUE_MARKED(obj));
        GC_ASSERT(!RVALUE_PINNED(obj));

        return TRUE;

      default:
        rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
        break;
    }

    return FALSE;
}
static VALUE
gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size)
{
    int marked;
    int wb_unprotected;
    int uncollectible;
    int age;

    RVALUE *dest = (RVALUE *)free;
    RVALUE *src = (RVALUE *)scan;

    gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)scan, (void *)free);

    GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));

    GC_ASSERT(!RVALUE_MARKING((VALUE)src));

    /* Save off bits for current object. */
    marked = RVALUE_MARKED((VALUE)src);
    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    bool remembered = RVALUE_REMEMBERED((VALUE)src);
    age = RVALUE_AGE_GET((VALUE)src);

    /* Clear bits for eventual T_MOVED */
    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_PAGE((VALUE)src)->remembered_bits, (VALUE)src);

    if (FL_TEST((VALUE)src, FL_EXIVAR)) {
        /* Resizing the st table could cause a malloc */
        DURING_GC_COULD_MALLOC_REGION_START();
        {
            rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
        }
        DURING_GC_COULD_MALLOC_REGION_END();
    }

    if (FL_TEST((VALUE)src, FL_SEEN_OBJ_ID)) {
        /* If the source object's object_id has been seen, we need to update
         * the object to object id mapping. */
        st_data_t srcid = (st_data_t)src, id;

        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
        /* Resizing the st table could cause a malloc */
        DURING_GC_COULD_MALLOC_REGION_START();
        {
            if (!st_delete(objspace->obj_to_id_tbl, &srcid, &id)) {
                rb_bug("gc_move: object ID seen, but not in mapping table: %s", obj_info((VALUE)src));
            }

            st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
        }
        DURING_GC_COULD_MALLOC_REGION_END();
    }
    else {
        GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)src, NULL));
    }

    /* Move the object */
    memcpy(dest, src, MIN(src_slot_size, slot_size));

    if (RVALUE_OVERHEAD > 0) {
        void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
        void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);

        memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
    }

    memset(src, 0, src_slot_size);
    RVALUE_AGE_RESET((VALUE)src);

    /* Set bits for object in new location */
    if (remembered) {
        MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
    }

    if (marked) {
        MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }

    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }

    RVALUE_AGE_SET((VALUE)dest, age);
    /* Assign forwarding address */
    src->as.moved.flags = T_MOVED;
    src->as.moved.dummy = Qundef;
    src->as.moved.destination = (VALUE)dest;
    GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);

    return (VALUE)src;
}
#if GC_CAN_COMPILE_COMPACTION
static int
compare_pinned_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->pinned_slots - right_page->pinned_slots;
}

static int
compare_free_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;
}

static void
gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
{
    for (int j = 0; j < SIZE_POOL_COUNT; j++) {
        rb_size_pool_t *size_pool = &size_pools[j];

        size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
        struct heap_page *page = 0, **page_list = malloc(size);
        size_t i = 0;

        SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
        ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            page_list[i++] = page;
        }

        GC_ASSERT((size_t)i == total_pages);

        /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
         * head of the list, so empty pages will end up at the start of the heap */
        ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);

        /* Reset the eden heap */
        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);

        for (i = 0; i < total_pages; i++) {
            ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
            if (page_list[i]->free_slots != 0) {
                heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
            }
        }

        free(page_list);
    }
}
#endif
static void
gc_ref_update_array(rb_objspace_t *objspace, VALUE v)
{
    if (ARY_SHARED_P(v)) {
        VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;

        UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);

        VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
        // If the root is embedded and its location has changed
        if (ARY_EMBED_P(new_root) && new_root != old_root) {
            size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
            GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
            RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
        }
    }
    else {
        long len = RARRAY_LEN(v);

        if (len > 0) {
            VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
            for (long i = 0; i < len; i++) {
                UPDATE_IF_MOVED(objspace, ptr[i]);
            }
        }

        if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
            if (rb_ary_embeddable_p(v)) {
                rb_ary_make_embedded(v);
            }
        }
    }
}

static void gc_ref_update_table_values_only(rb_objspace_t *objspace, st_table *tbl);

static void
gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
{
    VALUE *ptr = ROBJECT_IVPTR(v);

    if (rb_shape_obj_too_complex(v)) {
        gc_ref_update_table_values_only(objspace, ROBJECT_IV_HASH(v));
        return;
    }

    size_t slot_size = rb_gc_obj_slot_size(v);
    size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
    if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
        // Object can be re-embedded
        memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
        RB_FL_SET_RAW(v, ROBJECT_EMBED);
        xfree(ptr);
        ptr = ROBJECT(v)->as.ary;
    }

    for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
        UPDATE_IF_MOVED(objspace, ptr[i]);
    }
}
static int
hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*key)) {
        *key = rb_gc_location((VALUE)*key);
    }

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace;

    objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)key)) {
        return ST_REPLACE;
    }

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CONTINUE;
}

static int
hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace;

    objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CONTINUE;
}

static void
gc_ref_update_table_values_only(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;

    if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
        rb_raise(rb_eRuntimeError, "hash modified during iteration");
    }
}

void
rb_gc_ref_update_table_values_only(st_table *tbl)
{
    gc_ref_update_table_values_only(&rb_objspace, tbl);
}

static void
gc_update_table_refs(rb_objspace_t *objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;

    if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
        rb_raise(rb_eRuntimeError, "hash modified during iteration");
    }
}

/* Update MOVED references in a VALUE=>VALUE st_table */
void
rb_gc_update_tbl_refs(st_table *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_update_table_refs(objspace, ptr);
}

static void
gc_ref_update_hash(rb_objspace_t *objspace, VALUE v)
{
    rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
}

static void
gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
{
    long i;

    for (i=0; i<n; i++) {
        UPDATE_IF_MOVED(objspace, values[i]);
    }
}

void
rb_gc_update_values(long n, VALUE *values)
{
    gc_update_values(&rb_objspace, n, values);
}

static enum rb_id_table_iterator_result
check_id_table_move(VALUE value, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ID_TABLE_REPLACE;
    }

    return ID_TABLE_CONTINUE;
}
/* Returns the new location of an object, if it moved.  Otherwise returns
 * the existing location. */
VALUE
rb_gc_location(VALUE value)
{
    VALUE destination;

    if (!SPECIAL_CONST_P(value)) {
        void *poisoned = asan_unpoison_object_temporary(value);

        if (BUILTIN_TYPE(value) == T_MOVED) {
            destination = (VALUE)RMOVED(value)->destination;
            GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
        }
        else {
            destination = value;
        }

        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
            asan_poison_object(value);
        }
    }
    else {
        destination = value;
    }

    return destination;
}
static enum rb_id_table_iterator_result
update_id_table(VALUE *value, void *data, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ID_TABLE_CONTINUE;
}

static void
update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (tbl) {
        rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
    }
}

static enum rb_id_table_iterator_result
update_cc_tbl_i(VALUE ccs_ptr, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));

    if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
        ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
    }

    for (int i=0; i<ccs->len; i++) {
        if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
            ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
        }
    }

    // do not replace
    return ID_TABLE_CONTINUE;
}

static void
update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
    }
}

static enum rb_id_table_iterator_result
update_cvc_tbl_i(VALUE cvc_entry, void *data)
{
    struct rb_cvar_class_tbl_entry *entry;
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;

    if (entry->cref) {
        TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
    }

    entry->class_value = rb_gc_location(entry->class_value);

    return ID_TABLE_CONTINUE;
}

static void
update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
    }
}

static enum rb_id_table_iterator_result
mark_cvc_tbl_i(VALUE cvc_entry, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    struct rb_cvar_class_tbl_entry *entry;

    entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;

    RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
    gc_mark(objspace, (VALUE)entry->cref);

    return ID_TABLE_CONTINUE;
}

static void
mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
    }
}

static enum rb_id_table_iterator_result
update_const_table(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, ce->value)) {
        ce->value = rb_gc_location(ce->value);
    }

    if (gc_object_moved_p(objspace, ce->file)) {
        ce->file = rb_gc_location(ce->file);
    }

    return ID_TABLE_CONTINUE;
}

static void
update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (!tbl) return;
    rb_id_table_foreach_values(tbl, update_const_table, objspace);
}

static void
update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
{
    while (entry) {
        UPDATE_IF_MOVED(objspace, entry->klass);
        entry = entry->next;
    }
}

static void
update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
{
    UPDATE_IF_MOVED(objspace, ext->origin_);
    UPDATE_IF_MOVED(objspace, ext->includer);
    UPDATE_IF_MOVED(objspace, ext->refined_class);
    update_subclass_entries(objspace, ext->subclasses);
}

static void
update_superclasses(rb_objspace_t *objspace, VALUE obj)
{
    if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
        for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
            UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
        }
    }
}
static void
gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
{
    RVALUE *any = RANY(obj);

    gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_ref_update_generic_ivar(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
        if (FL_TEST(obj, FL_SINGLETON)) {
            UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
        }
        // Continue to the shared T_CLASS/T_MODULE
      case T_MODULE:
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        update_m_tbl(objspace, RCLASS_M_TBL(obj));
        update_cc_tbl(objspace, obj);
        update_cvc_tbl(objspace, obj);
        update_superclasses(objspace, obj);

        if (rb_shape_obj_too_complex(obj)) {
            gc_ref_update_table_values_only(objspace, RCLASS_IV_HASH(obj));
        }
        else {
            for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
                UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
            }
        }

        update_class_ext(objspace, RCLASS_EXT(obj));
        update_const_tbl(objspace, RCLASS_CONST_TBL(obj));

        UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
        break;

      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            update_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        update_class_ext(objspace, RCLASS_EXT(obj));
        update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        update_cc_tbl(objspace, obj);
        break;

      case T_IMEMO:
        rb_imemo_mark_and_move(obj, true);
        return;

      case T_NIL:
      case T_FIXNUM:
      case T_NODE:
      case T_MOVED:
      case T_NONE:
        /* These can't move */
        return;

      case T_ARRAY:
        gc_ref_update_array(objspace, obj);
        break;

      case T_HASH:
        gc_ref_update_hash(objspace, obj);
        UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
        break;

      case T_STRING:
        {
            if (STR_SHARED_P(obj)) {
                UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
            }

            /* If, after move the string is not embedded, and can fit in the
             * slot it's been placed in, then re-embed it. */
            if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
                if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
                    rb_str_make_embedded(obj);
                }
            }

            break;
        }

      case T_DATA:
        /* Call the compaction callback, if it exists */
        {
            void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
            if (ptr) {
                if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
                    size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;

                    for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
                        VALUE *ref = (VALUE *)((char *)ptr + offset);
                        if (SPECIAL_CONST_P(*ref)) continue;
                        *ref = rb_gc_location(*ref);
                    }
                }
                else if (RTYPEDDATA_P(obj)) {
                    RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
                    if (compact_func) (*compact_func)(ptr);
                }
            }
        }
        break;

      case T_OBJECT:
        gc_ref_update_object(objspace, obj);
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
        }
        break;

      case T_REGEXP:
        UPDATE_IF_MOVED(objspace, any->as.regexp.src);
        break;

      case T_SYMBOL:
        UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
        break;

      case T_FLOAT:
      case T_BIGNUM:
        break;

      case T_MATCH:
        UPDATE_IF_MOVED(objspace, any->as.match.regexp);

        if (any->as.match.str) {
            UPDATE_IF_MOVED(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        UPDATE_IF_MOVED(objspace, any->as.rational.num);
        UPDATE_IF_MOVED(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        UPDATE_IF_MOVED(objspace, any->as.complex.real);
        UPDATE_IF_MOVED(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i, len = RSTRUCT_LEN(obj);
            VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);

            for (i = 0; i < len; i++) {
                UPDATE_IF_MOVED(objspace, ptr[i]);
            }
        }
        break;

      default:
#if GC_DEBUG
        rb_gcdebug_print_obj_condition((VALUE)obj);
        rb_obj_info_dump(obj);
        rb_bug("unreachable");
#endif
        break;
    }

    UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);

    gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
}
static void
gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t *objspace, struct heap_page *page)
{
    VALUE v = (VALUE)vstart;
    asan_unlock_freelist(page);
    asan_lock_freelist(page);
    page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
    page->flags.has_remembered_objects = FALSE;

    /* For each object on the page */
    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_unpoison_object_temporary(v);

        switch (BUILTIN_TYPE(v)) {
          case T_NONE:
          case T_MOVED:
          case T_ZOMBIE:
            break;
          default:
            if (RVALUE_WB_UNPROTECTED(v)) {
                page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
            }
            if (RVALUE_REMEMBERED(v)) {
                page->flags.has_remembered_objects = TRUE;
            }
            if (page->flags.before_sweep) {
                if (RVALUE_MARKED(v)) {
                    gc_update_object_references(objspace, v);
                }
            }
            else {
                gc_update_object_references(objspace, v);
            }
        }

        if (poisoned) {
            asan_poison_object(v);
        }
    }
}

extern rb_symbols_t ruby_global_symbols;
#define global_symbols ruby_global_symbols

static void
gc_update_references(rb_objspace_t *objspace)
{
    objspace->flags.during_reference_updating = true;

    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    struct heap_page *page = NULL;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        bool should_set_mark_bits = TRUE;
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

        ccan_list_for_each(&heap->pages, page, page_node) {
            uintptr_t start = (uintptr_t)page->start;
            uintptr_t end = start + (page->total_slots * size_pool->slot_size);

            gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
            if (page == heap->sweeping_page) {
                should_set_mark_bits = FALSE;
            }
            if (should_set_mark_bits) {
                gc_setup_mark_bits(page);
            }
        }
    }
    rb_vm_update_references(vm);
    rb_gc_update_global_tbl();
    global_symbols.ids = rb_gc_location(global_symbols.ids);
    global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
    gc_ref_update_table_values_only(objspace, objspace->obj_to_id_tbl);
    gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
    gc_update_table_refs(objspace, global_symbols.str_sym);
    gc_update_table_refs(objspace, finalizer_table);

    objspace->flags.during_reference_updating = false;
}
10350 * GC.latest_compact_info -> hash
10352 * Returns information about object moved in the most recent \GC compaction.
10354 * The returned +hash+ contains the following keys:
10357 * Hash containing the type of the object as the key and the number of
10358 * objects of that type that were considered for movement.
10360 * Hash containing the type of the object as the key and the number of
10361 * objects of that type that were actually moved.
10363 * Hash containing the type of the object as the key and the number of
10364 * objects of that type that were increased in size.
10366 * Hash containing the type of the object as the key and the number of
10367 * objects of that type that were decreased in size.
10369 * Some objects can't be moved (due to pinning) so these numbers can be used to
10370 * calculate compaction efficiency.
10373 gc_compact_stats(VALUE self
)
10376 rb_objspace_t
*objspace
= &rb_objspace
;
10377 VALUE h
= rb_hash_new();
10378 VALUE considered
= rb_hash_new();
10379 VALUE moved
= rb_hash_new();
10380 VALUE moved_up
= rb_hash_new();
10381 VALUE moved_down
= rb_hash_new();
10383 for (i
=0; i
<T_MASK
; i
++) {
10384 if (objspace
->rcompactor
.considered_count_table
[i
]) {
10385 rb_hash_aset(considered
, type_sym(i
), SIZET2NUM(objspace
->rcompactor
.considered_count_table
[i
]));
10388 if (objspace
->rcompactor
.moved_count_table
[i
]) {
10389 rb_hash_aset(moved
, type_sym(i
), SIZET2NUM(objspace
->rcompactor
.moved_count_table
[i
]));
10392 if (objspace
->rcompactor
.moved_up_count_table
[i
]) {
10393 rb_hash_aset(moved_up
, type_sym(i
), SIZET2NUM(objspace
->rcompactor
.moved_up_count_table
[i
]));
10396 if (objspace
->rcompactor
.moved_down_count_table
[i
]) {
10397 rb_hash_aset(moved_down
, type_sym(i
), SIZET2NUM(objspace
->rcompactor
.moved_down_count_table
[i
]));
10401 rb_hash_aset(h
, ID2SYM(rb_intern("considered")), considered
);
10402 rb_hash_aset(h
, ID2SYM(rb_intern("moved")), moved
);
10403 rb_hash_aset(h
, ID2SYM(rb_intern("moved_up")), moved_up
);
10404 rb_hash_aset(h
, ID2SYM(rb_intern("moved_down")), moved_down
);
10409 # define gc_compact_stats rb_f_notimplement
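/* Example (illustrative; the exact counts depend on the program's heap):
 *
 *   GC.compact
 *   GC.latest_compact_info
 *   #=> {:considered=>{:T_STRING=>523, ...}, :moved=>{:T_STRING=>462, ...},
 *   #    :moved_up=>{...}, :moved_down=>{...}}
 */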
#if GC_CAN_COMPILE_COMPACTION
static void
root_obj_check_moved_i(const char *category, VALUE obj, void *data)
{
    rb_objspace_t *objspace = data;

    if (gc_object_moved_p(objspace, obj)) {
        rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, obj_info(rb_gc_location(obj)));
    }
}

static void
reachable_object_check_moved_i(VALUE ref, void *data)
{
    VALUE parent = (VALUE)data;
    if (gc_object_moved_p(&rb_objspace, ref)) {
        rb_bug("Object %s points to MOVED: %p -> %s", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
    }
}

static int
heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
{
    rb_objspace_t *objspace = data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(objspace, v)) {
            /* Moved object still on the heap, something may have a reference. */
        }
        else {
            void *poisoned = asan_unpoison_object_temporary(v);

            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
              case T_ZOMBIE:
                break;
              default:
                if (!rb_objspace_garbage_object_p(v)) {
                    rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
                }
            }

            if (poisoned) {
                GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
                asan_poison_object(v);
            }
        }
    }

    return 0;
}
/*
 *  call-seq:
 *     GC.compact -> hash
 *
 * This function compacts objects together in Ruby's heap. It eliminates
 * unused space (or fragmentation) in the heap by moving objects into that
 * unused space.
 *
 * The returned +hash+ contains statistics about the objects that were moved;
 * see GC.latest_compact_info.
 *
 * This method is only expected to work on CRuby.
 *
 * To test whether \GC compaction is supported, use the idiom:
 *
 *   GC.respond_to?(:compact)
 */
static VALUE
gc_compact(VALUE self)
{
    /* Run GC with compaction enabled */
    gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);

    return gc_compact_stats(self);
}
#else
# define gc_compact rb_f_notimplement
#endif
#if GC_CAN_COMPILE_COMPACTION

struct desired_compaction_pages_i_data {
    rb_objspace_t *objspace;
    size_t required_slots[SIZE_POOL_COUNT];
};

static int
desired_compaction_pages_i(struct heap_page *page, void *data)
{
    struct desired_compaction_pages_i_data *tdata = data;
    rb_objspace_t *objspace = tdata->objspace;
    VALUE vstart = (VALUE)page->start;
    VALUE vend = vstart + (VALUE)(page->total_slots * page->size_pool->slot_size);

    for (VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
        /* skip T_NONEs; they won't be moved */
        void *poisoned = asan_unpoison_object_temporary(v);
        if (BUILTIN_TYPE(v) == T_NONE) {
            if (poisoned) {
                asan_poison_object(v);
            }
            continue;
        }

        rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
        size_t dest_pool_idx = dest_pool - size_pools;
        tdata->required_slots[dest_pool_idx]++;
    }

    return 0;
}
static VALUE
gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE expand_heap, VALUE toward_empty)
{
    rb_objspace_t *objspace = &rb_objspace;

    /* Clear the heap. */
    gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qfalse);

    if (RTEST(double_heap)) {
        rb_warn("double_heap is deprecated, please use expand_heap instead");
    }

    RB_VM_LOCK_ENTER();
    {
        gc_rest(objspace);

        /* if both double_heap and expand_heap are set, expand_heap takes precedence */
        if (RTEST(expand_heap)) {
            struct desired_compaction_pages_i_data desired_compaction = {
                .objspace = objspace,
                .required_slots = {0},
            };
            /* Work out how many objects want to be in each size pool, taking account of moves */
            objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);

            /* Find out which pool has the most pages */
            size_t max_existing_pages = 0;
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
                max_existing_pages = MAX(max_existing_pages, heap->total_pages);
            }
            /* Add pages to each size pool so that compaction is guaranteed to move every object */
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

                size_t pages_to_add = 0;
                /*
                 * Step 1: Make sure every pool has the same number of pages, by adding empty pages
                 * to smaller pools. This is required to make sure the compact cursor can advance
                 * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
                 * compact cursors met" condition on some pools before fully compacting others
                 */
                pages_to_add += max_existing_pages - heap->total_pages;
                /*
                 * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
                 * that want to be in that size pool, whether moved into it or moved within it
                 */
                pages_to_add += slots_to_pages_for_size_pool(objspace, size_pool, desired_compaction.required_slots[i]);
                /*
                 * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
                 * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
                 */
                pages_to_add += 2;

                heap_add_pages(objspace, size_pool, heap, pages_to_add);
            }
        }
        else if (RTEST(double_heap)) {
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
                heap_add_pages(objspace, size_pool, heap, heap->total_pages);
            }
        }

        if (RTEST(toward_empty)) {
            objspace->rcompactor.compare_func = compare_free_slots;
        }
    }
    RB_VM_LOCK_LEAVE();

    gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);

    objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, objspace);
    objspace_each_objects(objspace, heap_check_moved_i, objspace, TRUE);

    objspace->rcompactor.compare_func = NULL;
    return gc_compact_stats(self);
}
#else
# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
#endif
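/* Illustrative use from Ruby of the verification entry point above (a
 * test/debug API; keyword arguments per its builtin definition):
 *
 *   GC.verify_compaction_references(expand_heap: true, toward: :empty)
 *
 * expands every size pool as described in steps 1-3, compacts toward the
 * emptiest pages, then crashes with rb_bug if any root or heap object still
 * references a T_MOVED slot. */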
void
rb_gc(void)
{
    unless_objspace(objspace) { return; }
    unsigned int reason = GPR_DEFAULT_REASON;
    garbage_collect(objspace, reason);
}

int
rb_during_gc(void)
{
    unless_objspace(objspace) { return FALSE; }
    return during_gc;
}

#if RGENGC_PROFILE >= 2

static const char *type_name(int type, VALUE obj);

static void
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    VALUE result = rb_hash_new_with_size(T_MASK);
    int i;
    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);
        rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
    }
    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
}
#endif

size_t
rb_gc_count(void)
{
    return rb_objspace.profile.count;
}

static VALUE
gc_count(rb_execution_context_t *ec, VALUE self)
{
    return SIZET2NUM(rb_gc_count());
}
static VALUE
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    static VALUE sym_weak_references_count, sym_retained_weak_references_count;
    VALUE hash = Qnil, key = Qnil;
    VALUE major_by, need_major_by;
    unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    if (NIL_P(sym_major_by)) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(major_by);
        S(gc_by);
        S(immediate_sweep);
        S(have_finalizer);
        S(state);
        S(need_major_by);

        S(stress);
        S(nofree);
        S(oldgen);
        S(shady);
        S(force);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc);
#endif
        S(newobj);
        S(malloc);
        S(method);
        S(capi);

        S(none);
        S(marking);
        S(sweeping);

        S(weak_references_count);
        S(retained_weak_references_count);
#undef S
    }

#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

    major_by =
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
        Qnil;
    SET(major_by, major_by);

    if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
        unsigned int need_major_flags = gc_needs_major_flags;
        need_major_by =
            (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
            (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
            (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
            (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
            (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
            Qnil;
        SET(need_major_by, need_major_by);
    }

    SET(gc_by,
        (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
        (flags & GPR_FLAG_MALLOC) ? sym_malloc :
        (flags & GPR_FLAG_METHOD) ? sym_method :
        (flags & GPR_FLAG_CAPI)   ? sym_capi :
        (flags & GPR_FLAG_STRESS) ? sym_stress :
        Qnil
    );

    SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
    SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));

    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
    }

    SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
    SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));
#undef SET

    if (!NIL_P(key)) {/* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return hash;
}

VALUE
rb_gc_latest_gc_info(VALUE key)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_info_decode(objspace, key, 0);
}
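/* Example (illustrative output; the exact keys depend on build options):
 *
 *   GC.latest_gc_info
 *   #=> {:major_by=>nil, :need_major_by=>nil, :gc_by=>:newobj,
 *   #    :have_finalizer=>false, :immediate_sweep=>false, :state=>:none, ...}
 *   GC.latest_gc_info(:gc_by)  #=> :newobj
 */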
static VALUE
gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    return gc_info_decode(objspace, arg, 0);
}
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_marking_time,
    gc_stat_sym_sweeping_time,
    gc_stat_sym_heap_allocated_pages,
    gc_stat_sym_heap_sorted_length,
    gc_stat_sym_heap_allocatable_pages,
    gc_stat_sym_heap_available_slots,
    gc_stat_sym_heap_live_slots,
    gc_stat_sym_heap_free_slots,
    gc_stat_sym_heap_final_slots,
    gc_stat_sym_heap_marked_slots,
    gc_stat_sym_heap_eden_pages,
    gc_stat_sym_heap_tomb_pages,
    gc_stat_sym_total_allocated_pages,
    gc_stat_sym_total_freed_pages,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_freed_objects,
    gc_stat_sym_malloc_increase_bytes,
    gc_stat_sym_malloc_increase_bytes_limit,
    gc_stat_sym_minor_gc_count,
    gc_stat_sym_major_gc_count,
    gc_stat_sym_compact_count,
    gc_stat_sym_read_barrier_faults,
    gc_stat_sym_total_moved_objects,
    gc_stat_sym_remembered_wb_unprotected_objects,
    gc_stat_sym_remembered_wb_unprotected_objects_limit,
    gc_stat_sym_old_objects,
    gc_stat_sym_old_objects_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,
#endif
    gc_stat_sym_weak_references_count,
#if RGENGC_PROFILE
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
#endif
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];

static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(marking_time);
        S(sweeping_time);
        S(heap_allocated_pages);
        S(heap_sorted_length);
        S(heap_allocatable_pages);
        S(heap_available_slots);
        S(heap_live_slots);
        S(heap_free_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(heap_eden_pages);
        S(heap_tomb_pages);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
        S(minor_gc_count);
        S(major_gc_count);
        S(compact_count);
        S(read_barrier_faults);
        S(total_moved_objects);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
#endif
        S(weak_references_count);
#if RGENGC_PROFILE
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef S
    }
}

static size_t
ns_to_ms(uint64_t ns)
{
    return ns / (1000 * 1000);
}
static size_t
gc_stat_internal(VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->profile.count);
    SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns)); // TODO: UINT64T2NUM
    SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
    SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));

    /* implementation dependent counters */
    SET(heap_allocated_pages, heap_allocated_pages);
    SET(heap_sorted_length, heap_pages_sorted_length);
    SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    SET(heap_final_slots, heap_pages_final_slots);
    SET(heap_marked_slots, objspace->marked_slots);
    SET(heap_eden_pages, heap_eden_total_pages(objspace));
    SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
    SET(total_allocated_pages, total_allocated_pages(objspace));
    SET(total_freed_pages, total_freed_pages(objspace));
    SET(total_allocated_objects, total_allocated_objects(objspace));
    SET(total_freed_objects, total_freed_objects(objspace));
    SET(malloc_increase_bytes, malloc_increase);
    SET(malloc_increase_bytes_limit, malloc_limit);
    SET(minor_gc_count, objspace->profile.minor_gc_count);
    SET(major_gc_count, objspace->profile.major_gc_count);
    SET(compact_count, objspace->profile.compact_count);
    SET(read_barrier_faults, objspace->profile.read_barrier_faults);
    SET(total_moved_objects, objspace->rcompactor.total_moved);
    SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
    SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
    SET(old_objects, objspace->rgengc.old_objects);
    SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif

#if RGENGC_PROFILE
    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
    if (hash != Qnil) {
        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
    }
#endif

    return 0;
}
static VALUE
gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
{
    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (SYMBOL_P(arg)) {
        size_t value = gc_stat_internal(arg);
        return SIZET2NUM(value);
    }
    else if (RB_TYPE_P(arg, T_HASH)) {
        // ok
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    gc_stat_internal(arg);
    return arg;
}

size_t
rb_gc_stat(VALUE key)
{
    if (SYMBOL_P(key)) {
        size_t value = gc_stat_internal(key);
        return value;
    }
    else {
        gc_stat_internal(key);
        return 0;
    }
}
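/* Illustrative sketch (not part of this file): a C extension can read a
 * single counter through the public rb_gc_stat() API instead of building
 * the whole statistics hash. `log_gc_count` is a hypothetical helper.
 *
 *     #include <stdio.h>
 *     #include "ruby/ruby.h"
 *
 *     static void
 *     log_gc_count(void)
 *     {
 *         // Passing a Symbol key returns just that one statistic.
 *         size_t n = rb_gc_stat(ID2SYM(rb_intern("count")));
 *         fprintf(stderr, "GC ran %zu times\n", n);
 *     }
 */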
enum gc_stat_heap_sym {
    gc_stat_heap_sym_slot_size,
    gc_stat_heap_sym_heap_allocatable_pages,
    gc_stat_heap_sym_heap_eden_pages,
    gc_stat_heap_sym_heap_eden_slots,
    gc_stat_heap_sym_heap_tomb_pages,
    gc_stat_heap_sym_heap_tomb_slots,
    gc_stat_heap_sym_total_allocated_pages,
    gc_stat_heap_sym_total_freed_pages,
    gc_stat_heap_sym_force_major_gc_count,
    gc_stat_heap_sym_force_incremental_marking_finish_count,
    gc_stat_heap_sym_total_allocated_objects,
    gc_stat_heap_sym_total_freed_objects,
    gc_stat_heap_sym_last
};

static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];

static void
setup_gc_stat_heap_symbols(void)
{
    if (gc_stat_heap_symbols[0] == 0) {
#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(slot_size);
        S(heap_allocatable_pages);
        S(heap_eden_pages);
        S(heap_eden_slots);
        S(heap_tomb_pages);
        S(heap_tomb_slots);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(force_major_gc_count);
        S(force_incremental_marking_finish_count);
        S(total_allocated_objects);
        S(total_freed_objects);
#undef S
    }
}
static size_t
gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_heap_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

    if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
        rb_raise(rb_eArgError, "size pool index out of range");
    }

    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];

#define SET(name, attr) \
    if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));

    SET(slot_size, size_pool->slot_size);
    SET(heap_allocatable_pages, size_pool->allocatable_pages);
    SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
    SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
    SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
    SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
    SET(total_allocated_pages, size_pool->total_allocated_pages);
    SET(total_freed_pages, size_pool->total_freed_pages);
    SET(force_major_gc_count, size_pool->force_major_gc_count);
    SET(force_incremental_marking_finish_count, size_pool->force_incremental_marking_finish_count);
    SET(total_allocated_objects, size_pool->total_allocated_objects);
    SET(total_freed_objects, size_pool->total_freed_objects);
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return 0;
}
static VALUE
gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
{
    if (NIL_P(heap_name)) {
        if (NIL_P(arg)) {
            arg = rb_hash_new();
        }
        else if (RB_TYPE_P(arg, T_HASH)) {
            // ok
        }
        else {
            rb_raise(rb_eTypeError, "non-hash given");
        }

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            VALUE hash = rb_hash_aref(arg, INT2FIX(i));
            if (NIL_P(hash)) {
                hash = rb_hash_new();
                rb_hash_aset(arg, INT2FIX(i), hash);
            }
            gc_stat_heap_internal(i, hash);
        }
    }
    else if (FIXNUM_P(heap_name)) {
        int size_pool_idx = FIX2INT(heap_name);

        if (NIL_P(arg)) {
            arg = rb_hash_new();
        }
        else if (SYMBOL_P(arg)) {
            size_t value = gc_stat_heap_internal(size_pool_idx, arg);
            return SIZET2NUM(value);
        }
        else if (RB_TYPE_P(arg, T_HASH)) {
            // ok
        }
        else {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }

        gc_stat_heap_internal(size_pool_idx, arg);
    }
    else {
        rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
    }

    return arg;
}
static VALUE
gc_stress_get(rb_execution_context_t *ec, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress_mode;
}

static VALUE
gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->flags.gc_stressful = RTEST(flag);
    objspace->gc_stress_mode = flag;

    return flag;
}

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_enable(objspace);
}

VALUE
rb_objspace_gc_enable(rb_objspace_t *objspace)
{
    int old = dont_gc_val();

    dont_gc_off();
    return RBOOL(old);
}

static VALUE
gc_enable(rb_execution_context_t *ec, VALUE _)
{
    return rb_gc_enable();
}

VALUE
rb_gc_disable_no_rest(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_disable_no_rest(objspace);
}

static VALUE
gc_disable_no_rest(rb_objspace_t *objspace)
{
    int old = dont_gc_val();

    dont_gc_on();
    return RBOOL(old);
}

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_disable(objspace);
}

VALUE
rb_objspace_gc_disable(rb_objspace_t *objspace)
{
    gc_rest(objspace);
    return gc_disable_no_rest(objspace);
}

static VALUE
gc_disable(rb_execution_context_t *ec, VALUE _)
{
    return rb_gc_disable();
}
#if GC_CAN_COMPILE_COMPACTION
/*
 *  call-seq:
 *     GC.auto_compact = flag
 *
 *  Updates automatic compaction mode.
 *
 *  When enabled, the compactor will execute on every major collection.
 *
 *  Enabling compaction will degrade performance on major collections.
 */
static VALUE
gc_set_auto_compact(VALUE _, VALUE v)
{
    GC_ASSERT(GC_COMPACTION_SUPPORTED);

    ruby_enable_autocompact = RTEST(v);

#if RGENGC_CHECK_MODE
    ruby_autocompact_compare_func = NULL;

    if (SYMBOL_P(v)) {
        ID id = RB_SYM2ID(v);
        if (id == rb_intern("empty")) {
            ruby_autocompact_compare_func = compare_free_slots;
        }
    }
#endif

    return v;
}
#else
# define gc_set_auto_compact rb_f_notimplement
#endif

#if GC_CAN_COMPILE_COMPACTION
/*
 *  call-seq:
 *     GC.auto_compact -> true or false
 *
 *  Returns whether or not automatic compaction has been enabled.
 */
static VALUE
gc_get_auto_compact(VALUE _)
{
    return RBOOL(ruby_enable_autocompact);
}
#else
# define gc_get_auto_compact rb_f_notimplement
#endif
static int
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    const char *ptr = getenv(name);
    ssize_t val;

    if (ptr != NULL && *ptr) {
        size_t unit = 0;
        char *end;
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
        switch (*end) {
          case 'k': case 'K':
            unit = 1024;
            ++end;
            break;
          case 'm': case 'M':
            unit = 1024*1024;
            ++end;
            break;
          case 'g': case 'G':
            unit = 1024*1024*1024;
            ++end;
            break;
        }
        while (*end && isspace((unsigned char)*end)) end++;
        if (*end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }
        if (unit > 0) {
            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
                return 0;
            }
            val *= unit;
        }
        if (val > 0 && (size_t)val > lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            }
            *default_value = (size_t)val;
            return 1;
        }
        else {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);
            }
            return 0;
        }
    }
    return 0;
}
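/* Worked example (illustrative) of the parsing above:
 *   RUBY_GC_MALLOC_LIMIT=16m  -> val = 16, unit = 1024*1024 -> 16777216 bytes
 *   RUBY_GC_MALLOC_LIMIT=0x40 -> 64 (strtoll/strtol with base 0 accept hex)
 *   RUBY_GC_MALLOC_LIMIT=16q  -> rejected; reported on stderr when verbose
 * The parsed value replaces *default_value only when it is positive and
 * greater than lower_bound.
 */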
static int
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    const char *ptr = getenv(name);
    double val;

    if (ptr != NULL && *ptr) {
        char *end;
        val = strtod(ptr, &end);
        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }

        if (accept_zero && val == 0.0) {
            goto accept;
        }
        else if (val <= lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);
            }
        }
        else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
                 val > upper_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);
            }
        }
        else {
          accept:
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
            *default_value = val;
            return 1;
        }
    }
    return 0;
}
static void
gc_set_initial_pages(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
        snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);

        size_t size_pool_init_slots = gc_params.size_pool_init_slots[i];
        if (get_envparam_size(env_key, &size_pool_init_slots, 0)) {
            gc_params.size_pool_init_slots[i] = size_pool_init_slots;
        }

        if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
            size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
            size_pool->allocatable_pages = slots_to_pages_for_size_pool(objspace, size_pool, slots);
        }
        else {
            /* We already have more slots than size_pool_init_slots allows, so
             * prevent creating more pages. */
            size_pool->allocatable_pages = 0;
        }
    }

    heap_pages_expand_sorted(objspace);
}
/*
 * GC tuning environment variables
 *
 * * RUBY_GC_HEAP_FREE_SLOTS
 *   - Prepare at least this number of free slots after GC.
 *   - Allocate additional slots if there are not enough.
 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
 *   - Allocate slots by this factor.
 *   - (next slots number) = (current slots number) * (this factor)
 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
 *   - Allocation rate is limited to this number of slots.
 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
 *   - Allocate additional pages when the number of free slots is
 *     lower than the value (total_slots * (this ratio)).
 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
 *   - Allocate slots to satisfy this formula:
 *       free_slots = total_slots * goal_ratio
 *   - In other words, prepare (total_slots * goal_ratio) free slots.
 *   - If this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
 *   - Allow freeing pages when the number of free slots is
 *     greater than the value (total_slots * (this ratio)).
 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
 *   - Do a full GC when the number of old objects is more than R * N,
 *     where R is this factor and
 *     N is the number of old objects just after the last full GC.
 *
 * * obsolete
 *   * RUBY_FREE_MIN       -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
 *   * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
 *
 * * RUBY_GC_MALLOC_LIMIT
 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
 *
 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
 */
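/* Example (illustrative): these knobs are plain environment variables, so
 * a deployment might start Ruby as
 *
 *   RUBY_GC_HEAP_GROWTH_FACTOR=1.1 RUBY_GC_MALLOC_LIMIT=32m ruby app.rb
 *
 * They are read once at startup by ruby_gc_set_params() below; values that
 * fail validation are ignored (with a note on stderr in verbose mode).
 */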
void
ruby_gc_set_params(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    /* RUBY_GC_HEAP_FREE_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
        /* ok */
    }

    gc_set_initial_pages(objspace);

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
                        0.0, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
                        gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
    get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);

    if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
        malloc_limit = gc_params.malloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
        gc_params.malloc_limit_max = SIZE_MAX;
    }
    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
#endif
}
static void
reachable_objects_from_callback(VALUE obj)
{
    rb_ractor_t *cr = GET_RACTOR();
    cr->mfd->mark_func(obj, cr->mfd->data);
}

void
rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
{
    rb_objspace_t *objspace = &rb_objspace;

    RB_VM_LOCK_ENTER();
    {
        if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");

        if (is_markable_object(obj)) {
            rb_ractor_t *cr = GET_RACTOR();
            struct gc_mark_func_data_struct mfd = {
                .mark_func = func,
                .data = data,
            }, *prev_mfd = cr->mfd;

            cr->mfd = &mfd;
            gc_mark_children(objspace, obj);
            cr->mfd = prev_mfd;
        }
    }
    RB_VM_LOCK_LEAVE();
}
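/* Illustrative sketch (not part of this file): walking the direct edges of
 * one object with rb_objspace_reachable_objects_from(). `count_edge` and
 * `count_reachable` are hypothetical names.
 *
 *     static void
 *     count_edge(VALUE child, void *data)
 *     {
 *         (*(size_t *)data)++;   // called once per directly reachable object
 *     }
 *
 *     static size_t
 *     count_reachable(VALUE obj)
 *     {
 *         size_t n = 0;
 *         rb_objspace_reachable_objects_from(obj, count_edge, &n);
 *         return n;
 *     }
 */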
struct root_objects_data {
    const char *category;
    void (*func)(const char *category, VALUE, void *);
    void *data;
};

static void
root_objects_from(VALUE obj, void *ptr)
{
    const struct root_objects_data *data = (struct root_objects_data *)ptr;
    (*data->func)(data->category, obj, data->data);
}

void
rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace_reachable_objects_from_root(objspace, func, passing_data);
}

static void
objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
{
    if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");

    rb_ractor_t *cr = GET_RACTOR();
    struct root_objects_data data = {
        .func = func,
        .data = passing_data,
    };
    struct gc_mark_func_data_struct mfd = {
        .mark_func = root_objects_from,
        .data = &data,
    }, *prev_mfd = cr->mfd;

    cr->mfd = &mfd;
    gc_mark_roots(objspace, &data.category);
    cr->mfd = prev_mfd;
}
/*
  ------------------------ Extended allocator ------------------------
*/

struct gc_raise_tag {
    VALUE exc;
    const char *fmt;
    va_list *ap;
};

static void *
gc_vraise(void *ptr)
{
    struct gc_raise_tag *argv = ptr;
    rb_vraise(argv->exc, argv->fmt, *argv->ap);
    UNREACHABLE_RETURN(NULL);
}

static void
gc_raise(VALUE exc, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    struct gc_raise_tag argv = {
        exc, fmt, &ap,
    };

    if (ruby_thread_has_gvl_p()) {
        gc_vraise(&argv);
        UNREACHABLE;
    }
    else if (ruby_native_thread_p()) {
        rb_thread_call_with_gvl(gc_vraise, &argv);
        UNREACHABLE;
    }
    else {
        /* Not in a ruby thread */
        fprintf(stderr, "%s", "[FATAL] ");
        vfprintf(stderr, fmt, ap);
    }

    va_end(ap);
    abort();
}

static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);

static void
negative_size_allocation_error(const char *msg)
{
    gc_raise(rb_eNoMemError, "%s", msg);
}

static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}

NORETURN(static void ruby_memerror(void));
RBIMPL_ATTR_MAYBE_UNUSED()
static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
        }
    }
    exit(EXIT_FAILURE);
}
void
rb_memerror(void)
{
    rb_execution_context_t *ec = GET_EC();
    rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
    VALUE exc;

    if (0) {
        // Print out pid, sleep, so you can attach debugger to see what went wrong:
        fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
        sleep(60);
    }

    if (during_gc) {
        // TODO: OMG!! How to implement it?
        gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
    }

    exc = nomem_error;
    if (!exc ||
        rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        rb_ec_raised_clear(ec);
    }
    else {
        rb_ec_raised_set(ec, RAISED_NOMEMORY);
        exc = ruby_vm_special_exception_copy(exc);
    }
    ec->errinfo = exc;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
static void
rb_aligned_free(void *ptr, size_t size)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
    free(ptr);
#else
    free(((void**)ptr)[-1]);
#endif
}

static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}
enum memop_type {
    MEMOP_TYPE_MALLOC  = 0,
    MEMOP_TYPE_FREE,
    MEMOP_TYPE_REALLOC
};

static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
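/* The CAS loop above clamps the subtrahend instead of letting the counter
 * wrap: if another thread lowers *var between the read and the CAS, the CAS
 * fails and the loop re-reads. Equivalent single-threaded picture
 * (illustrative only):
 *
 *     size_t val = *var;
 *     if (val < sub) sub = val;   // never go below zero
 *     *var = val - sub;
 */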
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
    if (ruby_gc_stressful && ruby_native_thread_p()) {
        unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                               GPR_FLAG_STRESS | GPR_FLAG_MALLOC);

        if (gc_stress_full_mark_after_malloc_p()) {
            reason |= GPR_FLAG_FULL_MARK;
        }
        garbage_collect_with_gvl(objspace, reason);
    }
}
static bool
objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   mem,
                   type == MEMOP_TYPE_MALLOC  ? "malloc" :
                   type == MEMOP_TYPE_FREE    ? "free  " :
                   type == MEMOP_TYPE_REALLOC ? "realloc": "error",
                   new_size, old_size);
    return false;
}

static bool
objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (new_size > old_size) {
        ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
#endif
    }
    else {
        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
#endif
    }

    if (type == MEMOP_TYPE_MALLOC) {
      retry:
        if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
            if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
                gc_rest(objspace); /* gc_rest can reduce malloc_increase */
                goto retry;
            }
            garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
        }
    }

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {
        ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
    }
    else {
        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
#endif
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
    }

    switch (type) {
      case MEMOP_TYPE_MALLOC:
        ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
        break;
      case MEMOP_TYPE_FREE:
        {
            size_t allocations = objspace->malloc_params.allocations;
            if (allocations > 0) {
                atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
            }
#if MALLOC_ALLOCATED_SIZE_CHECK
            else {
                GC_ASSERT(objspace->malloc_params.allocations > 0);
            }
#endif
        }
        break;
      case MEMOP_TYPE_REALLOC: /* ignore */ break;
    }
#endif
    return true;
}

#define objspace_malloc_increase(...) \
    for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
         !malloc_increase_done; \
         malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
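/* Note on the macro above: it expands to a `for` statement whose body runs
 * exactly once, so the caller can attach a block that executes between the
 * report and the actual bookkeeping, e.g. (illustrative):
 *
 *     objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
 *         free(ptr);   // freed before the counters are decremented
 *     }
 *
 * With no attached block it degenerates to plain report + body, as in
 * objspace_malloc_fixup() below.
 */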
struct malloc_obj_info { /* 4 words */
    size_t size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    size_t gen;
    const char *file;
    size_t line;
#endif
};

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
#endif

static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif

    return size;
}

static bool
malloc_during_gc_p(rb_objspace_t *objspace)
{
    /* malloc is not allowed during GC when we're not using multiple ractors
     * (since ractors can run while another thread is sweeping) and when we
     * have the GVL (since if we don't have the GVL, we'll try to acquire the
     * GVL which will block and ensure the other thread finishes GC). */
    return during_gc && !dont_gc_val() && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
}
static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = objspace->profile.count;
        info->file = ruby_malloc_info_file;
        info->line = info->file ? ruby_malloc_info_line : 0;
#endif
        mem = info + 1;
    }
#endif

    return mem;
}

#if defined(__GNUC__) && RUBY_DEBUG
#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
#endif

#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
#endif

#define GC_MEMERROR(...) \
    ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
#define TRY_WITH_GC(siz, expr) do {                          \
        const gc_profile_record_flag gpr =                   \
            GPR_FLAG_FULL_MARK           |                   \
            GPR_FLAG_IMMEDIATE_MARK      |                   \
            GPR_FLAG_IMMEDIATE_SWEEP     |                   \
            GPR_FLAG_MALLOC;                                 \
        objspace_malloc_gc_stress(objspace);                 \
                                                             \
        if (LIKELY((expr))) {                                \
            /* Success on 1st try */                         \
        }                                                    \
        else if (!garbage_collect_with_gvl(objspace, gpr)) { \
            /* @shyouhei thinks this doesn't happen */       \
            GC_MEMERROR("TRY_WITH_GC: could not GC");        \
        }                                                    \
        else if ((expr)) {                                   \
            /* Success on 2nd try */                         \
        }                                                    \
        else {                                               \
            GC_MEMERROR("TRY_WITH_GC: could not allocate:"   \
                        "%"PRIdSIZE" bytes for %s",          \
                        siz, # expr);                        \
        }                                                    \
    } while (0)

static void
check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
{
    if (UNLIKELY(malloc_during_gc_p(objspace))) {
        dont_gc_on();
        during_gc = false;
        rb_bug("Cannot %s during GC", msg);
    }
}
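/* Illustrative sketch of the TRY_WITH_GC pattern in isolation: try the
 * allocation, run a full GC on failure, then try once more before giving
 * up. The real macro above also reports the requested size and the failed
 * expression; this is only the control-flow shape:
 *
 *     void *mem = malloc(size);
 *     if (mem == NULL) {
 *         if (!garbage_collect_with_gvl(objspace, gpr)) {
 *             GC_MEMERROR("TRY_WITH_GC: could not GC");
 *         }
 *         mem = malloc(size);                 // second and last attempt
 *         if (mem == NULL) {
 *             GC_MEMERROR("TRY_WITH_GC: could not allocate");
 *         }
 *     }
 */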
/* these shouldn't be called directly.
 * objspace_* functions do not check allocation size.
 */
static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
    check_malloc_not_in_gc(objspace, "malloc");

    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    return objspace_malloc_fixup(objspace, mem, size);
}

static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    return size_mul_or_raise(count, elsize, rb_eArgError);
}
static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    check_malloc_not_in_gc(objspace, "realloc");

    void *mem;

    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    /*
     * The behavior of realloc(ptr, 0) is implementation defined.
     * Therefore we don't use realloc(ptr, 0) for portability reason.
     * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
     */
    if (new_size == 0) {
        if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
            /*
             * - OpenBSD's malloc(3) man page says that when 0 is passed, it
             *   returns a non-NULL pointer to an access-protected memory page.
             *   The returned pointer cannot be read / written at all, but
             *   still be a valid argument of free().
             *
             *   https://man.openbsd.org/malloc.3
             *
             * - Linux's malloc(3) man page says that it _might_ perhaps return
             *   a non-NULL pointer when its argument is 0.  That return value
             *   is safe (and is expected) to be passed to free().
             *
             *   https://man7.org/linux/man-pages/man3/malloc.3.html
             *
             * - As I read the implementation jemalloc's malloc() returns fully
             *   normal 16 bytes memory region when its argument is 0.
             *
             * - As I read the implementation musl libc's malloc() returns
             *   fully normal 32 bytes memory region when its argument is 0.
             *
             * - Other malloc implementations can also return non-NULL.
             */
            objspace_xfree(objspace, ptr, old_size);
            return mem;
        }
        else {
            /*
             * It is dangerous to return NULL here, because that could lead to
             * RCE.  Fallback to 1 byte instead of zero.
             *
             * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
             */
            new_size = 1;
        }
    }

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
        new_size += sizeof(struct malloc_obj_info);
        ptr = info;
        old_size = info->size;
    }
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);
    TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = new_size;
        mem = info + 1;
    }
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);
    return mem;
}
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS

#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;

static int
mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
{
    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);

    return ST_CONTINUE;
}

__attribute__((destructor))
void
rb_malloc_info_show_results(void)
{
    int i;

    fprintf(stderr, "* malloc_info gen statistics\n");
    for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
        if (i == MALLOC_INFO_GEN_SIZE-1) {
            fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
        else {
            fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
    }

    fprintf(stderr, "* malloc_info size statistics\n");
    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
        int s = 16 << i;
        fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
    }
    fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);

    if (malloc_info_file_table) {
        fprintf(stderr, "* malloc_info file statistics\n");
        st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
    }
}
#else
void
rb_malloc_info_show_results(void)
{
}
#endif
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
    if (!ptr) {
        /*
         * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
         * its first version.  We would better follow.
         */
        return;
    }
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    {
        int gen = (int)(objspace->profile.count - info->gen);
        int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
        int i;

        malloc_info_gen_cnt[gen_index]++;
        malloc_info_gen_size[gen_index] += info->size;

        for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
            size_t s = 16 << i;
            if (info->size <= s) {
                malloc_info_size[i]++;
                goto found;
            }
        }
        malloc_info_size[i]++;
      found:;

        {
            st_data_t key = (st_data_t)info->file, d;
            size_t *data;

            if (malloc_info_file_table == NULL) {
                malloc_info_file_table = st_init_numtable_with_size(1024);
            }
            if (st_lookup(malloc_info_file_table, key, &d)) {
                /* hit */
                data = (size_t *)d;
            }
            else {
                data = malloc(xmalloc2_size(2, sizeof(size_t)));
                if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
                data[0] = data[1] = 0;
                st_insert(malloc_info_file_table, key, (st_data_t)data);
            }
            data[0] ++;
            data[1] += info->size;
        }
        if (0 && gen >= 2) { /* verbose output */
            if (info->file) {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
                        info->size, gen, info->file, info->line);
            }
            else {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
                        info->size, gen);
            }
        }
    }
#endif
#endif
    old_size = objspace_malloc_size(objspace, ptr, old_size);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
        free(ptr);
        ptr = NULL;
        RB_DEBUG_COUNTER_INC(heap_xfree);
    }
}
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return ruby_xmalloc0(size);
}

void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}

static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    if (UNLIKELY(malloc_during_gc_p(objspace))) {
        rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
#if RGENGC_CHECK_MODE || RUBY_DEBUG
        rb_bug("Cannot calloc during GC");
#endif
    }

    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}

void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}

#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (LIKELY(x)) {
        /* It's possible for a C extension's pthread destructor function set by pthread_key_create
         * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
         * that case. */
        if (LIKELY(GET_VM())) {
            objspace_xfree(&rb_objspace, x, size);
        }
        else {
            ruby_mimfree(x);
        }
    }
}

void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}

void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xcalloc(w, 1);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
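/* Example (illustrative): allocating a header followed by n elements without
 * risking size_t overflow. `struct my_vec` is a hypothetical type; the call
 * raises ArgumentError instead of silently wrapping when
 * n * sizeof(VALUE) + sizeof(struct my_vec) overflows.
 *
 *     struct my_vec { size_t len; VALUE items[]; };
 *
 *     struct my_vec *v = rb_xmalloc_mul_add(n, sizeof(VALUE), sizeof(struct my_vec));
 *     v->len = n;
 */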
/* Mimic ruby_xmalloc, but need not rb_objspace.
 * should return pointer suitable for ruby_xfree
 */
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
#if CALC_EXACT_MALLOC_SIZE
    if (!mem) {
        return NULL;
    }
    else
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = 0;
        info->file = NULL;
        info->line = 0;
#endif
        mem = info + 1;
    }
#endif
    return mem;
}

void *
ruby_mimcalloc(size_t num, size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
    if (UNLIKELY(t.left)) {
        return NULL;
    }
    size = t.right + sizeof(struct malloc_obj_info);
    mem = calloc1(size);
    if (!mem) {
        return NULL;
    }
    else
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = 0;
        info->file = NULL;
        info->line = 0;
#endif
        mem = info + 1;
    }
#else
    mem = calloc(num, size);
#endif
    return mem;
}

void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
#endif
    free(ptr);
}
#if MALLOC_ALLOCATED_SIZE
/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 *
 *  Returns the size of memory allocated by malloc().
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */

static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}

/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 *
 *  Returns the number of malloc() allocations.
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */

static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
#endif

void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    unless_objspace(objspace) { return; }

    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
/*
  ------------------------------ GC profiler ------------------------------
*/

#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

static bool
current_process_time(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        static int try_clock_gettime = 1;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
            return true;
        }
        else {
            try_clock_gettime = 0;
        }
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            ts->tv_sec = time.tv_sec;
            ts->tv_nsec = (int32_t)time.tv_usec * 1000;
            return true;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
            ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
            ts->tv_sec  = (time_t)(ui.QuadPart / PER100NSEC);
            return true;
        }
    }
#endif

    return false;
}

static double
getrusage_time(void)
{
    struct timespec ts;
    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
    else {
        return 0.0;
    }
}
static inline void
gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
{
    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;
        gc_profile_record *record;

        /* create new record */
        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
        }
        if (index >= objspace->profile.size) {
            void *ptr;
            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
            if (!ptr) rb_memerror();
            objspace->profile.records = ptr;
        }
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        }
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        MEMZERO(record, gc_profile_record, 1);

        /* setup before-GC parameter */
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
        {
            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;
            }
        }
#endif
    }
}

static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}
static double
elapsed_time_from(double time)
{
    double now = getrusage_time();
    if (now > time) {
        return now - time;
    }
    else {
        return 0;
    }
}

static inline void
gc_prof_timer_stop(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;
    }
}
#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}

static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}

static inline void
gc_prof_sweep_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();
        }
    }
}

static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {
        double sweep_time;
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            /* need to accumulate GC time for lazy sweep after gc() */
            record->gc_time += sweep_time;
        }
        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        }

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
#endif
        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
    }
}
static inline void
gc_prof_set_malloc_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;
    }
#endif
}

static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
#endif

        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
    }
}
/*
 *  call-seq:
 *    GC::Profiler.clear -> nil
 *
 *  Clears the \GC profiler data.
 *
 */
static VALUE
gc_profile_clear(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    void *p = objspace->profile.records;
    objspace->profile.records = NULL;
    objspace->profile.size = 0;
    objspace->profile.next_index = 0;
    objspace->profile.current_record = 0;
    free(p);
    return Qnil;
}
/*
 *  call-seq:
 *     GC::Profiler.raw_data -> [Hash, ...]
 *
 *  Returns an Array of individual raw profile data Hashes ordered
 *  from earliest to latest by +:GC_INVOKE_TIME+.
 *
 *  For example:
 *
 *    [
 *      {
 *         :GC_TIME=>1.3000000000000858e-05,
 *         :GC_INVOKE_TIME=>0.010634999999999999,
 *         :HEAP_USE_SIZE=>289640,
 *         :HEAP_TOTAL_SIZE=>588960,
 *         :HEAP_TOTAL_OBJECTS=>14724,
 *         :GC_IS_MARKED=>false
 *      },
 *      # ...
 *    ]
 *
 *  The keys mean:
 *
 *  +:GC_TIME+::
 *      Time elapsed in seconds for this GC run
 *  +:GC_INVOKE_TIME+::
 *      Time elapsed in seconds from startup to when the GC was invoked
 *  +:HEAP_USE_SIZE+::
 *      Total bytes of heap used
 *  +:HEAP_TOTAL_SIZE+::
 *      Total size of heap in bytes
 *  +:HEAP_TOTAL_OBJECTS+::
 *      Total number of objects
 *  +:GC_IS_MARKED+::
 *      Returns +true+ if the GC is in mark phase
 *
 *  If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
 *  to the following hash keys:
 *
 *  +:GC_MARK_TIME+::
 *  +:GC_SWEEP_TIME+::
 *  +:ALLOCATE_INCREASE+::
 *  +:ALLOCATE_LIMIT+::
 *  +:HEAP_USE_PAGES+::
 *  +:HEAP_LIVE_OBJECTS+::
 *  +:HEAP_FREE_OBJECTS+::
 *  +:HAVE_FINALIZE+::
 *
 */
static VALUE
gc_profile_record_get(VALUE _)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i = 0; i < objspace->profile.next_index; i++) {
        gc_profile_record *record = &objspace->profile.records[i];

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
#endif

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

static char *
gc_profile_dump_major_reason(unsigned int flags, char *buff)
{
    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
    int i = 0;

    if (reason == GPR_FLAG_NONE) {
        buff[0] = '-';
        buff[1] = 0;
    }
    else {
#define C(x, s) \
  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
      buff[i++] = #x[0]; \
      if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
      buff[i] = 0; \
  }
        C(NOFREE, N);
        C(OLDGEN, O);
        C(SHADY,  S);
#if RGENGC_ESTIMATE_OLDMALLOC
        C(OLDMALLOC, M);
#endif
#undef C
    }
    return buff;
}
#endif

static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count /* > 1 */) {
        size_t i;
        const gc_profile_record *record;

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
                          "More detail.\n" \
                          "Prepare Time = Previously GC's rest sweep time\n"
                          "Index Flags          Allocate Inc.  Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
                          "  Allocated Size"
#endif
                          "  Use Page     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
#if RGENGC_PROFILE
                          " OldgenObj RemNormObj RemShadObj"
#endif
#if GC_PROFILE_DETAIL_MEMORY
                          " MaxRSS(KB) MinorFLT MajorFLT"
#endif
                          "\n";
        append(out, rb_str_new_cstr(str));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %15"PRIuSIZE
#endif
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if RGENGC_PROFILE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   "%11ld %8ld %8ld"
#endif
                                   "\n",
                                   i+1,
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,

                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
#if RGENGC_PROFILE
                                   ,
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   ,
                                   record->maxrss / 1024,
                                   record->minflt,
                                   record->majflt
#endif
                                   ));
        }
#endif
    }
}
/*
 *  call-seq:
 *     GC::Profiler.result -> String
 *
 *  Returns a profile data report such as:
 *
 *    GC 1 invokes.
 *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC time(ms)
 *        1               0.012               159240               212940                10647         0.00000000000001530000
 */
static VALUE
gc_profile_result(VALUE _)
{
    VALUE str = rb_str_buf_new(0);
    gc_profile_dump_on(str, rb_str_buf_append);
    return str;
}

/*
 *  call-seq:
 *     GC::Profiler.report
 *     GC::Profiler.report(io)
 *
 *  Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
 *
 */
static VALUE
gc_profile_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out;

    out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
    gc_profile_dump_on(out, rb_io_write);

    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.total_time -> float
 *
 *  The total time used for garbage collection in seconds
 */
static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t i;
        size_t count = objspace->profile.next_index;

        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;
        }
    }
    return DBL2NUM(time);
}

/*
 *  call-seq:
 *    GC::Profiler.enabled? -> true or false
 *
 *  The current status of \GC profile mode.
 */
static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return RBOOL(objspace->profile.run);
}

/*
 *  call-seq:
 *    GC::Profiler.enable -> nil
 *
 *  Starts the \GC profiler.
 *
 */
static VALUE
gc_profile_enable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.disable -> nil
 *
 *  Stops the \GC profiler.
 *
 */
static VALUE
gc_profile_disable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
    return Qnil;
}
/*
  ------------------------------ DEBUG ------------------------------
*/

static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
#define TYPE_NAME(t) case (t): return #t;
        TYPE_NAME(T_NONE);
        TYPE_NAME(T_OBJECT);
        TYPE_NAME(T_CLASS);
        TYPE_NAME(T_MODULE);
        TYPE_NAME(T_FLOAT);
        TYPE_NAME(T_STRING);
        TYPE_NAME(T_REGEXP);
        TYPE_NAME(T_ARRAY);
        TYPE_NAME(T_HASH);
        TYPE_NAME(T_STRUCT);
        TYPE_NAME(T_BIGNUM);
        TYPE_NAME(T_FILE);
        TYPE_NAME(T_MATCH);
        TYPE_NAME(T_COMPLEX);
        TYPE_NAME(T_RATIONAL);
        TYPE_NAME(T_NIL);
        TYPE_NAME(T_TRUE);
        TYPE_NAME(T_FALSE);
        TYPE_NAME(T_SYMBOL);
        TYPE_NAME(T_FIXNUM);
        TYPE_NAME(T_UNDEF);
        TYPE_NAME(T_IMEMO);
        TYPE_NAME(T_ICLASS);
        TYPE_NAME(T_MOVED);
        TYPE_NAME(T_ZOMBIE);
      case T_DATA:
        if (obj && rb_objspace_data_type_name(obj)) {
            return rb_objspace_data_type_name(obj);
        }
        return "T_DATA";
#undef TYPE_NAME
    }
    return "unknown";
}

static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}

const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrset";
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}
static void
rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
        VALUE path = rb_iseq_path(iseq);
        int n = ISEQ_BODY(iseq)->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
                 RSTRING_PTR(path), n);
    }
}

static int
str_len_no_raise(VALUE str)
{
    long len = RSTRING_LEN(str);
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;
    return (int)len;
}

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
#define APPEND_S(s) do { \
        if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
            goto end; \
        } \
        else { \
            memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
        } \
    } while (0)
#define C(c, s) ((c) != 0 ? (s) : " ")
static size_t
rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
{
    size_t pos = 0;

    if (SPECIAL_CONST_P(obj)) {
        APPEND_F("%s", obj_type_name(obj));

        if (FIXNUM_P(obj)) {
            APPEND_F(" %ld", FIX2LONG(obj));
        }
        else if (SYMBOL_P(obj)) {
            APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
        }
    }
    else {
        const int age = RVALUE_AGE_GET(obj);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
                     (void *)obj, age,
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),  "L"),
                     C(RVALUE_MARK_BITMAP(obj),           "M"),
                     C(RVALUE_PIN_BITMAP(obj),            "P"),
                     C(RVALUE_MARKING_BITMAP(obj),        "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     C(rb_objspace_garbage_object_p(obj), "G"),
                     obj_type_name(obj));
        }
        else {
            /* fake */
            APPEND_F("%p [%dXXXX] %s",
                     (void *)obj, age,
                     obj_type_name(obj));
        }

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPEND_S("(temporary internal)");
        }
        else if (RTEST(RBASIC(obj)->klass)) {
            VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
            if (!NIL_P(class_path)) {
                APPEND_F("(%s)", RSTRING_PTR(class_path));
            }
        }

#if GC_DEBUG
        APPEND_F("@%s:%d", GET_RVALUE_OVERHEAD(obj)->file, GET_RVALUE_OVERHEAD(obj)->line);
#endif
    }
  end:

    return pos;
}
static size_t
rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
{
    if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
        const enum ruby_value_type type = BUILTIN_TYPE(obj);

        switch (type) {
          case T_NODE:
            UNEXPECTED_NODE(rb_raw_obj_info);
            break;
          case T_ARRAY:
            if (ARY_SHARED_P(obj)) {
                APPEND_S("shared -> ");
                rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
            }
            else if (ARY_EMBED_P(obj)) {
                APPEND_F("[%s%s] len: %ld (embed)",
                         C(ARY_EMBED_P(obj),  "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj));
            }
            else {
                APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
                         C(ARY_EMBED_P(obj),  "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR(obj));
            }
            break;
          case T_STRING: {
            if (STR_SHARED_P(obj)) {
                APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
            }
            else {
                if (STR_EMBED_P(obj)) APPEND_S(" [embed]");

                APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
            }
            APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
            break;
          }
          case T_SYMBOL: {
            VALUE fstr = RSYMBOL(obj)->fstr;
            ID id = RSYMBOL(obj)->id;
            if (RB_TYPE_P(fstr, T_STRING)) {
                APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
            }
            else {
                APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
            }
            break;
          }
          case T_MOVED: {
            APPEND_F("-> %p", (void*)rb_gc_location(obj));
            break;
          }
          case T_HASH: {
            APPEND_F("[%c] %"PRIdSIZE,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_SIZE(obj));
            break;
          }
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    APPEND_F("%s", RSTRING_PTR(class_path));
                }
                else {
                    APPEND_S("(anon)");
                }
                break;
            }
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    APPEND_F("src:%s", RSTRING_PTR(class_path));
                }
                break;
            }
          case T_OBJECT:
            {
                if (rb_shape_obj_too_complex(obj)) {
                    size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
                    APPEND_F("(too_complex) len:%zu", hash_len);
                }
                else {
                    uint32_t len = ROBJECT_IV_CAPACITY(obj);

                    if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
                        APPEND_F("(embed) len:%d", len);
                    }
                    else {
                        VALUE *ptr = ROBJECT_IVPTR(obj);
                        APPEND_F("len:%d ptr:%p", len, (void *)ptr);
                    }
                }
            }
            break;
          case T_DATA: {
            const struct rb_block *block;
            const rb_iseq_t *iseq;
            if (rb_obj_is_proc(obj) &&
                (block = vm_proc_block(obj)) != NULL &&
                (vm_block_type(block) == block_type_iseq) &&
                (iseq = vm_block_iseq(block)) != NULL) {
                rb_raw_iseq_info(BUFF_ARGS, iseq);
            }
            else if (rb_ractor_p(obj)) {
                rb_ractor_t *r = (void *)DATA_PTR(obj);
                if (r) {
                    APPEND_F("r:%d", r->pub.id);
                }
            }
            else {
                const char * const type_name = rb_objspace_data_type_name(obj);
                if (type_name) {
                    APPEND_F("%s", type_name);
                }
            }
            break;
          }
          case T_IMEMO: {
            APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));

            switch (imemo_type(obj)) {
              case imemo_ment:
                {
                    const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;

                    APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
                             rb_id2name(me->called_id),
                             METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ?  "pub" :
                             METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                             METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                             METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                             METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                             me->def ? rb_method_type_name(me->def->type) : "NULL",
                             me->def ? me->def->aliased : -1,
                             (void *)me->owner, // obj_info(me->owner),
                             (void *)me->defined_class); //obj_info(me->defined_class)));

                    if (me->def) {
                        switch (me->def->type) {
                          case VM_METHOD_TYPE_ISEQ:
                            APPEND_S(" (iseq:");
                            rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
                            APPEND_S(")");
                            break;
                          default:
                            break;
                        }
                    }

                    break;
                }
              case imemo_iseq:
                {
                    const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
                    rb_raw_iseq_info(BUFF_ARGS, iseq);
                    break;
                }
              case imemo_callinfo:
                {
                    const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
                    APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
                             rb_id2name(vm_ci_mid(ci)),
                             vm_ci_flag(ci),
                             vm_ci_argc(ci),
                             vm_ci_kwarg(ci) ? "available" : "NULL");
                    break;
                }
              case imemo_callcache:
                {
                    const struct rb_callcache *cc = (const struct rb_callcache *)obj;
                    VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
                    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

                    APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
                             NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                             cme ? rb_id2name(cme->called_id) : "<NULL>",
                             cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                             (void *)cme,
                             (void *)vm_cc_call(cc));
                    break;
                }
              default:
                break;
            }
            break;
          }
          default:
            break;
        }
    }
  end:

    return pos;
}
const char *
rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
{
    asan_unpoisoning_object(obj) {
        size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
        pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
        if (pos >= buff_size) {} // truncated
    }

    return buff;
}
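/*
 * Usage sketch for rb_raw_obj_info (editorial addition, not in the original
 * source): the caller owns the buffer, and output that does not fit within
 * buff_size is silently truncated, as noted above.
 *
 *     char buff[0x100];
 *     fprintf(stderr, "%s\n", rb_raw_obj_info(buff, sizeof(buff), obj));
 */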
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static rb_atomic_t obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

/* Increments *var atomically and resets *var to 0 when maxval is
 * reached. Returns the wraparound old *var value (0...maxval). */
static rb_atomic_t
atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
{
    rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
    if (UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
        const rb_atomic_t newval = oldval + 1;
        RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
    }
    return oldval % maxval;
}
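/*
 * Illustration (editorial addition): with maxval == 3 and *var starting at
 * 0, successive calls return 0, 1, 2, 0, 1, 2, ... The CAS above can lose
 * against a concurrent increment, so *var itself may briefly exceed maxval,
 * but the modulo on the returned value keeps results within 0...maxval.
 */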
static const char *
obj_info(VALUE obj)
{
    rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
    char *const buff = obj_info_buffers[index];
    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}

static const char *
obj_info_basic(VALUE obj)
{
    rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
    char *const buff = obj_info_buffers[index];

    asan_unpoisoning_object(obj) {
        rb_raw_obj_info_common(buff, OBJ_INFO_BUFFERS_SIZE, obj);
    }

    return buff;
}
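/*
 * Note (editorial addition): obj_info() and obj_info_basic() rotate through
 * OBJ_INFO_BUFFERS_NUM static buffers, so up to that many returned strings
 * stay valid at once -- e.g. several obj_info() results can appear in a
 * single fprintf() argument list without clobbering each other.
 */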
#else

static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}

static const char *
obj_info_basic(VALUE obj)
{
    return obj_type_name(obj);
}
#endif

const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}
void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}
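/*
 * Usage sketch (editorial addition): these helpers are convenient from C
 * code or from a debugger session while chasing GC bugs.
 *
 *     rb_obj_info_dump_loc(obj, __FILE__, __LINE__, __func__);
 *
 *     (gdb) call rb_obj_info_dump(obj)
 */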
#if GC_DEBUG

void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    fprintf(stderr, "created at: %s:%d\n", GET_RVALUE_OVERHEAD(obj)->file, GET_RVALUE_OVERHEAD(obj)->line);

    if (BUILTIN_TYPE(obj) == T_MOVED) {
        fprintf(stderr, "moved?: true\n");
    }
    else {
        fprintf(stderr, "moved?: false\n");
    }
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }

    fprintf(stderr, "marked?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age?         : %d\n", RVALUE_AGE_GET(obj));
    fprintf(stderr, "old?         : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "page swept?: %s\n", GET_HEAP_PAGE(obj)->flags.before_sweep ? "false" : "true");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}
static VALUE
gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
{
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}

void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}

#endif /* GC_DEBUG */
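/*
 * Usage sketch for the sentinel above (editorial addition; GC_DEBUG builds
 * only): attach a warning finalizer to an object suspected of being swept
 * prematurely.
 *
 *     rb_gcdebug_sentinel(obj, "obj-under-watch");
 *     // prints "WARNING: object obj-under-watch(0x...) is inadvertently
 *     // collected" if obj is ever finalized
 */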
/*
 *  call-seq:
 *    GC.add_stress_to_class(class[, ...])
 *
 *  Raises NoMemoryError when allocating an instance of the given classes.
 */
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        set_stress_to_class(rb_ary_hidden_new(argc));
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}
/*
 *  call-seq:
 *    GC.remove_stress_to_class(class[, ...])
 *
 *  No longer raises NoMemoryError when allocating an instance of the
 *  given classes.
 */
static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        if (RARRAY_LEN(stress_to_class) == 0) {
            set_stress_to_class(0);
        }
    }

    return Qnil;
}
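/*
 * Example (editorial addition; Ruby, on debug builds that define these
 * methods):
 *
 *     GC.add_stress_to_class(String)
 *     String.new("boom")               # raises NoMemoryError
 *     GC.remove_stress_to_class(String)
 */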
/*
 * Document-module: ObjectSpace
 *
 *  The ObjectSpace module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  ObjectSpace also provides support for object finalizers, procs that will be
 *  called after a specific object was destroyed by garbage collection. See
 *  the documentation for +ObjectSpace.define_finalizer+ for important
 *  information on how to use this method correctly.
 *
 *     a = "A"
 *     b = "B"
 *
 *     ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
 *
 *     a = nil
 *     b = nil
 *
 *  _produces:_
 *
 *     Finalizer two on 537763470
 *     Finalizer one on 537763480
 */
/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including
 *  time, length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *
 *    require 'rdoc/rdoc'
 *
 *    GC::Profiler.report
 *
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */
#include "gc.rbinc"

void
Init_GC(void)
{
    if (getenv(RUBY_GC_LIBRARY_PATH) != NULL && !dln_supported_p()) {
        rb_warn(RUBY_GC_LIBRARY_PATH " is ignored because this executable file can't load extension libraries");
    }

    malloc_offset = gc_compute_malloc_offset();
    VALUE rb_mObjSpace;
    VALUE rb_mProfiler;
    VALUE gc_constants;

    rb_mGC = rb_define_module("GC");

    gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(BASE_SLOT_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
    if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
        rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
    }
    OBJ_FREEZE(gc_constants);
    /* Internal constants in the garbage collector. */
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
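    /*
     * Example (editorial addition; Ruby -- values vary by build):
     *
     *     GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]     # e.g. 40 on a typical 64-bit build
     *     GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] # e.g. 5
     */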
    rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
    rb_mObjSpace = rb_define_module("ObjectSpace");

    rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);

    rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);

    /* internal methods */
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
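    /*
     * Example (editorial addition; Ruby): object_id and _id2ref round-trip
     * for a live heap object.
     *
     *     s = "hello"
     *     ObjectSpace._id2ref(s.object_id).equal?(s)  # => true
     */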
#if MALLOC_ALLOCATED_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif
    if (GC_COMPACTION_SUPPORTED) {
        rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
        rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
    }
    else {
        rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
        rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
        /* When !GC_COMPACTION_SUPPORTED, this method is not defined in gc.rb */
        rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
    }
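    /*
     * Note (editorial addition): methods bound to rb_f_notimplement raise
     * NotImplementedError when called, and respond_to? reports false for
     * them, so GC.respond_to?(:compact) works as a portable feature check.
     */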
    if (GC_DEBUG_STRESS_TO_CLASS) {
        rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
        rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
    }
    {
        VALUE opts;
        /* \GC build options */
        rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
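        /* Note (editorial addition): `#o` stringizes the macro argument, so
         * every build option below that expands to a nonzero value pushes
         * its own name (e.g. "GC_ENABLE_LAZY_SWEEP") onto GC::OPTS. */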
        OPT(RGENGC_CHECK_MODE);
        OPT(RGENGC_PROFILE);
        OPT(RGENGC_ESTIMATE_OLDMALLOC);
        OPT(GC_PROFILE_MORE_DETAIL);
        OPT(GC_ENABLE_LAZY_SWEEP);
        OPT(CALC_EXACT_MALLOC_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE_CHECK);
        OPT(GC_PROFILE_DETAIL_MEMORY);
        OPT(GC_COMPACTION_SUPPORTED);
#undef OPT
        OBJ_FREEZE(opts);
    }
}
#ifdef ruby_xmalloc
#undef ruby_xmalloc
#endif
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif
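/*
 * Note (editorial addition): when USE_GC_MALLOC_OBJ_INFO_DETAILS is
 * enabled, the public names above are macros that record the caller's
 * __FILE__/__LINE__ before delegating to the *_body implementations. The
 * #undefs expose out-of-line definitions so code that takes the functions'
 * addresses or links against the plain symbols still works; these fallbacks
 * record this translation unit as the allocation site instead.
 */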
void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
}