#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc
#define sighandler_t ruby_sighandler_t
#ifndef HAVE_MALLOC_USABLE_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
#  include RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#if defined _WIN32 || defined __CYGWIN__
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/symbol.h"
#include "internal/variable.h"
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap
size_add_overflow(size_t x, size_t y)
#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
size_mul_add_overflow(size_t x, size_t y, size_t z)
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
size_mul_or_raise(size_t x, size_t y, VALUE exc)
    return size_mul_or_raise(x, y, exc);
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    return size_mul_add_or_raise(x, y, z, exc);
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
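/*
 * Editorial usage sketch (not from the original source): the
 * size_mul_*_or_raise family wraps the overflow-detecting helpers above so
 * that allocation-size arithmetic raises `exc` instead of silently
 * wrapping. A typical call, with hypothetical operand names, would be:
 *
 *   size_t bytes = size_mul_add_or_raise(count, element_size,
 *                                        header_bytes, rb_eArgError);
 */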
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
volatile VALUE rb_gc_guarded_val;
    rb_gc_guarded_val = val;
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024)
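/*
 * Editorial note: these compile-time defaults broadly correspond to the
 * RUBY_GC_* environment variables (e.g. RUBY_GC_HEAP_INIT_SLOTS,
 * RUBY_GC_HEAP_GROWTH_FACTOR, RUBY_GC_MALLOC_LIMIT), which can override
 * them when the process starts.
 */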
#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
# define RGENGC_DEBUG_ENABLED(level) 0
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0
#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
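/*
 * Editorial note: the RGENGC_* and GC_PROFILE_* switches above are
 * compile-time toggles, and most default to off; lazy sweeping,
 * incremental marking (via USE_RINCGC), and oldmalloc estimation are the
 * exceptions that default on. GC_ASSERT consequently compiles to nothing
 * unless RGENGC_CHECK_MODE > 0.
 */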
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;
    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t allocate_increase;
    size_t allocate_limit;
    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#define FL_FROM_FREELIST FL_USER0
#define RMOVED(obj) ((struct RMoved *)(obj))
#if defined(_MSC_VER) || defined(__CYGWIN__)
#if defined(_MSC_VER) || defined(__CYGWIN__)
#define popcount_bits rb_popcount_intptr
#define STACK_CHUNK_SIZE 500
#if GC_ENABLE_INCREMENTAL_MARK
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#if GC_ENABLE_INCREMENTAL_MARK
    unsigned int during_incremental_marking : 1;
#if GC_PROFILE_MORE_DETAIL
#if RGENGC_PROFILE > 0
    size_t total_generated_normal_object_count;
    size_t total_generated_shady_object_count;
    size_t total_shade_operation_count;
    size_t total_promoted_count;
    size_t total_remembered_normal_object_count;
    size_t total_remembered_shady_object_count;
#if RGENGC_PROFILE >= 2
    size_t generated_normal_object_count_types[RUBY_T_MASK];
    size_t generated_shady_object_count_types[RUBY_T_MASK];
    size_t remembered_normal_object_count_types[RUBY_T_MASK];
    size_t remembered_shady_object_count_types[RUBY_T_MASK];
#if RGENGC_ESTIMATE_OLDMALLOC
    size_t oldmalloc_increase;
    size_t oldmalloc_increase_limit;
#if RGENGC_CHECK_MODE >= 2
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_DEBUG_STRESS_TO_CLASS
#define HEAP_PAGE_ALIGN_LOG 14
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH)
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
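/*
 * Editorial worked example: with HEAP_PAGE_ALIGN_LOG 14 a page is 16
 * KiB-aligned, so the low 14 bits of an RVALUE pointer locate it inside
 * its page. Assuming (illustratively) 40-byte RVALUEs and a 64-bit
 * bits_t, an object at page offset 0x1f40 is slot NUM_IN_PAGE = 0x1f40/40
 * = 200, giving BITMAP_INDEX = 200/64 = 3 and BITMAP_BIT = 1 << (200 % 64)
 * = 1 << 8. The slot size is an assumption for the example, not a value
 * taken from this excerpt.
 */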
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)
#define ruby_initial_gc_stress gc_params.gc_stress
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length objspace->heap_pages.sorted_length
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
#define global_list objspace->global_list
#define ruby_gc_stressful objspace->flags.gc_stressful
#define ruby_gc_stress_mode objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class objspace->stress_to_class
#define stress_to_class 0
#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
#define dont_gc_on() (objspace->flags.dont_gc = 1)
#define dont_gc_off() (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (((int)(b)), objspace->flags.dont_gc = (b))
#define dont_gc_val() (objspace->flags.dont_gc)
#if RGENGC_CHECK_MODE > 0
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define is_incremental_marking(objspace) FALSE
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define will_be_incremental_marking(objspace) FALSE
#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
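/*
 * Editorial note: when `long` matches pointer size, a nonspecial object id
 * is simply the object's address with FIXNUM_FLAG or-ed in, and
 * obj_id_to_ref() recovers the address by xor-ing the flag back out. On
 * targets where only `long long` matches pointer size, ids are halved
 * addresses boxed with LL2NUM, and obj_id_to_ref() shifts them back.
 */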
#define RANY(o) ((RVALUE*)(o))
#define RZOMBIE(o) ((struct RZombie *)(o))
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static void gc_marks_step(rb_objspace_t *objspace, size_t slots);
static void shrink_stack_chunk_cache(mark_stack_t *stack);
static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long long val = __builtin_ppc_get_timebase();
#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"
typedef clock_t tick_t;
#define PRItick "llu"
typedef double tick_t;
#define PRItick "4.9f"
    return getrusage_time();
#error "choose tick type"
#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    expr; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
#define MEASURE_LINE(expr) expr
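/*
 * Editorial usage sketch: with PRINT_MEASURE_LINE enabled, wrapping a
 * statement as MEASURE_LINE(gc_marks(objspace, full_mark)); prints the
 * tick-count delta and the stringized statement to stderr; with it
 * disabled the macro expands to the bare expression at zero cost. (The
 * `expr;` line inside the do-block above was missing from this excerpt
 * and has been restored, since the macro must execute its argument.)
 */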
asan_unpoison_object_temporary(VALUE obj)
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
#define RVALUE_OLD_AGE 3
#define RVALUE_AGE_SHIFT 5
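/*
 * Editorial note: an object's generational age is stored in
 * RBASIC(obj)->flags starting at bit RVALUE_AGE_SHIFT (5); once the
 * counter reaches RVALUE_OLD_AGE (3) the object counts as old generation
 * and drops out of minor GC's working set.
 */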
check_rvalue_consistency_force(const VALUE obj, int terminate)
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
            fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                    (void *)obj, (void *)page);
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
        if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                    obj_info(obj), age);
            fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                    obj_info(obj), age);
            fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);
    if (RB_SPECIAL_CONST_P(obj)) {
    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
        asan_poison_object(obj);
RVALUE_MARKED(VALUE obj)
    check_rvalue_consistency(obj);
RVALUE_PINNED(VALUE obj)
    check_rvalue_consistency(obj);
RVALUE_WB_UNPROTECTED(VALUE obj)
    check_rvalue_consistency(obj);
RVALUE_MARKING(VALUE obj)
    check_rvalue_consistency(obj);
RVALUE_REMEMBERED(VALUE obj)
    check_rvalue_consistency(obj);
RVALUE_UNCOLLECTIBLE(VALUE obj)
    check_rvalue_consistency(obj);
RVALUE_OLD_P_RAW(VALUE obj)
    return (RBASIC(obj)->flags & promoted) == promoted;
RVALUE_OLD_P(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
#if RGENGC_CHECK_MODE || GC_DEBUG
RVALUE_AGE(VALUE obj)
    check_rvalue_consistency(obj);
#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
    int age = RVALUE_FLAGS_AGE(flags);
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    RVALUE_DEMOTE_RAW(objspace, obj);
    if (RVALUE_MARKED(obj)) {
    check_rvalue_consistency(obj);
RVALUE_AGE_RESET_RAW(VALUE obj)
RVALUE_AGE_RESET(VALUE obj)
    check_rvalue_consistency(obj);
    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
RVALUE_BLACK_P(VALUE obj)
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
RVALUE_GREY_P(VALUE obj)
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
RVALUE_WHITE_P(VALUE obj)
    return RVALUE_MARKED(obj) == FALSE;
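/*
 * Editorial note: these three predicates encode the tri-color abstraction
 * on top of the per-page bitmaps: white = not marked, grey = marked with
 * the marking bit still set (children not yet fully traversed), black =
 * marked with the marking bit clear.
 */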
    rb_bug("lazy sweeping underway when freeing object space");
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
    heap_pages_expand_sorted_to(objspace, next_length);
heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
    heap_pages_expand_sorted(objspace);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
          obj % sizeof(RVALUE) == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#if GC_ENABLE_INCREMENTAL_MARK
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        heap->pooled_pages = page;
        objspace->rincgc.pooled_slots += page->free_slots;
static void rb_aligned_free(void *ptr);
        heap_unlink_page(objspace, heap_tomb, page);
        heap_page_free(objspace, page);
    if (page_body == 0) {
    page = calloc1(sizeof(struct heap_page));
        rb_aligned_free(page_body);
    end = start + limit;
        mid = (lo + hi) / 2;
    for (p = start; p != end; p++) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        heap_unlink_page(objspace, heap_tomb, page);
    const char *method = "recycle";
    page = heap_page_resurrect(objspace);
        page = heap_page_allocate(objspace);
        method = "allocate";
    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                   "heap_pages_sorted_length: %"PRIdSIZE", "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "tomb->total_pages: %"PRIdSIZE"\n",
    struct heap_page *page = heap_page_create(objspace);
    heap_add_page(objspace, heap, page);
    heap_add_freepage(heap, page);
    heap_allocatable_pages_set(objspace, add);
    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, heap);
    if (goal_ratio == 0.0) {
        if (f < 1.0) f = 1.1;
        next_used = (size_t)(f * used);
            " G(%1.2f), f(%1.2f),"
            goal_ratio, f, used, next_used);
    if (next_used > max_used) next_used = max_used;
    return next_used - used;
heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
    size_t next_used_limit = used + additional_pages;
    heap_allocatable_pages_set(objspace, next_used_limit - used);
    gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %"PRIdSIZE"\n",
    gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
    heap_assign_page(objspace, heap);
    gc_sweep_continue(objspace, heap);
    gc_marks_continue(objspace, heap);
    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
#define gc_event_hook_prep(objspace, event, data, prep) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
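/*
 * Editorial usage sketch: call sites keep the common path to a single flag
 * test, e.g.
 *
 *   gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
 *
 * only falls through into gc_event_hook_body() when a TracePoint for that
 * internal event is actually armed.
 */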
#if !__has_feature(memory_sanitizer)
#if RACTOR_CHECK_MODE
    rb_ractor_setup_belonging(obj);
#if RGENGC_CHECK_MODE
        check_rvalue_consistency(obj);
        if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
        if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
        if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
#if RGENGC_OLD_NEWOBJ_CHECK > 0
        if (--newobj_cnt == 0) {
            gc_mark_set(objspace, obj);
            RVALUE_AGE_SET_OLD(objspace, obj);
    asan_unpoison_object(obj, true);
    heap_prepare(objspace, heap);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        rb_bug("object allocation during garbage collection phase");
    while ((obj = ractor_cached_freeobj(objspace, cr)) == Qfalse) {
        ractor_cache_slots(objspace, cr);
    newobj_init(klass, flags, wb_protected, objspace, obj);
    return newobj_slowpath(klass, flags, objspace, cr, TRUE);
    return newobj_slowpath(klass, flags, objspace, cr, FALSE);
#if GC_DEBUG_STRESS_TO_CLASS
        for (i = 0; i < cnt; ++i) {
          (obj = ractor_cached_freeobj(objspace, cr)) != Qfalse)) {
        newobj_init(klass, flags, wb_protected, objspace, obj);
        obj = wb_protected ?
            newobj_slowpath_wb_protected(klass, flags, objspace, cr) :
            newobj_slowpath_wb_unprotected(klass, flags, objspace, cr);
    VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR());
    return newobj_fill(obj, v1, v2, v3);
    VALUE obj = newobj_of0(klass, flags, wb_protected, cr);
    return newobj_fill(obj, v1, v2, v3);
    return newobj_of(klass, flags, 0, 0, 0, FALSE);
    return newobj_of(klass, flags, 0, 0, 0, TRUE);
    return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE);
#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
#define IMEMO_NAME(x) case imemo_##x: return #x;
    return newobj_of(v0, flags, v1, v2, v3, TRUE);
    return newobj_of(v0, flags, v1, v2, v3, FALSE);
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
imemo_memsize(VALUE obj)
        size += sizeof(RANY(obj)->as.imemo.ment.def);
    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
    if (klass) Check_Type(klass, T_CLASS);
    if (klass) Check_Type(klass, T_CLASS);
    if (RTYPEDDATA_P(obj)) {
        if (ptr && type->function.dsize) {
            return type->function.dsize(ptr);
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    register size_t hi, lo, mid;
        mid = (lo + hi) / 2;
        if (page->start <= p) {
free_const_entry_i(VALUE value, void *data)
    xfree((void *)value);
iv_index_tbl_free(struct st_table *tbl)
    st_foreach(tbl, free_iv_index_tbl_free_i, 0);
    for (int i=0; i<ccs->len; i++) {
        void *ptr = asan_poisoned_object_p((VALUE)cc);
        asan_unpoison_object((VALUE)cc, false);
        if (is_pointer_to_heap(objspace, (void *)cc) &&
            cc->klass == klass) {
            asan_poison_object((VALUE)cc);
        asan_poison_object((VALUE)cc);
        vm_cc_invalidate(cc);
cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
    for (int i=0; i<ccs->len; i++) {
cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
    rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
    rb_bug("obj_free() called for broken object");
        obj_free_object_id(objspace, obj);
#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
    CHECK(RVALUE_WB_UNPROTECTED);
    CHECK(RVALUE_MARKED);
    CHECK(RVALUE_MARKING);
    CHECK(RVALUE_UNCOLLECTIBLE);
        else if (ROBJ_TRANSIENT_P(obj)) {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
        cc_table_free(objspace, obj, FALSE);
        if (RANY(obj)->as.klass.ptr)
#if USE_DEBUG_COUNTER
        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
            if (RHASH_TRANSIENT_P(obj)) {
        if (RANY(obj)->as.regexp.ptr) {
            int free_immediately = FALSE;
            void (*dfree)(void *);
            if (RTYPEDDATA_P(obj)) {
                dfree = RANY(obj)->as.typeddata.type->function.dfree;
                if (0 && free_immediately == 0) {
                    fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
                dfree = RANY(obj)->as.data.dfree;
            else if (free_immediately) {
                make_zombie(objspace, obj, dfree, data);
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
#if USE_DEBUG_COUNTER
        if (RANY(obj)->as.file.fptr) {
            make_io_zombie(objspace, obj);
        if (RICLASS_OWNS_M_TBL_P(obj)) {
        cc_table_free(objspace, obj, FALSE);
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
        else if (RSTRUCT_TRANSIENT_P(obj)) {
            xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
        make_zombie(objspace, obj, 0, 0);
#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
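/*
 * Editorial note: since the move to monotonic object ids (Ruby 2.7), heap
 * object ids come from a counter rather than the object's address; ids
 * advance in steps of OBJ_ID_INCREMENT (half an RVALUE) starting at
 * OBJ_ID_INITIAL. The spacing appears chosen to stay disjoint from the id
 * encodings used for special constants elsewhere in this file; that
 * reading is an editorial inference, not a claim from the source.
 */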
static const struct st_hash_type object_id_hash_type = {
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
    pagesize = (int)sysconf(_SC_PAGE_SIZE);
#if RGENGC_ESTIMATE_OLDMALLOC
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
        pstart = page->start;
        if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
objspace_each_objects_protected(VALUE arg)
incremental_enable(VALUE _)
    if (prev_dont_incremental) {
internal_object_p(VALUE obj)
    asan_unpoison_object(obj, false);
    if (ptr || !used_p) {
        asan_poison_object(obj);
    return internal_object_p(obj);
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
    for (; p != pend; p++) {
        if (!internal_object_p(v)) {
            if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
    return os_obj_of(of);
should_be_callable(VALUE block)
should_be_finalizable(VALUE obj)
    should_be_finalizable(obj);
    should_be_callable(block);
        rb_warn("finalizer references object to be finalized");
    return define_final0(obj, block);
    table = (VALUE)data;
    for (i = 0; i < len; i++) {
    RBASIC_CLEAR_CLASS(table);
    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);
    table = (VALUE)data;
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    rb_set_errinfo(saved.errinfo))
    saved.cfp = ec->cfp;
    for (i = saved.finished;
         saved.finished = ++i) {
        run_single_final(RARRAY_AREF(table, i), saved.objid);
#undef RESTORE_FINALIZER
        run_finalizer(objspace, zombie, (VALUE)table);
        asan_unpoison_object(zombie, false);
        next_zombie = RZOMBIE(zombie)->next;
        run_final(objspace, zombie);
            obj_free_object_id(objspace, zombie);
        RZOMBIE(zombie)->basic.flags = 0;
        heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
        zombie = next_zombie;
        finalize_list(objspace, zombie);
gc_finalize_deferred(void *dmy)
    finalize_deferred(objspace);
        rb_bug("gc_finalize_deferred_register: can't register finalizer.");
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
    finalize_deferred(objspace);
        run_finalizer(objspace, curr->obj, curr->table);
    unsigned int lock_lev;
        void *poisoned = asan_poisoned_object_p(vp);
        asan_unpoison_object(vp, false);
            if (RTYPEDDATA_P(vp)) {
                RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
            else if (RANY(p)->as.data.dfree) {
                make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
            if (RANY(p)->as.file.fptr) {
                make_io_zombie(objspace, vp);
        asan_poison_object(vp);
           is_swept_object(objspace, ptr) ||
    if (!is_garbage_object(objspace, ptr)) {
    if (rb_special_const_p(obj)) return FALSE;
    check_rvalue_consistency(obj);
    return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
    return is_garbage_object(objspace, obj);
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
    if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
        is_live_object(objspace, orig)) {
        if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
    return id2ref(objid);
        return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
    else if (FLONUM_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
    return get_heap_object_id(obj);
cached_object_id(VALUE obj)
nonspecial_obj_id_(VALUE obj)
    return rb_find_object_id(obj, nonspecial_obj_id_);
    return rb_find_object_id(obj, cached_object_id);
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
    size_t *total_size = data_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
obj_memsize_of(VALUE obj, int use_all_types)
        size += ROBJECT_NUMIV(obj) * sizeof(VALUE);
        if (RICLASS_OWNS_M_TBL_P(obj)) {
        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) != NULL) {
        if (RFILE(obj)->fptr) {
        size += imemo_memsize(obj);
        rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
    return obj_memsize_of(obj, TRUE);
type_sym(size_t type)
#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
    if (!RB_TYPE_P(hash, T_HASH))
    for (i = 0; i <= T_MASK; i++) {
    for (; p < pend; p++) {
        void *poisoned = asan_poisoned_object_p(vp);
        asan_unpoison_object(vp, false);
        asan_poison_object(vp);
    for (i = 0; i <= T_MASK; i++) {
gc_setup_mark_bits(struct heap_page *page)
    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
        rb_bug("Couldn't protect page %p", (void *)body);
    gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
        rb_bug("Couldn't unprotect page %p", (void *)body);
    gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
    char from_freelist = 0;
    if (gc_is_moveable_obj(objspace, (VALUE)p)) {
        gc_move(objspace, (VALUE)p, dest);
        gc_pin(objspace, (VALUE)p);
        if (from_freelist) {
        if (next == sweep_page) {
static void read_barrier_handler(intptr_t address)
    address -= address % sizeof(RVALUE);
    obj = (VALUE)address;
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
typedef void (*signal_handler)(int);
static signal_handler old_sigsegv_handler;
static LONG WINAPI read_barrier_signal(EXCEPTION_POINTERS * info)
    if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        read_barrier_handler((intptr_t)info->ExceptionRecord->ExceptionInformation[1]);
        return EXCEPTION_CONTINUE_EXECUTION;
    return EXCEPTION_CONTINUE_SEARCH;
uninstall_handlers(void)
    signal(SIGSEGV, old_sigsegv_handler);
    SetUnhandledExceptionFilter(old_handler);
install_handlers(void)
    old_sigsegv_handler = signal(SIGSEGV, NULL);
    old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
static struct sigaction old_sigbus_handler;
static struct sigaction old_sigsegv_handler;
read_barrier_signal(int sig, siginfo_t * info, void * data)
    struct sigaction prev_sigbus, prev_sigsegv;
    sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
    sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
    sigset_t set, prev_set;
    sigaddset(&set, SIGBUS);
    sigaddset(&set, SIGSEGV);
    sigprocmask(SIG_UNBLOCK, &set, &prev_set);
    read_barrier_handler((intptr_t)info->si_addr);
    sigaction(SIGBUS, &prev_sigbus, NULL);
    sigaction(SIGSEGV, &prev_sigsegv, NULL);
    sigprocmask(SIG_SETMASK, &prev_set, NULL);
uninstall_handlers(void)
    sigaction(SIGBUS, &old_sigbus_handler, NULL);
    sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
install_handlers(void)
    struct sigaction action;
    memset(&action, 0, sizeof(struct sigaction));
    sigemptyset(&action.sa_mask);
    action.sa_sigaction = read_barrier_signal;
    action.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigaction(SIGBUS, &action, &old_sigbus_handler);
    sigaction(SIGSEGV, &action, &old_sigsegv_handler);
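/*
 * Editorial note: compaction implements its read barrier with page
 * protection rather than instrumented loads. Pages holding moved objects
 * are made inaccessible (VirtualProtect on Windows, with SIGBUS/SIGSEGV
 * handlers on POSIX), so a stray access faults; the handlers above funnel
 * the faulting address into read_barrier_handler(), which rounds it down
 * to its RVALUE slot before recovering.
 */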
revert_stack_objects(VALUE stack_obj, void *ctx)
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    gc_unprotect_pages(objspace, heap);
    uninstall_handlers();
    check_stack_for_moved(objspace);
    gc_update_references(objspace, heap);
    int moved_slots = 0;
    int finished_compacting = 0;
    bits_t *mark_bits, *pin_bits;
    p = sweep_page->start;
        bitset = pin_bits[i] & ~mark_bits[i];
        if (finished_compacting) {
            heap_page_add_freeobj(objspace, sweep_page, dest);
            if (!try_move(objspace, heap, sweep_page, dest)) {
                finished_compacting = 1;
                gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
                heap_page_add_freeobj(objspace, sweep_page, dest);
                gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(dest));
    return finished_compacting;
    int empty_slots = 0, freed_slots = 0, final_slots = 0;
    int was_compacting = 0;
    gc_report(2, objspace, "page_sweep: start.\n");
        gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
        gc_compact_finish(objspace, heap);
    asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
    asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
    p = sweep_page->start;
    if (out_of_range_bits != 0) {
            asan_unpoison_object(vp, false);
            gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if RGENGC_CHECK_MODE
            if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
            if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
            if (obj_free(objspace, vp)) {
                heap_page_add_freeobj(objspace, sweep_page, vp);
                gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
            rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
            gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
            heap_page_add_freeobj(objspace, sweep_page, vp);
            if (was_compacting) {
                heap_page_add_freeobj(objspace, sweep_page, vp);
        if (gc_fill_swept_page(objspace, heap, sweep_page, &freed_slots, &empty_slots)) {
            gc_compact_finish(objspace, heap);
    gc_setup_mark_bits(sweep_page);
#if GC_PROFILE_MORE_DETAIL
        record->removing_objects += final_slots + freed_slots;
        record->empty_objects += empty_slots;
    if (0) fprintf(stderr, "gc_page_sweep(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
    sweep_page->free_slots = freed_slots + empty_slots;
        gc_finalize_deferred_register(objspace);
    gc_report(2, objspace, "page_sweep: end.\n");
    return freed_slots + empty_slots;
    heap_set_increment(objspace, 1);
    if (!heap_increment(objspace, heap)) {
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
#if RGENGC_CHECK_MODE
    switch (prev_mode) {
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
#if GC_ENABLE_INCREMENTAL_MARK
    heap->pooled_pages = NULL;
    objspace->rincgc.pooled_slots = 0;
    list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
    gc_sweep_start_heap(objspace, heap_eden);
    gc_report(1, objspace, "gc_sweep_finish\n");
    gc_prof_set_heap_info(objspace);
    heap_pages_free_unused_pages(objspace);
    if (heap_allocatable_pages < heap_tomb->total_pages) {
        heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
    int unlink_limit = 3;
    int swept_slots = 0;
#if GC_ENABLE_INCREMENTAL_MARK
    gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
    gc_report(2, objspace, "gc_sweep_step\n");
#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_start(objspace);
        int free_slots = gc_page_sweep(objspace, heap, sweep_page);
            heap_unlink_page(objspace, heap, sweep_page);
            heap_add_page(objspace, heap_tomb, sweep_page);
#if GC_ENABLE_INCREMENTAL_MARK
                heap_add_poolpage(objspace, heap, sweep_page);
                heap_add_freepage(heap, sweep_page);
                if (swept_slots > 2048) {
            heap_add_freepage(heap, sweep_page);
        gc_sweep_finish(objspace);
#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_stop(objspace);
        gc_sweep_step(objspace, heap);
    unsigned int lock_lev;
    gc_sweep_step(objspace, heap);
    int empty_slots = 0, freed_slots = 0;
        bitset = pin_bits[i] & ~mark_bits[i];
            gc_move(objspace, object, forwarding_object);
            heap_page_add_freeobj(objspace, GET_HEAP_PAGE(object), object);
    page->free_slots += (empty_slots + freed_slots);
    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);
        gc_sweep_start(objspace);
        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);
        gc_sweep_start(objspace);
    gc_heap_prepare_minimum_pages(objspace, heap_eden);
stack_chunk_alloc(void)
        chunk = chunk->next;
    stack->cache = chunk;
    chunk = stack->cache;
    next = stack->cache;
        next = stack_chunk_alloc();
    stack->chunk = next;
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    while (chunk != NULL) {
        rb_bug("push_mark_stack() called for broken object");
        push_mark_stack_chunk(stack);
    if (is_mark_stack_empty(stack)) {
    if (stack->index == 1) {
        pop_mark_stack_chunk(stack);
#if GC_ENABLE_INCREMENTAL_MARK
    for (i=0; i<limit; i++) {
        if (chunk->data[i] == obj) {
    int limit = stack->index;
        if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
        chunk = chunk->next;
        limit = stack->limit;
    rb_bug("invalid_mark_stack: unreachable");
    for (i=0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
#define STACK_START (ec->machine.stack_start)
#define STACK_END (ec->machine.stack_end)
#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
#ifdef __EMSCRIPTEN__
#undef STACK_GROW_DIRECTION
#define STACK_GROW_DIRECTION 1
#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                       : (size_t)(STACK_END - STACK_START + 1))
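/*
 * Editorial note: STACK_START and STACK_END are VALUE pointers, so
 * STACK_LENGTH counts VALUE slots between the saved stack base and the
 * current machine stack end. When STACK_GROW_DIRECTION is 0 the growth
 * direction is unknown at compile time and the comparison form above
 * handles both orderings at runtime.
 */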
#if !STACK_GROW_DIRECTION
#define PREVENT_STACK_OVERFLOW 1
#ifndef PREVENT_STACK_OVERFLOW
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
# define PREVENT_STACK_OVERFLOW 1
# define PREVENT_STACK_OVERFLOW 0
#if PREVENT_STACK_OVERFLOW
    return length > maximum_length;
#define stack_check(ec, water_mark) FALSE
#define STACKFRAME_FOR_CALL_CFUNC 2048
mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
        gc_mark_maybe(objspace, v);
    if (end <= start) return;
    mark_locations_array(objspace, start, n);
    for (i=0; i<n; i++) {
        gc_mark(objspace, values[i]);
    for (i=0; i<n; i++) {
        gc_mark_and_pin(objspace, values[i]);
    for (i=0; i<n; i++) {
        if (is_markable_object(objspace, values[i])) {
            gc_mark_and_pin(objspace, values[i]);
    gc_mark_stack_values(objspace, n, values);
    gc_mark(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark_and_pin(objspace, (VALUE)value);
    gc_mark(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark_and_pin(objspace, (VALUE)value);
    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
    if (RHASH_AR_TABLE_P(hash)) {
    gc_mark(objspace, RHASH(hash)->ifnone);
    gc_mark(objspace, me->owner);
    switch (def->type) {
        if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
        gc_mark(objspace, (VALUE)def->body.iseq.cref);
        gc_mark(objspace, def->body.attr.location);
        gc_mark(objspace, def->body.bmethod.proc);
        gc_mark(objspace, (VALUE)def->body.alias.original_me);
        gc_mark(objspace, (VALUE)def->body.refined.orig_me);
        gc_mark(objspace, (VALUE)def->body.refined.owner);
mark_method_entry_i(VALUE me, void *data)
    gc_mark(objspace, me);
mark_const_entry_i(VALUE value, void *data)
    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
                                 const VALUE *stack_start, const VALUE *stack_end);
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;
    memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
    mark_stack_locations(objspace, ec, stack_start, stack_end);
    VALUE *stack_start, *stack_end;
    mark_stack_locations(objspace, ec, stack_start, stack_end);
                     const VALUE *stack_start, const VALUE *stack_end)
    gc_mark_locations(objspace, stack_start, stack_end);
#if defined(__mc68000__)
    gc_mark_locations(objspace,
                      (VALUE*)((char*)stack_start + 2),
                      (VALUE*)((char*)stack_end - 2));
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        asan_unpoison_object(obj, false);
        gc_mark_and_pin(objspace, obj);
        asan_poison_object(obj);
    if (RVALUE_MARKED(obj)) return 0;
#if RGENGC_PROFILE > 0
    objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
    if (RVALUE_WB_UNPROTECTED(obj)) {
        if (gc_remember_unprotected(objspace, obj)) {
            gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
        if (!RVALUE_OLD_P(obj)) {
            if (RVALUE_MARKED(obj)) {
                gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                RVALUE_AGE_SET_OLD(objspace, obj);
                if (!RVALUE_MARKING(obj)) {
                    gc_grey(objspace, obj);
                rgengc_remember(objspace, obj);
            gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
            RVALUE_AGE_SET_CANDIDATE(objspace, obj);
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#if GC_ENABLE_INCREMENTAL_MARK
    check_rvalue_consistency(obj);
    if (!RVALUE_OLD_P(obj)) {
        gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
        RVALUE_AGE_INC(objspace, obj);
        RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
    check_rvalue_consistency(obj);
static void reachable_objects_from_callback(VALUE obj);
    rgengc_check_relation(objspace, obj);
    if (!gc_mark_set(objspace, obj)) return;
           (void *)obj, obj_type_name(obj),
        rb_bug("try to mark T_NONE object");
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
    reachable_objects_from_callback(obj);
    GC_ASSERT(is_markable_object(objspace, obj));
    if (!is_markable_object(objspace, obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);
    if (!is_markable_object(objspace, obj)) return;
    gc_mark_ptr(objspace, obj);
    return RVALUE_MARKED(obj) ? TRUE : FALSE;
    if (RVALUE_OLD_P(obj)) {
        gc_mark_values(objspace, (long)env->env_size, env->env);
        gc_mark(objspace, (VALUE)env->iseq);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
        gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
        gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
        gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
        mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        gc_mark(objspace, (VALUE)vm_cc_cme(cc));
        gc_mark(objspace, ice->value);
#if VM_CHECK_MODE > 0
    gc_mark_set_parent(objspace, obj);
        rb_bug("rb_gc_mark() called for broken object");
        gc_mark_imemo(objspace, obj);
        cc_table_mark(objspace, obj);
        if (RICLASS_OWNS_M_TBL_P(obj)) {
        cc_table_mark(objspace, obj);
        gc_mark(objspace, root);
        for (i=0; i < len; i++) {
            gc_mark(objspace, ptr[i]);
            RARRAY_TRANSIENT_P(obj)) {
        mark_hash(objspace, obj);
        if (STR_SHARED_P(obj)) {
        if (mark_func) (*mark_func)(ptr);
        const VALUE *const ptr = ROBJECT_IVPTR(obj);
        for (i = 0; i < len; i++) {
            gc_mark(objspace, ptr[i]);
            ROBJ_TRANSIENT_P(obj)) {
        const VALUE *const ptr = RSTRUCT_CONST_PTR(obj);
        for (i=0; i<len; i++) {
            gc_mark(objspace, ptr[i]);
            RSTRUCT_TRANSIENT_P(obj)) {
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;
    while (pop_mark_stack(mstack, &obj)) {
        if (obj == Qundef) continue;
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        gc_mark_children(objspace, obj);
#if GC_ENABLE_INCREMENTAL_MARK
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
    return gc_mark_stacked_objects(objspace, TRUE, count);
    return gc_mark_stacked_objects(objspace, FALSE, 0);
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];
show_mark_ticks(void)
    fprintf(stderr, "mark ticks result:\n");
    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    tick_t start_tick = tick();
    const char *prev_category = 0;
    if (mark_ticks_categories[0] == 0) {
        atexit(show_mark_ticks);
    if (categoryp) *categoryp = "xxx";
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
    prev_category = category; \
    start_tick = tick(); \
#define MARK_CHECKPOINT_PRINT_TICK(category)
#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
    if (vm->self) gc_mark(objspace, vm->self);
    mark_current_machine_context(objspace, ec);
        gc_mark_maybe(objspace, *list->varptr);
#undef MARK_CHECKPOINT
#if RGENGC_CHECK_MODE >= 4
#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
static struct reflist *
reflist_create(VALUE obj)
    struct reflist *refs = xmalloc(sizeof(struct reflist));
    refs->list[0] = obj;
reflist_destruct(struct reflist *refs)
reflist_add(struct reflist *refs, VALUE obj)
    if (refs->pos == refs->size) {
    refs->list[refs->pos++] = obj;
reflist_dump(struct reflist *refs)
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj)) {
            fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
            fprintf(stderr, "<%s>", obj_info(obj));
        if (i+1 < refs->pos) fprintf(stderr, ", ");
reflist_referred_from_machine_context(struct reflist *refs)
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
    const char *category;
allrefs_add(struct allrefs *data, VALUE obj)
    struct reflist *refs;
    if (st_lookup(data->references, obj, &r)) {
        refs = (struct reflist *)r;
        reflist_add(refs, data->root_obj);
        refs = reflist_create(data->root_obj);
    struct allrefs *data = (struct allrefs *)ptr;
    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
allrefs_roots_i(VALUE obj, void *ptr)
    struct allrefs *data = (struct allrefs *)ptr;
    data->root_obj = MAKE_ROOTSIG(data->category);
    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    struct allrefs data;
    struct mark_func_data_struct mfd;
    data.objspace = objspace;
    init_mark_stack(&data.mark_stack);
    mfd.mark_func = allrefs_roots_i;
    PUSH_MARK_FUNC_DATA(&mfd);
    objspace->mark_func_data = &mfd;
    gc_mark_roots(objspace, &data.category);
    POP_MARK_FUNC_DATA();
    while (pop_mark_stack(&data.mark_stack, &obj)) {
    free_stack_chunks(&data.mark_stack);
    return data.references;
    struct reflist *refs = (struct reflist *)value;
    reflist_destruct(refs);
objspace_allrefs_destruct(struct st_table *refs)
    st_foreach(refs, objspace_allrefs_destruct_i, 0);
#if RGENGC_CHECK_MODE >= 5
    struct reflist *refs = (struct reflist *)v;
    fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
    fprintf(stderr, "\n");
    struct reflist *refs = (struct reflist *)v;
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        if (reflist_referred_from_machine_context(refs)) {
            fprintf(stderr, " (marked from machine stack).\n");
            objspace->rgengc.error_count++;
            fprintf(stderr, "\n");
#if RGENGC_ESTIMATE_OLDMALLOC
    size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);
        if (checker_name) rb_bug("%s: GC has a problem.", checker_name);
    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
check_generation_i(const VALUE child, void *ptr)
    if (!RVALUE_OLD_P(child)) {
        if (!RVALUE_REMEMBERED(parent) &&
            !RVALUE_REMEMBERED(child) &&
            !RVALUE_UNCOLLECTIBLE(child)) {
            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
check_color_i(const VALUE child, void *ptr)
    if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
                obj_info(parent), obj_info(child));
check_children_i(const VALUE child, void *ptr)
    if (check_rvalue_consistency_force(child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
                obj_info(child), obj_info(data->parent));
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
    for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);
        if (is_live_object(objspace, obj)) {
            if (!gc_object_moved_p(objspace, obj)) {
            if (RVALUE_BLACK_P(obj)) {
        asan_poison_object(obj);
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;
        void *poisoned = asan_poisoned_object_p(val);
        asan_unpoison_object(val, false);
        if (RBASIC(val) == 0) free_objects++;
            has_remembered_shady = TRUE;
            has_remembered_old = TRUE;
            remembered_old_objects++;
        asan_poison_object(val);
                fprintf(stderr, "marking -> %s\n", obj_info(val));
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");
        rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
        rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
    return remembered_old_objects;
    int remembered_old_objects = 0;
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
            asan_unpoison_object(vp, false);
                fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
            asan_poison_object(prev);
        remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
    return remembered_old_objects;
    int remembered_old_objects = 0;
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
    return remembered_old_objects;
/*
 *  call-seq:
 *     GC.verify_internal_consistency -> nil
 *
 *  Verify internal consistency.
 */
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
    gc_verify_internal_consistency(&rb_objspace);
    return Qnil;
}

static void
gc_verify_internal_consistency_(rb_objspace_t *objspace)
{
    struct verify_internal_consistency_struct data = {0};

    data.objspace = objspace;

    /* check relations */
    objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);

    if (data.err_count != 0) {
#if RGENGC_CHECK_MODE >= 5
        objspace->rgengc.error_count = data.err_count;
        gc_marks_check(objspace, NULL, NULL);
        allrefs_dump(objspace);
#endif
        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
    }

    /* check counters */
    if (objspace_live_slots(objspace) != data.live_object_count) {
        fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
                "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
                heap_pages_final_slots, objspace->profile.total_freed_objects);
        rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
               objspace_live_slots(objspace), data.live_object_count);
    }
    /* ... */
    {
        size_t list_count = 0;
        /* ... walk heap_pages_deferred_final, counting entries;
           if the count disagrees with the zombie counters: */
        rb_bug("inconsistent finalizing object count:\n"
               /* ... */
               "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
               list_count);
    }
    /* ... */
}

static void
gc_verify_internal_consistency(rb_objspace_t *objspace)
{
    unsigned int prev_during_gc = during_gc;
    during_gc = FALSE; /* stop GC while verifying */

    gc_verify_internal_consistency_(objspace);

    during_gc = prev_during_gc;
}

static VALUE
gc_verify_transient_heap_internal_consistency(VALUE dmy)
{
    rb_transient_heap_verify();
    return Qnil;
}

/* marks */

static void
gc_marks_start(rb_objspace_t *objspace, int full_mark)
{
    /* ... */
#if GC_ENABLE_INCREMENTAL_MARK
    /* ... */
    if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
                   "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
                   "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
                   objspace->marked_slots, objspace->rincgc.pooled_page_num, objspace->rincgc.step_slots);
#endif
    /* ... */
}

#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
{
    /* for each object `p` that is both marked and WB-unprotected: */
    /* ... */
            gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
            gc_mark_children(objspace, (VALUE)p);
    /* ... */

    gc_mark_stacked_objects_all(objspace);
}
static struct heap_page *
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    struct heap_page *page = heap->pooled_pages;

    if (page) {
        heap->pooled_pages = page->free_next;
        heap_add_freepage(heap, page);
    }

    return page;
}

static int
gc_marks_finish(rb_objspace_t *objspace)
{
#if GC_ENABLE_INCREMENTAL_MARK
    /* finish incremental GC */
    if (is_incremental_marking(objspace)) {
        if (heap_eden->pooled_pages) {
            heap_move_pooled_pages_to_free_pages(heap_eden);
            gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
            return FALSE; /* continue the marking phase */
        }

        if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
            rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
                   mark_stack_size(&objspace->mark_stack));
        }

        gc_mark_roots(objspace, 0);
        /* ... */

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
        }
#endif

        objspace->flags.during_incremental_marking = FALSE;
        /* check children of all marked WB-unprotected objects */
        gc_marks_wb_unprotected_objects(objspace);
    }
#endif /* GC_ENABLE_INCREMENTAL_MARK */

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

#if RGENGC_CHECK_MODE >= 4
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
#endif

    {
        /* decide whether the next GC should be a full mark */
        const int r_cnt = GET_VM()->ractor.cnt;
        const int r_mul = r_cnt > 8 ? 8 : r_cnt; /* up to 8 */
        /* ... compute sweep_slots and the min/max free-slot targets ... */

        if (sweep_slots > max_free_slots) {
            /* the heap can be shrunk */
        }

        if (sweep_slots < min_free_slots) {
            if (!full_marking) {
                /* ... */
                full_marking = TRUE;
                /* ... */
            }
            else {
                gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
                /* ... */
            }
            gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
            heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
            heap_increment(objspace, heap);
        }
    }
    /* ... */
#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_marks_step(rb_objspace_t *objspace, size_t slots)
{
    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        if (gc_marks_finish(objspace)) {
            /* finished: move on to sweeping */
            gc_sweep(objspace);
        }
    }
}
#endif

static void
gc_marks_rest(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_marks_rest\n");

#if GC_ENABLE_INCREMENTAL_MARK
    heap_eden->pooled_pages = NULL;
#endif

    if (is_incremental_marking(objspace)) {
        do {
            while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
        } while (gc_marks_finish(objspace) == FALSE);
    }
    else {
        gc_mark_stacked_objects_all(objspace);
        gc_marks_finish(objspace);
    }

    /* move to sweep */
    gc_sweep(objspace);
}

#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);

    int slots = 0;
    const char *from;

    if (heap->pooled_pages) {
        struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
        slots = page->free_slots;
        from = "pooled-pages";
    }
    else if (heap_increment(objspace, heap)) {
        slots = heap->free_pages->free_slots;
        from = "incremented-pages";
    }

    if (slots > 0) {
        gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
                  slots, from);
        gc_marks_step(objspace, objspace->rincgc.step_slots);
    }
    else {
        gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
                  mark_stack_size(&objspace->mark_stack));
        gc_marks_rest(objspace);
    }

    gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
}
#endif

static void
gc_marks(rb_objspace_t *objspace, int full_mark)
{
    gc_prof_mark_timer_start(objspace);

    /* setup marking */
    gc_marks_start(objspace, full_mark);
    if (!is_incremental_marking(objspace)) {
        gc_marks_rest(objspace);
    }

#if RGENGC_PROFILE > 0
    /* ... record old-object counters ... */
#endif
    gc_prof_mark_timer_stop(objspace);
}
static void
gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
{
    if (level <= RGENGC_DEBUG) {
        char buf[1024];
        FILE *out = stderr;
        va_list args;
        const char *status = " ";

        if (during_gc) {
            status = is_full_marking(objspace) ? "+" : "-";
        }
        /* ... */

        va_start(args, fmt);
        vsnprintf(buf, 1024, fmt, args);
        va_end(args);

        fprintf(out, "%s|", status);
        fputs(buf, out);
    }
}

/* bits operations */

static int
rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
{
    return RVALUE_REMEMBERED(obj);
}

/* return FALSE if already remembered */
static int
rgengc_remember(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
              rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");

    check_rvalue_consistency(obj);

    if (RGENGC_CHECK_MODE) {
        if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
    }

#if RGENGC_PROFILE > 0
    if (!rgengc_remembered(objspace, obj)) {
        if (RVALUE_WB_UNPROTECTED(obj) == 0) {
            objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
        }
    }
#endif /* RGENGC_PROFILE > 0 */

    return rgengc_remembersetbits_set(objspace, obj);
}

static int
rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
{
    int result = rgengc_remembersetbits_get(objspace, obj);
    check_rvalue_consistency(obj);
    return result;
}

static int
rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
    return rgengc_remembered_sweep(objspace, obj);
}
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0
#endif

static void
rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;
#if PROFILE_REMEMBERSET_MARK
    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");

    list_for_each(&heap->pages, page, page_node) {
        if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
#if PROFILE_REMEMBERSET_MARK
            if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
            else if (page->flags.has_remembered_objects) has_old++;
            else if (page->flags.has_uncollectible_shady_objects) has_shady++;
#endif
            /* ... combine the marking and uncollectible/shady bitmaps, then
               walk each set bit and mark the corresponding object `obj`: */
                    gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
                    GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
                    gc_mark_children(objspace, obj);
            /* ... */
        }
#if PROFILE_REMEMBERSET_MARK
        else {
            skip++;
        }
#endif
    }

#if PROFILE_REMEMBERSET_MARK
    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
}
static void
gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
        if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
    }

#if 1
    /* mark `a' and remember it (default behavior) */
    if (!rgengc_remembered(objspace, a)) {
        rgengc_remember(objspace, a);
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
    }
#else
    /* mark `b' and remember it */
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
    if (RVALUE_WB_UNPROTECTED(b)) {
        gc_remember_unprotected(objspace, b);
    }
    else {
        RVALUE_AGE_SET_OLD(objspace, b);
        rgengc_remember(objspace, b);
    }

    gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
#endif

    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
}
#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}

static void
gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));

    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);
            }
        }
        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(b)) {
                gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
                RVALUE_AGE_SET_OLD(objspace, b);

                if (RVALUE_BLACK_P(b)) {
                    gc_grey(objspace, b);
                }
            }
            else {
                gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
                gc_remember_unprotected(objspace, b);
            }
        }
        /* ... */
    }
}
#else
#define gc_writebarrier_incremental(a, b, objspace)
#endif

void
rb_gc_writebarrier(VALUE a, VALUE b)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
            return;
        }
        else {
            gc_writebarrier_generational(a, b, objspace);
        }
    }
    else {
        /* ... slow path: gc_writebarrier_incremental(a, b, objspace) ... */
    }
}
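The barrier above only does real work for the old-to-young case; every other store returns immediately. A toy model of that filter, with hypothetical `old`/`remembered` flags and a fixed-size remembered set standing in for Ruby's bitmaps:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { bool old; bool remembered; } obj_t;

static obj_t *remembered_set[64];
static size_t remembered_len;

/* Record an old object that now references a young one, so a minor GC
 * can treat it as a root instead of scanning the whole old generation. */
static void
write_barrier(obj_t *a, obj_t *b)
{
    if (!a->old || b->old) return;   /* only old -> young needs the barrier */
    if (a->remembered) return;       /* already in the remembered set */
    a->remembered = true;
    remembered_set[remembered_len++] = a;
}

int
main(void)
{
    obj_t old_obj = { .old = true }, young_obj = { .old = false };
    write_barrier(&old_obj, &young_obj);
    write_barrier(&old_obj, &young_obj);                 /* second call is a no-op */
    printf("remembered set size: %zu\n", remembered_len); /* 1 */
    return 0;
}

The cheap early returns are the point of the design: the barrier runs on every reference store, so the common cases must cost only a couple of branches.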
void
rb_gc_writebarrier_unprotect(VALUE obj)
{
    if (RVALUE_WB_UNPROTECTED(obj)) {
        return;
    }
    else {
        rb_objspace_t *objspace = &rb_objspace;

        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
                  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");

        if (RVALUE_OLD_P(obj)) {
            gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
            RVALUE_DEMOTE(objspace, obj);
            gc_mark_set(objspace, obj);
            gc_remember_unprotected(objspace, obj);

#if RGENGC_PROFILE
            objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
        }
        else {
            RVALUE_AGE_RESET(obj);
        }

        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
    }
}

/*
 * remember `obj' if needed.
 */
MJIT_FUNC_EXPORTED void
rb_gc_writebarrier_remember(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);
        }
    }
    else {
        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
        }
    }
}

static st_table *rgengc_unprotect_logging_table;

static int
rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
{
    fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
    return ST_CONTINUE;
}

static void
rgengc_unprotect_logging_exit_func(void)
{
    st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
}

void
rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
{
    VALUE obj = (VALUE)objptr;

    if (rgengc_unprotect_logging_table == 0) {
        rgengc_unprotect_logging_table = st_init_strtable();
        atexit(rgengc_unprotect_logging_exit_func);
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0) {
        /* ... bump the per-callsite counter keyed by "file|line" ... */
    }
}

void
rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
        if (!RVALUE_OLD_P(dest)) {
            MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
            RVALUE_AGE_RESET_RAW(dest);
        }
        else {
            RVALUE_DEMOTE(objspace, dest);
        }
    }

    check_rvalue_consistency(dest);
}

/* RGENGC analysis information */

VALUE
rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
{
    return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
}
size_t
rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
{
    size_t n = 0;
    static ID ID_marked;
    static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;

    if (!ID_marked) {
#define I(s) ID_##s = rb_intern(#s);
        I(marked);
        I(wb_protected);
        I(old);
        I(marking);
        I(uncollectible);
        I(pinned);
#undef I
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
    if (RVALUE_OLD_P(obj) && n<max)               flags[n++] = ID_old;
    if (RVALUE_UNCOLLECTIBLE(obj) && n<max)       flags[n++] = ID_uncollectible;
    /* ... marking / marked / pinned bitmap checks ... */
    return n;
}

/* GC */

void
rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
{
    struct heap_page *page = newobj_cache->using_page;
    RVALUE *freelist = newobj_cache->freelist;

    if (page && freelist) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        if (page->freelist) {
            RVALUE *p = page->freelist;
            asan_unpoison_object((VALUE)p, false);
            while (p->as.free.next) {
                RVALUE *prev = p;
                p = p->as.free.next;
                asan_poison_object((VALUE)prev);
                asan_unpoison_object((VALUE)p, false);
            }
            p->as.free.next = freelist;
            asan_poison_object((VALUE)p);
        }
        else {
            page->freelist = freelist;
        }
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    }
    /* ... */
}
void
rb_gc_force_recycle(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    int is_old = RVALUE_OLD_P(obj);

    gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));

    if (is_old) {
        if (RVALUE_MARKED(obj)) {
            objspace->rgengc.old_objects--;
        }
    }
    /* ... clear the uncollectible / WB-unprotected bits ... */

#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
        if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
            invalidate_mark_stack(&objspace->mark_stack, obj);
            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
        }
        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
    }
    else {
#endif
        /* ... */
#if GC_ENABLE_INCREMENTAL_MARK
    }
#endif
    /* ... return the slot to its page's freelist ... */
}

#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
#endif

void
rb_gc_register_mark_object(VALUE obj)
{
    VALUE ary_ary = GET_VM()->mark_object_ary;
    /* ... push obj into the current bucket, starting a new
       MARK_OBJECT_ARY_BUCKET_SIZE-element bucket when full ... */
}

void
rb_gc_unregister_address(VALUE *addr)
{
    struct gc_list *tmp = global_list;
    /* ... walk global_list and unlink the entry where: */
    if (tmp->varptr == addr) {
        /* ... */
    }
    /* ... */
}

#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))

static void
heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
{
    if (!heap->freelist && !heap->free_pages) {
        if (!heap_increment(objspace, heap)) {
            heap_set_increment(objspace, 1);
            heap_increment(objspace, heap);
        }
    }
}
static void
gc_reset_malloc_info(rb_objspace_t *objspace)
{
    gc_prof_set_malloc_info(objspace);
    /* ... recompute malloc_limit from the malloc_increase counter ... */

    /* reset oldmalloc info */
#if RGENGC_ESTIMATE_OLDMALLOC
    if (!full_mark) {
        if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
            objspace->rgengc.oldmalloc_increase_limit =
                (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
            /* ... clamp to gc_params.oldmalloc_limit_max ... */
        }

        if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
                       rb_gc_count(),
                       objspace->rgengc.need_major_gc,
                       objspace->rgengc.oldmalloc_increase,
                       objspace->rgengc.oldmalloc_increase_limit,
                       gc_params.oldmalloc_limit_max);
    }
    else {
        /* major GC */
        objspace->rgengc.oldmalloc_increase = 0;

        if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
            objspace->rgengc.oldmalloc_increase_limit =
                (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
            /* ... clamp to gc_params.oldmalloc_limit_min ... */
        }
    }
#endif
}

static int
garbage_collect(rb_objspace_t *objspace, unsigned int reason)
{
    int ret;

#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time();
#endif

    gc_rest(objspace);

#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif

    ret = gc_start(objspace, reason);

    return ret;
}

static int
gc_start(rb_objspace_t *objspace, unsigned int reason)
{
    unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
    unsigned int lock_lev;
    /* ... */

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    if (ruby_gc_stressful) {
        /* ... stress mode forces frequent full GCs ... */
        do_full_mark = TRUE;
    }
    else {
        if (objspace->rgengc.need_major_gc) {
            reason |= objspace->rgengc.need_major_gc;
            do_full_mark = TRUE;
        }
        else if (RGENGC_FORCE_MAJOR_GC && (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit)) {
            reason |= GPR_FLAG_MAJOR_BY_OLDGEN;
            do_full_mark = TRUE;
        }
        /* ... */
    }

#if GC_ENABLE_INCREMENTAL_MARK
    if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || (reason & GPR_FLAG_IMMEDIATE_MARK)) {
        objspace->flags.during_incremental_marking = FALSE;
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }
#endif
    /* ... */

    gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
              reason, do_full_mark, !is_incremental_marking(objspace), !is_lazy_sweeping(heap_eden));

#if USE_DEBUG_COUNTER
    /* ... bump debug counters ... */
#endif
#if RGENGC_ESTIMATE_OLDMALLOC
    /* ... */
#endif

    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace);
    /* ... */

    gc_prof_timer_start(objspace);
    {
        gc_marks(objspace, do_full_mark);
    }
    gc_prof_timer_stop(objspace);
    /* ... */
}
static void
gc_rest(rb_objspace_t *objspace)
{
    int marking = is_incremental_marking(objspace);
    int sweeping = is_lazy_sweeping(heap_eden);

    if (marking || sweeping) {
        unsigned int lock_lev;
        gc_enter(objspace, gc_enter_event_rest, &lock_lev);

        if (is_incremental_marking(objspace)) {
            gc_marks_rest(objspace);
        }
        if (is_lazy_sweeping(heap_eden)) {
            gc_sweep_rest(objspace);
        }

        gc_exit(objspace, gc_enter_event_rest, &lock_lev);
    }
}

static void
gc_current_status_fill(rb_objspace_t *objspace, char *buff)
{
    /* ... fill a short status string: marking/sweeping/none ... */
#if GC_ENABLE_INCREMENTAL_MARK
    /* ... plus an "I" when marking incrementally ... */
#endif
}

static const char *
gc_current_status(rb_objspace_t *objspace)
{
    static char buff[0x10];
    gc_current_status_fill(objspace, buff);
    return buff;
}

#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];

static void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_count++;
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);
#if 1
        /* [last mutator time] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;
#else
        /* [enter tick] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
#endif
    }
}
#else /* PRINT_ENTER_EXIT_TICK */
static void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    /* null */
}
#endif /* PRINT_ENTER_EXIT_TICK */
static void
gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    gc_enter_count(event);
    /* ... */
    during_gc = TRUE;

    mjit_gc_start_hook();

    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));
    /* ... */
}

static void
gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    /* ... */
    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    during_gc = FALSE;

    mjit_gc_exit_hook();
    /* ... */
}

static void *
gc_with_gvl(void *ptr)
{
    struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}

static int
garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
{
    if (dont_gc_val()) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace, reason);
    }
    else {
        if (ruby_native_thread_p()) {
            struct objspace_and_reason oar;
            oar.objspace = objspace;
            oar.reason = reason;
            return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}

static VALUE
gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_METHOD);

    /* For now, compact implies full mark / sweep, so ignore other flags */
    if (RTEST(compact)) {
        reason |= GPR_FLAG_COMPACT;
    }
    else {
        if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
        if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
static VALUE
gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free)
{
    int marked, wb_unprotected, uncollectible, marking;
    RVALUE *dest = (RVALUE *)free;
    RVALUE *src = (RVALUE *)scan;

    /* Save off bits for the current object. */
    marked = rb_objspace_marked_object_p((VALUE)src);
    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    marking = RVALUE_MARKING((VALUE)src);

    /* ... clear the bits in preparation for the T_MOVED stub ... */

    if (FL_TEST((VALUE)src, FL_SEEN_OBJ_ID)) {
        /* If the source object's object_id has been seen, update the
           obj-to-id / id-to-obj mappings to point at the destination. */
        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
        /* ... */
    }

    /* Move the object */
    memcpy(dest, src, sizeof(RVALUE));
    memset(src, 0, sizeof(RVALUE));

    /* Set bits for the object in its new location */
    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }
    /* ... likewise for the marked / marking bits ... */

    /* Assign the forwarding address */
    src->as.moved.flags = T_MOVED;
    src->as.moved.dummy = Qundef;
    src->as.moved.destination = (VALUE)dest;

    return (VALUE)src;
}
static int
compare_free_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;
}

static void
gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
{
    size_t total_pages = heap_eden->total_pages;
    size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
    struct heap_page *page = 0, **page_list = malloc(size);
    size_t i = 0;

    list_for_each(&heap_eden->pages, page, page_node) {
        page_list[i++] = page;
    }
    assert((size_t)i == total_pages);

    /* Sort the heap so "filled pages" are first. heap_add_page adds to the
     * head of the list, so empty pages will end up at the start of the heap */
    ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);

    /* Reset the eden heap */
    list_head_init(&heap_eden->pages);

    for (i = 0; i < total_pages; i++) {
        list_add(&heap_eden->pages, &page_list[i]->page_node);
        if (page_list[i]->free_slots != 0) {
            heap_add_freepage(heap_eden, page_list[i]);
        }
    }

    free(page_list);
}
static void
gc_ref_update_array(rb_objspace_t *objspace, VALUE v)
{
    long i, len;

    if (FL_TEST(v, ELTS_SHARED))
        return;

    len = RARRAY_LEN(v);
    if (len > 0) {
        VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
        for (i = 0; i < len; i++) {
            UPDATE_IF_MOVED(objspace, ptr[i]);
        }
    }
}

static void
gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
{
    VALUE *ptr = ROBJECT_IVPTR(v);

    if (ptr) {
        uint32_t i, len = ROBJECT_NUMIV(v);
        for (i = 0; i < len; i++) {
            UPDATE_IF_MOVED(objspace, ptr[i]);
        }
    }
}

static int
hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*key)) {
        *key = rb_gc_location((VALUE)*key);
    }
    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)key)) {
        return ST_REPLACE;
    }
    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CONTINUE;
}

static int
hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CONTINUE;
}
void
rb_gc_update_tbl_refs(st_table *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_update_table_refs(objspace, ptr);
}

static void
gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
{
    rb_method_definition_t *def = me->def;

    /* ... update defined_class / owner ... */

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
            }
            /* ... */
            break;
          /* ... other method types ... */
          default:
            break;
        }
    }
}

static void
gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
{
    long i;
    for (i = 0; i < n; i++) {
        UPDATE_IF_MOVED(objspace, values[i]);
    }
}

static void
gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            rb_env_t *env = (rb_env_t *)obj;
            /* ... */
            gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
        }
        break;
      case imemo_ment:
        gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        break;
      case imemo_callcache:
        {
            const struct rb_callcache *cc = (const struct rb_callcache *)obj;
            if (!is_live_object(objspace, cc->klass)) {
                /* ... invalidate the call cache ... */
            }
            if (!is_live_object(objspace, (VALUE)cc->cme_)) {
                /* ... invalidate the call cache ... */
            }
        }
        break;
      /* ... */
      default:
        break;
    }
}

VALUE
rb_gc_location(VALUE value)
{
    VALUE destination;

    if (!SPECIAL_CONST_P(value)) {
        void *poisoned = asan_poisoned_object_p(value);
        asan_unpoison_object(value, false);

        if (BUILTIN_TYPE(value) == T_MOVED) {
            destination = (VALUE)RMOVED(value)->destination;
        }
        else {
            destination = value;
        }

        /* Re-poison the slot if it was poisoned before */
        if (poisoned) {
            asan_poison_object(value);
        }
    }
    else {
        destination = value;
    }

    return destination;
}

/* ... replace a moved table value in place: */
    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = (st_data_t)rb_gc_location((VALUE)*value);
    }
/* ... */

static enum rb_id_table_iterator_result
update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;

    if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
        ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
    }

    for (int i=0; i<ccs->len; i++) {
        /* ... relocate each cached ci/cc entry ... */
    }

    return ID_TABLE_CONTINUE;
}

static int
update_const_table(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, ce->value)) {
        ce->value = rb_gc_location(ce->value);
    }
    if (gc_object_moved_p(objspace, ce->file)) {
        ce->file = rb_gc_location(ce->file);
    }

    return ST_CONTINUE;
}

static void
update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
{
    while (entry) {
        UPDATE_IF_MOVED(objspace, entry->klass);
        entry = entry->next;
    }
}

static void
update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
{
    UPDATE_IF_MOVED(objspace, ext->origin_);
    UPDATE_IF_MOVED(objspace, ext->refined_class);
    update_subclass_entries(objspace, ext->subclasses);
}
static void
gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
{
    RVALUE *any = RANY(obj);

    gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        /* ... update superclass, iv/const/cc tables, class ext ... */
        update_cc_tbl(objspace, obj);
        break;

      case T_ICLASS:
        /* ... */
        update_cc_tbl(objspace, obj);
        break;

      case T_IMEMO:
        gc_ref_update_imemo(objspace, obj);
        return;

      case T_ARRAY:
        gc_ref_update_array(objspace, obj);
        break;

      case T_HASH:
        gc_ref_update_hash(objspace, obj);
        /* ... */
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
        }
        break;

      case T_DATA:
        /* Call the compaction callback, if it exists */
        if (RTYPEDDATA_P(obj)) {
            void *const ptr = DATA_PTR(obj);
            if (ptr) {
                RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
                if (compact_func) (*compact_func)(ptr);
            }
        }
        break;

      case T_OBJECT:
        gc_ref_update_object(objspace, obj);
        break;

      case T_MATCH:
        if (any->as.match.str) {
            UPDATE_IF_MOVED(objspace, any->as.match.str);
        }
        /* ... */
        break;

      /* ... remaining types: for embedded VALUE arrays,
         for (i = 0; i < len; i++) UPDATE_IF_MOVED(objspace, ptr[i]); ... */

      default:
        break;
    }

    /* ... */
    gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
}

static int
gc_ref_update(void *vstart, void *vend, size_t stride, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    VALUE v = (VALUE)vstart;
    struct heap_page *page = GET_HEAP_PAGE(v);

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    /* ... reset the page's freelist and remembered-object flags ... */

    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        switch (BUILTIN_TYPE(v)) {
          case T_NONE:
          case T_MOVED:
          case T_ZOMBIE:
            break;
          default:
            if (RVALUE_WB_UNPROTECTED(v)) {
                page->flags.has_uncollectible_shady_objects = TRUE;
            }
            /* ... */
            if (is_incremental_marking(objspace)) {
                if (RVALUE_MARKED(v)) {
                    gc_update_object_references(objspace, v);
                }
            }
            else {
                gc_update_object_references(objspace, v);
            }
        }

        if (poisoned) {
            asan_poison_object(v);
        }
    }

    return 0;
}
#define global_symbols ruby_global_symbols

static void
gc_update_references(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    short should_set_mark_bits = 1;
    struct heap_page *page = NULL;

    list_for_each(&heap_eden->pages, page, page_node) {
        uintptr_t start = (uintptr_t)page->start;
        uintptr_t end = start + (page->total_slots * sizeof(RVALUE));
        gc_ref_update((void *)start, (void *)end, sizeof(RVALUE), objspace);
        if (page == heap_eden->sweeping_page) {
            should_set_mark_bits = 0;
        }
        if (should_set_mark_bits) {
            gc_setup_mark_bits(page);
        }
    }
    rb_vm_update_references(vm);
    /* ... update global tables, symbols, finalizers ... */
}

static VALUE
gc_compact_stats(rb_execution_context_t *ec, VALUE self)
{
    size_t i;
    rb_objspace_t *objspace = &rb_objspace;
    VALUE h = rb_hash_new();
    VALUE considered = rb_hash_new();
    VALUE moved = rb_hash_new();

    for (i=0; i<T_MASK; i++) {
        /* ... copy the per-type considered/moved counters into the hashes ... */
    }

    rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
    rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);

    return h;
}

static void
root_obj_check_moved_i(const char *category, VALUE obj, void *data)
{
    if (gc_object_moved_p(&rb_objspace, obj)) {
        rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
    }
}

static void
reachable_object_check_moved_i(VALUE ref, void *data)
{
    VALUE parent = (VALUE)data;
    if (gc_object_moved_p(&rb_objspace, ref)) {
        rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
    }
}

static int
heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
{
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(&rb_objspace, v)) {
            /* Moved object still on the heap, something may have a reference. */
        }
        else {
            void *poisoned = asan_poisoned_object_p(v);
            asan_unpoison_object(v, false);

            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
              case T_ZOMBIE:
                break;
              default:
                if (!rb_objspace_garbage_object_p(v)) {
                    rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
                }
            }

            if (poisoned) {
                asan_poison_object(v);
            }
        }
    }

    return 0;
}
static VALUE
gc_compact(rb_execution_context_t *ec, VALUE self)
{
    /* Run a GC with compaction enabled, then report the statistics. */
    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);

    return gc_compact_stats(ec, self);
}

static VALUE
gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
{
    rb_objspace_t *objspace = &rb_objspace;

    /* Clear the heap. */
    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);

    if (RTEST(double_heap)) {
        heap_add_pages(objspace, heap_eden, heap_allocated_pages);
    }

    if (RTEST(toward_empty)) {
        gc_sort_heap_by_empty_slots(objspace);
    }

    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);

    objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
    objspace_each_objects(objspace, heap_check_moved_i, NULL);

    return gc_compact_stats(ec, self);
}

void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = GPR_DEFAULT_REASON;
    garbage_collect(objspace, reason);
}

#if RGENGC_PROFILE >= 2

static const char *type_name(int type, VALUE obj);

static void
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    VALUE result = rb_hash_new_with_size(T_MASK);
    int i;
    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);
        rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
    }
    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
}
#endif
static VALUE
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    VALUE hash = Qnil, key = Qnil;
    VALUE major_by;
    unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    if (sym_major_by == Qnil) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(major_by);
        /* ... one S(...) per symbol above ... */
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc);
#endif
        /* ... */
#undef S
    }

#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

    major_by =
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
        Qnil;
    SET(major_by, major_by);
    /* ... gc_by, immediate_sweep, have_finalizer ... */

    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
    }
#undef SET

    /* ... */
    return hash;
}

VALUE
rb_gc_latest_gc_info(VALUE key)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_info_decode(objspace, key, 0);
}

static VALUE
gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
{
    rb_objspace_t *objspace = &rb_objspace;
    /* ... validate arg (nil, hash, or symbol) ... */
    return gc_info_decode(objspace, arg, 0);
}

enum gc_stat_sym {
    /* ... counters listed in setup_gc_stat_symbols() below ... */
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,
#endif
#if RGENGC_PROFILE
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
#endif
    gc_stat_sym_last
};
static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(heap_allocated_pages);
        S(heap_sorted_length);
        S(heap_allocatable_pages);
        S(heap_available_slots);
        S(heap_live_slots);
        S(heap_free_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(heap_eden_pages);
        S(heap_tomb_pages);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
        S(minor_gc_count);
        S(major_gc_count);
        S(compact_count);
        S(read_barrier_faults);
        S(total_moved_objects);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
#endif
#if RGENGC_PROFILE
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef S
    }
}
static size_t
gc_stat_internal(VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->profile.count);
    /* ... implementation-dependent counters ... */
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    /* ... pages, totals, malloc limits ... */
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif

#if RGENGC_PROFILE
    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef SET

    /* ... raise on an unknown symbol key ... */

#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
    if (hash != Qnil) {
        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
    }
#endif

    return 0;
}

static VALUE
gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) /* arg is (nil || hash || symbol) */
{
    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (SYMBOL_P(arg)) {
        size_t value = gc_stat_internal(arg);
        return SIZET2NUM(value);
    }
    else if (RB_TYPE_P(arg, T_HASH)) {
        /* ok */
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    gc_stat_internal(arg);
    return arg;
}

size_t
rb_gc_stat(VALUE key)
{
    if (SYMBOL_P(key)) {
        size_t value = gc_stat_internal(key);
        return value;
    }
    else {
        gc_stat_internal(key);
        return 0;
    }
}

static VALUE
gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_stress_set(objspace, flag);
    return flag;
}

VALUE
rb_gc_disable_no_rest(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_disable_no_rest(objspace);
}

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_rest(objspace);
    return gc_disable_no_rest(objspace);
}
/* ... */
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
    /* ... */
    pagesize = (int)sysconf(_SC_PAGE_SIZE);
    /* ... */
#endif
/* ... */

static int
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    const char *ptr = getenv(name);
    ssize_t val;

    if (ptr != NULL && *ptr) {
        size_t unit = 0;
        char *end;
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
        switch (*end) {
          case 'k': case 'K': unit = 1024;           ++end; break;
          case 'm': case 'M': unit = 1024*1024;      ++end; break;
          case 'g': case 'G': unit = 1024*1024*1024; ++end; break;
        }
        while (*end && isspace((unsigned char)*end)) end++;
        if (*end) {
            /* ... reject trailing junk ... */
            return 0;
        }
        if (unit > 0) {
            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                /* ... reject values that would overflow when scaled ... */
                return 0;
            }
            val *= unit;
        }
        if (val > 0 && (size_t)val > lower_bound) {
            /* ... report the override when $VERBOSE ... */
            *default_value = (size_t)val;
            return 1;
        }
        else {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);
            }
            return 0;
        }
    }
    return 0;
}
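get_envparam_size accepts an optional k/m/g unit suffix and rejects anything that would overflow or fall at or below the lower bound. A self-contained sketch of the same parse (getenv + strtoll; the MY_GC_MALLOC_LIMIT name is just an example, not a real Ruby variable):

#include <ctype.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse `name` from the environment as a byte count with an optional
 * k/m/g suffix; leave *out unchanged unless the value is usable. */
static int
get_size_env(const char *name, long long *out, long long lower_bound)
{
    const char *ptr = getenv(name);
    char *end;
    long long unit = 1, val;

    if (ptr == NULL || !*ptr) return 0;

    val = strtoll(ptr, &end, 0);
    switch (*end) {
      case 'k': case 'K': unit = 1024LL;               end++; break;
      case 'm': case 'M': unit = 1024LL * 1024;        end++; break;
      case 'g': case 'G': unit = 1024LL * 1024 * 1024; end++; break;
    }
    while (*end && isspace((unsigned char)*end)) end++;
    if (*end) return 0;                        /* trailing junk */
    if (val > LLONG_MAX / unit) return 0;      /* would overflow when scaled */

    val *= unit;
    if (val <= lower_bound) return 0;
    *out = val;
    return 1;
}

int
main(void)
{
    long long limit = 16 * 1024 * 1024;        /* default */
    if (get_size_env("MY_GC_MALLOC_LIMIT", &limit, 0))
        printf("overridden: %lld bytes\n", limit);
    else
        printf("default: %lld bytes\n", limit);
    return 0;
}

Keeping the default untouched on every rejection path is what makes the parse safe to run unconditionally at startup.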
static int
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    const char *ptr = getenv(name);
    double val;

    if (ptr != NULL && *ptr) {
        char *end;
        val = strtod(ptr, &end);

        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }

        if (accept_zero && val == 0.0) {
            goto accept;
        }
        else if (val <= lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);
            }
        }
        else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
                 val > upper_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);
            }
        }
        else {
          accept:
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
            *default_value = val;
            return 1;
        }
    }
    return 0;
}
static void
gc_set_initial_pages(void)
{
    size_t min_pages;
    rb_objspace_t *objspace = &rb_objspace;

    min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
    if (min_pages > heap_eden->total_pages) {
        heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
    }
}

void
ruby_gc_set_params(void)
{
    /* RUBY_GC_HEAP_FREE_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
        /* ok */
    }

    /* RUBY_GC_HEAP_INIT_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
        gc_set_initial_pages();
    }

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    /* ... RUBY_GC_HEAP_FREE_SLOTS_*_RATIO, RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR ... */

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    /* ... RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR ... */

#if RGENGC_ESTIMATE_OLDMALLOC
    /* ... the RUBY_GC_OLDMALLOC_LIMIT family ... */
#endif
}
static void
reachable_objects_from_callback(VALUE obj)
{
    rb_ractor_t *cr = GET_RACTOR();
    cr->mfd->mark_func(obj, cr->mfd->data);
}

void
rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");

    if (is_markable_object(objspace, obj)) {
        rb_ractor_t *cr = GET_RACTOR();
        struct gc_mark_func_data_struct mfd = {
            .mark_func = func,
            .data = data,
        }, *prev_mfd = cr->mfd;

        cr->mfd = &mfd;
        gc_mark_children(objspace, obj);
        cr->mfd = prev_mfd;
    }
}

struct root_objects_data {
    const char *category;
    void (*func)(const char *category, VALUE, void *);
    void *data;
};

static void
root_objects_from(VALUE obj, void *ptr)
{
    const struct root_objects_data *data = (struct root_objects_data *)ptr;
    (*data->func)(data->category, obj, data->data);
}

void
rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace_reachable_objects_from_root(objspace, func, passing_data);
}

static void
objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
{
    if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");

    rb_ractor_t *cr = GET_RACTOR();
    struct root_objects_data data = {
        .func = func,
        .data = passing_data,
    };
    struct gc_mark_func_data_struct mfd = {
        .mark_func = root_objects_from,
        .data = &data,
    }, *prev_mfd = cr->mfd;

    cr->mfd = &mfd;
    gc_mark_roots(objspace, &data.category);
    cr->mfd = prev_mfd;
}
/*
  ------------------------ Extended allocator ------------------------
*/

struct gc_raise_tag {
    VALUE exc;
    const char *fmt;
    va_list *ap;
};

static void *
gc_vraise(void *ptr)
{
    struct gc_raise_tag *argv = ptr;
    rb_vraise(argv->exc, argv->fmt, *argv->ap);
    UNREACHABLE_RETURN(NULL);
}

static void
gc_raise(VALUE exc, const char *fmt, ...)
{
    /* ... raise through the VM when the GVL is available; otherwise: */
    fprintf(stderr, "%s", "[FATAL] ");
    vfprintf(stderr, fmt, ap);
    /* ... */
}

NORETURN(static void negative_size_allocation_error(const char *));
static void
negative_size_allocation_error(const char *msg)
{
    gc_raise(rb_eNoMemError, "%s", msg);
}

static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}

NORETURN(static void ruby_memerror(void));
static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
        }
    }
    exit(EXIT_FAILURE);
}

void
rb_memerror(void)
{
    /* ... */
    if (0) fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
    /* ... */
    if (!exc) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    /* ... raise the preallocated NoMemoryError ... */
}
static void *
rb_aligned_malloc(size_t alignment, size_t size)
{
    void *res;

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&res, alignment, size) == 0) {
        return res;
    }
    else {
        return NULL;
    }
#elif defined(HAVE_MEMALIGN)
    res = memalign(alignment, size);
#else
    char *aligned;
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;
#endif

    /* alignment must be a power of 2 */
    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);
    return res;
}
static void
rb_aligned_free(void *ptr)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
#else
    free(((void**)ptr)[-1]);
#endif
}

static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}

static void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
    if (ruby_gc_stressful && ruby_native_thread_p()) {
        unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                               GPR_FLAG_STRESS | GPR_FLAG_MALLOC);

        if (gc_stress_full_mark_after_malloc_p()) {
            reason |= GPR_FLAG_FULL_MARK;
        }
        garbage_collect_with_gvl(objspace, reason);
    }
}

static bool
objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (new_size > old_size) {
        ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
#endif
    }
    else {
        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
#endif
    }

    /* ... trigger GC when malloc_increase exceeds malloc_limit ... */

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {
        ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
    }
    else {
        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
#endif
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
    }

    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   mem,
                   type == MEMOP_TYPE_MALLOC  ? "malloc" :
                   type == MEMOP_TYPE_FREE    ? "free  " :
                   type == MEMOP_TYPE_REALLOC ? "realloc": "error",
                   new_size, old_size);

    switch (type) {
      case MEMOP_TYPE_MALLOC:
        ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
        break;
      case MEMOP_TYPE_FREE:
        {
            size_t allocations = objspace->malloc_params.allocations;
            if (allocations > 0) {
                atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
            }
#if MALLOC_ALLOCATED_SIZE_CHECK
            /* ... */
#endif
        }
        break;
      case MEMOP_TYPE_REALLOC: /* ignore */ break;
    }
#endif
    return true;
}

struct malloc_obj_info { /* 4 words */
    size_t size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    size_t gen;
    const char *file;
    size_t line;
#endif
};

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
#endif

static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif

    return size;
}

static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = objspace->profile.count;
        info->file = ruby_malloc_info_file;
        info->line = info->file ? ruby_malloc_info_line : 0;
#endif
        mem = info + 1;
    }
#endif

    return mem;
}
#if defined(__GNUC__) && RUBY_DEBUG
#define RB_BUG_INSTEAD_OF_RB_MEMERROR
#endif

#ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
#define TRY_WITH_GC(siz, expr) do {                          \
        const gc_profile_record_flag gpr =                   \
            GPR_FLAG_FULL_MARK           |                   \
            GPR_FLAG_IMMEDIATE_MARK      |                   \
            GPR_FLAG_IMMEDIATE_SWEEP     |                   \
            GPR_FLAG_MALLOC;                                 \
        objspace_malloc_gc_stress(objspace);                 \
                                                             \
        if (LIKELY((expr))) {                                \
            /* Success on 1st try */                         \
        }                                                    \
        else if (!garbage_collect_with_gvl(objspace, gpr)) { \
            /* @shyouhei thinks this doesn't happen */       \
            rb_bug("TRY_WITH_GC: could not GC");             \
        }                                                    \
        else if ((expr)) {                                   \
            /* Success on 2nd try */                         \
        }                                                    \
        else {                                               \
            rb_bug("TRY_WITH_GC: could not allocate:"        \
                   "%"PRIdSIZE" bytes for %s",               \
                   siz, # expr);                             \
        }                                                    \
    } while (0)
#else
#define TRY_WITH_GC(siz, alloc) do { \
        objspace_malloc_gc_stress(objspace); \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                GPR_FLAG_MALLOC) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
#endif

static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = malloc(size));
    return objspace_malloc_fixup(objspace, mem, size);
}
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    return size_mul_or_raise(count, elsize, rb_eArgError);
}

static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    void *mem;

    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    /*
     * The behavior of realloc(ptr, 0) is implementation defined, so we
     * don't rely on it for portability.
     */
    if (new_size == 0) {
        if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
            /* ... hand back the fresh zero-size block ... */
            objspace_xfree(objspace, ptr, old_size);
            return mem;
        }
        /* ... */
    }

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
        new_size += sizeof(struct malloc_obj_info);
        ptr = info;
        old_size = info->size;
    }
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);
    TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = new_size;
        mem = info + 1;
    }
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    return mem;
}
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS

#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;

static int
mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
{
    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);

    return ST_CONTINUE;
}

void
rb_malloc_info_show_results(void)
{
    int i;

    fprintf(stderr, "* malloc_info gen statistics\n");
    for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
        if (i == MALLOC_INFO_GEN_SIZE-1) {
            fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
        else {
            fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
    }

    fprintf(stderr, "* malloc_info size statistics\n");
    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
        int s = 16 << i;
        fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
    }
    fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);

    if (malloc_info_file_table) {
        fprintf(stderr, "* malloc_info file statistics\n");
        st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
    }
}
#else
void
rb_malloc_info_show_results(void)
{
}
#endif
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
    if (!ptr) {
        /*
         * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since C89.
         */
        return;
    }
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    {
        int gen = (int)(objspace->profile.count - info->gen);
        int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
        int i;

        malloc_info_gen_cnt[gen_index]++;
        malloc_info_gen_size[gen_index] += info->size;

        for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
            size_t s = 16 << i;
            if (info->size <= s) {
                malloc_info_size[i]++;
                goto found;
            }
        }
        malloc_info_size[i]++;
      found:;

        if (info->file) {
            st_data_t d;
            size_t *data;

            if (malloc_info_file_table == NULL) {
                malloc_info_file_table = st_init_numtable_with_size(1024);
            }
            if (st_lookup(malloc_info_file_table, (st_data_t)info->file, &d)) {
                /* hit */
                data = (size_t *)d;
            }
            else {
                data = malloc(xmalloc2_size(2, sizeof(size_t)));
                if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
                data[0] = data[1] = 0;
                st_insert(malloc_info_file_table, (st_data_t)info->file, (st_data_t)data);
            }
            data[0]++;
            data[1] += info->size;
        }
        if (0 && gen >= 2) { /* verbose output */
            if (info->file) {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
                        info->size, gen, info->file, info->line);
            }
            else {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
                        info->size, gen);
            }
        }
    }
#endif
#endif
    old_size = objspace_malloc_size(objspace, ptr, old_size);

    free(ptr);
    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
}
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }
    return ruby_xmalloc0(size);
}

static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}

#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (x) {
        objspace_xfree(&rb_objspace, x, size);
    }
}

void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
#if CALC_EXACT_MALLOC_SIZE
    if (!mem) {
        return NULL;
    }
    else {
        /* set 0 for consistency of allocated_size/allocations */
        struct malloc_obj_info *info = mem;
        info->size = 0;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = 0;
        info->file = NULL;
        info->line = 0;
#endif
        mem = info + 1;
    }
#endif
    return mem;
}

void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
#endif
    free(ptr);
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    /* ... */
    rb_imemo_tmpbuf_t *imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
    /* ... attach the malloc'ed buffer to the imemo and publish it in *store ... */
}

#if MALLOC_ALLOCATED_SIZE
/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 */
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}

/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 */
static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
#endif

void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
/*
  ------------------------------ WeakMap ------------------------------
*/

#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0

#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
static int
wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (!is_live_object(objspace, obj)) return ST_DELETE;
    return ST_CONTINUE;
}
#endif

static void
wmap_compact(void *ptr)
{
    struct weakmap *w = ptr;
    if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
    if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
    w->final = rb_gc_location(w->final);
}

static void
wmap_mark(void *ptr)
{
    struct weakmap *w = ptr;
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
    if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
#endif
    rb_gc_mark_movable(w->final);
}

static void
wmap_free(void *ptr)
{
    struct weakmap *w = ptr;
    /* ... free the reference arrays and both tables ... */
}

static int
wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
    return ST_CONTINUE;
}

static size_t
wmap_memsize(const void *ptr)
{
    /* ... sizeof(struct weakmap) + both tables + the reference arrays ... */
}

static VALUE
wmap_allocate(VALUE klass)
{
    struct weakmap *w;
    VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
    /* ... init obj2wmap/wmap2obj identity tables and the finalizer proc ... */
    return obj;
}

static int
wmap_live_p(rb_objspace_t *objspace, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) return TRUE;
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        void *poisoned = asan_unpoison_object_temporary(obj);

        enum ruby_value_type t = BUILTIN_TYPE(obj);
        int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
                   is_live_object(objspace, obj));

        if (poisoned) {
            asan_poison_object(obj);
        }

        return ret;
    }
    return TRUE;
}

static int
wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    VALUE wmap, *ptr, size, i, j;
    if (!existing) return ST_STOP;
    wmap = (VALUE)arg, ptr = (VALUE *)*value;
    for (i = j = 1, size = ptr[0]; i <= size; ++i) {
        if (ptr[i] != wmap) {
            ptr[j++] = ptr[i];
        }
    }
    /* ... shrink the reference array, or delete the entry when empty ... */
}

/* :nodoc: */
static VALUE
wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
{
    st_data_t orig, wmap, data;
    VALUE obj, *rids, i, size;
    struct weakmap *w;

    /* ... resolve objid back to the dead object ... */

    if (!st_delete(w->wmap2obj, &orig, &data)) {
        rb_bug("wmap_finalize: objid is not found.");
    }

    /* obj -> [ref_1, ref_2, ...] */
    rids = (VALUE *)data;
    size = *rids++;
    for (i = 0; i < size; ++i) {
        /* ... drop each back-reference from obj2wmap ... */
    }
    /* ... */
    return self;
}

static VALUE
wmap_inspect(VALUE self)
{
    /* ... "#<ObjectSpace::WeakMap key => value, ...>" ... */
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each(VALUE self)
{
    /* ... for each pair, yields only while the value is alive: */
    if (wmap_live_p(objspace, obj)) {
        rb_yield_values(2, (VALUE)key, obj);
    }
    /* ... */
}

/* Iterates over keys in a weakly referenced object */
static VALUE
wmap_each_key(VALUE self)
{
    /* ... same liveness filter, yielding keys ... */
    if (wmap_live_p(objspace, obj)) {
        rb_yield((VALUE)key);
    }
    /* ... */
}

/* Iterates over values in a weakly referenced object */
static VALUE
wmap_each_value(VALUE self)
{
    /* ... same liveness filter, yielding values ... */
}

/* Returns an array of keys */
static VALUE
wmap_keys(VALUE self)
{
    /* ... */
}

/* Returns an array of values */
static VALUE
wmap_values(VALUE self)
{
    /* ... */
}

/* Creates a weak reference from the given key to the given value */
static VALUE
wmap_aset(VALUE self, VALUE key, VALUE value)
{
    VALUE *ptr;
    /* ... */
    ptr = ruby_xmalloc0(2 * sizeof(VALUE));
    /* ... record the back-reference and install the finalizer: */
    define_final0(value, w->final);
    /* ... */
    return nonspecial_obj_id(value);
}

/* Retrieves a weakly referenced object with the given key */
static VALUE
wmap_lookup(VALUE self, VALUE key)
{
    /* ... */
    if (!wmap_live_p(objspace, obj)) return Qundef;
    return obj;
}

static VALUE
wmap_aref(VALUE self, VALUE key)
{
    VALUE obj = wmap_lookup(self, key);
    return obj != Qundef ? obj : Qnil;
}

/* Returns the number of referenced objects */
static VALUE
wmap_size(VALUE self)
{
    /* ... */
}
#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
    /* ... fixnum fast path for the WeakMap size ... */
#endif

/*
  ------------------------------ GC profiler ------------------------------
*/

#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

static double
getrusage_time(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        static int try_clock_gettime = 1;
        struct timespec ts;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
            return ts.tv_sec + ts.tv_nsec * 1e-9;
        }
        else {
            try_clock_gettime = 0;
        }
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            return time.tv_sec + time.tv_usec * 1e-6;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;
        LONG_LONG q;
        double t;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
            q = ui.QuadPart / 10L;
            t = (DWORD)(q % 1000000L) * 1e-6;
            q = q / 1000000L;
            t += (double)(DWORD)(q >> 16) * (1 << 16);
            t += (DWORD)q & ~(~0 << 16);
            return t;
        }
    }
#endif

    return 0.0;
}
static inline void
gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
{
    if (objspace->profile.run) {
        gc_profile_record *record;

        /* ... grow objspace->profile.records as needed ... */
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        }
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        MEMZERO(record, gc_profile_record, 1);

        /* setup before-GC parameters */
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
        {
            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;
            }
        }
#endif
    }
}

static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}

static double
elapsed_time_from(double time)
{
    double now = getrusage_time();
    if (now > time) {
        return now - time;
    }
    else {
        return 0;
    }
}

#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}

static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}

static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    /* ... accumulate the elapsed sweep time: */
        record->gc_time += sweep_time;
#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
#endif
    /* ... */
}

static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    /* ... */
    record->heap_live_objects = live;
    record->heap_free_objects = total - live;
    /* ... */
#endif
}
/*
 *  call-seq:
 *     GC::Profiler.clear -> nil
 *
 *  Clears the GC profiler data.
 */
static VALUE
gc_profile_clear(VALUE _)
{
    /* ... reset the record buffer and indices ... */
    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.raw_data -> [Hash, ...]
 *
 *  Returns an Array of individual raw profile data Hashes.
 */
static VALUE
gc_profile_record_get(VALUE _)
{
    /* ... one Hash per record; extra keys under GC_PROFILE_MORE_DETAIL
       and RGENGC_PROFILE > 0 ... */
}

#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

static char *
gc_profile_dump_major_reason(int flags, char *buff)
{
    int reason = flags & GPR_FLAG_MAJOR_MASK;
    int i = 0;

    if (reason == GPR_FLAG_NONE) {
        buff[0] = '-';
        buff[1] = 0;
    }
    else {
#define C(x, s) \
  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
      buff[i++] = #x[0]; \
      if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
      buff[i] = 0; \
  }
        C(NOFREE, N);
        C(OLDGEN, O);
        C(SHADY,  S);
#if RGENGC_ESTIMATE_OLDMALLOC
        C(OLDMALLOC, M);
#endif
#undef C
    }
    return buff;
}
#endif

static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    /* ... */
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));

    for (i = 0; i < count; i++) {
        /* ... one summary line per record ... */
    }

#if GC_PROFILE_MORE_DETAIL
    const char *str = "\n\n" \
        "More detail.\n" \
        "Prepare Time = Previously GC's rest sweep time\n"
        "Index Flags          Allocate Inc.  Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
        "  Allocated Size"
#endif
        "  Use Page     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
#if RGENGC_PROFILE
        " OldgenObj RemNormObj RemShadObj"
#endif
#if GC_PROFILE_DETAIL_MEMORY
        " MaxRSS(KB) MinorFLT MajorFLT"
#endif
        "\n";
    append(out, rb_str_new_cstr(str));

    for (i = 0; i < count; i++) {
        record = &objspace->profile.records[i];
        append(out, rb_sprintf(/* ... format string ... */
                               gc_profile_dump_major_reason(record->flags, reason_str),
                               /* ... */
                               record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                               record->allocated_size,
#endif
                               record->heap_use_pages,
                               record->gc_mark_time*1000,
                               record->gc_sweep_time*1000,
                               record->prepare_time*1000,
                               record->heap_live_objects,
                               record->heap_free_objects,
                               record->removing_objects,
                               record->empty_objects
#if RGENGC_PROFILE
                               ,
                               record->old_objects,
                               record->remembered_normal_objects,
                               record->remembered_shady_objects
#endif
#if GC_PROFILE_DETAIL_MEMORY
                               ,
                               record->maxrss / 1024,
                               record->minflt,
                               record->majflt
#endif
                   ));
    }
#endif
}

/*
 *  call-seq:
 *     GC::Profiler.result -> String
 *
 *  Returns a profile data report.
 */
static VALUE
gc_profile_result(VALUE _)
{
    VALUE str = rb_str_buf_new(0);
    gc_profile_dump_on(str, rb_str_buf_append);
    return str;
}

/*
 *  call-seq:
 *     GC::Profiler.total_time -> float
 *
 *  The total time used for garbage collection in seconds.
 */
static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t i;
        size_t count = objspace->profile.next_index;

        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;
        }
    }
    return DBL2NUM(time);
}

/*
 *  call-seq:
 *     GC::Profiler.enabled? -> true or false
 */
static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return objspace->profile.run ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     GC::Profiler.enable -> nil
 */
static VALUE
gc_profile_enable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;
    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.disable -> nil
 */
static VALUE
gc_profile_disable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
    return Qnil;
}
/*
  ------------------------------ DEBUG ------------------------------
*/

#define TYPE_NAME(t) case (t): return #t;

static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
        TYPE_NAME(T_OBJECT);
        /* ... one TYPE_NAME(...) per value type ... */
      default:
        return "unknown";
    }
}

static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}

const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      /* ... one case per rb_method_type_t value ... */
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}

/* from array.c */
# define ARY_SHARED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary),ELTS_SHARED)!=0)
# define ARY_EMBED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)

static void
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && iseq->body && iseq->body->location.label) {
        VALUE path = rb_iseq_path(iseq);
        VALUE n = iseq->body->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(iseq->body->location.label),
                 RSTRING_PTR(path),
                 n ? FIX2INT(n) : 0);
    }
}

static int
str_len_no_raise(VALUE str)
{
    long len = RSTRING_LEN(str);
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;
    return (int)len;
}
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
    int pos = 0;
    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end

    if (SPECIAL_CONST_P(obj)) {
        /* ... */
    }
    else {
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
        const int type = BUILTIN_TYPE(obj);
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s] %s ",
                     (void *)obj, age,
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),  "L"),
                     C(RVALUE_MARK_BITMAP(obj),           "M"),
                     C(RVALUE_PIN_BITMAP(obj),            "P"),
                     C(RVALUE_MARKING_BITMAP(obj),        "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     obj_type_name(obj)));
        }
        else {
            /* e.g. an object not managed by this heap */
            APPENDF((BUFF_ARGS, "%p [%d] %s",
                     (void *)obj, age, obj_type_name(obj)));
        }

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPENDF((BUFF_ARGS, "(temporary internal)"));
        }
        else {
            VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
            if (!NIL_P(class_path)) {
                APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
            }
        }

        switch (type) {
          case T_ARRAY:
            /* ... shared/embed/transient flags, e.g.
               C(RARRAY_TRANSIENT_P(obj), "T") ... */
            break;
          case T_HASH:
            APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
                     RHASH_SIZE(obj)));
            break;
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
                }
                break;
            }
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
                }
                break;
            }
          case T_PROC:
            {
                const struct rb_block *block;
                const rb_iseq_t *iseq;
                if ((block = vm_proc_block(obj)) != NULL &&
                    vm_block_type(block) == block_type_iseq &&
                    (iseq = vm_block_iseq(block)) != NULL) {
                    /* ... rb_raw_iseq_info(BUFF_ARGS, iseq) ... */
                }
                break;
            }
          case T_IMEMO:
            switch (imemo_type(obj)) {
              case imemo_ment:
                /* ... APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p", ...)) ... */
                break;
              case imemo_callinfo:
                /* ... vm_ci_kwarg(ci) ? "available" : "NULL" ... */
                break;
              case imemo_callcache:
                /* ... vm_cc_cme(cc) ? rb_id2name(vm_cc_cme(cc)->called_id) : "<NULL>",
                       (void *)vm_cc_cme(cc), (void *)vm_cc_call(cc) ... */
                break;
              default:
                break;
            }
            break;
          default:
            break;
        }
#undef TF
#undef C
    }
  end:
    if (poisoned) {
        asan_poison_object(obj);
    }

    return buff;
#undef APPENDF
#undef BUFF_ARGS
}
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

static const char *
obj_info(VALUE obj)
{
    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0;
    }

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
#else
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
#endif

MJIT_FUNC_EXPORTED const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}
void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

MJIT_FUNC_EXPORTED void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}
#if GC_DEBUG

void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);

    if (BUILTIN_TYPE(obj) == T_MOVED) {
        fprintf(stderr, "moved?: true\n");
    }
    else {
        fprintf(stderr, "moved?: false\n");
    }
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }

    fprintf(stderr, "age?         : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old?         : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    if (is_lazy_sweeping(heap_eden)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}
static VALUE
gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
{
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}

void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}

#endif /* GC_DEBUG */

#if GC_DEBUG_STRESS_TO_CLASS
/*
 *  call-seq:
 *    GC.add_stress_to_class(class[, ...])
 *
 *  Raises NoMemoryError when allocating an instance of the given classes.
 */
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        stress_to_class = rb_ary_tmp_new(argc);
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}

/*
 *  call-seq:
 *    GC.remove_stress_to_class(class[, ...])
 *
 *  Stops raising NoMemoryError when allocating an instance of the given classes.
 */
static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        /* ... clear stress_to_class when it becomes empty ... */
    }
    return Qnil;
}
#endif
void
Init_GC(void)
{
#undef rb_intern
    VALUE rb_mObjSpace;
    VALUE rb_mProfiler;
    VALUE gc_constants;

    /* ... define the GC module, GC::Profiler, ObjectSpace, their methods,
       and the GC::INTERNAL_CONSTANTS hash ... */

#if MALLOC_ALLOCATED_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif

#if GC_DEBUG_STRESS_TO_CLASS
    rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
    rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
#endif

    {
        VALUE opts;
        /* GC build options */
        rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
        OPT(GC_DEBUG);
        /* ... one OPT(...) per build-time option ... */
#undef OPT
        OBJ_FREEZE(opts);
    }
}

#ifdef ruby_xmalloc
#undef ruby_xmalloc
#endif
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif

void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
}
size_t rb_ary_memsize(VALUE ary)
VALUE rb_ary_push(VALUE ary, VALUE item)
void rb_ary_free(VALUE ary)
VALUE rb_ary_last(int argc, const VALUE *argv, VALUE ary)
VALUE rb_ary_tmp_new(long capa)
void rb_ary_delete_same(VALUE ary, VALUE item)
VALUE rb_ary_cat(VALUE ary, const VALUE *argv, long len)
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
#define PRINTF_ARGS(decl, string_index, first_to_check)
#define UNREACHABLE_RETURN
size_t rb_big_size(VALUE big)
VALUE rb_big_eql(VALUE x, VALUE y)
VALUE rb_big_hash(VALUE x)
int bits(struct state *s, int need)
int ruby_thread_has_gvl_p(void)
VALUE rb_obj_is_fiber(VALUE obj)
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define rb_define_module_function(klass, mid, func, arity)
Defines klass#mid and makes it a module function.
#define range(low, item, hi)
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
#define RB_DEBUG_COUNTER_INC_IF(type, cond)
#define RB_DEBUG_COUNTER_INC(type)
#define FLUSH_REGISTER_WINDOWS
#define RB_GNUC_EXTENSION
#define MJIT_FUNC_EXPORTED
char str[HTML_ESCAPE_MAX_LEN+1]
#define rb_ec_raised_p(ec, f)
#define rb_ec_raised_set(ec, f)
#define EC_JUMP_TAG(ec, st)
#define rb_ec_raised_clear(ec)
void rb_mark_end_proc(void)
#define RUBY_INTERNAL_EVENT_GC_EXIT
#define RUBY_INTERNAL_EVENT_GC_ENTER
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
#define RUBY_INTERNAL_EVENT_GC_END_MARK
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
#define RUBY_INTERNAL_EVENT_FREEOBJ
#define RUBY_INTERNAL_EVENT_GC_START
#define RUBY_INTERNAL_EVENT_NEWOBJ
#define RSTRING_LEN(string)
#define RSTRING_PTR(string)
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing)
VALUE rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_OLDMALLOC_LIMIT_MAX
int rb_objspace_internal_object_p(VALUE obj)
VALUE * ruby_initial_gc_stress_ptr
bool rb_obj_is_main_ractor(VALUE gv)
#define GC_MALLOC_LIMIT_MAX
#define stack_check(ec, water_mark)
void ruby_xfree(void *x)
Deallocates a storage instance.
#define TRY_WITH_GC(siz, alloc)
#define GC_HEAP_FREE_SLOTS_MIN_RATIO
#define MALLOC_ALLOCATED_SIZE
#define GC_ENABLE_INCREMENTAL_MARK
#define ARY_SHARED_P(ary)
#define obj_id_to_ref(objid)
#define CALC_EXACT_MALLOC_SIZE
@ gc_stat_sym_total_freed_objects
@ gc_stat_sym_old_objects
@ gc_stat_sym_total_allocated_objects
@ gc_stat_sym_compact_count
@ gc_stat_sym_total_moved_objects
@ gc_stat_sym_heap_allocatable_pages
@ gc_stat_sym_read_barrier_faults
@ gc_stat_sym_old_objects_limit
@ gc_stat_sym_heap_live_slots
@ gc_stat_sym_heap_free_slots
@ gc_stat_sym_heap_marked_slots
@ gc_stat_sym_total_allocated_pages
@ gc_stat_sym_heap_available_slots
@ gc_stat_sym_remembered_wb_unprotected_objects_limit
@ gc_stat_sym_malloc_increase_bytes_limit
@ gc_stat_sym_heap_final_slots
@ gc_stat_sym_total_freed_pages
@ gc_stat_sym_heap_sorted_length
@ gc_stat_sym_heap_tomb_pages
@ gc_stat_sym_heap_allocated_pages
@ gc_stat_sym_heap_eden_pages
@ gc_stat_sym_remembered_wb_unprotected_objects
@ gc_stat_sym_minor_gc_count
@ gc_stat_sym_malloc_increase_bytes
@ gc_stat_sym_major_gc_count
#define RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_CHECK_MODE
#define GC_HEAP_GROWTH_MAX_SLOTS
int rb_objspace_garbage_object_p(VALUE obj)
#define GC_PROFILE_DETAIL_MEMORY
#define RVALUE_PIN_BITMAP(obj)
VALUE rb_gc_location(VALUE value)
#define heap_pages_final_slots
struct stack_chunk stack_chunk_t
#define GC_MALLOC_LIMIT_MIN
VALUE rb_gc_disable(void)
size_t rb_objspace_data_type_memsize(VALUE obj)
#define is_marking(objspace)
#define gc_mode(objspace)
#define gc_prof_enabled(objspace)
#define gc_report(level, objspace,...)
void * rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
#define UNEXPECTED_NODE(func)
void * rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
void rb_mark_tbl_no_pin(st_table *tbl)
#define heap_pages_freeable_pages
void ruby_mimfree(void *ptr)
rb_imemo_tmpbuf_t * rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
#define heap_pages_deferred_final
void * ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
Identical to ruby_xrealloc(), except it resizes the given storage instance to newelems * newsiz bytes...
void rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
const char * rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
VALUE rb_undefine_finalizer(VALUE obj)
#define ruby_gc_stress_mode
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
rb_symbols_t ruby_global_symbols
#define MALLOC_ALLOCATED_SIZE_CHECK
struct mark_stack mark_stack_t
@ HEAP_PAGE_BITMAP_PLANES
void rb_mark_set(st_table *tbl)
void * ruby_xcalloc(size_t n, size_t size)
Identical to ruby_xmalloc2(), except it zero-fills the region before it returns.
void * ruby_xmalloc2_body(size_t n, size_t size)
#define CLEAR_IN_BITMAP(bits, p)
#define HEAP_PAGE_ALIGN_LOG
#define GET_HEAP_MARKING_BITS(x)
void rb_mark_hash(st_table *tbl)
VALUE rb_obj_id(VALUE obj)
void rb_gc_mark_movable(VALUE ptr)
#define GET_HEAP_WB_UNPROTECTED_BITS(x)
void rb_gc_mark_maybe(VALUE obj)
VALUE rb_class_allocate_instance(VALUE klass)
#define GC_ENABLE_LAZY_SWEEP
#define MARK_IN_BITMAP(bits, p)
#define GC_PROFILE_RECORD_DEFAULT_SIZE
#define RVALUE_PAGE_MARKING(page, obj)
int rb_objspace_marked_object_p(VALUE obj)
void * rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
VALUE rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
int rb_ec_stack_check(rb_execution_context_t *ec)
void * rb_aligned_malloc(size_t alignment, size_t size)
void rb_mark_tbl(st_table *tbl)
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
void rb_gc_writebarrier_unprotect(VALUE obj)
#define malloc_allocated_size
void * ruby_mimmalloc(size_t size)
#define GC_HEAP_INIT_SLOTS
VALUE rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags)
#define heap_pages_sorted_length
#define RVALUE_WB_UNPROTECTED_BITMAP(obj)
size_t rb_obj_memsize_of(VALUE obj)
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
const char * rb_method_type_name(rb_method_type_t type)
#define STACKFRAME_FOR_CALL_CFUNC
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
VALUE rb_memory_id(VALUE obj)
#define RGENGC_OLD_NEWOBJ_CHECK
void * rb_alloc_tmp_buffer(volatile VALUE *store, long len)
const char * rb_objspace_data_type_name(VALUE obj)
void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
rb_objspace_t * rb_objspace_alloc(void)
#define gc_mode_set(objspace, mode)
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
void rb_gc_force_recycle(VALUE obj)
#define GET_STACK_BOUNDS(start, end, appendix)
void * rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
void ruby_gc_set_params(void)
void rb_objspace_set_event_hook(const rb_event_flag_t event)
void rb_gcdebug_print_obj_condition(VALUE obj)
@ gc_enter_event_finalizer
@ gc_enter_event_sweep_continue
@ gc_enter_event_rb_memerror
@ gc_enter_event_mark_continue
int ruby_get_stack_grow_direction(volatile VALUE *addr)
void * ruby_xrealloc_body(void *ptr, size_t new_size)
void rb_gc_copy_finalizer(VALUE dest, VALUE obj)
size_t rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
void rb_iseq_update_references(rb_iseq_t *iseq)
#define GET_HEAP_MARK_BITS(x)
void rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
int rb_objspace_markable_object_p(VALUE obj)
void rb_gc_update_tbl_refs(st_table *ptr)
bool rb_ractor_p(VALUE rv)
VALUE rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
void rb_gc_mark(VALUE ptr)
void rb_gc_verify_internal_consistency(void)
void rb_cc_table_free(VALUE klass)
#define gc_event_hook_available_p(objspace)
void ruby_malloc_size_overflow(size_t count, size_t elsize)
const char * rb_imemo_name(enum imemo_type type)
int ruby_enable_autocompact
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define RVALUE_MARK_BITMAP(obj)
void rb_gc_mark_values(long n, const VALUE *values)
void * ruby_xmalloc(size_t size)
Allocates a storage instance.
@ gc_stress_full_mark_after_malloc
@ gc_stress_no_immediate_sweep
void rb_obj_info_dump(VALUE obj)
void rb_gc_writebarrier(VALUE a, VALUE b)
#define gc_prof_record(objspace)
#define MARK_CHECKPOINT(category)
VALUE rb_newobj_of(VALUE klass, VALUE flags)
#define UPDATE_IF_MOVED(_objspace, _thing)
#define is_incremental_marking(objspace)
void rb_vm_update_references(void *ptr)
#define GC_MALLOC_LIMIT_GROWTH_FACTOR
#define is_sweeping(objspace)
int ruby_stack_grow_direction
const char * rb_obj_info(VALUE obj)
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
VALUE rb_define_finalizer(VALUE obj, VALUE block)
#define MARK_OBJECT_ARY_BUCKET_SIZE
#define has_sweeping_pages(heap)
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
#define RGENGC_FORCE_MAJOR_GC
void * ruby_xmalloc2(size_t n, size_t size)
Identical to ruby_xmalloc(), except it allocates nelems * elemsiz bytes.
@ GPR_FLAG_MAJOR_BY_FORCE
@ GPR_FLAG_IMMEDIATE_SWEEP
@ GPR_FLAG_MAJOR_BY_SHADY
@ GPR_FLAG_MAJOR_BY_NOFREE
@ GPR_FLAG_IMMEDIATE_MARK
@ GPR_FLAG_MAJOR_BY_OLDGEN
struct rb_objspace rb_objspace_t
#define GC_OLDMALLOC_LIMIT_MIN
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)
void * ruby_xrealloc(void *ptr, size_t new_size)
Resize the storage instance.
void rb_gc_unregister_address(VALUE *addr)
Inform the garbage collector that a pointer previously passed to rb_gc_register_address() no longer p...
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
void rb_objspace_each_objects(each_obj_callback *callback, void *data)
void * ruby_xmalloc_body(size_t size)
void * ruby_xcalloc_body(size_t n, size_t size)
void rb_objspace_reachable_objects_from_root(void(func)(const char *category, VALUE, void *), void *passing_data)
#define gc_event_hook_prep(objspace, event, data, prep)
VALUE rb_gc_disable_no_rest(void)
#define nonspecial_obj_id(obj)
void rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
void rb_objspace_call_finalizer(rb_objspace_t *objspace)
#define gc_event_hook(objspace, event, data)
size_t rb_gc_stat(VALUE key)
#define GC_HEAP_GROWTH_FACTOR
void rb_iseq_mark(const rb_iseq_t *iseq)
void rb_gc_register_address(VALUE *addr)
Inform the garbage collector that valptr points to a live Ruby object that should not be moved.
void rb_iseq_free(const rb_iseq_t *iseq)
void rb_objspace_free(rb_objspace_t *objspace)
#define heap_pages_sorted
void rb_gc_writebarrier_remember(VALUE obj)
void rb_free_const_table(struct rb_id_table *tbl)
#define MARKED_IN_BITMAP(bits, p)
#define gc_stress_full_mark_after_malloc_p()
size_t rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
VALUE rb_objspace_gc_disable(rb_objspace_t *objspace)
void rb_malloc_info_show_results(void)
#define GET_HEAP_PINNED_BITS(x)
#define heap_allocated_pages
void rb_gc_register_mark_object(VALUE obj)
Inform the garbage collector that object is a live Ruby object that should not be moved.
#define RVALUE_MARKING_BITMAP(obj)
#define GC_HEAP_FREE_SLOTS_MAX_RATIO
#define ruby_gc_stressful
void Init_gc_stress(void)
void rb_gc_adjust_memory_usage(ssize_t diff)
#define will_be_incremental_marking(objspace)
#define gc_writebarrier_incremental(a, b, objspace)
VALUE rb_gc_latest_gc_info(VALUE key)
void * rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
#define RUBY_DTRACE_GC_HOOK(name)
#define heap_allocatable_pages
void rb_free_tmp_buffer(volatile VALUE *store)
#define RESTORE_FINALIZER()
#define GC_HEAP_FREE_SLOTS
struct rb_heap_struct rb_heap_t
size_t rb_obj_gc_flags(VALUE obj, ID *flags, size_t max)
size_t rb_iseq_memsize(const rb_iseq_t *iseq)
#define is_lazy_sweeping(heap)
void * ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
#define rb_objspace_of(vm)
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
int each_obj_callback(void *, void *, size_t, void *)
#define ruby_initial_gc_stress
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)
#define is_full_marking(objspace)
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj)
#define GC_PROFILE_MORE_DETAIL
#define SET_MACHINE_STACK_END(p)
#define STACK_UPPER(x, a, b)
void rb_include_module(VALUE klass, VALUE module)
void rb_class_detach_subclasses(VALUE klass)
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
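A sketch of a typical extension init function (the module and class names are hypothetical):

    #include "ruby.h"

    void
    Init_foo(void)
    {
        VALUE mFoo = rb_define_module("Foo");
        /* Defines class Foo::Bar with superclass Object. */
        VALUE cBar = rb_define_class_under(mFoo, "Bar", rb_cObject);
        (void)cBar;
    }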
int rb_singleton_class_internal_p(VALUE sklass)
VALUE rb_define_module(const char *name)
void rb_class_detach_module_subclasses(VALUE klass)
void rb_class_remove_from_module_subclasses(VALUE klass)
VALUE rb_define_module_under(VALUE outer, const char *name)
void rb_class_remove_from_super_subclasses(VALUE klass)
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
int ruby_stack_check(void)
size_t ruby_stack_length(VALUE **p)
void rb_raise(VALUE exc, const char *fmt,...)
void rb_bug(const char *fmt,...)
void rb_vraise(VALUE exc, const char *fmt, va_list ap)
void rb_warn(const char *fmt,...)
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
The C equivalent of a Ruby ensure clause.
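A sketch of the begin/ensure shape in C (both callbacks and their payloads are hypothetical). The e_proc runs whether or not b_proc raises:

    #include "ruby.h"

    static VALUE
    do_work(VALUE callable)
    {
        return rb_funcall(callable, rb_intern("call"), 0);
    }

    static VALUE
    close_io(VALUE io)
    {
        return rb_funcall(io, rb_intern("close"), 0);
    }

    static VALUE
    with_io(VALUE callable, VALUE io)
    {
        /* close_io(io) runs even if do_work() raises */
        return rb_ensure(do_work, callable, close_io, io);
    }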
VALUE rb_errinfo(void)
The current exception in the current thread.
VALUE rb_mKernel
Kernel module.
VALUE rb_cObject
Object class.
VALUE rb_any_to_s(VALUE)
Default implementation of #to_s.
VALUE rb_obj_class(VALUE)
VALUE rb_inspect(VALUE)
Convenient wrapper around Object#inspect.
VALUE rb_cBasicObject
BasicObject class.
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
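A sketch of the common argument-checking idiom (the helper is hypothetical), the C spelling of obj.is_a?(String):

    #include "ruby.h"

    static void
    check_string(VALUE obj)
    {
        if (!RTEST(rb_obj_is_kind_of(obj, rb_cString)))
            rb_raise(rb_eTypeError, "expected a String");
    }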
VALUE rb_to_int(VALUE)
Converts val into Integer.
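A sketch (hypothetical helper). Conversion goes through #to_int and raises TypeError when it fails, so the result is safe to unwrap:

    #include "ruby.h"

    static long
    arg_to_long(VALUE val)
    {
        return NUM2LONG(rb_to_int(val));
    }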
void skip(file *in, unsigned n)
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
size_t rb_hash_ar_table_size(void)
VALUE rb_hash_new_with_size(st_index_t size)
VALUE rb_hash_compare_by_id_p(VALUE hash)
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg)
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
st_table * rb_init_identtable(void)
void *PTR64 __attribute__((mode(DI)))
void rb_id_table_foreach_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, rb_id_table_update_callback_func_t *replace, void *data)
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
void rb_id_table_free(struct rb_id_table *tbl)
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
rb_id_table_iterator_result
IMEMO: Internal memo object.
#define IMEMO_TYPE_P(v, t)
@ imemo_ifunc
iterator function
@ imemo_cref
class reference
@ imemo_svar
special variable
Thin wrapper to ruby/config.h.
VALUE rb_funcall(VALUE, ID, int,...)
Calls a method.
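A sketch of the calling convention (receiver, method ID, argc, then the arguments as varargs); both calls are hypothetical examples:

    #include "ruby.h"

    static VALUE
    demo(VALUE str)
    {
        VALUE up  = rb_funcall(str, rb_intern("upcase"), 0);              /* str.upcase */
        VALUE sum = rb_funcall(INT2FIX(1), rb_intern("+"), 1, INT2FIX(2)); /* 1 + 2 */
        (void)sum;
        return up;
    }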
Defines RBIMPL_HAS_BUILTIN.
#define RETURN_ENUMERATOR(obj, argc, argv)
VALUE rb_io_write(VALUE, VALUE)
VALUE rb_obj_is_proc(VALUE)
VALUE rb_block_proc(void)
VALUE rb_str_buf_new(long)
VALUE rb_str_append(VALUE, VALUE)
VALUE rb_str_buf_append(VALUE, VALUE)
#define rb_str_new_cstr(str)
VALUE rb_class_name(VALUE)
VALUE rb_class_path_cached(VALUE)
void rb_free_generic_ivar(VALUE)
int rb_obj_respond_to(VALUE, ID, int)
void rb_clear_constant_cache(void)
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
const char * rb_id2name(ID)
ID rb_intern(const char *)
void rb_define_const(VALUE, const char *, VALUE)
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
#define BIGNUM_EMBED_FLAG
Internal header for Class.
#define RCLASS_CALLABLE_M_TBL(c)
#define RCLASS_CONST_TBL(c)
struct rb_classext_struct rb_classext_t
#define RICLASS_ORIGIN_SHARED_MTBL
#define RICLASS_IS_ORIGIN
#define RCLASS_IV_INDEX_TBL(c)
Internal header for Complex.
Internal header for Fiber.
#define ruby_sized_xrealloc
#define ruby_sized_xrealloc2
#define SIZED_REALLOC_N(v, T, m, n)
Internal header for Hash.
void rb_io_fptr_finalize_internal(void *ptr)
size_t rb_io_memsize(const rb_io_t *)
Internal header for Numeric.
VALUE rb_int2str(VALUE num, int base)
VALUE rb_int_ge(VALUE x, VALUE y)
VALUE rb_int_plus(VALUE x, VALUE y)
Internal header for Object.
Internal header for Proc.
VALUE rb_callable_receiver(VALUE)
VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc)
Internal header for Rational.
size_t rb_str_memsize(VALUE)
Internal header for Struct.
void rb_gc_free_dsymbol(VALUE)
Internal header for Thread.
VALUE rb_obj_is_mutex(VALUE obj)
void rb_gc_mark_global_tbl(void)
void rb_mark_generic_ivar(VALUE)
size_t rb_generic_ivar_memsize(VALUE)
void rb_gc_update_global_tbl(void)
void rb_mv_generic_ivar(VALUE src, VALUE dst)
void rb_print_backtrace(void)
void rb_vm_each_stack_value(void *ptr, void(*cb)(VALUE, void *), void *ctx)
VALUE ruby_vm_special_exception_copy(VALUE)
VALUE rb_obj_is_thread(VALUE obj)
void rb_vm_mark(void *ptr)
const char * rb_source_location_cstr(int *pline)
typedef long(ZCALLBACK *tell_file_func) OF((voidpf opaque, voidpf stream))
typedef int(ZCALLBACK *close_file_func) OF((voidpf opaque, voidpf stream))
VALUE rb_iseq_path(const rb_iseq_t *iseq)
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
VALUE rb_yield_values(int n,...)
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]]
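A sketch: annotate a definition that only some build configurations reference, so unused-function warnings stay quiet (the helper is hypothetical):

    #include "ruby.h"

    RBIMPL_ATTR_MAYBE_UNUSED()
    static void
    dump_state(void)
    {
        /* referenced only by debug builds */
    }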
#define MEMZERO(p, type, n)
#define MEMMOVE(p1, p2, type, n)
#define METHOD_ENTRY_CACHED(me)
#define METHOD_ENTRY_COMPLEMENTED(me)
void rb_free_method_entry(const rb_method_entry_t *me)
@ VM_METHOD_TYPE_ISEQ
Ruby method.
@ VM_METHOD_TYPE_ATTRSET
attr_writer or attr_accessor
@ VM_METHOD_TYPE_CFUNC
C method.
@ VM_METHOD_TYPE_OPTIMIZED
Kernel#send, Proc#call, etc.
@ VM_METHOD_TYPE_REFINED
refinement
@ VM_METHOD_TYPE_NOTIMPLEMENTED
@ VM_METHOD_TYPE_MISSING
wrapper for method_missing(id)
@ VM_METHOD_TYPE_IVAR
attr_reader or attr_accessor
#define METHOD_ENTRY_INVALIDATED(me)
#define METHOD_ENTRY_VISI(me)
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
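A sketch, assuming the standard block-call function convention for the callback (names are hypothetical):

    #include "ruby.h"

    /* Hypothetical callback: doubles whatever the proc is called with. */
    static VALUE
    double_it(RB_BLOCK_CALL_FUNC_ARGLIST(yielded, callback_arg))
    {
        return rb_funcall(yielded, rb_intern("*"), 1, INT2FIX(2));
    }

    static VALUE
    make_doubler(void)
    {
        return rb_proc_new(double_it, Qnil);  /* proc.call(3) #=> 6 */
    }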
VALUE type(ANYARGS)
ANYARGS-ed function type.
void rb_ast_update_references(rb_ast_t *ast)
void rb_ast_mark(rb_ast_t *ast)
size_t rb_ast_memsize(const rb_ast_t *ast)
void rb_ast_free(rb_ast_t *ast)
ONIG_EXTERN void onig_region_free(OnigRegion *region, int free_self)
ONIG_EXTERN void onig_free(OnigRegex)
#define RARRAY_AREF(a, i)
void rb_ractor_finish_marking(void)
#define RARRAY_EMBED_FLAG
#define RARRAY_CONST_PTR_TRANSIENT
#define RUBY_DEFAULT_FREE
void(* RUBY_DATA_FUNC)(void *)
#define rb_data_object_wrap
size_t onig_region_memsize(const OnigRegion *regs)
size_t onig_memsize(const regex_t *reg)
#define RGENGC_WB_PROTECTED_OBJECT
void rb_strterm_mark(VALUE obj)
#define RTYPEDDATA_DATA(v)
#define TypedData_Get_Struct(obj, type, data_type, sval)
@ RUBY_TYPED_FREE_IMMEDIATELY
#define TypedData_Make_Struct(klass, type, data_type, sval)
const char * rb_obj_classname(VALUE)
int ruby_native_thread_p(void)
#define ATOMIC_VALUE_EXCHANGE(var, val)
#define ATOMIC_EXCHANGE(var, val)
#define ATOMIC_SIZE_INC(var)
#define ATOMIC_PTR_EXCHANGE(var, val)
#define ATOMIC_SET(var, val)
#define ATOMIC_SIZE_CAS(var, oldval, newval)
#define ATOMIC_SIZE_ADD(var, val)
#define ATOMIC_SIZE_EXCHANGE(var, val)
Internal header for ASAN / MSAN / etc.
#define NO_SANITIZE(x, y)
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x)
rb_atomic_t cnt[RUBY_NSIG]
VALUE rb_str_catf(VALUE, const char *,...)
VALUE rb_sprintf(const char *,...)
#define st_init_numtable_with_size
#define st_is_member(table, key)
int st_foreach_callback_func(st_data_t, st_data_t, st_data_t)
#define st_foreach_with_replace
size_t strlen(const char *)
struct RArray::@95::@96 heap
union RArray::@95::@96::@97 aux
union RString::@100::@101::@102 aux
struct RString::@100::@101 heap
const rb_data_type_t * type
union RVALUE::@82::@84 imemo
struct RTypedData typeddata
struct RVALUE::@82::@83 free
struct RVALUE::@82::@85 values
struct RRational rational
each_obj_callback * callback
struct force_finalize_list * next
size_t heap_total_objects
struct heap_page_header header
bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT]
unsigned int has_remembered_objects
bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT]
unsigned int before_sweep
bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT]
bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT]
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT]
struct list_node page_node
unsigned int has_uncollectible_shady_objects
struct heap_page * free_next
struct heap_page::@93 flags
struct heap_page * using_page
const struct rb_callable_method_entry_struct *const cme_
const struct rb_callcache * cc
const struct rb_callinfo * ci
const struct rb_callable_method_entry_struct * cme
struct rb_class_cc_entries::rb_class_cc_entries_entry * entries
struct st_table * iv_index_tbl
struct rb_subclass_entry * subclasses
const VALUE refined_class
struct rb_data_type_struct::@103 function
struct heap_page * free_pages
struct heap_page * sweeping_page
struct heap_page * compact_cursor
size_t compact_cursor_index
struct rb_imemo_tmpbuf_struct * next
struct rb_io_t::rb_io_enc_t encs
VALUE writeconv_asciicompat
VALUE writeconv_pre_ecopts
VALUE tied_io_for_writing
rb_iseq_location_t location
struct rb_iseq_constant_body * body
union rb_method_definition_struct::@123 body
struct rb_method_definition_struct *const def
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
size_t total_freed_objects
size_t uncollectible_wb_unprotected_objects_limit
size_t heap_used_at_gc_start
st_table * finalizer_table
struct rb_objspace::@86 malloc_params
rb_event_flag_t hook_events
size_t moved_count_table[T_MASK]
gc_profile_record * records
unsigned int during_minor_gc
struct gc_list * global_list
size_t uncollectible_wb_unprotected_objects
unsigned int gc_stressful
gc_profile_record * current_record
struct rb_objspace::@92 rcompactor
struct rb_objspace::@87 flags
double gc_sweep_start_time
struct heap_page ** sorted
size_t read_barrier_faults
struct rb_objspace::@91 rgengc
unsigned int immediate_sweep
unsigned int during_compacting
size_t considered_count_table[T_MASK]
size_t total_allocated_objects_at_gc_start
size_t total_allocated_pages
size_t total_allocated_objects
struct rb_objspace::@90 profile
unsigned int dont_incremental
void(* mark_func)(VALUE v, void *data)
struct rb_ractor_struct::gc_mark_func_data_struct * mfd
rb_ractor_newobj_cache_t newobj_cache
struct rb_subclass_entry * next
int char_offset_num_allocated
struct rmatch_offset * char_offset
void(* func)(const char *category, VALUE, void *)
double oldmalloc_limit_growth_factor
double heap_free_slots_max_ratio
double heap_free_slots_goal_ratio
double malloc_limit_growth_factor
double oldobject_limit_factor
double heap_free_slots_min_ratio
size_t oldmalloc_limit_min
size_t oldmalloc_limit_max
struct stack_chunk * next
VALUE data[STACK_CHUNK_SIZE]
size_t zombie_object_count
size_t remembered_shady_count
IFUNC (Internal FUNCtion)
#define rb_transient_heap_finish_marking()
#define rb_transient_heap_promote(obj)
#define rb_transient_heap_verify()
#define rb_transient_heap_mark(obj, ptr)
#define rb_transient_heap_update_references()
#define rb_transient_heap_start_marking(full_marking)
void error(const char *msg)
ruby_value_type
C-level type of an object.
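A sketch of branching on the C-level type tag instead of calling #class (the helper is hypothetical):

    #include "ruby.h"

    static const char *
    kind_name(VALUE obj)
    {
        switch (rb_type(obj)) {  /* returns a ruby_value_type */
          case T_STRING: return "string";
          case T_ARRAY:  return "array";
          case T_HASH:   return "hash";
          default:       return "other";
        }
    }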
rb_ractor_t * ruby_single_main_ractor
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
#define rb_vm_register_special_exception(sp, e, m)
#define VM_ENV_DATA_INDEX_ENV
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
void rb_hook_list_mark(rb_hook_list_t *hooks)
#define VM_UNREACHABLE(func)
@ VM_ENV_FLAG_WB_REQUIRED
#define RUBY_DEBUG_LOG(fmt,...)
#define RB_VM_LOCK_LEAVE_CR_LEV(cr, levp)
#define RB_VM_LOCK_ENTER_CR_LEV(cr, levp)
#define RB_VM_LOCK_ENTER_NO_BARRIER()
#define ASSERT_vm_locking()
#define RB_VM_LOCK_LEAVE_NO_BARRIER()
#define RB_VM_LOCK_ENTER()
#define RB_VM_LOCK_ENTER_LEV(levp)
#define RB_VM_LOCK_LEAVE()
#define RB_VM_LOCK_LEAVE_LEV(levp)
Internal header to suppress / mandate warnings.
int clock_gettime(clockid_t, struct timespec *)
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
int def(FILE *source, FILE *dest, int level)