Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#include <signal.h>
23
24#define sighandler_t ruby_sighandler_t
25
26#ifndef _WIN32
27#include <unistd.h>
28#include <sys/mman.h>
29#endif
30
31#include <setjmp.h>
32#include <stdarg.h>
33#include <stdio.h>
34
35#ifndef HAVE_MALLOC_USABLE_SIZE
36# ifdef _WIN32
37# define HAVE_MALLOC_USABLE_SIZE
38# define malloc_usable_size(a) _msize(a)
39# elif defined HAVE_MALLOC_SIZE
40# define HAVE_MALLOC_USABLE_SIZE
41# define malloc_usable_size(a) malloc_size(a)
42# endif
43#endif
44
45#ifdef HAVE_MALLOC_USABLE_SIZE
46# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
47# include RUBY_ALTERNATIVE_MALLOC_HEADER
48# elif HAVE_MALLOC_H
49# include <malloc.h>
50# elif defined(HAVE_MALLOC_NP_H)
51# include <malloc_np.h>
52# elif defined(HAVE_MALLOC_MALLOC_H)
53# include <malloc/malloc.h>
54# endif
55#endif
56
57#ifdef HAVE_SYS_TIME_H
58# include <sys/time.h>
59#endif
60
61#ifdef HAVE_SYS_RESOURCE_H
62# include <sys/resource.h>
63#endif
64
65#if defined _WIN32 || defined __CYGWIN__
66# include <windows.h>
67#elif defined(HAVE_POSIX_MEMALIGN)
68#elif defined(HAVE_MEMALIGN)
69# include <malloc.h>
70#endif
71
72#include <sys/types.h>
73
74#include "constant.h"
75#include "debug_counter.h"
76#include "eval_intern.h"
77#include "gc.h"
78#include "id_table.h"
79#include "internal.h"
80#include "internal/class.h"
81#include "internal/complex.h"
82#include "internal/cont.h"
83#include "internal/error.h"
84#include "internal/eval.h"
85#include "internal/gc.h"
86#include "internal/hash.h"
87#include "internal/imemo.h"
88#include "internal/io.h"
89#include "internal/numeric.h"
90#include "internal/object.h"
91#include "internal/proc.h"
92#include "internal/rational.h"
93#include "internal/sanitizers.h"
94#include "internal/struct.h"
95#include "internal/symbol.h"
96#include "internal/thread.h"
97#include "internal/variable.h"
98#include "internal/warnings.h"
99#include "mjit.h"
100#include "probes.h"
101#include "regint.h"
102#include "ruby/debug.h"
103#include "ruby/io.h"
104#include "ruby/re.h"
105#include "ruby/st.h"
106#include "ruby/thread.h"
107#include "ruby/util.h"
108#include "ruby_assert.h"
109#include "ruby_atomic.h"
110#include "symbol.h"
111#include "transient_heap.h"
112#include "vm_core.h"
113#include "vm_sync.h"
114#include "vm_callinfo.h"
115#include "ractor_core.h"
116
117#include "builtin.h"
118
119#define rb_setjmp(env) RUBY_SETJMP(env)
120#define rb_jmp_buf rb_jmpbuf_t
121#undef rb_data_object_wrap
122
123static inline struct rbimpl_size_mul_overflow_tag
124size_add_overflow(size_t x, size_t y)
125{
126 size_t z;
127 bool p;
128#if 0
129
130#elif __has_builtin(__builtin_add_overflow)
131 p = __builtin_add_overflow(x, y, &z);
132
133#elif defined(DSIZE_T)
134 RB_GNUC_EXTENSION DSIZE_T dx = x;
135 RB_GNUC_EXTENSION DSIZE_T dy = y;
136 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
137 p = dz > SIZE_MAX;
138 z = (size_t)dz;
139
140#else
141 z = x + y;
142 p = z < y;
143
144#endif
145 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
146}
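/* A worked example (not part of gc.c) of the portable fallback above:
 * unsigned addition wraps modulo 2^N, so the sum overflowed exactly when
 * the result is smaller than one of the operands. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    size_t x = SIZE_MAX - 1, y = 3;
    size_t z = x + y;                    /* wraps around to 1 */
    printf("overflowed: %d\n", z < y);   /* prints "overflowed: 1" */
    return 0;
}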
147
148static inline struct rbimpl_size_mul_overflow_tag
149size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
150{
151 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
152 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
153 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
154}
155
156static inline struct rbimpl_size_mul_overflow_tag
157size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
158{
159 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
160 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
161 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
162 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
163}
164
165PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
166
167static inline size_t
168size_mul_or_raise(size_t x, size_t y, VALUE exc)
169{
170 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
171 if (LIKELY(!t.left)) {
172 return t.right;
173 }
174 else if (rb_during_gc()) {
175 rb_memerror(); /* or...? */
176 }
177 else {
178 gc_raise(
179 exc,
180 "integer overflow: %"PRIuSIZE
181 " * %"PRIuSIZE
182 " > %"PRIuSIZE,
183 x, y, SIZE_MAX);
184 }
185}
186
187size_t
188rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
189{
190 return size_mul_or_raise(x, y, exc);
191}
192
193static inline size_t
194size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
195{
196 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
197 if (LIKELY(!t.left)) {
198 return t.right;
199 }
200 else if (rb_during_gc()) {
201 rb_memerror(); /* or...? */
202 }
203 else {
204 gc_raise(
205 exc,
206 "integer overflow: %"PRIuSIZE
207 " * %"PRIuSIZE
208 " + %"PRIuSIZE
209 " > %"PRIuSIZE,
210 x, y, z, SIZE_MAX);
211 }
212}
213
214size_t
215rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
216{
217 return size_mul_add_or_raise(x, y, z, exc);
218}
219
220static inline size_t
221size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
222{
223 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
224 if (LIKELY(!t.left)) {
225 return t.right;
226 }
227 else if (rb_during_gc()) {
228 rb_memerror(); /* or...? */
229 }
230 else {
231 gc_raise(
232 exc,
233 "integer overflow: %"PRIdSIZE
234 " * %"PRIdSIZE
235 " + %"PRIdSIZE
236 " * %"PRIdSIZE
237 " > %"PRIdSIZE,
238 x, y, z, w, SIZE_MAX);
239 }
240}
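/* Hedged usage sketch for the checked helpers above: compute
 * nelems * sizeof(VALUE) + sizeof(struct my_header) as a buffer size,
 * raising instead of silently wrapping. `nelems` and `struct my_header`
 * are assumed names, not part of gc.c. */
size_t nbytes = rb_size_mul_add_or_raise(nelems, sizeof(VALUE),
                                         sizeof(struct my_header), rb_eArgError);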
241
242#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
243/* trick the compiler into thinking an external signal handler uses this */
244volatile VALUE rb_gc_guarded_val;
245volatile VALUE *
246rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
247{
248 rb_gc_guarded_val = val;
249
250 return ptr;
251}
252#endif
253
254#ifndef GC_HEAP_INIT_SLOTS
255#define GC_HEAP_INIT_SLOTS 10000
256#endif
257#ifndef GC_HEAP_FREE_SLOTS
258#define GC_HEAP_FREE_SLOTS 4096
259#endif
260#ifndef GC_HEAP_GROWTH_FACTOR
261#define GC_HEAP_GROWTH_FACTOR 1.8
262#endif
263#ifndef GC_HEAP_GROWTH_MAX_SLOTS
264#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means no cap */
265#endif
266#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
267#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
268#endif
269
270#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
271#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
272#endif
273#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
274#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
275#endif
276#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
277#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
278#endif
279
280#ifndef GC_MALLOC_LIMIT_MIN
281#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
282#endif
283#ifndef GC_MALLOC_LIMIT_MAX
284#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
285#endif
286#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
287#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
288#endif
289
290#ifndef GC_OLDMALLOC_LIMIT_MIN
291#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
292#endif
293#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
294#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
295#endif
296#ifndef GC_OLDMALLOC_LIMIT_MAX
297#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
298#endif
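/* These compile-time defaults are the fallbacks for the corresponding
 * RUBY_GC_* environment variables, which override them at startup, e.g.:
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.25 ruby app.rb
 */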
299
300#ifndef PRINT_MEASURE_LINE
301#define PRINT_MEASURE_LINE 0
302#endif
303#ifndef PRINT_ENTER_EXIT_TICK
304#define PRINT_ENTER_EXIT_TICK 0
305#endif
306#ifndef PRINT_ROOT_TICKS
307#define PRINT_ROOT_TICKS 0
308#endif
309
310#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
311#define TICK_TYPE 1
312
313typedef struct {
318
323
327
331
334
335static ruby_gc_params_t gc_params = {
340
345
349
353
354 FALSE,
355};
356
357/* GC_DEBUG:
358 * enable to embed GC debugging information (allocation file/line) in each RVALUE.
359 */
360#ifndef GC_DEBUG
361#define GC_DEBUG 0
362#endif
363
364/* RGENGC_DEBUG:
365 * 1: basic information
366 * 2: remember set operation
367 * 3: mark
368 * 4:
369 * 5: sweep
370 */
371#ifndef RGENGC_DEBUG
372#ifdef RUBY_DEVEL
373#define RGENGC_DEBUG -1
374#else
375#define RGENGC_DEBUG 0
376#endif
377#endif
378#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
379# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
380#else
381# define RGENGC_DEBUG_ENABLED(level) 0
382#endif
384
385/* RGENGC_CHECK_MODE
386 * 0: disable all assertions
387 * 1: enable assertions (to debug RGenGC)
388 * 2: enable internal consistency check at each GC (for debugging)
389 * 3: enable internal consistency check at each GC step (for debugging)
390 * 4: enable liveness check
391 * 5: show all references
392 */
393#ifndef RGENGC_CHECK_MODE
394#define RGENGC_CHECK_MODE 0
395#endif
396
397// Note: using RUBY_ASSERT_WHEN() would expand the macro inside expr (info by nobu).
398#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
399
400/* RGENGC_OLD_NEWOBJ_CHECK
401 * 0: disable this check.
402 * >0: make an OLD object at object creation.
403 *
404 * Makes one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
405 */
406#ifndef RGENGC_OLD_NEWOBJ_CHECK
407#define RGENGC_OLD_NEWOBJ_CHECK 0
408#endif
409
410/* RGENGC_PROFILE
411 * 0: disable RGenGC profiling
412 * 1: enable profiling for basic information
413 * 2: enable profiling for each type
414 */
415#ifndef RGENGC_PROFILE
416#define RGENGC_PROFILE 0
417#endif
418
419/* RGENGC_ESTIMATE_OLDMALLOC
420 * Enable/disable estimating the increase in malloc'ed memory held by old objects.
421 * If the estimate exceeds its threshold, a full GC is invoked.
422 * 0: disable estimation.
423 * 1: enable estimation.
424 */
425#ifndef RGENGC_ESTIMATE_OLDMALLOC
426#define RGENGC_ESTIMATE_OLDMALLOC 1
427#endif
428
429/* RGENGC_FORCE_MAJOR_GC
430 * Force major/full GC if this macro is not 0.
431 */
432#ifndef RGENGC_FORCE_MAJOR_GC
433#define RGENGC_FORCE_MAJOR_GC 0
434#endif
435
436#ifndef GC_PROFILE_MORE_DETAIL
437#define GC_PROFILE_MORE_DETAIL 0
438#endif
439#ifndef GC_PROFILE_DETAIL_MEMORY
440#define GC_PROFILE_DETAIL_MEMORY 0
441#endif
442#ifndef GC_ENABLE_INCREMENTAL_MARK
443#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
444#endif
445#ifndef GC_ENABLE_LAZY_SWEEP
446#define GC_ENABLE_LAZY_SWEEP 1
447#endif
448#ifndef CALC_EXACT_MALLOC_SIZE
449#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
450#endif
451#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
452#ifndef MALLOC_ALLOCATED_SIZE
453#define MALLOC_ALLOCATED_SIZE 0
454#endif
455#else
456#define MALLOC_ALLOCATED_SIZE 0
457#endif
458#ifndef MALLOC_ALLOCATED_SIZE_CHECK
459#define MALLOC_ALLOCATED_SIZE_CHECK 0
460#endif
461
462#ifndef GC_DEBUG_STRESS_TO_CLASS
463#define GC_DEBUG_STRESS_TO_CLASS 0
464#endif
465
466#ifndef RGENGC_OBJ_INFO
467#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
468#endif
469
470typedef enum {
472 /* major reason */
477#if RGENGC_ESTIMATE_OLDMALLOC
478 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
479#endif
481
482 /* gc reason */
488
489 /* others */
495
500
501typedef struct gc_profile_record {
502 int flags;
503
504 double gc_time;
506
511
512#if GC_PROFILE_MORE_DETAIL
513 double gc_mark_time;
514 double gc_sweep_time;
515
516 size_t heap_use_pages;
517 size_t heap_live_objects;
518 size_t heap_free_objects;
519
520 size_t allocate_increase;
521 size_t allocate_limit;
522
523 double prepare_time;
524 size_t removing_objects;
525 size_t empty_objects;
526#if GC_PROFILE_DETAIL_MEMORY
527 long maxrss;
528 long minflt;
529 long majflt;
530#endif
531#endif
532#if MALLOC_ALLOCATED_SIZE
533 size_t allocated_size;
534#endif
535
536#if RGENGC_PROFILE > 0
537 size_t old_objects;
538 size_t remembered_normal_objects;
539 size_t remembered_shady_objects;
540#endif
542
543#define FL_FROM_FREELIST FL_USER0
544
545struct RMoved {
549};
550
551#define RMOVED(obj) ((struct RMoved *)(obj))
552
553#if defined(_MSC_VER) || defined(__CYGWIN__)
554#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
555#endif
556
557typedef struct RVALUE {
558 union {
559 struct {
560 VALUE flags; /* always 0 for freed obj */
561 struct RVALUE *next;
563 struct RMoved moved;
564 struct RBasic basic;
566 struct RClass klass;
569 struct RArray array;
571 struct RHash hash;
572 struct RData data;
576 struct RFile file;
577 struct RMatch match;
580 union {
582 struct vm_svar svar;
583 struct vm_throw_data throw_data;
584 struct vm_ifunc ifunc;
585 struct MEMO memo;
592 struct {
593 struct RBasic basic;
598 } as;
599#if GC_DEBUG
600 const char *file;
601 int line;
602#endif
604
605#if defined(_MSC_VER) || defined(__CYGWIN__)
606#pragma pack(pop)
607#endif
608
610enum {
611 BITS_SIZE = sizeof(bits_t),
614#define popcount_bits rb_popcount_intptr
615
618};
619
622 /* char gap[]; */
623 /* RVALUE values[]; */
624};
625
626struct gc_list {
628 struct gc_list *next;
629};
630
631#define STACK_CHUNK_SIZE 500
632
633typedef struct stack_chunk {
637
638typedef struct mark_stack {
641 int index;
642 int limit;
646
647typedef struct rb_heap_struct {
649 struct list_head pages;
650 struct heap_page *sweeping_page; /* iterator for .pages */
653#if GC_ENABLE_INCREMENTAL_MARK
654 struct heap_page *pooled_pages;
655#endif
656 size_t total_pages; /* total page count in a heap */
657 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
659
665
666typedef struct rb_objspace {
667 struct {
668 size_t limit;
669 size_t increase;
670#if MALLOC_ALLOCATED_SIZE
671 size_t allocated_size;
672 size_t allocations;
673#endif
675
676 struct {
677 unsigned int mode : 2;
678 unsigned int immediate_sweep : 1;
679 unsigned int dont_gc : 1;
680 unsigned int dont_incremental : 1;
681 unsigned int during_gc : 1;
682 unsigned int during_compacting : 1;
683 unsigned int gc_stressful: 1;
684 unsigned int has_hook: 1;
685 unsigned int during_minor_gc : 1;
686#if GC_ENABLE_INCREMENTAL_MARK
687 unsigned int during_incremental_marking : 1;
688#endif
689 } flags;
690
694
696 rb_heap_t tomb_heap; /* heap for zombies and ghosts */
697
698 struct {
699 rb_atomic_t finalizing;
700 } atomic_flags;
701
704
705 struct {
712
713 /* final */
716 } heap_pages;
717
719
720 struct {
721 int run;
726 size_t size;
727
728#if GC_PROFILE_MORE_DETAIL
729 double prepare_time;
730#endif
732
737#if RGENGC_PROFILE > 0
738 size_t total_generated_normal_object_count;
739 size_t total_generated_shady_object_count;
740 size_t total_shade_operation_count;
741 size_t total_promoted_count;
742 size_t total_remembered_normal_object_count;
743 size_t total_remembered_shady_object_count;
744
745#if RGENGC_PROFILE >= 2
746 size_t generated_normal_object_count_types[RUBY_T_MASK];
747 size_t generated_shady_object_count_types[RUBY_T_MASK];
748 size_t shade_operation_count_types[RUBY_T_MASK];
749 size_t promoted_types[RUBY_T_MASK];
750 size_t remembered_normal_object_count_types[RUBY_T_MASK];
751 size_t remembered_shady_object_count_types[RUBY_T_MASK];
752#endif
753#endif /* RGENGC_PROFILE */
754
755 /* temporary profiling space */
759
760 /* basic statistics */
761 size_t count;
765 } profile;
767
769
770 struct {
778
779#if RGENGC_ESTIMATE_OLDMALLOC
780 size_t oldmalloc_increase;
781 size_t oldmalloc_increase_limit;
782#endif
783
784#if RGENGC_CHECK_MODE >= 2
785 struct st_table *allrefs_table;
786 size_t error_count;
787#endif
788 } rgengc;
789
790 struct {
791 size_t considered_count_table[T_MASK];
792 size_t moved_count_table[T_MASK];
794 } rcompactor;
795
796#if GC_ENABLE_INCREMENTAL_MARK
797 struct {
798 size_t pooled_slots;
799 size_t step_slots;
800 } rincgc;
801#endif
802
805
806#if GC_DEBUG_STRESS_TO_CLASS
808#endif
810
811
812/* default tiny heap size: 16KB */
813#define HEAP_PAGE_ALIGN_LOG 14
814#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
815enum {
819 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
822 HEAP_PAGE_BITMAP_PLANES = 4 /* RGENGC: mark, unprotected, uncollectible, marking */
824
825struct heap_page {
830 struct {
831 unsigned int before_sweep : 1;
832 unsigned int has_remembered_objects : 1;
834 unsigned int in_tomb : 1;
836
840 struct list_node page_node;
841
843 /* the following three bitmaps are cleared at the beginning of full GC */
847
848 /* If set, the object is not movable */
850};
851
852#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
853#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
854#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
855
856#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
857#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
858#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
859#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
860
861/* Bitmap Operations */
862#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
863#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
864#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
865
866/* getting bitmap */
867#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
868#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
869#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
870#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
871#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
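/* Illustrative helper (not in gc.c) showing how the macros above compose:
 * an object's address selects its page-local slot number, bitmap word, and
 * bit, so a mark test is a single AND against the page's mark bitmap. */
static int
example_is_marked(VALUE obj)
{
    bits_t *bits = GET_HEAP_MARK_BITS(obj);   /* mark bitmap of obj's page */
    return MARKED_IN_BITMAP(bits, obj) != 0;  /* bits[slot / word] & (1 << slot % word) */
}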
872
873/* Aliases */
874#define rb_objspace (*rb_objspace_of(GET_VM()))
875#define rb_objspace_of(vm) ((vm)->objspace)
876
877#define ruby_initial_gc_stress gc_params.gc_stress
878
880
881#define malloc_limit objspace->malloc_params.limit
882#define malloc_increase objspace->malloc_params.increase
883#define malloc_allocated_size objspace->malloc_params.allocated_size
884#define heap_pages_sorted objspace->heap_pages.sorted
885#define heap_allocated_pages objspace->heap_pages.allocated_pages
886#define heap_pages_sorted_length objspace->heap_pages.sorted_length
887#define heap_pages_lomem objspace->heap_pages.range[0]
888#define heap_pages_himem objspace->heap_pages.range[1]
889#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
890#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
891#define heap_pages_final_slots objspace->heap_pages.final_slots
892#define heap_pages_deferred_final objspace->heap_pages.deferred_final
893#define heap_eden (&objspace->eden_heap)
894#define heap_tomb (&objspace->tomb_heap)
895#define during_gc objspace->flags.during_gc
896#define finalizing objspace->atomic_flags.finalizing
897#define finalizer_table objspace->finalizer_table
898#define global_list objspace->global_list
899#define ruby_gc_stressful objspace->flags.gc_stressful
900#define ruby_gc_stress_mode objspace->gc_stress_mode
901#if GC_DEBUG_STRESS_TO_CLASS
902#define stress_to_class objspace->stress_to_class
903#else
904#define stress_to_class 0
905#endif
906
907#if 0
908#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
909#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
910#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
911#define dont_gc_val() (objspace->flags.dont_gc)
912#else
913#define dont_gc_on() (objspace->flags.dont_gc = 1)
914#define dont_gc_off() (objspace->flags.dont_gc = 0)
915#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
916#define dont_gc_val() (objspace->flags.dont_gc)
917#endif
918
919static inline enum gc_mode
920gc_mode_verify(enum gc_mode mode)
921{
922#if RGENGC_CHECK_MODE > 0
923 switch (mode) {
924 case gc_mode_none:
925 case gc_mode_marking:
926 case gc_mode_sweeping:
927 break;
928 default:
929 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
930 }
931#endif
932 return mode;
933}
934
935#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
936#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
937
938#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
939#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
940#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
941#if GC_ENABLE_INCREMENTAL_MARK
942#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
943#else
944#define is_incremental_marking(objspace) FALSE
945#endif
946#if GC_ENABLE_INCREMENTAL_MARK
947#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
948#else
949#define will_be_incremental_marking(objspace) FALSE
950#endif
951#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
952#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
953
954#if SIZEOF_LONG == SIZEOF_VOIDP
955# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
956# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
957#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
958# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
959# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
960 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
961#else
962# error not supported
963#endif
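/* Worked example for the SIZEOF_LONG == SIZEOF_VOIDP branch above, with a
 * hypothetical heap address 0x7f00 (FIXNUM_FLAG is 1): */
VALUE id  = nonspecial_obj_id((VALUE)0x7f00);  /* 0x7f01, the Fixnum 0x7f00 >> 1 */
VALUE ref = obj_id_to_ref(id);                 /* clears the flag: 0x7f00 again */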
964
965#define RANY(o) ((RVALUE*)(o))
966
967struct RZombie {
968 struct RBasic basic;
970 void (*dfree)(void *);
971 void *data;
972};
973
974#define RZOMBIE(o) ((struct RZombie *)(o))
975
976#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
977
978#if RUBY_MARK_FREE_DEBUG
979int ruby_gc_debug_indent = 0;
980#endif
984
985void rb_iseq_mark(const rb_iseq_t *iseq);
987void rb_iseq_free(const rb_iseq_t *iseq);
988size_t rb_iseq_memsize(const rb_iseq_t *iseq);
989void rb_vm_update_references(void *ptr);
990
992
993static VALUE define_final0(VALUE obj, VALUE block);
994
995NORETURN(static void *gc_vraise(void *ptr));
996NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
997NORETURN(static void negative_size_allocation_error(const char *));
998
999static void init_mark_stack(mark_stack_t *stack);
1000
1001static int ready_to_gc(rb_objspace_t *objspace);
1002
1003static int garbage_collect(rb_objspace_t *, int reason);
1004
1005static int gc_start(rb_objspace_t *objspace, int reason);
1006static void gc_rest(rb_objspace_t *objspace);
1007
1015};
1016
1017static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1018static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1019
1020static void gc_marks(rb_objspace_t *objspace, int full_mark);
1021static void gc_marks_start(rb_objspace_t *objspace, int full);
1022static int gc_marks_finish(rb_objspace_t *objspace);
1023static void gc_marks_rest(rb_objspace_t *objspace);
1024static void gc_marks_step(rb_objspace_t *objspace, size_t slots);
1025static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1026
1027static void gc_sweep(rb_objspace_t *objspace);
1028static void gc_sweep_start(rb_objspace_t *objspace);
1029static void gc_sweep_finish(rb_objspace_t *objspace);
1030static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
1031static void gc_sweep_rest(rb_objspace_t *objspace);
1032static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1033
1034static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1035static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1036static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1037static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1038NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1039static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1040
1041static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1042static int gc_mark_stacked_objects_all(rb_objspace_t *);
1043static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1044
1045static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1046NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1047
1048static void push_mark_stack(mark_stack_t *, VALUE);
1049static int pop_mark_stack(mark_stack_t *, VALUE *);
1050static size_t mark_stack_size(mark_stack_t *stack);
1051static void shrink_stack_chunk_cache(mark_stack_t *stack);
1052
1053static size_t obj_memsize_of(VALUE obj, int use_all_types);
1054static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1055static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1056static int gc_verify_heap_pages(rb_objspace_t *objspace);
1057
1058static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1059static VALUE gc_disable_no_rest(rb_objspace_t *);
1060
1061static double getrusage_time(void);
1062static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
1063static inline void gc_prof_timer_start(rb_objspace_t *);
1064static inline void gc_prof_timer_stop(rb_objspace_t *);
1065static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1066static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1067static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1068static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1069static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1070static inline void gc_prof_set_heap_info(rb_objspace_t *);
1071
1072#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1073 if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1074 *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1075 } \
1076} while (0)
1077
1078#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
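/* Usage sketch: during compaction, rewrite a possibly-moved reference in
 * place. `ent->klass` is an assumed field name used only for illustration. */
UPDATE_IF_MOVED(objspace, ent->klass);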
1079
1080#define gc_prof_record(objspace) (objspace)->profile.current_record
1081#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1082
1083#ifdef HAVE_VA_ARGS_MACRO
1084# define gc_report(level, objspace, ...) \
1085 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1086#else
1087# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1088#endif
1089PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1090static const char *obj_info(VALUE obj);
1091static const char *obj_type_name(VALUE obj);
1092
1093/*
1094 * 1 - TSC (H/W Time Stamp Counter)
1095 * 2 - getrusage
1096 */
1097#ifndef TICK_TYPE
1098#define TICK_TYPE 1
1099#endif
1100
1101#if USE_TICK_T
1102
1103#if TICK_TYPE == 1
1104/* the following code is only for internal tuning. */
1105
1106/* Source code to use RDTSC is quoted and modified from
1107 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
1108 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1109 */
1110
1111#if defined(__GNUC__) && defined(__i386__)
1112typedef unsigned long long tick_t;
1113#define PRItick "llu"
1114static inline tick_t
1115tick(void)
1116{
1117 unsigned long long int x;
1118 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1119 return x;
1120}
1121
1122#elif defined(__GNUC__) && defined(__x86_64__)
1123typedef unsigned long long tick_t;
1124#define PRItick "llu"
1125
1126static __inline__ tick_t
1127tick(void)
1128{
1129 unsigned long hi, lo;
1130 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1131 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1132}
1133
1134#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1135typedef unsigned long long tick_t;
1136#define PRItick "llu"
1137
1138static __inline__ tick_t
1139tick(void)
1140{
1141 unsigned long long val = __builtin_ppc_get_timebase();
1142 return val;
1143}
1144
1145#elif defined(__aarch64__) && defined(__GNUC__)
1146typedef unsigned long tick_t;
1147#define PRItick "lu"
1148
1149static __inline__ tick_t
1150tick(void)
1151{
1152 unsigned long val;
1153 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1154 return val;
1155}
1156
1157
1158#elif defined(_WIN32) && defined(_MSC_VER)
1159#include <intrin.h>
1160typedef unsigned __int64 tick_t;
1161#define PRItick "llu"
1162
1163static inline tick_t
1164tick(void)
1165{
1166 return __rdtsc();
1167}
1168
1169#else /* use clock */
1170typedef clock_t tick_t;
1171#define PRItick "llu"
1172
1173static inline tick_t
1174tick(void)
1175{
1176 return clock();
1177}
1178#endif /* TSC */
1179
1180#elif TICK_TYPE == 2
1181typedef double tick_t;
1182#define PRItick "4.9f"
1183
1184static inline tick_t
1185tick(void)
1186{
1187 return getrusage_time();
1188}
1189#else /* TICK_TYPE */
1190#error "choose tick type"
1191#endif /* TICK_TYPE */
1192
1193#define MEASURE_LINE(expr) do { \
1194 volatile tick_t start_time = tick(); \
1195 volatile tick_t end_time; \
1196 expr; \
1197 end_time = tick(); \
1198 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1199} while (0)
1200
1201#else /* USE_TICK_T */
1202#define MEASURE_LINE(expr) expr
1203#endif /* USE_TICK_T */
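/* Usage sketch: when USE_TICK_T is set, wrapping a statement prints its
 * tick-count cost to stderr; otherwise the macro expands to the bare
 * statement. */
MEASURE_LINE(gc_sweep_step(objspace, heap));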
1204
1205static inline void *
1206asan_unpoison_object_temporary(VALUE obj)
1207{
1208 void *ptr = asan_poisoned_object_p(obj);
1209 asan_unpoison_object(obj, false);
1210 return ptr;
1211}
1212
1213#define FL_CHECK2(name, x, pred) \
1214 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1215 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1216#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1217#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1218#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1219
1220#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1221#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1222#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1223
1224#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1225#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1226#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1227
1228#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1229#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1230#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1231
1232#define RVALUE_OLD_AGE 3
1233#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1234
1235static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1236static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1237static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1238static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1239static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1240
1241static inline int
1242RVALUE_FLAGS_AGE(VALUE flags)
1243{
1244 return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1245}
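/* Worked example: the age is packed into the two FL_PROMOTED bits starting
 * at bit 5 (RVALUE_AGE_SHIFT), so flags with both FL_PROMOTED0 and
 * FL_PROMOTED1 set decode as
 *   (flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> 5 == 3 == RVALUE_OLD_AGE. */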
1246
1247static int
1248check_rvalue_consistency_force(const VALUE obj, int terminate)
1249{
1250 int err = 0;
1251 rb_objspace_t *objspace = &rb_objspace;
1252
1254 {
1255 if (SPECIAL_CONST_P(obj)) {
1256 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1257 err++;
1258 }
1259 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1260 /* check if it is in tomb_pages */
1261 struct heap_page *page = NULL;
1262 list_for_each(&heap_tomb->pages, page, page_node) {
1263 if (&page->start[0] <= (RVALUE *)obj &&
1264 (RVALUE *)obj < &page->start[page->total_slots]) {
1265 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1266 (void *)obj, (void *)page);
1267 err++;
1268 goto skip;
1269 }
1270 }
1271 bp();
1272 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1273 err++;
1274 skip:
1275 ;
1276 }
1277 else {
1278 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1279 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1280 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1281 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1282 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1283
1284 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1285 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1286 err++;
1287 }
1288 if (BUILTIN_TYPE(obj) == T_NONE) {
1289 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1290 err++;
1291 }
1292 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1293 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1294 err++;
1295 }
1296
1297 obj_memsize_of((VALUE)obj, FALSE);
1298
1299 /* check generation
1300 *
1301 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1302 */
1303 if (age > 0 && wb_unprotected_bit) {
1304 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1305 err++;
1306 }
1307
1308 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1309 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1310 err++;
1311 }
1312
1313 if (!is_full_marking(objspace)) {
1314 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1315 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1316 obj_info(obj), age);
1317 err++;
1318 }
1319 if (remembered_bit && age != RVALUE_OLD_AGE) {
1320 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1321 obj_info(obj), age);
1322 err++;
1323 }
1324 }
1325
1326 /*
1327 * check coloring
1328 *
1329 * marking:false marking:true
1330 * marked:false white *invalid*
1331 * marked:true black grey
1332 */
1333 if (is_incremental_marking(objspace) && marking_bit) {
1334 if (!is_marking(objspace) && !mark_bit) {
1335 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1336 err++;
1337 }
1338 }
1339 }
1340 }
1342
1343 if (err > 0 && terminate) {
1344 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1345 }
1346 return err;
1347}
1348
1349#if RGENGC_CHECK_MODE == 0
1350static inline VALUE
1351check_rvalue_consistency(const VALUE obj)
1352{
1353 return obj;
1354}
1355#else
1356static VALUE
1357check_rvalue_consistency(const VALUE obj)
1358{
1359 check_rvalue_consistency_force(obj, TRUE);
1360 return obj;
1361}
1362#endif
1363
1364static inline int
1365gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1366{
1367 if (RB_SPECIAL_CONST_P(obj)) {
1368 return FALSE;
1369 }
1370 else {
1371 void *poisoned = asan_poisoned_object_p(obj);
1372 asan_unpoison_object(obj, false);
1373
1374 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1375 /* Re-poison slot if it's not the one we want */
1376 if (poisoned) {
1377 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1378 asan_poison_object(obj);
1379 }
1380 return ret;
1381 }
1382}
1383
1384static inline int
1385RVALUE_MARKED(VALUE obj)
1386{
1387 check_rvalue_consistency(obj);
1388 return RVALUE_MARK_BITMAP(obj) != 0;
1389}
1390
1391static inline int
1392RVALUE_PINNED(VALUE obj)
1393{
1394 check_rvalue_consistency(obj);
1395 return RVALUE_PIN_BITMAP(obj) != 0;
1396}
1397
1398static inline int
1399RVALUE_WB_UNPROTECTED(VALUE obj)
1400{
1401 check_rvalue_consistency(obj);
1402 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1403}
1404
1405static inline int
1406RVALUE_MARKING(VALUE obj)
1407{
1408 check_rvalue_consistency(obj);
1409 return RVALUE_MARKING_BITMAP(obj) != 0;
1410}
1411
1412static inline int
1413RVALUE_REMEMBERED(VALUE obj)
1414{
1415 check_rvalue_consistency(obj);
1416 return RVALUE_MARKING_BITMAP(obj) != 0;
1417}
1418
1419static inline int
1420RVALUE_UNCOLLECTIBLE(VALUE obj)
1421{
1422 check_rvalue_consistency(obj);
1423 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1424}
1425
1426static inline int
1427RVALUE_OLD_P_RAW(VALUE obj)
1428{
1429 const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1430 return (RBASIC(obj)->flags & promoted) == promoted;
1431}
1432
1433static inline int
1434RVALUE_OLD_P(VALUE obj)
1435{
1436 check_rvalue_consistency(obj);
1437 return RVALUE_OLD_P_RAW(obj);
1438}
1439
1440#if RGENGC_CHECK_MODE || GC_DEBUG
1441static inline int
1442RVALUE_AGE(VALUE obj)
1443{
1444 check_rvalue_consistency(obj);
1445 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1446}
1447#endif
1448
1449static inline void
1450RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1451{
1452 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1453 objspace->rgengc.old_objects++;
1455
1456#if RGENGC_PROFILE >= 2
1457 objspace->profile.total_promoted_count++;
1458 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1459#endif
1460}
1461
1462static inline void
1463RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1464{
1465 RB_DEBUG_COUNTER_INC(obj_promote);
1466 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1467}
1468
1469static inline VALUE
1470RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1471{
1473 flags |= (age << RVALUE_AGE_SHIFT);
1474 return flags;
1475}
1476
1477/* set age to age+1 */
1478static inline void
1479RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1480{
1481 VALUE flags = RBASIC(obj)->flags;
1482 int age = RVALUE_FLAGS_AGE(flags);
1483
1484 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1485 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1486 }
1487
1488 age++;
1489 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1490
1491 if (age == RVALUE_OLD_AGE) {
1492 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1493 }
1494 check_rvalue_consistency(obj);
1495}
1496
1497/* set age to RVALUE_OLD_AGE */
1498static inline void
1499RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1500{
1501 check_rvalue_consistency(obj);
1502 GC_ASSERT(!RVALUE_OLD_P(obj));
1503
1504 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1505 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1506
1507 check_rvalue_consistency(obj);
1508}
1509
1510/* set age to RVALUE_OLD_AGE - 1 */
1511static inline void
1512RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1513{
1514 check_rvalue_consistency(obj);
1515 GC_ASSERT(!RVALUE_OLD_P(obj));
1516
1517 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1518
1519 check_rvalue_consistency(obj);
1520}
1521
1522static inline void
1523RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1524{
1525 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1527}
1528
1529static inline void
1530RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1531{
1532 check_rvalue_consistency(obj);
1533 GC_ASSERT(RVALUE_OLD_P(obj));
1534
1535 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1536 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1537 }
1538
1539 RVALUE_DEMOTE_RAW(objspace, obj);
1540
1541 if (RVALUE_MARKED(obj)) {
1542 objspace->rgengc.old_objects--;
1543 }
1544
1545 check_rvalue_consistency(obj);
1546}
1547
1548static inline void
1549RVALUE_AGE_RESET_RAW(VALUE obj)
1550{
1551 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1552}
1553
1554static inline void
1555RVALUE_AGE_RESET(VALUE obj)
1556{
1557 check_rvalue_consistency(obj);
1558 GC_ASSERT(!RVALUE_OLD_P(obj));
1559
1560 RVALUE_AGE_RESET_RAW(obj);
1561 check_rvalue_consistency(obj);
1562}
1563
1564static inline int
1565RVALUE_BLACK_P(VALUE obj)
1566{
1567 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1568}
1569
1570#if 0
1571static inline int
1572RVALUE_GREY_P(VALUE obj)
1573{
1574 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1575}
1576#endif
1577
1578static inline int
1579RVALUE_WHITE_P(VALUE obj)
1580{
1581 return RVALUE_MARKED(obj) == FALSE;
1582}
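/* These predicates encode the tri-color abstraction used by the incremental
 * marker (cf. the coloring table in check_rvalue_consistency_force):
 *   marked=0, marking=0 -> white (not yet reached)
 *   marked=1, marking=1 -> grey  (reached, children still to scan)
 *   marked=1, marking=0 -> black (reached, children scanned) */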
1583
1584/*
1585 --------------------------- ObjectSpace -----------------------------
1586*/
1587
1588static inline void *
1589calloc1(size_t n)
1590{
1591 return calloc(1, n);
1592}
1593
1594rb_objspace_t *
1595rb_objspace_alloc(void)
1596{
1597 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1598 malloc_limit = gc_params.malloc_limit_min;
1599 list_head_init(&objspace->eden_heap.pages);
1600 list_head_init(&objspace->tomb_heap.pages);
1601 dont_gc_on();
1602
1603 return objspace;
1604}
1605
1606static void free_stack_chunks(mark_stack_t *);
1607static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1608
1609void
1610rb_objspace_free(rb_objspace_t *objspace)
1611{
1612 if (is_lazy_sweeping(heap_eden))
1613 rb_bug("lazy sweeping underway when freeing object space");
1614
1615 if (objspace->profile.records) {
1616 free(objspace->profile.records);
1617 objspace->profile.records = 0;
1618 }
1619
1620 if (global_list) {
1621 struct gc_list *list, *next;
1622 for (list = global_list; list; list = next) {
1623 next = list->next;
1624 xfree(list);
1625 }
1626 }
1627 if (heap_pages_sorted) {
1628 size_t i;
1629 for (i = 0; i < heap_allocated_pages; ++i) {
1630 heap_page_free(objspace, heap_pages_sorted[i]);
1631 }
1635 heap_pages_lomem = 0;
1636 heap_pages_himem = 0;
1637
1638 objspace->eden_heap.total_pages = 0;
1639 objspace->eden_heap.total_slots = 0;
1640 }
1641 st_free_table(objspace->id_to_obj_tbl);
1642 st_free_table(objspace->obj_to_id_tbl);
1643 free_stack_chunks(&objspace->mark_stack);
1644 free(objspace);
1645}
1646
1647static void
1648heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1649{
1650 struct heap_page **sorted;
1651 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1652
1653 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1654 next_length, size);
1655
1656 if (heap_pages_sorted_length > 0) {
1657 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1658 if (sorted) heap_pages_sorted = sorted;
1659 }
1660 else {
1661 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1662 }
1663
1664 if (sorted == 0) {
1665 rb_memerror();
1666 }
1667
1668 heap_pages_sorted_length = next_length;
1669}
1670
1671static void
1672heap_pages_expand_sorted(rb_objspace_t *objspace)
1673{
1674 /* Usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1675 * because heap_allocatable_pages already accounts for heap_tomb->total_pages
1676 * (tomb pages are recycled). However, pages without empty slots cannot be
1677 * recycled, so also reserve room for heap_tomb->total_pages extra pages.
1678 */
1679 size_t next_length = heap_allocatable_pages;
1680 next_length += heap_eden->total_pages;
1681 next_length += heap_tomb->total_pages;
1682
1683 if (next_length > heap_pages_sorted_length) {
1684 heap_pages_expand_sorted_to(objspace, next_length);
1685 }
1686
1689}
1690
1691static void
1692heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
1693{
1695 heap_pages_expand_sorted(objspace);
1696}
1697
1698static inline void
1699heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1700{
1702
1703 RVALUE *p = (RVALUE *)obj;
1704
1705 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1706
1707 p->as.free.flags = 0;
1708 p->as.free.next = page->freelist;
1709 page->freelist = p;
1710 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1711
1712 if (RGENGC_CHECK_MODE &&
1713 /* obj should belong to page */
1714 !(&page->start[0] <= (RVALUE *)obj &&
1715 (RVALUE *)obj < &page->start[page->total_slots] &&
1716 obj % sizeof(RVALUE) == 0)) {
1717 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1718 }
1719
1720 asan_poison_object(obj);
1721 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1722}
1723
1724static inline void
1725heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1726{
1727 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1728 GC_ASSERT(page->free_slots != 0);
1729 GC_ASSERT(page->freelist != NULL);
1730
1731 page->free_next = heap->free_pages;
1732 heap->free_pages = page;
1733
1734 RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
1735
1736 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1737}
1738
1739#if GC_ENABLE_INCREMENTAL_MARK
1740static inline void
1741heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1742{
1743 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1744 GC_ASSERT(page->free_slots != 0);
1745 GC_ASSERT(page->freelist != NULL);
1746
1747 page->free_next = heap->pooled_pages;
1748 heap->pooled_pages = page;
1749 objspace->rincgc.pooled_slots += page->free_slots;
1750
1751 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1752}
1753#endif
1754
1755static void
1756heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1757{
1758 list_del(&page->page_node);
1759 heap->total_pages--;
1760 heap->total_slots -= page->total_slots;
1761}
1762
1763static void rb_aligned_free(void *ptr);
1764
1765static void
1766heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1767{
1769 objspace->profile.total_freed_pages++;
1770 rb_aligned_free(GET_PAGE_BODY(page->start));
1771 free(page);
1772}
1773
1774static void
1775heap_pages_free_unused_pages(rb_objspace_t *objspace)
1776{
1777 size_t i, j;
1778
1779 if (!list_empty(&heap_tomb->pages)) {
1780 for (i = j = 1; j < heap_allocated_pages; i++) {
1781 struct heap_page *page = heap_pages_sorted[i];
1782
1783 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1784 heap_unlink_page(objspace, heap_tomb, page);
1785 heap_page_free(objspace, page);
1786 }
1787 else {
1788 if (i != j) {
1789 heap_pages_sorted[j] = page;
1790 }
1791 j++;
1792 }
1793 }
1794
1795 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
1796 RVALUE *himem = hipage->start + hipage->total_slots;
1797 GC_ASSERT(himem <= heap_pages_himem);
1798 heap_pages_himem = himem;
1799
1801 }
1802}
1803
1804static struct heap_page *
1805heap_page_allocate(rb_objspace_t *objspace)
1806{
1807 RVALUE *start, *end, *p;
1808 struct heap_page *page;
1809 struct heap_page_body *page_body = 0;
1810 size_t hi, lo, mid;
1811 int limit = HEAP_PAGE_OBJ_LIMIT;
1812
1813 /* assign heap_page body (contains heap_page_header and RVALUEs) */
1814 page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1815 if (page_body == 0) {
1816 rb_memerror();
1817 }
1818
1819 /* assign heap_page entry */
1820 page = calloc1(sizeof(struct heap_page));
1821 if (page == 0) {
1822 rb_aligned_free(page_body);
1823 rb_memerror();
1824 }
1825
1826 /* adjust obj_limit (object number available in this page) */
1827 start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
1828 if ((VALUE)start % sizeof(RVALUE) != 0) {
1829 int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
1830 start = (RVALUE*)((VALUE)start + delta);
1831 limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
1832 }
1833 end = start + limit;
1834
1835 /* setup heap_pages_sorted */
1836 lo = 0;
1837 hi = heap_allocated_pages;
1838 while (lo < hi) {
1839 struct heap_page *mid_page;
1840
1841 mid = (lo + hi) / 2;
1842 mid_page = heap_pages_sorted[mid];
1843 if (mid_page->start < start) {
1844 lo = mid + 1;
1845 }
1846 else if (mid_page->start > start) {
1847 hi = mid;
1848 }
1849 else {
1850 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1851 }
1852 }
1853
1854 if (hi < heap_allocated_pages) {
1856 }
1857
1858 heap_pages_sorted[hi] = page;
1859
1861
1863 GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);
1865
1866 objspace->profile.total_allocated_pages++;
1867
1869 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
1871 }
1872
1873 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1874 if (heap_pages_himem < end) heap_pages_himem = end;
1875
1876 page->start = start;
1877 page->total_slots = limit;
1878 page_body->header.page = page;
1879
1880 for (p = start; p != end; p++) {
1881 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
1882 heap_page_add_freeobj(objspace, page, (VALUE)p);
1883 }
1884 page->free_slots = limit;
1885
1886 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1887 return page;
1888}
1889
1890static struct heap_page *
1891heap_page_resurrect(rb_objspace_t *objspace)
1892{
1893 struct heap_page *page = 0, *next;
1894
1895 list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
1896 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1897 if (page->freelist != NULL) {
1898 heap_unlink_page(objspace, heap_tomb, page);
1899 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1900 return page;
1901 }
1902 }
1903
1904 return NULL;
1905}
1906
1907static struct heap_page *
1908heap_page_create(rb_objspace_t *objspace)
1909{
1910 struct heap_page *page;
1911 const char *method = "recycle";
1912
1914
1915 page = heap_page_resurrect(objspace);
1916
1917 if (page == NULL) {
1918 page = heap_page_allocate(objspace);
1919 method = "allocate";
1920 }
1921 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
1922 "heap_allocated_pages: %"PRIdSIZE", "
1923 "heap_allocated_pages: %"PRIdSIZE", "
1924 "tomb->total_pages: %"PRIdSIZE"\n",
1925 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, heap_tomb->total_pages);
1926 return page;
1927}
1928
1929static void
1930heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1931{
1932 /* Adding to eden heap during incremental sweeping is forbidden */
1933 GC_ASSERT(!(heap == heap_eden && heap->sweeping_page));
1934 page->flags.in_tomb = (heap == heap_tomb);
1935 list_add_tail(&heap->pages, &page->page_node);
1936 heap->total_pages++;
1937 heap->total_slots += page->total_slots;
1938}
1939
1940static void
1941heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
1942{
1943 struct heap_page *page = heap_page_create(objspace);
1944 heap_add_page(objspace, heap, page);
1945 heap_add_freepage(heap, page);
1946}
1947
1948static void
1949heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
1950{
1951 size_t i;
1952
1953 heap_allocatable_pages_set(objspace, add);
1954
1955 for (i = 0; i < add; i++) {
1956 heap_assign_page(objspace, heap);
1957 }
1958
1960}
1961
1962static size_t
1963heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
1964{
1965 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1966 size_t used = heap_allocated_pages - heap_tomb->total_pages;
1967 size_t next_used;
1968
1969 if (goal_ratio == 0.0) {
1970 next_used = (size_t)(used * gc_params.growth_factor);
1971 }
1972 else {
1973 /* Find `f' where free_slots = f * total_slots * goal_ratio
1974 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
1975 */
1976 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1977
1978 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1979 if (f < 1.0) f = 1.1;
1980
1981 next_used = (size_t)(f * used);
1982
1983 if (0) {
1984 fprintf(stderr,
1985 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1986 " G(%1.2f), f(%1.2f),"
1987 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
1989 goal_ratio, f, used, next_used);
1990 }
1991 }
1992
1993 if (gc_params.growth_max_slots > 0) {
1994 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
1995 if (next_used > max_used) next_used = max_used;
1996 }
1997
1998 return next_used - used;
1999}
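/* Worked example of the sizing rule above with the default
 * GC_HEAP_FREE_SLOTS_GOAL_RATIO of 0.40: for total_slots = 10000 and
 * free_slots = 2000,
 *   f = (10000 - 2000) / ((1 - 0.40) * 10000) = 8000 / 6000 ~= 1.33,
 * so the heap grows to about 1.33x its current page count; f is capped at
 * gc_params.growth_factor and bumped to 1.1 whenever it falls below 1.0. */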
2000
2001static void
2002heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
2003{
2004 size_t used = heap_eden->total_pages;
2005 size_t next_used_limit = used + additional_pages;
2006
2007 if (next_used_limit == heap_allocated_pages) next_used_limit++;
2008
2009 heap_allocatable_pages_set(objspace, next_used_limit - used);
2010
2011 gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %"PRIdSIZE"\n",
2013}
2014
2015static int
2016heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
2017{
2018 if (heap_allocatable_pages > 0) {
2019 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2020 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2022
2025
2026 heap_assign_page(objspace, heap);
2027 return TRUE;
2028 }
2029 return FALSE;
2030}
2031
2032static void
2033heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
2034{
2035 GC_ASSERT(heap->free_pages == NULL);
2036
2037 if (is_lazy_sweeping(heap)) {
2038 gc_sweep_continue(objspace, heap);
2039 }
2040 else if (is_incremental_marking(objspace)) {
2041 gc_marks_continue(objspace, heap);
2042 }
2043
2044 if (heap->free_pages == NULL &&
2045 (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
2046 gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2047 rb_memerror();
2048 }
2049}
2050
2051void
2052rb_objspace_set_event_hook(const rb_event_flag_t event)
2053{
2054 rb_objspace_t *objspace = &rb_objspace;
2055 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2056 objspace->flags.has_hook = (objspace->hook_events != 0);
2057}
2058
2059static void
2060gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2061{
2062 const VALUE *pc = ec->cfp->pc;
2063 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2064 /* increment PC because source line is calculated with PC-1 */
2065 ec->cfp->pc++;
2066 }
2067 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2068 ec->cfp->pc = pc;
2069}
2070
2071#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2072#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2073
2074#define gc_event_hook_prep(objspace, event, data, prep) do { \
2075 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2076 prep; \
2077 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2078 } \
2079} while (0)
2080
2081#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
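/* Usage sketch: fire a GC-internal event only if a hook is installed.
 * gc_event_hook_prep() additionally runs `prep` (e.g. filling the object's
 * fields) before invoking the hook, as newobj_slowpath() does below. */
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);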
2082
2083static inline VALUE
2084newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2085{
2086#if !__has_feature(memory_sanitizer)
2087 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2089#endif
2090 RVALUE *p = RANY(obj);
2091 p->as.basic.flags = flags;
2092 *((VALUE *)&p->as.basic.klass) = klass;
2093
2094#if RACTOR_CHECK_MODE
2095 rb_ractor_setup_belonging(obj);
2096#endif
2097
2098#if RGENGC_CHECK_MODE
2099 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2100
2102 {
2103 check_rvalue_consistency(obj);
2104
2105 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2106 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2107 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2108 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2109
2110 if (flags & FL_PROMOTED1) {
2111 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2112 }
2113 else {
2114 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2115 }
2116 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2117 }
2119#endif
2120
2121 if (UNLIKELY(wb_protected == FALSE)) {
2124 }
2125
2126 // TODO: make it atomic, or ractor local
2127 objspace->total_allocated_objects++;
2128
2129#if RGENGC_PROFILE
2130 if (wb_protected) {
2131 objspace->profile.total_generated_normal_object_count++;
2132#if RGENGC_PROFILE >= 2
2133 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2134#endif
2135 }
2136 else {
2137 objspace->profile.total_generated_shady_object_count++;
2138#if RGENGC_PROFILE >= 2
2139 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2140#endif
2141 }
2142#endif
2143
2144#if GC_DEBUG
2145 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2146 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2147#endif
2148
2149 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2150
2151#if RGENGC_OLD_NEWOBJ_CHECK > 0
2152 {
2153 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2154
2155 if (!is_incremental_marking(objspace) &&
2156 flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2157 ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2158 if (--newobj_cnt == 0) {
2159 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2160
2161 gc_mark_set(objspace, obj);
2162 RVALUE_AGE_SET_OLD(objspace, obj);
2163
2164 rb_gc_writebarrier_remember(obj);
2165 }
2166 }
2167 }
2168#endif
2169 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2170 return obj;
2171}
2172
2173static inline VALUE
2174ractor_cached_freeobj(rb_objspace_t *objspace, rb_ractor_t *cr)
2175{
2176 RVALUE *p = cr->newobj_cache.freelist;
2177
2178 if (p) {
2179 VALUE obj = (VALUE)p;
2180 cr->newobj_cache.freelist = p->as.free.next;
2181 asan_unpoison_object(obj, true);
2182 return obj;
2183 }
2184 else {
2185 return Qfalse;
2186 }
2187}
2188
2189static struct heap_page *
2190heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
2191{
2192 ASSERT_vm_locking();
2193
2194 struct heap_page *page;
2195
2196 while (heap->free_pages == NULL) {
2197 heap_prepare(objspace, heap);
2198 }
2199 page = heap->free_pages;
2200 heap->free_pages = page->free_next;
2201
2202 GC_ASSERT(page->free_slots != 0);
2203 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", page, page->freelist, page->free_slots);
2204
2205 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2206
2207 return page;
2208}
2209
2210static inline void
2211ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr)
2212{
2213 ASSERT_vm_locking();
2214 GC_ASSERT(cr->newobj_cache.freelist == NULL);
2215
2216 struct heap_page *page = heap_next_freepage(objspace, heap_eden);
2217
2218 cr->newobj_cache.using_page = page;
2219 cr->newobj_cache.freelist = page->freelist;
2220 page->free_slots = 0;
2221 page->freelist = NULL;
2222
2223 GC_ASSERT(RB_TYPE_P((VALUE)cr->newobj_cache.freelist, T_NONE));
2224}
2225
2226static inline VALUE
2227newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2228{
2229 RVALUE *p = (RVALUE *)obj;
2230 p->as.values.v1 = v1;
2231 p->as.values.v2 = v2;
2232 p->as.values.v3 = v3;
2233 return obj;
2234}
2235
2236ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected));
2237
2238static inline VALUE
2239newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected)
2240{
2241 VALUE obj;
2242 unsigned int lev;
2243
2244 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2245 {
2246 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2247 if (during_gc) {
2248 dont_gc_on();
2249 during_gc = 0;
2250 rb_bug("object allocation during garbage collection phase");
2251 }
2252
2253 if (ruby_gc_stressful) {
2254 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2255 rb_memerror();
2256 }
2257 }
2258 }
2259
2260 // allocate new slot
2261 while ((obj = ractor_cached_freeobj(objspace, cr)) == Qfalse) {
2262 ractor_cache_slots(objspace, cr);
2263 }
2264 GC_ASSERT(obj != 0);
2265 newobj_init(klass, flags, wb_protected, objspace, obj);
2266 gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));
2267 }
2268 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2269
2270 return obj;
2271}
2272
2273NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2274 rb_objspace_t *objspace, rb_ractor_t *cr));
2275NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2276 rb_objspace_t *objspace, rb_ractor_t *cr));
2277
2278static VALUE
2279newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr)
2280{
2281 return newobj_slowpath(klass, flags, objspace, cr, TRUE);
2282}
2283
2284static VALUE
2285newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr)
2286{
2287 return newobj_slowpath(klass, flags, objspace, cr, FALSE);
2288}
2289
2290static inline VALUE
2291newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr)
2292{
2293 VALUE obj;
2294 rb_objspace_t *objspace = &rb_objspace;
2295
2296 RB_DEBUG_COUNTER_INC(obj_newobj);
2297 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2298
2299#if GC_DEBUG_STRESS_TO_CLASS
2300 if (UNLIKELY(stress_to_class)) {
2301 long i, cnt = RARRAY_LEN(stress_to_class);
2302 for (i = 0; i < cnt; ++i) {
2303 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2304 }
2305 }
2306#endif
2307
2308 if ((!UNLIKELY(during_gc ||
2309 ruby_gc_stressful ||
2310 gc_event_hook_available_p(objspace)) &&
2311 wb_protected &&
2312 (obj = ractor_cached_freeobj(objspace, cr)) != Qfalse)) {
2313
2314 newobj_init(klass, flags, wb_protected, objspace, obj);
2315 }
2316 else {
2317 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2318
2319 obj = wb_protected ?
2320 newobj_slowpath_wb_protected(klass, flags, objspace, cr) :
2321 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr);
2322 }
2323
2324 return obj;
2325}
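/* A sketch of the decision newobj_of0() just made (pseudo-C, not part of
 * the original source). The inline fast path requires all four conditions;
 * anything else falls through to the locked slow path:
 *
 *   if (!during_gc && !ruby_gc_stressful && !gc_event_hook_available_p(objspace)
 *       && wb_protected
 *       && (obj = ractor_cached_freeobj(objspace, cr)) != Qfalse) {
 *       // lock-free: pop a slot from this ractor's freelist cache
 *   }
 *   else {
 *       // newobj_slowpath_*(): takes the VM lock, may refill the cache
 *       // or trigger a GC before retrying
 *   }
 */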
2326
2327static inline VALUE
2328newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
2329{
2330 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR());
2331 return newobj_fill(obj, v1, v2, v3);
2332}
2333
2334static inline VALUE
2335newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
2336{
2337 VALUE obj = newobj_of0(klass, flags, wb_protected, cr);
2338 return newobj_fill(obj, v1, v2, v3);
2339}
2340
2341VALUE
2342 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
2343 {
2344 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2345 return newobj_of(klass, flags, 0, 0, 0, FALSE);
2346}
2347
2348VALUE
2349 rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
2350 {
2351 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2352 return newobj_of(klass, flags, 0, 0, 0, TRUE);
2353}
2354
2355VALUE
2356 rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags)
2357 {
2358 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2359 return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE);
2360}
2361
2362/* for compatibility */
2363
2364VALUE
2365 rb_newobj(void)
2366 {
2367 return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
2368}
2369
2370VALUE
2371 rb_newobj_of(VALUE klass, VALUE flags)
2372 {
2373 if ((flags & RUBY_T_MASK) == T_OBJECT) {
2374 return newobj_of(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED , Qundef, Qundef, Qundef, flags & FL_WB_PROTECTED);
2375 } else {
2376 return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
2377 }
2378}
2379
2380#define UNEXPECTED_NODE(func) \
2381 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2382 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2383
2384const char *
2385 rb_imemo_name(enum imemo_type type)
2386 {
2387 // put no default case to get a warning if an imemo type is missing
2388 switch (type) {
2389#define IMEMO_NAME(x) case imemo_##x: return #x;
2390 IMEMO_NAME(env);
2391 IMEMO_NAME(cref);
2392 IMEMO_NAME(svar);
2393 IMEMO_NAME(throw_data);
2394 IMEMO_NAME(ifunc);
2395 IMEMO_NAME(memo);
2396 IMEMO_NAME(ment);
2397 IMEMO_NAME(iseq);
2398 IMEMO_NAME(tmpbuf);
2399 IMEMO_NAME(ast);
2400 IMEMO_NAME(parser_strterm);
2401 IMEMO_NAME(callinfo);
2402 IMEMO_NAME(callcache);
2403 IMEMO_NAME(constcache);
2404#undef IMEMO_NAME
2405 }
2406 return "unknown";
2407}
2408
2409#undef rb_imemo_new
2410
2411VALUE
2412 rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2413 {
2414 VALUE flags = T_IMEMO | (type << FL_USHIFT);
2415 return newobj_of(v0, flags, v1, v2, v3, TRUE);
2416}
2417
2418static VALUE
2419rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2420{
2421 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
2422 return newobj_of(v0, flags, v1, v2, v3, FALSE);
2423}
2424
2425static VALUE
2426rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2427{
2428 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2429}
2430
2431 rb_imemo_tmpbuf_t *
2432 rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
2433 {
2434 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2435}
2436
2437static size_t
2438imemo_memsize(VALUE obj)
2439{
2440 size_t size = 0;
2441 switch (imemo_type(obj)) {
2442 case imemo_ment:
2443 size += sizeof(RANY(obj)->as.imemo.ment.def);
2444 break;
2445 case imemo_iseq:
2446 size += rb_iseq_memsize((rb_iseq_t *)obj);
2447 break;
2448 case imemo_env:
2449 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2450 break;
2451 case imemo_tmpbuf:
2452 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2453 break;
2454 case imemo_ast:
2455 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2456 break;
2457 case imemo_cref:
2458 case imemo_svar:
2459 case imemo_throw_data:
2460 case imemo_ifunc:
2461 case imemo_memo:
2462 case imemo_parser_strterm:
2463 break;
2464 default:
2465 /* unreachable */
2466 break;
2467 }
2468 return size;
2469}
2470
2471#if IMEMO_DEBUG
2472VALUE
2473rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2474{
2475 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2476 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2477 return memo;
2478}
2479#endif
2480
2481VALUE
2482 rb_class_allocate_instance(VALUE klass)
2483 {
2484 VALUE flags = T_OBJECT | ROBJECT_EMBED;
2485 return newobj_of(klass, flags, Qundef, Qundef, Qundef, RGENGC_WB_PROTECTED_OBJECT);
2486}
2487
2488VALUE
2489 rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2490 {
2491 RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
2492 if (klass) Check_Type(klass, T_CLASS);
2493 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE);
2494}
2495
2496VALUE
2497 rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2498 {
2499 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2500 DATA_PTR(obj) = xcalloc(1, size);
2501 return obj;
2502}
2503
2504VALUE
2505 rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
2506 {
2508 if (klass) Check_Type(klass, T_CLASS);
2509 return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED);
2510}
2511
2512VALUE
2513 rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
2514 {
2515 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
2516 DATA_PTR(obj) = xcalloc(1, size);
2517 return obj;
2518}
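/* Example usage of the typed-data allocators above, as a C extension might
 * write it (an illustrative sketch; the `my_thing*` names are hypothetical,
 * not part of Ruby):
 *
 *   struct my_thing { long n; char *buf; };
 *
 *   static void
 *   my_thing_free(void *p)
 *   {
 *       struct my_thing *t = p;
 *       xfree(t->buf);          // assumes buf was xmalloc'ed elsewhere
 *       xfree(t);               // pairs with the xcalloc done by zalloc
 *   }
 *
 *   static size_t
 *   my_thing_memsize(const void *p)   // reported back through
 *   {                                 // rb_objspace_data_type_memsize()
 *       const struct my_thing *t = p;
 *       return sizeof(*t) + (size_t)t->n;
 *   }
 *
 *   static const rb_data_type_t my_thing_type = {
 *       "my_thing",
 *       { NULL, my_thing_free, my_thing_memsize, },
 *       0, 0,
 *       RUBY_TYPED_FREE_IMMEDIATELY,  // dfree runs at sweep time, no zombie
 *   };
 *
 *   VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct my_thing),
 *                                           &my_thing_type);
 */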
2519
2520size_t
2521 rb_objspace_data_type_memsize(VALUE obj)
2522 {
2523 if (RTYPEDDATA_P(obj)) {
2524 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
2525 const void *ptr = RTYPEDDATA_DATA(obj);
2526 if (ptr && type->function.dsize) {
2527 return type->function.dsize(ptr);
2528 }
2529 }
2530 return 0;
2531}
2532
2533const char *
2534 rb_objspace_data_type_name(VALUE obj)
2535 {
2536 if (RTYPEDDATA_P(obj)) {
2537 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2538 }
2539 else {
2540 return 0;
2541 }
2542}
2543
2544PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2545static inline int
2546is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2547{
2548 register RVALUE *p = RANY(ptr);
2549 register struct heap_page *page;
2550 register size_t hi, lo, mid;
2551
2552 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2553
2554 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2555 RB_DEBUG_COUNTER_INC(gc_isptr_range);
2556
2557 if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2558 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2559
2560 /* check whether p looks like a pointer to a heap slot, using binary search */
2561 lo = 0;
2562 hi = heap_allocated_pages;
2563 while (lo < hi) {
2564 mid = (lo + hi) / 2;
2565 page = heap_pages_sorted[mid];
2566 if (page->start <= p) {
2567 if (p < page->start + page->total_slots) {
2568 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2569
2570 if (page->flags.in_tomb) {
2571 return FALSE;
2572 }
2573 else {
2574 return TRUE;
2575 }
2576 }
2577 lo = mid + 1;
2578 }
2579 else {
2580 hi = mid;
2581 }
2582 }
2583 return FALSE;
2584}
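/* Worked example of the search above (hypothetical addresses; 64-bit build
 * where sizeof(RVALUE) == 40): suppose heap_pages_sorted holds three pages
 * whose start slots are 0x1000, 0x5000 and 0x9000, and ptr == 0x5460
 * (21600, a multiple of 40, so the alignment check passes; 0x5464 would
 * already have been rejected).
 *
 *   lo=0, hi=3 -> mid=1: page->start (0x5000) <= p
 *                        and p < page->start + page->total_slots  -> found
 *   page->flags.in_tomb == false                                  -> TRUE
 *
 * Pointers below heap_pages_lomem, above heap_pages_himem, misaligned, or
 * landing in a tomb page are all rejected before being treated as slots. */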
2585
2586 static enum rb_id_table_iterator_result
2587 free_const_entry_i(VALUE value, void *data)
2588{
2589 rb_const_entry_t *ce = (rb_const_entry_t *)value;
2590 xfree(ce);
2591 return ID_TABLE_CONTINUE;
2592}
2593
2594void
2595 rb_free_const_table(struct rb_id_table *tbl)
2596 {
2597 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2598 rb_id_table_free(tbl);
2599}
2600
2601static int
2602free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
2603{
2604 xfree((void *)value);
2605 return ST_CONTINUE;
2606}
2607
2608static void
2609iv_index_tbl_free(struct st_table *tbl)
2610{
2611 st_foreach(tbl, free_iv_index_tbl_free_i, 0);
2612}
2613
2614 // alive: if false, the target pointers may already have been freed;
2615 // checking that requires the objspace parameter.
2616static void
2617vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
2618{
2619 if (ccs->entries) {
2620 for (int i=0; i<ccs->len; i++) {
2621 const struct rb_callcache *cc = ccs->entries[i].cc;
2622 if (!alive) {
2623 void *ptr = asan_poisoned_object_p((VALUE)cc);
2624 asan_unpoison_object((VALUE)cc, false);
2625 // ccs may already have been freed.
2626 if (is_pointer_to_heap(objspace, (void *)cc) &&
2627 IMEMO_TYPE_P(cc, imemo_callcache) &&
2628 cc->klass == klass) {
2629 // OK. maybe target cc.
2630 }
2631 else {
2632 if (ptr) {
2633 asan_poison_object((VALUE)cc);
2634 }
2635 continue;
2636 }
2637 if (ptr) {
2638 asan_poison_object((VALUE)cc);
2639 }
2640 }
2641 vm_cc_invalidate(cc);
2642 }
2643 ruby_xfree(ccs->entries);
2644 }
2645 ruby_xfree(ccs);
2646}
2647
2648void
2649 rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
2650 {
2651 RB_DEBUG_COUNTER_INC(ccs_free);
2652 vm_ccs_free(ccs, TRUE, NULL, Qundef);
2653}
2654
2655 struct cc_tbl_i_data {
2656 rb_objspace_t *objspace;
2657 VALUE klass;
2658 bool alive;
2659};
2660
2661 static enum rb_id_table_iterator_result
2662 cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
2663{
2664 struct cc_tbl_i_data *data = data_ptr;
2665 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2666 VM_ASSERT(vm_ccs_p(ccs));
2667 VM_ASSERT(id == ccs->cme->called_id);
2668
2669 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2670 rb_vm_ccs_free(ccs);
2671 return ID_TABLE_DELETE;
2672 }
2673 else {
2674 gc_mark(data->objspace, (VALUE)ccs->cme);
2675
2676 for (int i=0; i<ccs->len; i++) {
2677 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
2678 VM_ASSERT(ccs->cme == vm_cc_cme(ccs->entries[i].cc));
2679
2680 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
2681 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
2682 }
2683 return ID_TABLE_CONTINUE;
2684 }
2685}
2686
2687static void
2688cc_table_mark(rb_objspace_t *objspace, VALUE klass)
2689{
2690 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2691 if (cc_tbl) {
2692 struct cc_tbl_i_data data = {
2693 .objspace = objspace,
2694 .klass = klass,
2695 };
2696 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
2697 }
2698}
2699
2700 static enum rb_id_table_iterator_result
2701 cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
2702{
2703 struct cc_tbl_i_data *data = data_ptr;
2704 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2705 VM_ASSERT(vm_ccs_p(ccs));
2706 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
2707 return ID_TABLE_CONTINUE;
2708}
2709
2710static void
2711cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
2712{
2713 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2714
2715 if (cc_tbl) {
2716 struct cc_tbl_i_data data = {
2717 .objspace = objspace,
2718 .klass = klass,
2719 .alive = alive,
2720 };
2721 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
2722 rb_id_table_free(cc_tbl);
2723 }
2724}
2725
2726void
2727 rb_cc_table_free(VALUE klass)
2728 {
2729 cc_table_free(&rb_objspace, klass, TRUE);
2730}
2731
2732static inline void
2733make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
2734{
2735 struct RZombie *zombie = RZOMBIE(obj);
2736 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
2737 zombie->dfree = dfree;
2738 zombie->data = data;
2739 zombie->next = heap_pages_deferred_final;
2740 heap_pages_deferred_final = (VALUE)zombie;
2741
2742 struct heap_page *page = GET_HEAP_PAGE(obj);
2743 page->final_slots++;
2744 heap_pages_final_slots++;
2745 }
2746
2747static inline void
2748make_io_zombie(rb_objspace_t *objspace, VALUE obj)
2749{
2750 rb_io_t *fptr = RANY(obj)->as.file.fptr;
2751 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
2752}
2753
2754static void
2755obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
2756{
2758 st_data_t o = (st_data_t)obj, id;
2759
2762
2763 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
2764 GC_ASSERT(id);
2765 st_delete(objspace->id_to_obj_tbl, &id, NULL);
2766 }
2767 else {
2768 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
2769 }
2770}
2771
2772static int
2773obj_free(rb_objspace_t *objspace, VALUE obj)
2774{
2775 RB_DEBUG_COUNTER_INC(obj_free);
2776 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2777
2778 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
2779
2780 switch (BUILTIN_TYPE(obj)) {
2781 case T_NIL:
2782 case T_FIXNUM:
2783 case T_TRUE:
2784 case T_FALSE:
2785 rb_bug("obj_free() called for broken object");
2786 break;
2787 default:
2788 break;
2789 }
2790
2791 if (FL_TEST(obj, FL_EXIVAR)) {
2792 rb_free_generic_ivar((VALUE)obj);
2793 FL_UNSET(obj, FL_EXIVAR);
2794 }
2795
2796 if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
2797 obj_free_object_id(objspace, obj);
2798 }
2799
2800 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2801
2802#if RGENGC_CHECK_MODE
2803#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
2804 CHECK(RVALUE_WB_UNPROTECTED);
2805 CHECK(RVALUE_MARKED);
2806 CHECK(RVALUE_MARKING);
2807 CHECK(RVALUE_UNCOLLECTIBLE);
2808#undef CHECK
2809#endif
2810
2811 switch (BUILTIN_TYPE(obj)) {
2812 case T_OBJECT:
2813 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
2814 RB_DEBUG_COUNTER_INC(obj_obj_embed);
2815 }
2816 else if (ROBJ_TRANSIENT_P(obj)) {
2817 RB_DEBUG_COUNTER_INC(obj_obj_transient);
2818 }
2819 else {
2820 xfree(RANY(obj)->as.object.as.heap.ivptr);
2821 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
2822 }
2823 break;
2824 case T_MODULE:
2825 case T_CLASS:
2826 mjit_remove_class_serial(RCLASS_SERIAL(obj));
2827 rb_id_table_free(RCLASS_M_TBL(obj));
2828 cc_table_free(objspace, obj, FALSE);
2829 if (RCLASS_IV_TBL(obj)) {
2830 st_free_table(RCLASS_IV_TBL(obj));
2831 }
2832 if (RCLASS_CONST_TBL(obj)) {
2833 rb_free_const_table(RCLASS_CONST_TBL(obj));
2834 }
2835 if (RCLASS_IV_INDEX_TBL(obj)) {
2836 iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
2837 }
2838 if (RCLASS_EXT(obj)->subclasses) {
2839 if (BUILTIN_TYPE(obj) == T_MODULE) {
2840 rb_class_detach_module_subclasses(obj);
2841 }
2842 else {
2843 rb_class_detach_subclasses(obj);
2844 }
2845 RCLASS_EXT(obj)->subclasses = NULL;
2846 }
2847 rb_class_remove_from_module_subclasses(obj);
2848 rb_class_remove_from_super_subclasses(obj);
2849 if (RANY(obj)->as.klass.ptr)
2850 xfree(RANY(obj)->as.klass.ptr);
2851 RANY(obj)->as.klass.ptr = NULL;
2852
2853 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
2854 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
2855 break;
2856 case T_STRING:
2857 rb_str_free(obj);
2858 break;
2859 case T_ARRAY:
2860 rb_ary_free(obj);
2861 break;
2862 case T_HASH:
2863#if USE_DEBUG_COUNTER
2864 switch (RHASH_SIZE(obj)) {
2865 case 0:
2866 RB_DEBUG_COUNTER_INC(obj_hash_empty);
2867 break;
2868 case 1:
2869 RB_DEBUG_COUNTER_INC(obj_hash_1);
2870 break;
2871 case 2:
2872 RB_DEBUG_COUNTER_INC(obj_hash_2);
2873 break;
2874 case 3:
2875 RB_DEBUG_COUNTER_INC(obj_hash_3);
2876 break;
2877 case 4:
2878 RB_DEBUG_COUNTER_INC(obj_hash_4);
2879 break;
2880 case 5:
2881 case 6:
2882 case 7:
2883 case 8:
2884 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
2885 break;
2886 default:
2887 GC_ASSERT(RHASH_SIZE(obj) > 8);
2888 RB_DEBUG_COUNTER_INC(obj_hash_g8);
2889 }
2890
2891 if (RHASH_AR_TABLE_P(obj)) {
2892 if (RHASH_AR_TABLE(obj) == NULL) {
2893 RB_DEBUG_COUNTER_INC(obj_hash_null);
2894 }
2895 else {
2896 RB_DEBUG_COUNTER_INC(obj_hash_ar);
2897 }
2898 }
2899 else {
2900 RB_DEBUG_COUNTER_INC(obj_hash_st);
2901 }
2902#endif
2903 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
2904 struct ar_table_struct *tab = RHASH(obj)->as.ar;
2905
2906 if (tab) {
2907 if (RHASH_TRANSIENT_P(obj)) {
2908 RB_DEBUG_COUNTER_INC(obj_hash_transient);
2909 }
2910 else {
2911 ruby_xfree(tab);
2912 }
2913 }
2914 }
2915 else {
2916 GC_ASSERT(RHASH_ST_TABLE_P(obj));
2917 st_free_table(RHASH(obj)->as.st);
2918 }
2919 break;
2920 case T_REGEXP:
2921 if (RANY(obj)->as.regexp.ptr) {
2922 onig_free(RANY(obj)->as.regexp.ptr);
2923 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
2924 }
2925 break;
2926 case T_DATA:
2927 if (DATA_PTR(obj)) {
2928 int free_immediately = FALSE;
2929 void (*dfree)(void *);
2930 void *data = DATA_PTR(obj);
2931
2932 if (RTYPEDDATA_P(obj)) {
2933 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
2934 dfree = RANY(obj)->as.typeddata.type->function.dfree;
2935 if (0 && free_immediately == 0) {
2936 /* to expose non-free-immediate T_DATA */
2937 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
2938 }
2939 }
2940 else {
2941 dfree = RANY(obj)->as.data.dfree;
2942 }
2943
2944 if (dfree) {
2945 if (dfree == RUBY_DEFAULT_FREE) {
2946 xfree(data);
2947 RB_DEBUG_COUNTER_INC(obj_data_xfree);
2948 }
2949 else if (free_immediately) {
2950 (*dfree)(data);
2951 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
2952 }
2953 else {
2954 make_zombie(objspace, obj, dfree, data);
2955 RB_DEBUG_COUNTER_INC(obj_data_zombie);
2956 return 1;
2957 }
2958 }
2959 else {
2960 RB_DEBUG_COUNTER_INC(obj_data_empty);
2961 }
2962 }
2963 break;
2964 case T_MATCH:
2965 if (RANY(obj)->as.match.rmatch) {
2966 struct rmatch *rm = RANY(obj)->as.match.rmatch;
2967#if USE_DEBUG_COUNTER
2968 if (rm->regs.num_regs >= 8) {
2969 RB_DEBUG_COUNTER_INC(obj_match_ge8);
2970 }
2971 else if (rm->regs.num_regs >= 4) {
2972 RB_DEBUG_COUNTER_INC(obj_match_ge4);
2973 }
2974 else if (rm->regs.num_regs >= 1) {
2975 RB_DEBUG_COUNTER_INC(obj_match_under4);
2976 }
2977#endif
2978 onig_region_free(&rm->regs, 0);
2979 if (rm->char_offset)
2980 xfree(rm->char_offset);
2981 xfree(rm);
2982
2983 RB_DEBUG_COUNTER_INC(obj_match_ptr);
2984 }
2985 break;
2986 case T_FILE:
2987 if (RANY(obj)->as.file.fptr) {
2988 make_io_zombie(objspace, obj);
2989 RB_DEBUG_COUNTER_INC(obj_file_ptr);
2990 return 1;
2991 }
2992 break;
2993 case T_RATIONAL:
2994 RB_DEBUG_COUNTER_INC(obj_rational);
2995 break;
2996 case T_COMPLEX:
2997 RB_DEBUG_COUNTER_INC(obj_complex);
2998 break;
2999 case T_MOVED:
3000 break;
3001 case T_ICLASS:
3002 /* Basically, T_ICLASS shares its table with the module */
3003 if (RICLASS_OWNS_M_TBL_P(obj)) {
3004 /* Method table is not shared for origin iclasses of classes */
3005 rb_id_table_free(RCLASS_M_TBL(obj));
3006 }
3007 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3008 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3009 }
3010 if (RCLASS_EXT(obj)->subclasses) {
3011 rb_class_detach_subclasses(obj);
3012 RCLASS_EXT(obj)->subclasses = NULL;
3013 }
3014 cc_table_free(objspace, obj, FALSE);
3015 rb_class_remove_from_module_subclasses(obj);
3016 rb_class_remove_from_super_subclasses(obj);
3017 xfree(RANY(obj)->as.klass.ptr);
3018 RANY(obj)->as.klass.ptr = NULL;
3019
3020 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3021 break;
3022
3023 case T_FLOAT:
3024 RB_DEBUG_COUNTER_INC(obj_float);
3025 break;
3026
3027 case T_BIGNUM:
3028 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3029 xfree(BIGNUM_DIGITS(obj));
3030 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3031 }
3032 else {
3033 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3034 }
3035 break;
3036
3037 case T_NODE:
3038 UNEXPECTED_NODE(obj_free);
3039 break;
3040
3041 case T_STRUCT:
3042 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3043 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3044 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3045 }
3046 else if (RSTRUCT_TRANSIENT_P(obj)) {
3047 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3048 }
3049 else {
3050 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3051 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3052 }
3053 break;
3054
3055 case T_SYMBOL:
3056 {
3057 rb_gc_free_dsymbol(obj);
3058 RB_DEBUG_COUNTER_INC(obj_symbol);
3059 }
3060 break;
3061
3062 case T_IMEMO:
3063 switch (imemo_type(obj)) {
3064 case imemo_ment:
3065 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3066 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3067 break;
3068 case imemo_iseq:
3069 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3070 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3071 break;
3072 case imemo_env:
3073 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3074 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3075 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3076 break;
3077 case imemo_tmpbuf:
3078 xfree(RANY(obj)->as.imemo.alloc.ptr);
3079 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3080 break;
3081 case imemo_ast:
3082 rb_ast_free(&RANY(obj)->as.imemo.ast);
3083 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3084 break;
3085 case imemo_cref:
3086 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3087 break;
3088 case imemo_svar:
3089 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3090 break;
3091 case imemo_throw_data:
3092 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3093 break;
3094 case imemo_ifunc:
3095 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3096 break;
3097 case imemo_memo:
3098 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3099 break;
3100 case imemo_parser_strterm:
3101 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3102 break;
3103 case imemo_callinfo:
3104 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3105 break;
3106 case imemo_callcache:
3107 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3108 break;
3109 case imemo_constcache:
3110 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3111 break;
3112 }
3113 return 0;
3114
3115 default:
3116 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3117 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3118 }
3119
3120 if (FL_TEST(obj, FL_FINALIZE)) {
3121 make_zombie(objspace, obj, 0, 0);
3122 return 1;
3123 }
3124 else {
3125 return 0;
3126 }
3127}
3128
3129
3130#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3131#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
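/* Worked arithmetic for the two macros above (64-bit build where
 * sizeof(RVALUE) == 40):
 *
 *   OBJ_ID_INCREMENT = 40 / 2 = 20
 *   OBJ_ID_INITIAL   = 20 * 2 = 40
 *   ids handed out by cached_object_id(): 40, 60, 80, ... (0 or 20 mod 40)
 *   static symbol ids (rb_find_object_id): s * 40 + 16    (16 mod 40)
 *
 * Counter-derived object ids therefore never collide with static symbol
 * ids, which is what id2ref() below relies on when it tests
 * `ptr % sizeof(RVALUE) == (4 << 2)`. */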
3132
3133static int
3134object_id_cmp(st_data_t x, st_data_t y)
3135{
3136 if (RB_TYPE_P(x, T_BIGNUM)) {
3137 return !rb_big_eql(x, y);
3138 } else {
3139 return x != y;
3140 }
3141}
3142
3143static st_index_t
3144object_id_hash(st_data_t n)
3145{
3146 if (RB_TYPE_P(n, T_BIGNUM)) {
3147 return FIX2LONG(rb_big_hash(n));
3148 } else {
3149 return st_numhash(n);
3150 }
3151}
3152static const struct st_hash_type object_id_hash_type = {
3153 object_id_cmp,
3154 object_id_hash,
3155};
3156
3157void
3158 Init_heap(void)
3159 {
3160 rb_objspace_t *objspace = &rb_objspace;
3161
3162#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
3163 /* If Ruby's heap pages are not a multiple of the system page size, we
3164 * cannot use mprotect for the read barrier, so we must disable automatic
3165 * compaction. */
3166 int pagesize;
3167 pagesize = (int)sysconf(_SC_PAGE_SIZE);
3168 if ((HEAP_PAGE_SIZE % pagesize) != 0) {
3169 ruby_enable_autocompact = FALSE;
3170 }
3171#endif
3172
3173 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3174 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3175 objspace->obj_to_id_tbl = st_init_numtable();
3176
3177#if RGENGC_ESTIMATE_OLDMALLOC
3178 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3179#endif
3180
3181 heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3182 init_mark_stack(&objspace->mark_stack);
3183
3184 objspace->profile.invoke_time = getrusage_time();
3185 finalizer_table = st_init_numtable();
3186 }
3187
3188void
3189 Init_gc_stress(void)
3190 {
3191 rb_objspace_t *objspace = &rb_objspace;
3192
3193 gc_stress_set(objspace, ruby_initial_gc_stress);
3194}
3195
3196typedef int each_obj_callback(void *, void *, size_t, void *);
3197
3198static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data);
3199static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3200
3201 struct each_obj_args {
3202 rb_objspace_t *objspace;
3203 each_obj_callback *callback;
3204 void *data;
3205};
3206
3207static void
3208objspace_each_objects_without_setup(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
3209{
3210 size_t i;
3211 struct heap_page *page;
3212 RVALUE *pstart = NULL, *pend;
3213
3214 i = 0;
3215 while (i < heap_allocated_pages) {
3216 while (0 < i && pstart < heap_pages_sorted[i-1]->start) i--;
3217 while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
3218 if (heap_allocated_pages <= i) break;
3219
3220 page = heap_pages_sorted[i];
3221
3222 pstart = page->start;
3223 pend = pstart + page->total_slots;
3224
3225 if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
3226 break;
3227 }
3228 }
3229}
3230
3231static VALUE
3232objspace_each_objects_protected(VALUE arg)
3233{
3234 struct each_obj_args *args = (struct each_obj_args *)arg;
3235 objspace_each_objects_without_setup(args->objspace, args->callback, args->data);
3236 return Qnil;
3237}
3238
3239static VALUE
3240incremental_enable(VALUE _)
3241{
3242 rb_objspace_t *objspace = &rb_objspace;
3243
3244 objspace->flags.dont_incremental = FALSE;
3245 return Qnil;
3246}
3247
3248/*
3249 * rb_objspace_each_objects() is special C API to walk through
3250 * the Ruby object space. This C API is difficult to use correctly;
3251 * to be frank, you should not use it unless you have read the source
3252 * of this function and understand exactly what it does.
3253 *
3254 * 'callback' will be called several times (the number of heap page,
3255 * at current implementation) with:
3256 * vstart: a pointer to the first living object of the heap_page.
3257 * vend: a pointer to next to the valid heap_page area.
3258 * stride: a distance to next VALUE.
3259 *
3260 * If callback() returns non-zero, the iteration will be stopped.
3261 *
3262 * This is a sample callback code to iterate liveness objects:
3263 *
3264 * int
3265 * sample_callback(void *vstart, void *vend, int stride, void *data) {
3266 * VALUE v = (VALUE)vstart;
3267 * for (; v != (VALUE)vend; v += stride) {
3268 * if (RBASIC(v)->flags) { // liveness check
3269 * // do something with live object 'v'
3270 * }
3271 * }
3272 * return 0; // continue the iteration
3273 * }
3274 * Note: 'vstart' is not the top of the heap_page. It points at the
3275 * first living object, in order to grasp at least one object per page
3276 * and avoid GC issues. This means you cannot walk every slot of every
3277 * Ruby object page, including freed slots.
3278 *
3279 * Note: In this implementation, 'stride' equals sizeof(RVALUE).
3280 * However, future implementations may pass a different value, so you
3281 * must use the given stride instead of hard-coding a constant in the
3282 * iteration.
3283 */
3284void
3285 rb_objspace_each_objects(each_obj_callback *callback, void *data)
3286 {
3287 objspace_each_objects(&rb_objspace, callback, data);
3288}
3289
3290static void
3291objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
3292{
3293 int prev_dont_incremental = objspace->flags.dont_incremental;
3294
3295 gc_rest(objspace);
3296 objspace->flags.dont_incremental = TRUE;
3297
3298 if (prev_dont_incremental) {
3299 objspace_each_objects_without_setup(objspace, callback, data);
3300 }
3301 else {
3302 struct each_obj_args args = {objspace, callback, data};
3303 rb_ensure(objspace_each_objects_protected, (VALUE)&args, incremental_enable, Qnil);
3304 }
3305}
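/* The rb_ensure() above is the C-level equivalent of begin/ensure: even if
 * `callback` raises, incremental_enable() still runs and clears
 * flags.dont_incremental. The shape in general (illustrative):
 *
 *   rb_ensure(objspace_each_objects_protected, (VALUE)&args,  // may raise
 *             incremental_enable, Qnil);                      // always runs
 */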
3306
3307void
3308 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
3309 {
3310 objspace_each_objects_without_setup(&rb_objspace, callback, data);
3311}
3312
3313 struct os_each_struct {
3314 size_t num;
3315 VALUE of;
3316};
3317
3318static int
3319internal_object_p(VALUE obj)
3320{
3321 RVALUE *p = (RVALUE *)obj;
3322 void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
3323 asan_unpoison_object(obj, false);
3324 bool used_p = p->as.basic.flags;
3325
3326 if (used_p) {
3327 switch (BUILTIN_TYPE(obj)) {
3328 case T_NODE:
3329 UNEXPECTED_NODE(internal_object_p);
3330 break;
3331 case T_NONE:
3332 case T_MOVED:
3333 case T_IMEMO:
3334 case T_ICLASS:
3335 case T_ZOMBIE:
3336 break;
3337 case T_CLASS:
3338 if (!p->as.basic.klass) break;
3339 if (FL_TEST(obj, FL_SINGLETON)) {
3340 return rb_singleton_class_internal_p(obj);
3341 }
3342 return 0;
3343 default:
3344 if (!p->as.basic.klass) break;
3345 return 0;
3346 }
3347 }
3348 if (ptr || ! used_p) {
3349 asan_poison_object(obj);
3350 }
3351 return 1;
3352}
3353
3354int
3355 rb_objspace_internal_object_p(VALUE obj)
3356 {
3357 return internal_object_p(obj);
3358}
3359
3360static int
3361os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3362{
3363 struct os_each_struct *oes = (struct os_each_struct *)data;
3364 RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
3365
3366 for (; p != pend; p++) {
3367 volatile VALUE v = (VALUE)p;
3368 if (!internal_object_p(v)) {
3369 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
3370 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
3371 rb_yield(v);
3372 oes->num++;
3373 }
3374 }
3375 }
3376 }
3377
3378 return 0;
3379}
3380
3381static VALUE
3382os_obj_of(VALUE of)
3383{
3384 struct os_each_struct oes;
3385
3386 oes.num = 0;
3387 oes.of = of;
3388 rb_objspace_each_objects(os_obj_of_i, &oes);
3389 return SIZET2NUM(oes.num);
3390}
3391
3392/*
3393 * call-seq:
3394 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
3395 * ObjectSpace.each_object([module]) -> an_enumerator
3396 *
3397 * Calls the block once for each living, nonimmediate object in this
3398 * Ruby process. If <i>module</i> is specified, calls the block
3399 * for only those classes or modules that match (or are a subclass of)
3400 * <i>module</i>. Returns the number of objects found. Immediate
3401 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
3402 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
3403 * never returned. In the example below, #each_object returns both
3404 * the numbers we defined and several constants defined in the Math
3405 * module.
3406 *
3407 * If no block is given, an enumerator is returned instead.
3408 *
3409 * a = 102.7
3410 * b = 95 # Won't be returned
3411 * c = 12345678987654321
3412 * count = ObjectSpace.each_object(Numeric) {|x| p x }
3413 * puts "Total count: #{count}"
3414 *
3415 * <em>produces:</em>
3416 *
3417 * 12345678987654321
3418 * 102.7
3419 * 2.71828182845905
3420 * 3.14159265358979
3421 * 2.22044604925031e-16
3422 * 1.7976931348623157e+308
3423 * 2.2250738585072e-308
3424 * Total count: 7
3425 *
3426 */
3427
3428static VALUE
3429os_each_obj(int argc, VALUE *argv, VALUE os)
3430{
3431 VALUE of;
3432
3433 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
3434 RETURN_ENUMERATOR(os, 1, &of);
3435 return os_obj_of(of);
3436}
3437
3438/*
3439 * call-seq:
3440 * ObjectSpace.undefine_finalizer(obj)
3441 *
3442 * Removes all finalizers for <i>obj</i>.
3443 *
3444 */
3445
3446static VALUE
3447undefine_final(VALUE os, VALUE obj)
3448{
3449 return rb_undefine_finalizer(obj);
3450}
3451
3452VALUE
3453 rb_undefine_finalizer(VALUE obj)
3454 {
3455 rb_objspace_t *objspace = &rb_objspace;
3456 st_data_t data = obj;
3457 rb_check_frozen(obj);
3458 st_delete(finalizer_table, &data, 0);
3459 FL_UNSET(obj, FL_FINALIZE);
3460 return obj;
3461}
3462
3463static void
3464should_be_callable(VALUE block)
3465{
3466 if (!rb_obj_respond_to(block, idCall, TRUE)) {
3467 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
3468 rb_obj_class(block));
3469 }
3470}
3471
3472static void
3473should_be_finalizable(VALUE obj)
3474{
3475 if (!FL_ABLE(obj)) {
3476 rb_raise(rb_eArgError, "cannot define finalizer for %s",
3477 rb_obj_classname(obj));
3478 }
3479 rb_check_frozen(obj);
3480}
3481
3482/*
3483 * call-seq:
3484 * ObjectSpace.define_finalizer(obj, aProc=proc())
3485 *
3486 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3487 * was destroyed. The object ID of the <i>obj</i> will be passed
3488 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
3489 * method, make sure it can be called with a single argument.
3490 *
3491 * The return value is an array <code>[0, aProc]</code>.
3492 *
3493 * The two recommended patterns are to either create the finalizer proc
3494 * in a non-instance method where it can safely capture the needed state,
3495 * or to use a custom callable object that stores the needed state
3496 * explicitly as instance variables.
3497 *
3498 * class Foo
3499 * def initialize(data_needed_for_finalization)
3500 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
3501 * end
3502 *
3503 * def self.create_finalizer(data_needed_for_finalization)
3504 * proc {
3505 * puts "finalizing #{data_needed_for_finalization}"
3506 * }
3507 * end
3508 * end
3509 *
3510 * class Bar
3511 * class Remover
3512 * def initialize(data_needed_for_finalization)
3513 * @data_needed_for_finalization = data_needed_for_finalization
3514 * end
3515 *
3516 * def call(id)
3517 * puts "finalizing #{@data_needed_for_finalization}"
3518 * end
3519 * end
3520 *
3521 * def initialize(data_needed_for_finalization)
3522 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
3523 * end
3524 * end
3525 *
3526 * Note that if your finalizer references the object to be
3527 * finalized it will never be run on GC, although it will still be
3528 * run at exit. You will get a warning if you capture the object
3529 * to be finalized as the receiver of the finalizer.
3530 *
3531 * class CapturesSelf
3532 * def initialize(name)
3533 * ObjectSpace.define_finalizer(self, proc {
3534 * # this finalizer will only be run on exit
3535 * puts "finalizing #{name}"
3536 * })
3537 * end
3538 * end
3539 *
3540 * Also note that finalization can be unpredictable and is never guaranteed
3541 * to be run except on exit.
3542 */
3543
3544static VALUE
3545define_final(int argc, VALUE *argv, VALUE os)
3546{
3547 VALUE obj, block;
3548
3549 rb_scan_args(argc, argv, "11", &obj, &block);
3550 should_be_finalizable(obj);
3551 if (argc == 1) {
3552 block = rb_block_proc();
3553 }
3554 else {
3555 should_be_callable(block);
3556 }
3557
3558 if (rb_callable_receiver(block) == obj) {
3559 rb_warn("finalizer references object to be finalized");
3560 }
3561
3562 return define_final0(obj, block);
3563}
3564
3565static VALUE
3566define_final0(VALUE obj, VALUE block)
3567{
3568 rb_objspace_t *objspace = &rb_objspace;
3569 VALUE table;
3570 st_data_t data;
3571
3572 RBASIC(obj)->flags |= FL_FINALIZE;
3573
3574 block = rb_ary_new3(2, INT2FIX(0), block);
3575 OBJ_FREEZE(block);
3576
3577 if (st_lookup(finalizer_table, obj, &data)) {
3578 table = (VALUE)data;
3579
3580 /* avoid duplicate block, table is usually small */
3581 {
3582 long len = RARRAY_LEN(table);
3583 long i;
3584
3585 for (i = 0; i < len; i++) {
3586 VALUE recv = RARRAY_AREF(table, i);
3587 if (rb_funcall(recv, idEq, 1, block)) {
3588 return recv;
3589 }
3590 }
3591 }
3592
3593 rb_ary_push(table, block);
3594 }
3595 else {
3596 table = rb_ary_new3(1, block);
3597 RBASIC_CLEAR_CLASS(table);
3598 st_add_direct(finalizer_table, obj, table);
3599 }
3600 return block;
3601}
3602
3603VALUE
3604 rb_define_finalizer(VALUE obj, VALUE block)
3605 {
3606 should_be_finalizable(obj);
3607 should_be_callable(block);
3608 return define_final0(obj, block);
3609}
3610
3611void
3612 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
3613 {
3614 rb_objspace_t *objspace = &rb_objspace;
3615 VALUE table;
3616 st_data_t data;
3617
3618 if (!FL_TEST(obj, FL_FINALIZE)) return;
3619 if (st_lookup(finalizer_table, obj, &data)) {
3620 table = (VALUE)data;
3621 st_insert(finalizer_table, dest, table);
3622 }
3623 FL_SET(dest, FL_FINALIZE);
3624}
3625
3626static VALUE
3627run_single_final(VALUE final, VALUE objid)
3628{
3629 const VALUE cmd = RARRAY_AREF(final, 1);
3630 return rb_check_funcall(cmd, idCall, 1, &objid);
3631}
3632
3633static void
3634run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
3635{
3636 long i;
3637 enum ruby_tag_type state;
3638 volatile struct {
3639 VALUE errinfo;
3640 VALUE objid;
3641 rb_control_frame_t *cfp;
3642 long finished;
3643 } saved;
3644 rb_execution_context_t * volatile ec = GET_EC();
3645#define RESTORE_FINALIZER() (\
3646 ec->cfp = saved.cfp, \
3647 rb_set_errinfo(saved.errinfo))
3648
3649 saved.errinfo = rb_errinfo();
3650 saved.objid = rb_obj_id(obj);
3651 saved.cfp = ec->cfp;
3652 saved.finished = 0;
3653
3654 EC_PUSH_TAG(ec);
3655 state = EC_EXEC_TAG();
3656 if (state != TAG_NONE) {
3657 ++saved.finished; /* skip failed finalizer */
3658 }
3659 for (i = saved.finished;
3660 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
3661 saved.finished = ++i) {
3662 run_single_final(RARRAY_AREF(table, i), saved.objid);
3663 }
3664 EC_POP_TAG();
3665#undef RESTORE_FINALIZER
3666}
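/* A minimal, self-contained sketch (plain C, not the Ruby API) of the
 * resume-on-failure loop above: setjmp() plays the role of EC_EXEC_TAG().
 * If entry i longjmp()s, control returns to setjmp(), `finished` is
 * advanced past the failed entry, and the loop resumes with the next one.
 *
 *   #include <setjmp.h>
 *
 *   static jmp_buf tag;
 *   static void run_entry(int i) { if (i == 1) longjmp(tag, 1); }
 *
 *   void
 *   run_all(int len)   // hypothetical demo; with len == 3, entry 1 "raises"
 *   {
 *       volatile int finished = 0;
 *       if (setjmp(tag)) {
 *           ++finished;                  // skip the failed entry
 *       }
 *       for (int i = finished; i < len; finished = ++i) {
 *           run_entry(i);                // entries 0 and 2 still run
 *       }
 *   }
 */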
3667
3668static void
3669run_final(rb_objspace_t *objspace, VALUE zombie)
3670{
3671 st_data_t key, table;
3672
3673 if (RZOMBIE(zombie)->dfree) {
3674 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
3675 }
3676
3677 key = (st_data_t)zombie;
3678 if (st_delete(finalizer_table, &key, &table)) {
3679 run_finalizer(objspace, zombie, (VALUE)table);
3680 }
3681}
3682
3683static void
3684finalize_list(rb_objspace_t *objspace, VALUE zombie)
3685{
3686 while (zombie) {
3687 VALUE next_zombie;
3688 struct heap_page *page;
3689 asan_unpoison_object(zombie, false);
3690 next_zombie = RZOMBIE(zombie)->next;
3691 page = GET_HEAP_PAGE(zombie);
3692
3693 run_final(objspace, zombie);
3694
3695 RB_VM_LOCK_ENTER();
3696 {
3697 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
3698 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
3699 obj_free_object_id(objspace, zombie);
3700 }
3701
3702 RZOMBIE(zombie)->basic.flags = 0;
3703 GC_ASSERT(heap_pages_final_slots > 0);
3704 GC_ASSERT(page->final_slots > 0);
3705
3706 heap_pages_final_slots--;
3707 page->final_slots--;
3708 page->free_slots++;
3709 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
3710 objspace->profile.total_freed_objects++;
3711 }
3712 RB_VM_LOCK_LEAVE();
3713
3714 zombie = next_zombie;
3715 }
3716}
3717
3718static void
3719finalize_deferred(rb_objspace_t *objspace)
3720{
3721 VALUE zombie;
3722
3723 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
3724 finalize_list(objspace, zombie);
3725 }
3726}
3727
3728static void
3729gc_finalize_deferred(void *dmy)
3730{
3731 rb_objspace_t *objspace = dmy;
3732 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3733
3734 RB_VM_LOCK_ENTER();
3735 {
3736 finalize_deferred(objspace);
3737 ATOMIC_SET(finalizing, 0);
3738 }
3739 RB_VM_LOCK_LEAVE();
3740 }
3741
3742static void
3743gc_finalize_deferred_register(rb_objspace_t *objspace)
3744{
3745 if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
3746 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
3747 }
3748}
3749
3750 struct force_finalize_list {
3751 VALUE obj;
3752 VALUE table;
3753 struct force_finalize_list *next;
3754 };
3755
3756static int
3757force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
3758{
3759 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
3760 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
3761 curr->obj = key;
3762 curr->table = val;
3763 curr->next = *prev;
3764 *prev = curr;
3765 return ST_CONTINUE;
3766}
3767
3769
3770void
3772{
3773 RVALUE *p, *pend;
3774 size_t i;
3775
3776#if RGENGC_CHECK_MODE >= 2
3777 gc_verify_internal_consistency(objspace);
3778#endif
3779 gc_rest(objspace);
3780
3781 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3782
3783 /* run finalizers */
3784 finalize_deferred(objspace);
3786
3787 gc_rest(objspace);
3788 /* prohibit incremental GC */
3789 objspace->flags.dont_incremental = 1;
3790
3791 /* force to run finalizer */
3792 while (finalizer_table->num_entries) {
3793 struct force_finalize_list *list = 0;
3794 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
3795 while (list) {
3796 struct force_finalize_list *curr = list;
3797 st_data_t obj = (st_data_t)curr->obj;
3798 run_finalizer(objspace, curr->obj, curr->table);
3799 st_delete(finalizer_table, &obj, 0);
3800 list = curr->next;
3801 xfree(curr);
3802 }
3803 }
3804
3805 /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
3806 dont_gc_on();
3807
3808 /* running data/file finalizers are part of garbage collection */
3809 unsigned int lock_lev;
3810 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
3811
3812 /* run data/file object's finalizers */
3813 for (i = 0; i < heap_allocated_pages; i++) {
3814 p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
3815 while (p < pend) {
3816 VALUE vp = (VALUE)p;
3817 void *poisoned = asan_poisoned_object_p(vp);
3818 asan_unpoison_object(vp, false);
3819 switch (BUILTIN_TYPE(vp)) {
3820 case T_DATA:
3821 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
3822 if (rb_obj_is_thread(vp)) break;
3823 if (rb_obj_is_mutex(vp)) break;
3824 if (rb_obj_is_fiber(vp)) break;
3825 if (rb_obj_is_main_ractor(vp)) break;
3826 if (RTYPEDDATA_P(vp)) {
3827 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3828 }
3829 p->as.free.flags = 0;
3830 if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
3831 xfree(DATA_PTR(p));
3832 }
3833 else if (RANY(p)->as.data.dfree) {
3834 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
3835 }
3836 break;
3837 case T_FILE:
3838 if (RANY(p)->as.file.fptr) {
3839 make_io_zombie(objspace, vp);
3840 }
3841 break;
3842 default:
3843 break;
3844 }
3845 if (poisoned) {
3846 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
3847 asan_poison_object(vp);
3848 }
3849 p++;
3850 }
3851 }
3852
3853 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
3854
3855 if (heap_pages_deferred_final) {
3856 finalize_list(objspace, heap_pages_deferred_final);
3857 }
3858
3859 st_free_table(finalizer_table);
3860 finalizer_table = 0;
3861 ATOMIC_SET(finalizing, 0);
3862}
3863
3864static inline int
3865heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
3866{
3867 struct heap_page *page = GET_HEAP_PAGE(ptr);
3868 return page->flags.before_sweep ? FALSE : TRUE;
3869}
3870
3871static inline int
3872is_swept_object(rb_objspace_t *objspace, VALUE ptr)
3873{
3874 if (heap_is_swept_object(objspace, heap_eden, ptr)) {
3875 return TRUE;
3876 }
3877 else {
3878 return FALSE;
3879 }
3880}
3881
3882/* garbage objects will be collected soon. */
3883static inline int
3884is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
3885{
3886 if (!is_lazy_sweeping(heap_eden) ||
3887 is_swept_object(objspace, ptr) ||
3888 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
3889
3890 return FALSE;
3891 }
3892 else {
3893 return TRUE;
3894 }
3895}
3896
3897static inline int
3898is_live_object(rb_objspace_t *objspace, VALUE ptr)
3899{
3900 switch (BUILTIN_TYPE(ptr)) {
3901 case T_NONE:
3902 case T_MOVED:
3903 case T_ZOMBIE:
3904 return FALSE;
3905 default:
3906 break;
3907 }
3908
3909 if (!is_garbage_object(objspace, ptr)) {
3910 return TRUE;
3911 }
3912 else {
3913 return FALSE;
3914 }
3915}
3916
3917static inline int
3918is_markable_object(rb_objspace_t *objspace, VALUE obj)
3919{
3920 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
3921 check_rvalue_consistency(obj);
3922 return TRUE;
3923}
3924
3925int
3926 rb_objspace_markable_object_p(VALUE obj)
3927 {
3928 rb_objspace_t *objspace = &rb_objspace;
3929 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3930}
3931
3932int
3933 rb_objspace_garbage_object_p(VALUE obj)
3934 {
3935 rb_objspace_t *objspace = &rb_objspace;
3936 return is_garbage_object(objspace, obj);
3937}
3938
3939static VALUE
3940id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
3941{
3942 VALUE orig;
3943 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
3944 return orig;
3945 }
3946 else {
3947 return Qundef;
3948 }
3949}
3950
3951/*
3952 * call-seq:
3953 * ObjectSpace._id2ref(object_id) -> an_object
3954 *
3955 * Converts an object id to a reference to the object. May not be
3956 * called on an object id passed as a parameter to a finalizer.
3957 *
3958 * s = "I am a string" #=> "I am a string"
3959 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
3960 * r == s #=> true
3961 *
3962 * On multi-ractor mode, if the object is not sharable, it raises
3963 * RangeError.
3964 */
3965
3966static VALUE
3967id2ref(VALUE objid)
3968{
3969#if SIZEOF_LONG == SIZEOF_VOIDP
3970#define NUM2PTR(x) NUM2ULONG(x)
3971#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3972#define NUM2PTR(x) NUM2ULL(x)
3973#endif
3974 rb_objspace_t *objspace = &rb_objspace;
3975 VALUE ptr;
3976 VALUE orig;
3977 void *p0;
3978
3979 objid = rb_to_int(objid);
3980 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
3981 ptr = NUM2PTR(objid);
3982 if (ptr == Qtrue) return Qtrue;
3983 if (ptr == Qfalse) return Qfalse;
3984 if (ptr == Qnil) return Qnil;
3985 if (FIXNUM_P(ptr)) return (VALUE)ptr;
3986 if (FLONUM_P(ptr)) return (VALUE)ptr;
3987
3988 ptr = obj_id_to_ref(objid);
3989 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3990 ID symid = ptr / sizeof(RVALUE);
3991 p0 = (void *)ptr;
3992 if (rb_id2str(symid) == 0)
3993 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
3994 return ID2SYM(symid);
3995 }
3996 }
3997
3998 if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
3999 is_live_object(objspace, orig)) {
4000
4001 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4002 return orig;
4003 }
4004 else {
4005 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4006 }
4007 }
4008
4009 if (rb_int_ge(objid, objspace->next_object_id)) {
4010 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4011 } else {
4012 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4013 }
4014}
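/* Worked example of the static-symbol branch above (64-bit build where
 * sizeof(RVALUE) == 40): a static symbol with serial s == 3 receives
 * object id 3 * 40 + 16 == 136 from rb_find_object_id() below. Coming
 * back, id2ref() checks 136 % 40 == 16 (== 4 << 2), recovers
 * symid == 136 / 40 == 3 (the +16 vanishes in the truncating division),
 * confirms the id is still registered via rb_id2str(), and returns
 * ID2SYM(symid). */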
4015
4016static VALUE
4017os_id2ref(VALUE os, VALUE objid)
4018{
4019 return id2ref(objid);
4020}
4021
4022static VALUE
4023rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4024{
4025 if (STATIC_SYM_P(obj)) {
4026 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4027 }
4028 else if (FLONUM_P(obj)) {
4029#if SIZEOF_LONG == SIZEOF_VOIDP
4030 return LONG2NUM((SIGNED_VALUE)obj);
4031#else
4032 return LL2NUM((SIGNED_VALUE)obj);
4033#endif
4034 }
4035 else if (SPECIAL_CONST_P(obj)) {
4036 return LONG2NUM((SIGNED_VALUE)obj);
4037 }
4038
4039 return get_heap_object_id(obj);
4040}
4041
4042static VALUE
4043cached_object_id(VALUE obj)
4044{
4045 VALUE id;
4046 rb_objspace_t *objspace = &rb_objspace;
4047
4048 RB_VM_LOCK_ENTER();
4049 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4050 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4051 }
4052 else {
4053 GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4054
4055 id = objspace->next_object_id;
4056 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4057
4058 VALUE already_disabled = rb_gc_disable_no_rest();
4059 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4060 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4061 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4062 FL_SET(obj, FL_SEEN_OBJ_ID);
4063 }
4064 RB_VM_LOCK_LEAVE();
4065
4066 return id;
4067}
4068
4069static VALUE
4070nonspecial_obj_id_(VALUE obj)
4071{
4072 return nonspecial_obj_id(obj);
4073}
4074
4075
4076VALUE
4077 rb_memory_id(VALUE obj)
4078 {
4079 return rb_find_object_id(obj, nonspecial_obj_id_);
4080}
4081
4082/*
4083 * Document-method: __id__
4084 * Document-method: object_id
4085 *
4086 * call-seq:
4087 * obj.__id__ -> integer
4088 * obj.object_id -> integer
4089 *
4090 * Returns an integer identifier for +obj+.
4091 *
4092 * The same number will be returned on all calls to +object_id+ for a given
4093 * object, and no two active objects will share an id.
4094 *
4095 * Note that some objects of builtin classes are reused for optimization.
4096 * This is the case for immediate values and frozen string literals.
4097 *
4098 * BasicObject implements +__id__+, Kernel implements +object_id+.
4099 *
4100 * Immediate values are not passed by reference but are passed by value:
4101 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4102 *
4103 * Object.new.object_id == Object.new.object_id # => false
4104 * (21 * 2).object_id == (21 * 2).object_id # => true
4105 * "hello".object_id == "hello".object_id # => false
4106 * "hi".freeze.object_id == "hi".freeze.object_id # => true
4107 */
4108
4109VALUE
4110 rb_obj_id(VALUE obj)
4111 {
4112 /*
4113 * 32-bit VALUE space
4114 * MSB ------------------------ LSB
4115 * false 00000000000000000000000000000000
4116 * true 00000000000000000000000000000010
4117 * nil 00000000000000000000000000000100
4118 * undef 00000000000000000000000000000110
4119 * symbol ssssssssssssssssssssssss00001110
4120 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4121 * fixnum fffffffffffffffffffffffffffffff1
4122 *
4123 * object_id space
4124 * LSB
4125 * false 00000000000000000000000000000000
4126 * true 00000000000000000000000000000010
4127 * nil 00000000000000000000000000000100
4128 * undef 00000000000000000000000000000110
4129 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4130 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4131 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4132 *
4133 * where A = sizeof(RVALUE)/4
4134 *
4135 * sizeof(RVALUE) is
4136 * 20 if 32-bit, double is 4-byte aligned
4137 * 24 if 32-bit, double is 8-byte aligned
4138 * 40 if 64-bit
4139 */
4140
4141 return rb_find_object_id(obj, cached_object_id);
4142}
4143
4144 static enum rb_id_table_iterator_result
4145 cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4146{
4147 size_t *total_size = data_ptr;
4148 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
4149 *total_size += sizeof(*ccs);
4150 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4151 return ID_TABLE_CONTINUE;
4152}
4153
4154static size_t
4155cc_table_memsize(struct rb_id_table *cc_table)
4156{
4157 size_t total = rb_id_table_memsize(cc_table);
4158 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4159 return total;
4160}
4161
4162static size_t
4163obj_memsize_of(VALUE obj, int use_all_types)
4164{
4165 size_t size = 0;
4166
4167 if (SPECIAL_CONST_P(obj)) {
4168 return 0;
4169 }
4170
4171 if (FL_TEST(obj, FL_EXIVAR)) {
4172 size += rb_generic_ivar_memsize(obj);
4173 }
4174
4175 switch (BUILTIN_TYPE(obj)) {
4176 case T_OBJECT:
4177 if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4178 size += ROBJECT_NUMIV(obj) * sizeof(VALUE);
4179 }
4180 break;
4181 case T_MODULE:
4182 case T_CLASS:
4183 if (RCLASS_EXT(obj)) {
4184 if (RCLASS_M_TBL(obj)) {
4185 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4186 }
4187 if (RCLASS_IV_TBL(obj)) {
4188 size += st_memsize(RCLASS_IV_TBL(obj));
4189 }
4190 if (RCLASS_IV_INDEX_TBL(obj)) {
4191 // TODO: more correct value
4192 size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
4193 }
4194 if (RCLASS(obj)->ptr->iv_tbl) {
4195 size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
4196 }
4197 if (RCLASS(obj)->ptr->const_tbl) {
4198 size += rb_id_table_memsize(RCLASS(obj)->ptr->const_tbl);
4199 }
4200 if (RCLASS_CC_TBL(obj)) {
4201 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4202 }
4203 size += sizeof(rb_classext_t);
4204 }
4205 break;
4206 case T_ICLASS:
4207 if (RICLASS_OWNS_M_TBL_P(obj)) {
4208 if (RCLASS_M_TBL(obj)) {
4209 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4210 }
4211 }
4212 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4213 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4214 }
4215 break;
4216 case T_STRING:
4217 size += rb_str_memsize(obj);
4218 break;
4219 case T_ARRAY:
4220 size += rb_ary_memsize(obj);
4221 break;
4222 case T_HASH:
4223 if (RHASH_AR_TABLE_P(obj)) {
4224 if (RHASH_AR_TABLE(obj) != NULL) {
4225 size_t rb_hash_ar_table_size();
4226 size += rb_hash_ar_table_size();
4227 }
4228 }
4229 else {
4230 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4231 size += st_memsize(RHASH_ST_TABLE(obj));
4232 }
4233 break;
4234 case T_REGEXP:
4235 if (RREGEXP_PTR(obj)) {
4236 size += onig_memsize(RREGEXP_PTR(obj));
4237 }
4238 break;
4239 case T_DATA:
4240 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4241 break;
4242 case T_MATCH:
4243 if (RMATCH(obj)->rmatch) {
4244 struct rmatch *rm = RMATCH(obj)->rmatch;
4245 size += onig_region_memsize(&rm->regs);
4246 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
4247 size += sizeof(struct rmatch);
4248 }
4249 break;
4250 case T_FILE:
4251 if (RFILE(obj)->fptr) {
4252 size += rb_io_memsize(RFILE(obj)->fptr);
4253 }
4254 break;
4255 case T_RATIONAL:
4256 case T_COMPLEX:
4257 break;
4258 case T_IMEMO:
4259 size += imemo_memsize(obj);
4260 break;
4261
4262 case T_FLOAT:
4263 case T_SYMBOL:
4264 break;
4265
4266 case T_BIGNUM:
4267 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4268 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4269 }
4270 break;
4271
4272 case T_NODE:
4273 UNEXPECTED_NODE(obj_memsize_of);
4274 break;
4275
4276 case T_STRUCT:
4277 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4278 RSTRUCT(obj)->as.heap.ptr) {
4279 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
4280 }
4281 break;
4282
4283 case T_ZOMBIE:
4284 case T_MOVED:
4285 break;
4286
4287 default:
4288 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
4289 BUILTIN_TYPE(obj), (void*)obj);
4290 }
4291
4292 return size + sizeof(RVALUE);
4293}
4294
4295size_t
4296 rb_obj_memsize_of(VALUE obj)
4297 {
4298 return obj_memsize_of(obj, TRUE);
4299}
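/* rb_obj_memsize_of() is the backend of ObjectSpace.memsize_of in the
 * objspace extension. Illustrative use from C (note that the result
 * includes the sizeof(RVALUE) slot itself):
 *
 *   VALUE str = rb_str_new_cstr("hello, world");
 *   size_t n = rb_obj_memsize_of(str);   // always >= sizeof(RVALUE)
 */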
4300
4301static int
4302set_zero(st_data_t key, st_data_t val, st_data_t arg)
4303{
4304 VALUE k = (VALUE)key;
4305 VALUE hash = (VALUE)arg;
4306 rb_hash_aset(hash, k, INT2FIX(0));
4307 return ST_CONTINUE;
4308}
4309
4310static VALUE
4311type_sym(size_t type)
4312{
4313 switch (type) {
4314#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
4315 COUNT_TYPE(T_NONE);
4316 COUNT_TYPE(T_OBJECT);
4317 COUNT_TYPE(T_CLASS);
4318 COUNT_TYPE(T_MODULE);
4319 COUNT_TYPE(T_FLOAT);
4320 COUNT_TYPE(T_STRING);
4321 COUNT_TYPE(T_REGEXP);
4322 COUNT_TYPE(T_ARRAY);
4323 COUNT_TYPE(T_HASH);
4324 COUNT_TYPE(T_STRUCT);
4325 COUNT_TYPE(T_BIGNUM);
4326 COUNT_TYPE(T_FILE);
4327 COUNT_TYPE(T_DATA);
4328 COUNT_TYPE(T_MATCH);
4329 COUNT_TYPE(T_COMPLEX);
4330 COUNT_TYPE(T_RATIONAL);
4331 COUNT_TYPE(T_NIL);
4332 COUNT_TYPE(T_TRUE);
4333 COUNT_TYPE(T_FALSE);
4334 COUNT_TYPE(T_SYMBOL);
4335 COUNT_TYPE(T_FIXNUM);
4336 COUNT_TYPE(T_UNDEF);
4337 COUNT_TYPE(T_IMEMO);
4338 COUNT_TYPE(T_NODE);
4339 COUNT_TYPE(T_ICLASS);
4340 COUNT_TYPE(T_ZOMBIE);
4341 COUNT_TYPE(T_MOVED);
4342 #undef COUNT_TYPE
4343 default: return SIZET2NUM(type); break;
4344 }
4345}
4346
4347/*
4348 * call-seq:
4349 * ObjectSpace.count_objects([result_hash]) -> hash
4350 *
4351 * Counts all objects grouped by type.
4352 *
4353 * It returns a hash, such as:
4354 * {
4355 * :TOTAL=>10000,
4356 * :FREE=>3011,
4357 * :T_OBJECT=>6,
4358 * :T_CLASS=>404,
4359 * # ...
4360 * }
4361 *
4362 * The contents of the returned hash are implementation specific.
4363 * It may be changed in future.
4364 *
4365 * The keys starting with +:T_+ mean live objects.
4366 * For example, +:T_ARRAY+ is the number of arrays.
4367 * +:FREE+ means object slots which are not currently used.
4368 * +:TOTAL+ means the sum of the above.
4369 *
4370 * If the optional argument +result_hash+ is given,
4371 * it is overwritten and returned. This is intended to avoid probe effect.
4372 *
4373 * h = {}
4374 * ObjectSpace.count_objects(h)
4375 * puts h
4376 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
4377 *
4378 * This method is only expected to work on C Ruby.
4379 *
4380 */
4381
4382static VALUE
4383count_objects(int argc, VALUE *argv, VALUE os)
4384{
4385 rb_objspace_t *objspace = &rb_objspace;
4386 size_t counts[T_MASK+1];
4387 size_t freed = 0;
4388 size_t total = 0;
4389 size_t i;
4390 VALUE hash = Qnil;
4391
4392 if (rb_check_arity(argc, 0, 1) == 1) {
4393 hash = argv[0];
4394 if (!RB_TYPE_P(hash, T_HASH))
4395 rb_raise(rb_eTypeError, "non-hash given");
4396 }
4397
4398 for (i = 0; i <= T_MASK; i++) {
4399 counts[i] = 0;
4400 }
4401
4402 for (i = 0; i < heap_allocated_pages; i++) {
4403 struct heap_page *page = heap_pages_sorted[i];
4404 RVALUE *p, *pend;
4405
4406 p = page->start; pend = p + page->total_slots;
4407 for (;p < pend; p++) {
4408 VALUE vp = (VALUE)p;
4409 void *poisoned = asan_poisoned_object_p(vp);
4410 asan_unpoison_object(vp, false);
4411 if (p->as.basic.flags) {
4412 counts[BUILTIN_TYPE(vp)]++;
4413 }
4414 else {
4415 freed++;
4416 }
4417 if (poisoned) {
4418 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4419 asan_poison_object(vp);
4420 }
4421 }
4422 total += page->total_slots;
4423 }
4424
4425 if (hash == Qnil) {
4426 hash = rb_hash_new();
4427 }
4428 else if (!RHASH_EMPTY_P(hash)) {
4429 rb_hash_stlike_foreach(hash, set_zero, hash);
4430 }
4431 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
4432 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
4433
4434 for (i = 0; i <= T_MASK; i++) {
4435 VALUE type = type_sym(i);
4436 if (counts[i])
4437 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
4438 }
4439
4440 return hash;
4441}
4442
4443/*
4444 ------------------------ Garbage Collection ------------------------
4445*/
4446
4447/* Sweeping */
4448
4449static size_t
4450objspace_available_slots(rb_objspace_t *objspace)
4451{
4452 return heap_eden->total_slots + heap_tomb->total_slots;
4453}
4454
4455static size_t
4456objspace_live_slots(rb_objspace_t *objspace)
4457{
4458 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
4459 }
4460
4461static size_t
4462objspace_free_slots(rb_objspace_t *objspace)
4463{
4464 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4465}
4466
4467static void
4468gc_setup_mark_bits(struct heap_page *page)
4469{
4470 /* copy oldgen bitmap to mark bitmap */
4471 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_BITMAP_SIZE);
4472}
4473
4474static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
4475static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
4476
4477static void
4478lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
4479{
4480#if defined(_WIN32)
4481 DWORD old_protect;
4482
4483 if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
4484#else
4485 if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
4486#endif
4487 rb_bug("Couldn't protect page %p", (void *)body);
4488 } else {
4489 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
4490 }
4491}
4492
4493static void
4494unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
4495{
4496#if defined(_WIN32)
4497 DWORD old_protect;
4498
4499 if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
4500#else
4501 if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
4502#endif
4503 rb_bug("Couldn't unprotect page %p", (void *)body);
4504 } else {
4505 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
4506 }
4507}
4508
4509static short
4510try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE dest)
4511{
4512 struct heap_page * cursor = heap->compact_cursor;
4513 char from_freelist = 0;
4514
4515 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
4516
4517 /* T_NONE objects came from the free list. If the object is *not* a
4518 * T_NONE, it is an object that just got freed but hasn't been
4519 * added to the freelist yet */
4520
4521 if (BUILTIN_TYPE(dest) == T_NONE) {
4522 from_freelist = 1;
4523 }
4524
4525 while (1) {
4526 size_t index = heap->compact_cursor_index;
4527
4528 bits_t *mark_bits = cursor->mark_bits;
4529 bits_t *pin_bits = cursor->pinned_bits;
4530 RVALUE * p = cursor->start;
4531 RVALUE * offset = p - NUM_IN_PAGE(p);
4532
4533 /* Find an object to move and move it. Movable objects must be
4534 * marked, so we iterate using the marking bitmap */
4535 for (size_t i = index; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4536 bits_t bits = mark_bits[i] & ~pin_bits[i];
4537
4538 if (bits) {
4539 p = offset + i * BITS_BITLENGTH;
4540
4541 do {
4542 if (bits & 1) {
4543 /* We're trying to move "p" */
4544 objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)p)]++;
4545
4546 if (gc_is_moveable_obj(objspace, (VALUE)p)) {
4547 /* We were able to move "p" */
4548 objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)p)]++;
4549 objspace->rcompactor.total_moved++;
4550 gc_move(objspace, (VALUE)p, dest);
4551 gc_pin(objspace, (VALUE)p);
4552 heap->compact_cursor_index = i;
4553 if (from_freelist) {
4554 FL_SET((VALUE)p, FL_FROM_FREELIST);
4555 }
4556
4557 return 1;
4558 }
4559 }
4560 p++;
4561 bits >>= 1;
4562 } while (bits);
4563 }
4564 }
4565
4566 /* We couldn't find a movable object on the compact cursor, so let's
4567 * move to the next page (previous page since we are traveling in the
4568 * opposite direction of the sweep cursor) and look there. */
4569
4570 struct heap_page * next;
4571
4572 next = list_prev(&heap->pages, cursor, page_node);
4573
4574 /* Protect the current cursor since it probably has T_MOVED slots. */
4575 lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4576
4577 heap->compact_cursor = next;
4578 heap->compact_cursor_index = 0;
4579 cursor = next;
4580
4581 // The cursors have met, so let's quit. We set `heap->compact_cursor` equal
4582 // to `heap->sweeping_page` so we know how far to iterate through
4583 // the heap when unprotecting pages.
4584 if (next == sweep_page) {
4585 break;
4586 }
4587 }
4588
4589 return 0;
4590}
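/*
 * try_move() above is the "compact finger" half of a classic two-finger
 * compactor: the sweep cursor walks the heap forward looking for holes
 * while the compact cursor walks backward looking for movable objects,
 * and compaction stops when the two meet.  A minimal, self-contained
 * sketch of the idea (illustrative only, not part of gc.c; slot_t and
 * its `live` flag are hypothetical stand-ins for RVALUE slots and mark
 * bits):
 *
 *     #include <stdbool.h>
 *     #include <stddef.h>
 *
 *     typedef struct { bool live; int payload; } slot_t;
 *
 *     static void two_finger_compact(slot_t *heap, size_t n)
 *     {
 *         if (n == 0) return;
 *         size_t free_i = 0, scan_i = n - 1;
 *         while (free_i < scan_i) {
 *             while (free_i < scan_i && heap[free_i].live) free_i++;   // next hole
 *             while (free_i < scan_i && !heap[scan_i].live) scan_i--;  // next live
 *             if (free_i < scan_i) {
 *                 heap[free_i] = heap[scan_i];   // move the object down
 *                 heap[scan_i].live = false;     // old slot becomes a hole
 *             }
 *         }
 *     }
 *
 * gc.c additionally leaves a T_MOVED forwarding object in the vacated
 * slot so that stale references can be found and updated later.
 */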
4591
4592static void
4593gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4594{
4595 struct heap_page *cursor = heap->compact_cursor;
4596
4597 while (cursor) {
4598 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4599 cursor = list_next(&heap->pages, cursor, page_node);
4600 }
4601}
4602
4603static void gc_update_references(rb_objspace_t * objspace, rb_heap_t *heap);
4604static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
4605
4606static void read_barrier_handler(intptr_t address)
4607{
4608 VALUE obj;
4609 rb_objspace_t * objspace = &rb_objspace;
4610
4611 address -= address % sizeof(RVALUE);
4612
4613 obj = (VALUE)address;
4614
4615 RB_VM_LOCK_ENTER();
4616 {
4617 unlock_page_body(objspace, GET_PAGE_BODY(obj));
4618
4619 objspace->profile.read_barrier_faults++;
4620
4621 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
4622 }
4623 RB_VM_LOCK_LEAVE();
4624}
4625
4626#if defined(_WIN32)
4627static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
4628typedef void (*signal_handler)(int);
4629static signal_handler old_sigsegv_handler;
4630
4631static LONG WINAPI read_barrier_signal(EXCEPTION_POINTERS * info)
4632{
4633 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
4634 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
4635 /* > The second array element specifies the virtual address of the inaccessible data.
4636 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
4637 *
4638 * Use this address to invalidate the page */
4639 read_barrier_handler((intptr_t)info->ExceptionRecord->ExceptionInformation[1]);
4640 return EXCEPTION_CONTINUE_EXECUTION;
4641 } else {
4642 return EXCEPTION_CONTINUE_SEARCH;
4643 }
4644}
4645
4646static void
4647uninstall_handlers(void)
4648{
4649 signal(SIGSEGV, old_sigsegv_handler);
4650 SetUnhandledExceptionFilter(old_handler);
4651}
4652
4653static void
4654install_handlers(void)
4655{
4656 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
4657 old_sigsegv_handler = signal(SIGSEGV, NULL);
4658 /* Unhandled Exception Filter has access to the violation address similar
4659 * to si_addr from sigaction */
4660 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
4661}
4662#else
4663static struct sigaction old_sigbus_handler;
4664static struct sigaction old_sigsegv_handler;
4665
4666static void
4667read_barrier_signal(int sig, siginfo_t * info, void * data)
4668{
4669 // reinstall the previous SEGV/BUS handlers, so a genuine fault inside this handler is not re-handled here
4670 struct sigaction prev_sigbus, prev_sigsegv;
4671 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
4672 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
4673
4674 // unblock SIGBUS/SIGSEGV, which are blocked while this handler runs
4675 sigset_t set, prev_set;
4676 sigemptyset(&set);
4677 sigaddset(&set, SIGBUS);
4678 sigaddset(&set, SIGSEGV);
4679 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
4680
4681 // run handler
4682 read_barrier_handler((intptr_t)info->si_addr);
4683
4684 // re-arm the read-barrier SEGV/BUS handlers and signal mask
4685 sigaction(SIGBUS, &prev_sigbus, NULL);
4686 sigaction(SIGSEGV, &prev_sigsegv, NULL);
4687 sigprocmask(SIG_SETMASK, &prev_set, NULL);
4688}
4689
4690static void
4691uninstall_handlers(void)
4692{
4693 sigaction(SIGBUS, &old_sigbus_handler, NULL);
4694 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
4695}
4696
4697static void
4698install_handlers(void)
4699{
4700 struct sigaction action;
4701 memset(&action, 0, sizeof(struct sigaction));
4702 sigemptyset(&action.sa_mask);
4703 action.sa_sigaction = read_barrier_signal;
4704 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
4705
4706 sigaction(SIGBUS, &action, &old_sigbus_handler);
4707 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
4708}
4709#endif
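/*
 * The handlers installed above implement a page-protection read barrier:
 * pages full of T_MOVED slots are mprotect()ed, and the first stray read
 * faults into the handler, which unprotects the page and invalidates the
 * moved objects on it.  A minimal POSIX sketch of the mechanism
 * (illustrative only, not part of gc.c; calling mprotect() from a signal
 * handler is not strictly async-signal-safe, but it is the standard
 * trick and gc.c relies on the same behavior):
 *
 *     #include <signal.h>
 *     #include <stdint.h>
 *     #include <string.h>
 *     #include <sys/mman.h>
 *     #include <unistd.h>
 *
 *     static long page_size;
 *
 *     static void on_fault(int sig, siginfo_t *info, void *ctx)
 *     {
 *         (void)sig; (void)ctx;
 *         uintptr_t a = (uintptr_t)info->si_addr;
 *         a -= a % page_size;                       // page-align the fault
 *         mprotect((void *)a, page_size, PROT_READ | PROT_WRITE);
 *     }                                             // faulting access is retried
 *
 *     int main(void)
 *     {
 *         page_size = sysconf(_SC_PAGESIZE);
 *         char *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *         struct sigaction sa;
 *         memset(&sa, 0, sizeof(sa));
 *         sigemptyset(&sa.sa_mask);
 *         sa.sa_sigaction = on_fault;
 *         sa.sa_flags = SA_SIGINFO;
 *         sigaction(SIGSEGV, &sa, NULL);
 *         sigaction(SIGBUS, &sa, NULL);             // macOS raises SIGBUS instead
 *
 *         mprotect(page, page_size, PROT_NONE);     // arm the barrier
 *         page[0] = 1;                              // fault -> unprotect -> retry
 *         return page[0] == 1 ? 0 : 1;
 *     }
 */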
4710
4711static void
4712revert_stack_objects(VALUE stack_obj, void *ctx)
4713{
4714 rb_objspace_t * objspace = (rb_objspace_t*)ctx;
4715
4716 if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
4717 /* For now we'll revert the whole page if the object made it to the
4718 * stack. I think we can change this to move just the one object
4719 * back though */
4720 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
4721 }
4722}
4723
4724static void
4725check_stack_for_moved(rb_objspace_t *objspace)
4726{
4727 rb_execution_context_t *ec = GET_EC();
4728 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4729 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
4730}
4731
4732static void
4733gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
4734{
4735 GC_ASSERT(heap->sweeping_page == heap->compact_cursor);
4736
4737 gc_unprotect_pages(objspace, heap);
4738 uninstall_handlers();
4739
4740 /* The mutator is allowed to run during incremental sweeping. T_MOVED
4741 * objects can get pushed on the stack and when the compaction process
4742 * finishes up, it may remove the read barrier before anything has a
4743 * chance to read from the T_MOVED address. To fix this, we scan the stack
4744 * then revert any moved objects that made it to the stack. */
4745 check_stack_for_moved(objspace);
4746
4747 gc_update_references(objspace, heap);
4748 heap->compact_cursor = NULL;
4749 heap->compact_cursor_index = 0;
4750 objspace->profile.compact_count++;
4751 if (gc_prof_enabled(objspace)) {
4752 gc_profile_record *record = gc_prof_record(objspace);
4753 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
4754 }
4756 objspace->flags.during_compacting = FALSE;
4757}
4758
4759static int
4760gc_fill_swept_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, int *freed_slots, int *empty_slots)
4761{
4762 /* Find any pinned but not marked objects and try to fill those slots */
4763 int i;
4764 int moved_slots = 0;
4765 int finished_compacting = 0;
4766 bits_t *mark_bits, *pin_bits;
4767 bits_t bitset;
4768 RVALUE *p, *offset;
4769
4770 mark_bits = sweep_page->mark_bits;
4771 pin_bits = sweep_page->pinned_bits;
4772
4773 p = sweep_page->start;
4774 offset = p - NUM_IN_PAGE(p);
4775
4776 struct heap_page * cursor = heap->compact_cursor;
4777
4778 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4779
4780 for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4781 /* *Want to move* objects are pinned but not marked. */
4782 bitset = pin_bits[i] & ~mark_bits[i];
4783
4784 if (bitset) {
4785 p = offset + i * BITS_BITLENGTH;
4786 do {
4787 if (bitset & 1) {
4788 VALUE dest = (VALUE)p;
4789
4790 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest));
4791 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
4792
4793 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest);
4794
4795 if (finished_compacting) {
4796 if (BUILTIN_TYPE(dest) == T_NONE) {
4797 (*empty_slots)++;
4798 } else {
4799 (*freed_slots)++;
4800 }
4801 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)dest, sizeof(RVALUE));
4802 heap_page_add_freeobj(objspace, sweep_page, dest);
4803 } else {
4804 /* Zombie slots don't get marked, but we can't reuse
4805 * their memory until they have their finalizers run.*/
4806 if (BUILTIN_TYPE(dest) != T_ZOMBIE) {
4807 if (!try_move(objspace, heap, sweep_page, dest)) {
4808 finished_compacting = 1;
4809 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
4810 gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
4811 if (BUILTIN_TYPE(dest) == T_NONE) {
4812 (*empty_slots)++;
4813 } else {
4814 (*freed_slots)++;
4815 }
4816 heap_page_add_freeobj(objspace, sweep_page, dest);
4817 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(dest));
4818 } else {
4819 moved_slots++;
4820 }
4821 }
4822 }
4823 }
4824 p++;
4825 bitset >>= 1;
4826 } while (bitset);
4827 }
4828 }
4829
4830 lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));
4831
4832 return finished_compacting;
4833}
4834
4835static inline int
4836gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
4837{
4838 int i;
4839 int empty_slots = 0, freed_slots = 0, final_slots = 0;
4840 int was_compacting = 0;
4841 RVALUE *p, *offset;
4842 bits_t *bits, bitset;
4843
4844 gc_report(2, objspace, "page_sweep: start.\n");
4845
4846 if (heap->compact_cursor) {
4847 if (sweep_page == heap->compact_cursor) {
4848 /* The compaction cursor and sweep page met, so we need to quit compacting */
4849 gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
4850 gc_compact_finish(objspace, heap);
4851 } else {
4852 /* We anticipate filling the page, so NULL out the freelist. */
4853 asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
4854 sweep_page->freelist = NULL;
4855 asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
4856 was_compacting = 1;
4857 }
4858 }
4859
4860 sweep_page->flags.before_sweep = FALSE;
4861
4862 p = sweep_page->start;
4863 offset = p - NUM_IN_PAGE(p);
4864 bits = sweep_page->mark_bits;
4865
4866 /* create guard : fill 1 out-of-range */
4867 bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
4868
4869 int out_of_range_bits = (NUM_IN_PAGE(p) + sweep_page->total_slots) % BITS_BITLENGTH;
4870 if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
4871 bits[BITMAP_INDEX(p) + sweep_page->total_slots / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
4872 }
4873
4874 for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4875 bitset = ~bits[i];
4876 if (bitset) {
4877 p = offset + i * BITS_BITLENGTH;
4878 do {
4879 VALUE vp = (VALUE)p;
4880 asan_unpoison_object(vp, false);
4881 if (bitset & 1) {
4882 switch (BUILTIN_TYPE(vp)) {
4883 default: /* majority case */
4884 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
4885#if RGENGC_CHECK_MODE
4886 if (!is_full_marking(objspace)) {
4887 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
4888 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
4889 }
4890#endif
4891 if (obj_free(objspace, vp)) {
4892 final_slots++;
4893 }
4894 else {
4895 if (heap->compact_cursor) {
4896 /* We *want* to fill this slot */
4897 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
4898 } else {
4899 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
4900 heap_page_add_freeobj(objspace, sweep_page, vp);
4901 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
4902 freed_slots++;
4903 }
4904
4905 }
4906 break;
4907
4908 /* minor cases */
4909 case T_MOVED:
4910 if (objspace->flags.during_compacting) {
4911 /* The sweep cursor shouldn't have made it to any
4912 * T_MOVED slots while the compact flag is enabled.
4913 * The sweep cursor and compact cursor move in
4914 * opposite directions, and when they meet references will
4915 * get updated and "during_compacting" should get disabled */
4916 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
4917 }
4918 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
4919 if (FL_TEST(vp, FL_FROM_FREELIST)) {
4920 empty_slots++;
4921 } else {
4922 freed_slots++;
4923 }
4924 heap_page_add_freeobj(objspace, sweep_page, vp);
4925 break;
4926 case T_ZOMBIE:
4927 /* already counted */
4928 break;
4929 case T_NONE:
4930 if (heap->compact_cursor) {
4931 /* We *want* to fill this slot */
4932 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
4933 } else {
4934 /* When we started sweeping this page, we were in
4935 * compacting mode and nulled the free list for
4936 * the page. But compaction finished, so we need to
4937 * put any T_NONE slots back on the freelist. */
4938 if (was_compacting) {
4939 heap_page_add_freeobj(objspace, sweep_page, vp);
4940 }
4941 empty_slots++; /* already freed */
4942 }
4943 break;
4944 }
4945 }
4946 p++;
4947 bitset >>= 1;
4948 } while (bitset);
4949 }
4950 }
4951
4952 if (heap->compact_cursor) {
4953 if (gc_fill_swept_page(objspace, heap, sweep_page, &freed_slots, &empty_slots)) {
4954 gc_compact_finish(objspace, heap);
4955 }
4956 }
4957
4958 if (!heap->compact_cursor) {
4959 gc_setup_mark_bits(sweep_page);
4960 }
4961
4962#if GC_PROFILE_MORE_DETAIL
4963 if (gc_prof_enabled(objspace)) {
4964 gc_profile_record *record = gc_prof_record(objspace);
4965 record->removing_objects += final_slots + freed_slots;
4966 record->empty_objects += empty_slots;
4967 }
4968#endif
4969 if (0) fprintf(stderr, "gc_page_sweep(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
4970 rb_gc_count(),
4971 sweep_page->total_slots,
4972 freed_slots, empty_slots, final_slots);
4973
4974 sweep_page->free_slots = freed_slots + empty_slots;
4975 objspace->profile.total_freed_objects += freed_slots;
4976
4977 if (heap_pages_deferred_final && !finalizing) {
4978 rb_thread_t *th = GET_THREAD();
4979 if (th) {
4980 gc_finalize_deferred_register(objspace);
4981 }
4982 }
4983
4984 gc_report(2, objspace, "page_sweep: end.\n");
4985
4986 return freed_slots + empty_slots;
4987}
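/*
 * The sweep loop above is the bitmap-scan idiom used throughout this
 * file: take one machine word of the bitmap, derive the first slot it
 * covers from the word index, then peel bits off with `bits & 1` and
 * `bits >>= 1`, advancing the slot pointer in lockstep.  A minimal,
 * self-contained sketch (the names here are hypothetical, chosen for
 * illustration only):
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     #define WORD_BITS 64
 *
 *     static void each_set_bit(const uint64_t *words, size_t nwords)
 *     {
 *         for (size_t i = 0; i < nwords; i++) {
 *             uint64_t bits = words[i];
 *             size_t slot = i * WORD_BITS;       // first slot of this word
 *             while (bits) {
 *                 if (bits & 1) printf("slot %zu is set\n", slot);
 *                 slot++;
 *                 bits >>= 1;
 *             }
 *         }
 *     }
 *
 *     int main(void)
 *     {
 *         uint64_t bitmap[2] = { 0x5, 0x1 };     // slots 0, 2 and 64
 *         each_set_bit(bitmap, 2);
 *         return 0;
 *     }
 *
 * Sweeping scans ~bits[i] (unmarked slots); compaction scans
 * mark & ~pin (movable objects) or pin & ~mark (moved or want-to-move
 * slots) instead.
 */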
4988
4989/* allocate additional minimum page to work */
4990static void
4991gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4992{
4993 if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
4994 /* there are no free pages after page_sweep() */
4995 heap_set_increment(objspace, 1);
4996 if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
4997 rb_memerror();
4998 }
4999 }
5000}
5001
5002static const char *
5003gc_mode_name(enum gc_mode mode)
5004{
5005 switch (mode) {
5006 case gc_mode_none: return "none";
5007 case gc_mode_marking: return "marking";
5008 case gc_mode_sweeping: return "sweeping";
5009 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5010 }
5011}
5012
5013static void
5014gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5015{
5016#if RGENGC_CHECK_MODE
5017 enum gc_mode prev_mode = gc_mode(objspace);
5018 switch (prev_mode) {
5019 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5020 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5021 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
5022 }
5023#endif
5024 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5025 gc_mode_set(objspace, mode);
5026}
5027
5028static void
5029gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5030{
5031 heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
5032 heap->free_pages = NULL;
5033#if GC_ENABLE_INCREMENTAL_MARK
5034 heap->pooled_pages = NULL;
5035 objspace->rincgc.pooled_slots = 0;
5036#endif
5037
5038 rb_ractor_t *r = NULL;
5039 list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5040 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5041 }
5042}
5043
5044#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5045__attribute__((noinline))
5046#endif
5047static void
5048gc_sweep_start(rb_objspace_t *objspace)
5049{
5050 gc_mode_transition(objspace, gc_mode_sweeping);
5051 gc_sweep_start_heap(objspace, heap_eden);
5052}
5053
5054static void
5055gc_sweep_finish(rb_objspace_t *objspace)
5056{
5057 gc_report(1, objspace, "gc_sweep_finish\n");
5058
5059 gc_prof_set_heap_info(objspace);
5060 heap_pages_free_unused_pages(objspace);
5061
5062 /* if heap_pages has unused pages, then assign them to increment */
5063 if (heap_allocatable_pages < heap_tomb->total_pages) {
5064 heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
5065 }
5066
5067 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
5068 gc_mode_transition(objspace, gc_mode_none);
5069
5070#if RGENGC_CHECK_MODE >= 2
5071 gc_verify_internal_consistency(objspace);
5072#endif
5073}
5074
5075static int
5076gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
5077{
5078 struct heap_page *sweep_page = heap->sweeping_page;
5079 int unlink_limit = 3;
5080 int swept_slots = 0;
5081
5082#if GC_ENABLE_INCREMENTAL_MARK
5083 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5084
5085 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5086#else
5087 gc_report(2, objspace, "gc_sweep_step\n");
5088#endif
5089
5090 if (sweep_page == NULL) return FALSE;
5091
5092#if GC_ENABLE_LAZY_SWEEP
5093 gc_prof_sweep_timer_start(objspace);
5094#endif
5095
5096 do {
5097 RUBY_DEBUG_LOG("sweep_page:%p", sweep_page);
5098 int free_slots = gc_page_sweep(objspace, heap, sweep_page);
5099 heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
5100
5101 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5102 heap_pages_freeable_pages > 0 &&
5103 unlink_limit > 0) {
5104 heap_pages_freeable_pages--;
5105 unlink_limit--;
5106 /* there are no living objects -> move this page to tomb heap */
5107 heap_unlink_page(objspace, heap, sweep_page);
5108 heap_add_page(objspace, heap_tomb, sweep_page);
5109 }
5110 else if (free_slots > 0) {
5111#if GC_ENABLE_INCREMENTAL_MARK
5112 if (need_pool) {
5113 heap_add_poolpage(objspace, heap, sweep_page);
5114 need_pool = FALSE;
5115 }
5116 else {
5117 heap_add_freepage(heap, sweep_page);
5118 swept_slots += free_slots;
5119 if (swept_slots > 2048) {
5120 break;
5121 }
5122 }
5123#else
5124 heap_add_freepage(heap, sweep_page);
5125#endif
5126 }
5127 else {
5128 sweep_page->free_next = NULL;
5129 }
5130 } while ((sweep_page = heap->sweeping_page));
5131
5132 if (!heap->sweeping_page) {
5133 gc_sweep_finish(objspace);
5134 }
5135
5136#if GC_ENABLE_LAZY_SWEEP
5137 gc_prof_sweep_timer_stop(objspace);
5138#endif
5139
5140 GC_ASSERT(gc_mode(objspace) == gc_mode_sweeping ? heap->free_pages != NULL : 1);
5141
5142 return heap->free_pages != NULL;
5143}
5144
5145static void
5146gc_sweep_rest(rb_objspace_t *objspace)
5147{
5148 rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
5149
5150 while (has_sweeping_pages(heap)) {
5151 gc_sweep_step(objspace, heap);
5152 }
5153}
5154
5155static void
5156gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
5157{
5158 GC_ASSERT(dont_gc_val() == FALSE);
5159 if (!GC_ENABLE_LAZY_SWEEP) return;
5160
5161 unsigned int lock_lev;
5162 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
5163 gc_sweep_step(objspace, heap);
5164 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
5165}
5166
5167static void
5168invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
5169{
5170 int i;
5171 int empty_slots = 0, freed_slots = 0;
5172 bits_t *mark_bits, *pin_bits;
5173 bits_t bitset;
5174 RVALUE *p, *offset;
5175
5176 mark_bits = page->mark_bits;
5177 pin_bits = page->pinned_bits;
5178
5179 p = page->start;
5180 offset = p - NUM_IN_PAGE(p);
5181
5182 for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5183 /* Moved objects are pinned but never marked. We reuse the pin bits
5184 * to indicate there is a moved object in this slot. */
5185 bitset = pin_bits[i] & ~mark_bits[i];
5186
5187 if (bitset) {
5188 p = offset + i * BITS_BITLENGTH;
5189 do {
5190 if (bitset & 1) {
5191 VALUE forwarding_object = (VALUE)p;
5192 VALUE object;
5193
5194 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
5195 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
5196 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5197
5198 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
5199
5200 object = rb_gc_location(forwarding_object);
5201
5202 if (FL_TEST(forwarding_object, FL_FROM_FREELIST)) {
5203 empty_slots++; /* already freed */
5204 } else {
5205 freed_slots++;
5206 }
5207
5208 gc_move(objspace, object, forwarding_object);
5209 /* forwarding_object is now our actual object, and "object"
5210 * is the free slot for the original page */
5211 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(object), object);
5212
5213 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5214 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
5215 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
5216 }
5217 }
5218 p++;
5219 bitset >>= 1;
5220 } while (bitset);
5221 }
5222 }
5223
5224 page->free_slots += (empty_slots + freed_slots);
5225 objspace->profile.total_freed_objects += freed_slots;
5226}
5227
5228static void
5229gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
5230{
5231 heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
5232 heap->compact_cursor_index = 0;
5233
5234 if (gc_prof_enabled(objspace)) {
5235 gc_profile_record *record = gc_prof_record(objspace);
5236 record->moved_objects = objspace->rcompactor.total_moved;
5237 }
5238
5239 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
5240 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
5241
5242 /* Set up read barrier for pages containing MOVED objects */
5243 install_handlers();
5244}
5245
5246static void
5247gc_sweep(rb_objspace_t *objspace)
5248{
5249 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
5250
5251 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
5252
5253 if (immediate_sweep) {
5254#if !GC_ENABLE_LAZY_SWEEP
5255 gc_prof_sweep_timer_start(objspace);
5256#endif
5257 gc_sweep_start(objspace);
5258 if (objspace->flags.during_compacting) {
5259 struct heap_page *page = NULL;
5260
5261 list_for_each(&heap_eden->pages, page, page_node) {
5262 page->flags.before_sweep = TRUE;
5263 }
5264
5265 gc_compact_start(objspace, heap_eden);
5266 }
5267
5268 gc_sweep_rest(objspace);
5269#if !GC_ENABLE_LAZY_SWEEP
5270 gc_prof_sweep_timer_stop(objspace);
5271#endif
5272 }
5273 else {
5274 struct heap_page *page = NULL;
5275 gc_sweep_start(objspace);
5276
5277 if (ruby_enable_autocompact && is_full_marking(objspace)) {
5278 gc_compact_start(objspace, heap_eden);
5279 }
5280
5281 list_for_each(&heap_eden->pages, page, page_node) {
5282 page->flags.before_sweep = TRUE;
5283 }
5284 gc_sweep_step(objspace, heap_eden);
5285 }
5286
5287 gc_heap_prepare_minimum_pages(objspace, heap_eden);
5288}
5289
5290/* Marking - Marking stack */
5291
5292static stack_chunk_t *
5293stack_chunk_alloc(void)
5294{
5295 stack_chunk_t *res;
5296
5297 res = malloc(sizeof(stack_chunk_t));
5298 if (!res)
5299 rb_memerror();
5300
5301 return res;
5302}
5303
5304static inline int
5305is_mark_stack_empty(mark_stack_t *stack)
5306{
5307 return stack->chunk == NULL;
5308}
5309
5310static size_t
5311mark_stack_size(mark_stack_t *stack)
5312{
5313 size_t size = stack->index;
5314 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
5315
5316 while (chunk) {
5317 size += stack->limit;
5318 chunk = chunk->next;
5319 }
5320 return size;
5321}
5322
5323static void
5324add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
5325{
5326 chunk->next = stack->cache;
5327 stack->cache = chunk;
5328 stack->cache_size++;
5329}
5330
5331static void
5332shrink_stack_chunk_cache(mark_stack_t *stack)
5333{
5334 stack_chunk_t *chunk;
5335
5336 if (stack->unused_cache_size > (stack->cache_size/2)) {
5337 chunk = stack->cache;
5338 stack->cache = stack->cache->next;
5339 stack->cache_size--;
5340 free(chunk);
5341 }
5342 stack->unused_cache_size = stack->cache_size;
5343}
5344
5345static void
5346push_mark_stack_chunk(mark_stack_t *stack)
5347{
5348 stack_chunk_t *next;
5349
5350 GC_ASSERT(stack->index == stack->limit);
5351
5352 if (stack->cache_size > 0) {
5353 next = stack->cache;
5354 stack->cache = stack->cache->next;
5355 stack->cache_size--;
5356 if (stack->unused_cache_size > stack->cache_size)
5357 stack->unused_cache_size = stack->cache_size;
5358 }
5359 else {
5360 next = stack_chunk_alloc();
5361 }
5362 next->next = stack->chunk;
5363 stack->chunk = next;
5364 stack->index = 0;
5365}
5366
5367static void
5368pop_mark_stack_chunk(mark_stack_t *stack)
5369{
5370 stack_chunk_t *prev;
5371
5372 prev = stack->chunk->next;
5373 GC_ASSERT(stack->index == 0);
5374 add_stack_chunk_cache(stack, stack->chunk);
5375 stack->chunk = prev;
5376 stack->index = stack->limit;
5377}
5378
5379static void
5380free_stack_chunks(mark_stack_t *stack)
5381{
5382 stack_chunk_t *chunk = stack->chunk;
5383 stack_chunk_t *next = NULL;
5384
5385 while (chunk != NULL) {
5386 next = chunk->next;
5387 free(chunk);
5388 chunk = next;
5389 }
5390}
5391
5392static void
5393push_mark_stack(mark_stack_t *stack, VALUE data)
5394{
5395 VALUE obj = data;
5396 switch (BUILTIN_TYPE(obj)) {
5397 case T_NIL:
5398 case T_FIXNUM:
5399 case T_MOVED:
5400 rb_bug("push_mark_stack() called for broken object");
5401 break;
5402
5403 case T_NODE:
5404 UNEXPECTED_NODE(push_mark_stack);
5405 break;
5406
5407 default:
5408 break;
5409 }
5410
5411 if (stack->index == stack->limit) {
5412 push_mark_stack_chunk(stack);
5413 }
5414 stack->chunk->data[stack->index++] = data;
5415}
5416
5417static int
5418pop_mark_stack(mark_stack_t *stack, VALUE *data)
5419{
5420 if (is_mark_stack_empty(stack)) {
5421 return FALSE;
5422 }
5423 if (stack->index == 1) {
5424 *data = stack->chunk->data[--stack->index];
5425 pop_mark_stack_chunk(stack);
5426 }
5427 else {
5428 *data = stack->chunk->data[--stack->index];
5429 }
5430 return TRUE;
5431}
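/*
 * The mark stack is a singly linked list of fixed-size chunks, so pushes
 * and pops are O(1) and the stack grows without ever reallocating.  A
 * minimal sketch of the same structure (illustrative only, not part of
 * gc.c, and without gc.c's chunk cache; CHUNK_SIZE stands in for
 * STACK_CHUNK_SIZE):
 *
 *     #include <stdlib.h>
 *
 *     #define CHUNK_SIZE 512
 *
 *     typedef struct chunk { struct chunk *next; void *data[CHUNK_SIZE]; } chunk_t;
 *     typedef struct { chunk_t *top; int index; } mstack_t;   // {NULL, 0} = empty
 *
 *     static void ms_push(mstack_t *s, void *v)
 *     {
 *         if (!s->top || s->index == CHUNK_SIZE) {    // need a fresh chunk
 *             chunk_t *c = malloc(sizeof(chunk_t));
 *             if (!c) abort();
 *             c->next = s->top;
 *             s->top = c;
 *             s->index = 0;
 *         }
 *         s->top->data[s->index++] = v;
 *     }
 *
 *     static int ms_pop(mstack_t *s, void **v)
 *     {
 *         if (!s->top) return 0;                      // empty
 *         *v = s->top->data[--s->index];
 *         if (s->index == 0) {                        // chunk drained: unlink it
 *             chunk_t *c = s->top;
 *             s->top = c->next;
 *             s->index = s->top ? CHUNK_SIZE : 0;     // lower chunks are full
 *             free(c);
 *         }
 *         return 1;
 *     }
 *
 * gc.c keeps drained chunks in a small cache instead of freeing them
 * immediately, which avoids malloc traffic in the middle of marking.
 */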
5432
5433#if GC_ENABLE_INCREMENTAL_MARK
5434static int
5435invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
5436{
5437 int i;
5438 for (i=0; i<limit; i++) {
5439 if (chunk->data[i] == obj) {
5440 chunk->data[i] = Qundef;
5441 return TRUE;
5442 }
5443 }
5444 return FALSE;
5445}
5446
5447static void
5448invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
5449{
5450 stack_chunk_t *chunk = stack->chunk;
5451 int limit = stack->index;
5452
5453 while (chunk) {
5454 if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
5455 chunk = chunk->next;
5456 limit = stack->limit;
5457 }
5458 rb_bug("invalid_mark_stack: unreachable");
5459}
5460#endif
5461
5462static void
5463init_mark_stack(mark_stack_t *stack)
5464{
5465 int i;
5466
5467 MEMZERO(stack, mark_stack_t, 1);
5468 stack->index = stack->limit = STACK_CHUNK_SIZE;
5469 stack->cache_size = 0;
5470
5471 for (i=0; i < 4; i++) {
5472 add_stack_chunk_cache(stack, stack_chunk_alloc());
5473 }
5474 stack->unused_cache_size = stack->cache_size;
5475}
5476
5477/* Marking */
5478
5479#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
5480
5481#define STACK_START (ec->machine.stack_start)
5482#define STACK_END (ec->machine.stack_end)
5483#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
5484
5485#ifdef __EMSCRIPTEN__
5486#undef STACK_GROW_DIRECTION
5487#define STACK_GROW_DIRECTION 1
5488#endif
5489
5490#if STACK_GROW_DIRECTION < 0
5491# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
5492#elif STACK_GROW_DIRECTION > 0
5493# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
5494#else
5495# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
5496 : (size_t)(STACK_END - STACK_START + 1))
5497#endif
5498#if !STACK_GROW_DIRECTION
5499int ruby_stack_grow_direction;
5500int
5501ruby_get_stack_grow_direction(volatile VALUE *addr)
5502{
5503 VALUE *end;
5504 SET_MACHINE_STACK_END(&end);
5505
5506 if (end > addr) return ruby_stack_grow_direction = 1;
5507 return ruby_stack_grow_direction = -1;
5508}
5509#endif
5510
5511size_t
5512ruby_stack_length(VALUE **p)
5513{
5514 rb_execution_context_t *ec = GET_EC();
5515 SET_STACK_END;
5516 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
5517 return STACK_LENGTH;
5518}
5519
5520#define PREVENT_STACK_OVERFLOW 1
5521#ifndef PREVENT_STACK_OVERFLOW
5522#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
5523# define PREVENT_STACK_OVERFLOW 1
5524#else
5525# define PREVENT_STACK_OVERFLOW 0
5526#endif
5527#endif
5528#if PREVENT_STACK_OVERFLOW
5529static int
5530stack_check(rb_execution_context_t *ec, int water_mark)
5531{
5532 SET_STACK_END;
5533
5534 size_t length = STACK_LENGTH;
5535 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
5536
5537 return length > maximum_length;
5538}
5539#else
5540#define stack_check(ec, water_mark) FALSE
5541#endif
5542
5543#define STACKFRAME_FOR_CALL_CFUNC 2048
5544
5544
5545MJIT_FUNC_EXPORTED int
5546rb_ec_stack_check(rb_execution_context_t *ec)
5547{
5548 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
5549}
5550
5551int
5552ruby_stack_check(void)
5553{
5554 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
5555}
5556
5557ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n));
5558static void
5559mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
5560{
5561 VALUE v;
5562 while (n--) {
5563 v = *x;
5564 gc_mark_maybe(objspace, v);
5565 x++;
5566 }
5567}
5568
5569static void
5570gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
5571{
5572 long n;
5573
5574 if (end <= start) return;
5575 n = end - start;
5576 mark_locations_array(objspace, start, n);
5577}
5578
5579void
5580rb_gc_mark_locations(const VALUE *start, const VALUE *end)
5581{
5582 gc_mark_locations(&rb_objspace, start, end);
5583}
5584
5585static void
5586gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
5587{
5588 long i;
5589
5590 for (i=0; i<n; i++) {
5591 gc_mark(objspace, values[i]);
5592 }
5593}
5594
5595void
5596rb_gc_mark_values(long n, const VALUE *values)
5597{
5598 long i;
5599 rb_objspace_t *objspace = &rb_objspace;
5600
5601 for (i=0; i<n; i++) {
5602 gc_mark_and_pin(objspace, values[i]);
5603 }
5604}
5605
5606static void
5607gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
5608{
5609 long i;
5610
5611 for (i=0; i<n; i++) {
5612 if (is_markable_object(objspace, values[i])) {
5613 gc_mark_and_pin(objspace, values[i]);
5614 }
5615 }
5616}
5617
5618void
5619rb_gc_mark_vm_stack_values(long n, const VALUE *values)
5620{
5621 rb_objspace_t *objspace = &rb_objspace;
5622 gc_mark_stack_values(objspace, n, values);
5623}
5624
5625static int
5626mark_value(st_data_t key, st_data_t value, st_data_t data)
5627{
5628 rb_objspace_t *objspace = (rb_objspace_t *)data;
5629 gc_mark(objspace, (VALUE)value);
5630 return ST_CONTINUE;
5631}
5632
5633static int
5634mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
5635{
5636 rb_objspace_t *objspace = (rb_objspace_t *)data;
5637 gc_mark_and_pin(objspace, (VALUE)value);
5638 return ST_CONTINUE;
5639}
5640
5641static void
5642mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
5643{
5644 if (!tbl || tbl->num_entries == 0) return;
5645 st_foreach(tbl, mark_value, (st_data_t)objspace);
5646}
5647
5648static void
5649mark_tbl(rb_objspace_t *objspace, st_table *tbl)
5650{
5651 if (!tbl || tbl->num_entries == 0) return;
5652 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
5653}
5654
5655static int
5656mark_key(st_data_t key, st_data_t value, st_data_t data)
5657{
5658 rb_objspace_t *objspace = (rb_objspace_t *)data;
5659 gc_mark_and_pin(objspace, (VALUE)key);
5660 return ST_CONTINUE;
5661}
5662
5663static void
5664mark_set(rb_objspace_t *objspace, st_table *tbl)
5665{
5666 if (!tbl) return;
5667 st_foreach(tbl, mark_key, (st_data_t)objspace);
5668}
5669
5670static int
5671pin_value(st_data_t key, st_data_t value, st_data_t data)
5672{
5673 rb_objspace_t *objspace = (rb_objspace_t *)data;
5674 gc_mark_and_pin(objspace, (VALUE)value);
5675 return ST_CONTINUE;
5676}
5677
5678static void
5679mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
5680{
5681 if (!tbl) return;
5682 st_foreach(tbl, pin_value, (st_data_t)objspace);
5683}
5684
5685void
5686rb_mark_set(st_table *tbl)
5687{
5688 mark_set(&rb_objspace, tbl);
5689}
5690
5691static int
5692mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
5693{
5694 rb_objspace_t *objspace = (rb_objspace_t *)data;
5695
5696 gc_mark(objspace, (VALUE)key);
5697 gc_mark(objspace, (VALUE)value);
5698 return ST_CONTINUE;
5699}
5700
5701static int
5702pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
5703{
5704 rb_objspace_t *objspace = (rb_objspace_t *)data;
5705
5706 gc_mark_and_pin(objspace, (VALUE)key);
5707 gc_mark_and_pin(objspace, (VALUE)value);
5708 return ST_CONTINUE;
5709}
5710
5711static int
5712pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
5713{
5714 rb_objspace_t *objspace = (rb_objspace_t *)data;
5715
5716 gc_mark_and_pin(objspace, (VALUE)key);
5717 gc_mark(objspace, (VALUE)value);
5718 return ST_CONTINUE;
5719}
5720
5721static void
5722mark_hash(rb_objspace_t *objspace, VALUE hash)
5723{
5724 if (rb_hash_compare_by_id_p(hash)) {
5725 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
5726 }
5727 else {
5728 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
5729 }
5730
5731 if (RHASH_AR_TABLE_P(hash)) {
5732 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
5733 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
5734 }
5735 }
5736 else {
5737 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
5738 }
5739 gc_mark(objspace, RHASH(hash)->ifnone);
5740}
5741
5742static void
5743mark_st(rb_objspace_t *objspace, st_table *tbl)
5744{
5745 if (!tbl) return;
5746 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
5747}
5748
5749void
5750rb_mark_hash(st_table *tbl)
5751{
5752 mark_st(&rb_objspace, tbl);
5753}
5754
5755static void
5756mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
5757{
5758 const rb_method_definition_t *def = me->def;
5759
5760 gc_mark(objspace, me->owner);
5761 gc_mark(objspace, me->defined_class);
5762
5763 if (def) {
5764 switch (def->type) {
5765 case VM_METHOD_TYPE_ISEQ:
5766 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
5767 gc_mark(objspace, (VALUE)def->body.iseq.cref);
5768 break;
5769 case VM_METHOD_TYPE_ATTRSET:
5770 case VM_METHOD_TYPE_IVAR:
5771 gc_mark(objspace, def->body.attr.location);
5772 break;
5773 case VM_METHOD_TYPE_BMETHOD:
5774 gc_mark(objspace, def->body.bmethod.proc);
5775 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
5776 break;
5777 case VM_METHOD_TYPE_ALIAS:
5778 gc_mark(objspace, (VALUE)def->body.alias.original_me);
5779 return;
5780 case VM_METHOD_TYPE_REFINED:
5781 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
5782 gc_mark(objspace, (VALUE)def->body.refined.owner);
5783 break;
5784 case VM_METHOD_TYPE_CFUNC:
5785 case VM_METHOD_TYPE_ZSUPER:
5786 case VM_METHOD_TYPE_MISSING:
5787 case VM_METHOD_TYPE_OPTIMIZED:
5788 case VM_METHOD_TYPE_UNDEF:
5789 case VM_METHOD_TYPE_NOTIMPLEMENTED:
5790 break;
5791 }
5792 }
5793}
5794
5795static enum rb_id_table_iterator_result
5796mark_method_entry_i(VALUE me, void *data)
5797{
5798 rb_objspace_t *objspace = (rb_objspace_t *)data;
5799
5800 gc_mark(objspace, me);
5801 return ID_TABLE_CONTINUE;
5802}
5803
5804static void
5805mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
5806{
5807 if (tbl) {
5808 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
5809 }
5810}
5811
5812static enum rb_id_table_iterator_result
5813mark_const_entry_i(VALUE value, void *data)
5814{
5815 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
5816 rb_objspace_t *objspace = data;
5817
5818 gc_mark(objspace, ce->value);
5819 gc_mark(objspace, ce->file);
5820 return ID_TABLE_CONTINUE;
5821}
5822
5823static void
5824mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
5825{
5826 if (!tbl) return;
5827 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
5828}
5829
5830#if STACK_GROW_DIRECTION < 0
5831#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
5832#elif STACK_GROW_DIRECTION > 0
5833#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
5834#else
5835#define GET_STACK_BOUNDS(start, end, appendix) \
5836 ((STACK_END < STACK_START) ? \
5837 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
5838#endif
5839
5840static void mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
5841 const VALUE *stack_start, const VALUE *stack_end);
5842
5843static void
5844mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
5845{
5846 union {
5847 rb_jmp_buf j;
5848 VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
5849 } save_regs_gc_mark;
5850 VALUE *stack_start, *stack_end;
5851
5852 FLUSH_REGISTER_WINDOWS;
5853 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
5854 /* This assumes that all registers are saved into the jmp_buf (and stack) */
5855 rb_setjmp(save_regs_gc_mark.j);
5856
5857 /* SET_STACK_END must be called in this function because
5858 * the stack frame of this function may contain
5859 * callee save registers and they should be marked. */
5860 SET_STACK_END;
5861 GET_STACK_BOUNDS(stack_start, stack_end, 1);
5862
5863 mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
5864
5865 mark_stack_locations(objspace, ec, stack_start, stack_end);
5866}
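/*
 * mark_current_machine_context() relies on a portable trick: setjmp()
 * spills the callee-saved registers into a jmp_buf on the stack, so
 * conservatively scanning that buffer (plus the stack itself) also
 * covers pointers that currently live only in registers.  A minimal,
 * self-contained sketch (illustrative only, not part of gc.c):
 *
 *     #include <setjmp.h>
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     static void scan_words(const void *p, size_t n)
 *     {
 *         const uintptr_t *w = p;
 *         for (size_t i = 0; i < n / sizeof(uintptr_t); i++)
 *             printf("word %zu: %#lx\n", i, (unsigned long)w[i]);
 *     }
 *
 *     int main(void)
 *     {
 *         jmp_buf regs;
 *         setjmp(regs);                    // registers now live in `regs`
 *         scan_words(&regs, sizeof(regs));
 *         return 0;
 *     }
 *
 * Each word is then treated as a *potential* object pointer, which is
 * exactly what gc_mark_maybe() does with the values it is handed.
 */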
5867
5868void
5869rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
5870{
5871 rb_objspace_t *objspace = &rb_objspace;
5872 VALUE *stack_start, *stack_end;
5873
5874 GET_STACK_BOUNDS(stack_start, stack_end, 0);
5875 mark_stack_locations(objspace, ec, stack_start, stack_end);
5876}
5877
5878static void
5879mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
5880 const VALUE *stack_start, const VALUE *stack_end)
5881{
5882
5883 gc_mark_locations(objspace, stack_start, stack_end);
5884
5885#if defined(__mc68000__)
5886 gc_mark_locations(objspace,
5887 (VALUE*)((char*)stack_start + 2),
5888 (VALUE*)((char*)stack_end - 2));
5889#endif
5890}
5891
5892void
5893rb_mark_tbl(st_table *tbl)
5894{
5895 mark_tbl(&rb_objspace, tbl);
5896}
5897
5898void
5899rb_mark_tbl_no_pin(st_table *tbl)
5900{
5901 mark_tbl_no_pin(&rb_objspace, tbl);
5902}
5903
5904static void
5905gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
5906{
5907 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
5908
5909 if (is_pointer_to_heap(objspace, (void *)obj)) {
5910 void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
5911 asan_unpoison_object(obj, false);
5912
5913 /* Garbage can live on the stack, so do not mark or pin */
5914 switch (BUILTIN_TYPE(obj)) {
5915 case T_ZOMBIE:
5916 case T_NONE:
5917 break;
5918 default:
5919 gc_mark_and_pin(objspace, obj);
5920 break;
5921 }
5922
5923 if (ptr) {
5924 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
5925 asan_poison_object(obj);
5926 }
5927 }
5928}
5929
5930void
5931rb_gc_mark_maybe(VALUE obj)
5932{
5933 gc_mark_maybe(&rb_objspace, obj);
5934}
5935
5936static inline int
5937gc_mark_set(rb_objspace_t *objspace, VALUE obj)
5938{
5939 ASSERT_vm_locking();
5940 if (RVALUE_MARKED(obj)) return 0;
5941 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
5942 return 1;
5943}
5944
5945static int
5946gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
5947{
5948 struct heap_page *page = GET_HEAP_PAGE(obj);
5949 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5950
5951 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5952 page->flags.has_uncollectible_shady_objects = TRUE;
5953 MARK_IN_BITMAP(uncollectible_bits, obj);
5954 objspace->rgengc.uncollectible_wb_unprotected_objects++;
5955
5956#if RGENGC_PROFILE > 0
5957 objspace->profile.total_remembered_shady_object_count++;
5958#if RGENGC_PROFILE >= 2
5959 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5960#endif
5961#endif
5962 return TRUE;
5963 }
5964 else {
5965 return FALSE;
5966 }
5967}
5968
5969static void
5970rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
5971{
5972 const VALUE old_parent = objspace->rgengc.parent_object;
5973
5974 if (old_parent) { /* parent object is old */
5975 if (RVALUE_WB_UNPROTECTED(obj)) {
5976 if (gc_remember_unprotected(objspace, obj)) {
5977 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5978 }
5979 }
5980 else {
5981 if (!RVALUE_OLD_P(obj)) {
5982 if (RVALUE_MARKED(obj)) {
5983 /* An object pointed to by an OLD object should be OLD. */
5984 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5985 RVALUE_AGE_SET_OLD(objspace, obj);
5986 if (is_incremental_marking(objspace)) {
5987 if (!RVALUE_MARKING(obj)) {
5988 gc_grey(objspace, obj);
5989 }
5990 }
5991 else {
5992 rgengc_remember(objspace, obj);
5993 }
5994 }
5995 else {
5996 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5997 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
5998 }
5999 }
6000 }
6001 }
6002
6003 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6004}
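/*
 * rgengc_check_relation() maintains the generational invariant: no old
 * object may point at a young object without the collector knowing.
 * During marking the parent is old, so the child is either promoted or
 * the reference is remembered.  The classic mutator-side write barrier
 * that motivates this bookkeeping looks like the sketch below
 * (hypothetical types, for illustration only; gc.c's real barrier entry
 * point is rb_gc_writebarrier()):
 *
 *     #include <stdbool.h>
 *
 *     typedef struct gobj { bool old; bool remembered; } gobj_t;
 *
 *     // Called on every store of `child` into a field of `parent`.
 *     static void write_barrier(gobj_t *parent, const gobj_t *child)
 *     {
 *         if (parent->old && !child->old && !parent->remembered) {
 *             parent->remembered = true;   // re-scan parent at next minor GC
 *         }
 *     }
 *
 * Remembered parents are treated as extra roots by minor collections,
 * so their young children survive even though the old generation is not
 * traced.
 */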
6005
6006static void
6007gc_grey(rb_objspace_t *objspace, VALUE obj)
6008{
6009#if RGENGC_CHECK_MODE
6010 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
6011 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
6012#endif
6013
6014#if GC_ENABLE_INCREMENTAL_MARK
6015 if (is_incremental_marking(objspace)) {
6016 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6017 }
6018#endif
6019
6020 push_mark_stack(&objspace->mark_stack, obj);
6021}
6022
6023static void
6024gc_aging(rb_objspace_t *objspace, VALUE obj)
6025{
6026 struct heap_page *page = GET_HEAP_PAGE(obj);
6027
6028 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
6029 check_rvalue_consistency(obj);
6030
6031 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
6032 if (!RVALUE_OLD_P(obj)) {
6033 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
6034 RVALUE_AGE_INC(objspace, obj);
6035 }
6036 else if (is_full_marking(objspace)) {
6037 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
6038 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
6039 }
6040 }
6041 check_rvalue_consistency(obj);
6042
6043 objspace->marked_slots++;
6044}
6045
6046NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
6047static void reachable_objects_from_callback(VALUE obj);
6048
6049static void
6050gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
6051{
6052 if (LIKELY(during_gc)) {
6053 rgengc_check_relation(objspace, obj);
6054 if (!gc_mark_set(objspace, obj)) return; /* already marked */
6055
6056 if (0) { // for debug GC marking miss
6057 if (objspace->rgengc.parent_object) {
6058 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
6059 (void *)obj, obj_type_name(obj),
6060 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
6061 }
6062 else {
6063 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
6064 }
6065 }
6066
6067 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
6068 rp(obj);
6069 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
6070 }
6071 gc_aging(objspace, obj);
6072 gc_grey(objspace, obj);
6073 }
6074 else {
6075 reachable_objects_from_callback(obj);
6076 }
6077}
6078
6079static inline void
6080gc_pin(rb_objspace_t *objspace, VALUE obj)
6081{
6082 GC_ASSERT(is_markable_object(objspace, obj));
6083 if (UNLIKELY(objspace->flags.during_compacting)) {
6084 if (LIKELY(during_gc)) {
6085 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
6086 }
6087 }
6088}
6089
6090static inline void
6091gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
6092{
6093 if (!is_markable_object(objspace, obj)) return;
6094 gc_pin(objspace, obj);
6095 gc_mark_ptr(objspace, obj);
6096}
6097
6098static inline void
6099gc_mark(rb_objspace_t *objspace, VALUE obj)
6100{
6101 if (!is_markable_object(objspace, obj)) return;
6102 gc_mark_ptr(objspace, obj);
6103}
6104
6105void
6106rb_gc_mark_movable(VALUE ptr)
6107{
6108 gc_mark(&rb_objspace, ptr);
6109}
6110
6111void
6112rb_gc_mark(VALUE ptr)
6113{
6114 gc_mark_and_pin(&rb_objspace, ptr);
6115}
6116
6117/* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
6118 * This function is only for GC_END_MARK timing.
6119 */
6120
6121int
6122rb_objspace_marked_object_p(VALUE obj)
6123{
6124 return RVALUE_MARKED(obj) ? TRUE : FALSE;
6125}
6126
6127static inline void
6128gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
6129{
6130 if (RVALUE_OLD_P(obj)) {
6131 objspace->rgengc.parent_object = obj;
6132 }
6133 else {
6134 objspace->rgengc.parent_object = Qfalse;
6135 }
6136}
6137
6138static void
6139gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
6140{
6141 switch (imemo_type(obj)) {
6142 case imemo_env:
6143 {
6144 const rb_env_t *env = (const rb_env_t *)obj;
6145
6146 if (LIKELY(env->ep)) {
6147 // env->ep can be NULL just after newobj().
6148 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
6149 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
6150 gc_mark_values(objspace, (long)env->env_size, env->env);
6151 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
6152 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
6153 gc_mark(objspace, (VALUE)env->iseq);
6154 }
6155 }
6156 return;
6157 case imemo_cref:
6158 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
6159 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
6160 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
6161 return;
6162 case imemo_svar:
6163 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
6164 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
6165 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
6166 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
6167 return;
6168 case imemo_throw_data:
6169 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
6170 return;
6171 case imemo_ifunc:
6172 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
6173 return;
6174 case imemo_memo:
6175 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
6176 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
6177 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
6178 return;
6179 case imemo_ment:
6180 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
6181 return;
6182 case imemo_iseq:
6183 rb_iseq_mark((rb_iseq_t *)obj);
6184 return;
6185 case imemo_tmpbuf:
6186 {
6187 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
6188 do {
6189 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
6190 } while ((m = m->next) != NULL);
6191 }
6192 return;
6193 case imemo_ast:
6194 rb_ast_mark(&RANY(obj)->as.imemo.ast);
6195 return;
6196 case imemo_parser_strterm:
6197 rb_strterm_mark(obj);
6198 return;
6199 case imemo_callinfo:
6200 return;
6201 case imemo_callcache:
6202 {
6203 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
6204 // should not mark klass here
6205 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
6206 }
6207 return;
6208 case imemo_constcache:
6209 {
6210 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
6211 gc_mark(objspace, ice->value);
6212 }
6213 return;
6214#if VM_CHECK_MODE > 0
6215 default:
6216 VM_UNREACHABLE(gc_mark_imemo);
6217#endif
6218 }
6219}
6220
6221static void
6222gc_mark_children(rb_objspace_t *objspace, VALUE obj)
6223{
6224 register RVALUE *any = RANY(obj);
6225 gc_mark_set_parent(objspace, obj);
6226
6227 if (FL_TEST(obj, FL_EXIVAR)) {
6228 rb_mark_generic_ivar(obj);
6229 }
6230
6231 switch (BUILTIN_TYPE(obj)) {
6232 case T_FLOAT:
6233 case T_BIGNUM:
6234 case T_SYMBOL:
6235 /* Not immediates, but they have no references and no singleton
6236 * class */
6237 return;
6238
6239 case T_NIL:
6240 case T_FIXNUM:
6241 rb_bug("rb_gc_mark() called for broken object");
6242 break;
6243
6244 case T_NODE:
6245 UNEXPECTED_NODE(rb_gc_mark);
6246 break;
6247
6248 case T_IMEMO:
6249 gc_mark_imemo(objspace, obj);
6250 return;
6251
6252 default:
6253 break;
6254 }
6255
6256 gc_mark(objspace, any->as.basic.klass);
6257
6258 switch (BUILTIN_TYPE(obj)) {
6259 case T_CLASS:
6260 case T_MODULE:
6261 if (RCLASS_SUPER(obj)) {
6262 gc_mark(objspace, RCLASS_SUPER(obj));
6263 }
6264 if (!RCLASS_EXT(obj)) break;
6265
6266 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6267 cc_table_mark(objspace, obj);
6268 mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
6269 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
6270 break;
6271
6272 case T_ICLASS:
6273 if (RICLASS_OWNS_M_TBL_P(obj)) {
6274 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6275 }
6276 if (RCLASS_SUPER(obj)) {
6277 gc_mark(objspace, RCLASS_SUPER(obj));
6278 }
6279 if (!RCLASS_EXT(obj)) break;
6280 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
6281 cc_table_mark(objspace, obj);
6282 break;
6283
6284 case T_ARRAY:
6285 if (FL_TEST(obj, ELTS_SHARED)) {
6286 VALUE root = any->as.array.as.heap.aux.shared_root;
6287 gc_mark(objspace, root);
6288 }
6289 else {
6290 long i, len = RARRAY_LEN(obj);
6291 const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
6292 for (i=0; i < len; i++) {
6293 gc_mark(objspace, ptr[i]);
6294 }
6295
6296 if (LIKELY(during_gc)) {
6297 if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
6298 RARRAY_TRANSIENT_P(obj)) {
6299 rb_transient_heap_mark(obj, ptr);
6300 }
6301 }
6302 }
6303 break;
6304
6305 case T_HASH:
6306 mark_hash(objspace, obj);
6307 break;
6308
6309 case T_STRING:
6310 if (STR_SHARED_P(obj)) {
6311 gc_mark(objspace, any->as.string.as.heap.aux.shared);
6312 }
6313 break;
6314
6315 case T_DATA:
6316 {
6317 void *const ptr = DATA_PTR(obj);
6318 if (ptr) {
6319 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
6320 any->as.typeddata.type->function.dmark :
6321 any->as.data.dmark;
6322 if (mark_func) (*mark_func)(ptr);
6323 }
6324 }
6325 break;
6326
6327 case T_OBJECT:
6328 {
6329 const VALUE * const ptr = ROBJECT_IVPTR(obj);
6330
6331 uint32_t i, len = ROBJECT_NUMIV(obj);
6332 for (i = 0; i < len; i++) {
6333 gc_mark(objspace, ptr[i]);
6334 }
6335
6336 if (LIKELY(during_gc) &&
6337 ROBJ_TRANSIENT_P(obj)) {
6338 rb_transient_heap_mark(obj, ptr);
6339 }
6340 }
6341 break;
6342
6343 case T_FILE:
6344 if (any->as.file.fptr) {
6345 gc_mark(objspace, any->as.file.fptr->self);
6346 gc_mark(objspace, any->as.file.fptr->pathv);
6347 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
6348 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
6349 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
6350 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
6351 gc_mark(objspace, any->as.file.fptr->write_lock);
6352 }
6353 break;
6354
6355 case T_REGEXP:
6356 gc_mark(objspace, any->as.regexp.src);
6357 break;
6358
6359 case T_MATCH:
6360 gc_mark(objspace, any->as.match.regexp);
6361 if (any->as.match.str) {
6362 gc_mark(objspace, any->as.match.str);
6363 }
6364 break;
6365
6366 case T_RATIONAL:
6367 gc_mark(objspace, any->as.rational.num);
6368 gc_mark(objspace, any->as.rational.den);
6369 break;
6370
6371 case T_COMPLEX:
6372 gc_mark(objspace, any->as.complex.real);
6373 gc_mark(objspace, any->as.complex.imag);
6374 break;
6375
6376 case T_STRUCT:
6377 {
6378 long i;
6379 const long len = RSTRUCT_LEN(obj);
6380 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
6381
6382 for (i=0; i<len; i++) {
6383 gc_mark(objspace, ptr[i]);
6384 }
6385
6386 if (LIKELY(during_gc) &&
6387 RSTRUCT_TRANSIENT_P(obj)) {
6388 rb_transient_heap_mark(obj, ptr);
6389 }
6390 }
6391 break;
6392
6393 default:
6394#if GC_DEBUG
6395 rb_gcdebug_print_obj_condition((VALUE)obj);
6396#endif
6397 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
6398 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
6399 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
6400 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6401 BUILTIN_TYPE(obj), (void *)any,
6402 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
6403 }
6404}
6405
6406/**
6407 * incremental: 0 -> not incremental (do all)
6408 * incremental: n -> mark at most `n' objects
6409 */
6410static inline int
6411gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
6412{
6413 mark_stack_t *mstack = &objspace->mark_stack;
6414 VALUE obj;
6415#if GC_ENABLE_INCREMENTAL_MARK
6416 size_t marked_slots_at_the_beginning = objspace->marked_slots;
6417 size_t popped_count = 0;
6418#endif
6419
6420 while (pop_mark_stack(mstack, &obj)) {
6421 if (obj == Qundef) continue; /* skip */
6422
6423 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
6424 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
6425 }
6426 gc_mark_children(objspace, obj);
6427
6428#if GC_ENABLE_INCREMENTAL_MARK
6429 if (incremental) {
6430 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
6431 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
6432 }
6433 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6434 popped_count++;
6435
6436 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
6437 break;
6438 }
6439 }
6440 else {
6441 /* just ignore marking bits */
6442 }
6443#endif
6444 }
6445
6446 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
6447
6448 if (is_mark_stack_empty(mstack)) {
6449 shrink_stack_chunk_cache(mstack);
6450 return TRUE;
6451 }
6452 else {
6453 return FALSE;
6454 }
6455}
6456
6457static int
6458gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
6459{
6460 return gc_mark_stacked_objects(objspace, TRUE, count);
6461}
6462
6463static int
6464gc_mark_stacked_objects_all(rb_objspace_t *objspace)
6465{
6466 return gc_mark_stacked_objects(objspace, FALSE, 0);
6467}
6468
6469#if PRINT_ROOT_TICKS
6470#define MAX_TICKS 0x100
6471static tick_t mark_ticks[MAX_TICKS];
6472static const char *mark_ticks_categories[MAX_TICKS];
6473
6474static void
6475show_mark_ticks(void)
6476{
6477 int i;
6478 fprintf(stderr, "mark ticks result:\n");
6479 for (i=0; i<MAX_TICKS; i++) {
6480 const char *category = mark_ticks_categories[i];
6481 if (category) {
6482 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
6483 }
6484 else {
6485 break;
6486 }
6487 }
6488}
6489
6490#endif /* PRINT_ROOT_TICKS */
6491
6492static void
6493gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
6494{
6495 struct gc_list *list;
6496 rb_execution_context_t *ec = GET_EC();
6497 rb_vm_t *vm = rb_ec_vm_ptr(ec);
6498
6499#if PRINT_ROOT_TICKS
6500 tick_t start_tick = tick();
6501 int tick_count = 0;
6502 const char *prev_category = 0;
6503
6504 if (mark_ticks_categories[0] == 0) {
6505 atexit(show_mark_ticks);
6506 }
6507#endif
6508
6509 if (categoryp) *categoryp = "xxx";
6510
6511 objspace->rgengc.parent_object = Qfalse;
6512
6513#if PRINT_ROOT_TICKS
6514#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
6515 if (prev_category) { \
6516 tick_t t = tick(); \
6517 mark_ticks[tick_count] = t - start_tick; \
6518 mark_ticks_categories[tick_count] = prev_category; \
6519 tick_count++; \
6520 } \
6521 prev_category = category; \
6522 start_tick = tick(); \
6523} while (0)
6524#else /* PRINT_ROOT_TICKS */
6525#define MARK_CHECKPOINT_PRINT_TICK(category)
6526#endif
6527
6528#define MARK_CHECKPOINT(category) do { \
6529 if (categoryp) *categoryp = category; \
6530 MARK_CHECKPOINT_PRINT_TICK(category); \
6531} while (0)
6532
6533 MARK_CHECKPOINT("vm");
6534 SET_STACK_END;
6535 rb_vm_mark(vm);
6536 if (vm->self) gc_mark(objspace, vm->self);
6537
6538 MARK_CHECKPOINT("finalizers");
6539 mark_finalizer_tbl(objspace, finalizer_table);
6540
6541 MARK_CHECKPOINT("machine_context");
6542 mark_current_machine_context(objspace, ec);
6543
6544 /* mark protected global variables */
6545 MARK_CHECKPOINT("global_list");
6546 for (list = global_list; list; list = list->next) {
6547 gc_mark_maybe(objspace, *list->varptr);
6548 }
6549
6550 MARK_CHECKPOINT("end_proc");
6551 rb_mark_end_proc();
6552
6553 MARK_CHECKPOINT("global_tbl");
6554 rb_gc_mark_global_tbl();
6555
6556 MARK_CHECKPOINT("object_id");
6557 rb_gc_mark(objspace->next_object_id);
6558 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
6559
6560 if (stress_to_class) rb_gc_mark(stress_to_class);
6561
6562 MARK_CHECKPOINT("finish");
6563#undef MARK_CHECKPOINT
6564}
6565
6566#if RGENGC_CHECK_MODE >= 4
6567
6568#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
6569#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
6570#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
6571
6572struct reflist {
6573 VALUE *list;
6574 int pos;
6575 int size;
6576};
6577
6578static struct reflist *
6579reflist_create(VALUE obj)
6580{
6581 struct reflist *refs = xmalloc(sizeof(struct reflist));
6582 refs->size = 1;
6583 refs->list = ALLOC_N(VALUE, refs->size);
6584 refs->list[0] = obj;
6585 refs->pos = 1;
6586 return refs;
6587}
6588
6589static void
6590reflist_destruct(struct reflist *refs)
6591{
6592 xfree(refs->list);
6593 xfree(refs);
6594}
6595
6596static void
6597reflist_add(struct reflist *refs, VALUE obj)
6598{
6599 if (refs->pos == refs->size) {
6600 refs->size *= 2;
6601 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
6602 }
6603
6604 refs->list[refs->pos++] = obj;
6605}
6606
6607static void
6608reflist_dump(struct reflist *refs)
6609{
6610 int i;
6611 for (i=0; i<refs->pos; i++) {
6612 VALUE obj = refs->list[i];
6613 if (IS_ROOTSIG(obj)) { /* root */
6614 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
6615 }
6616 else {
6617 fprintf(stderr, "<%s>", obj_info(obj));
6618 }
6619 if (i+1 < refs->pos) fprintf(stderr, ", ");
6620 }
6621}
6622
6623static int
6624reflist_referred_from_machine_context(struct reflist *refs)
6625{
6626 int i;
6627 for (i=0; i<refs->pos; i++) {
6628 VALUE obj = refs->list[i];
6629 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
6630 }
6631 return 0;
6632}
6633
6634struct allrefs {
6635 rb_objspace_t *objspace;
6636 /* a -> obj1
6637 * b -> obj1
6638 * c -> obj1
6639 * c -> obj2
6640 * d -> obj3
6641 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
6642 */
6643 struct st_table *references;
6644 const char *category;
6645    VALUE root_obj;
6646    mark_stack_t mark_stack;
6647};
6648
6649static int
6650allrefs_add(struct allrefs *data, VALUE obj)
6651{
6652 struct reflist *refs;
6653 st_data_t r;
6654
6655 if (st_lookup(data->references, obj, &r)) {
6656 refs = (struct reflist *)r;
6657 reflist_add(refs, data->root_obj);
6658 return 0;
6659 }
6660 else {
6661 refs = reflist_create(data->root_obj);
6662 st_insert(data->references, obj, (st_data_t)refs);
6663 return 1;
6664 }
6665}
6666
6667static void
6668allrefs_i(VALUE obj, void *ptr)
6669{
6670 struct allrefs *data = (struct allrefs *)ptr;
6671
6672 if (allrefs_add(data, obj)) {
6673 push_mark_stack(&data->mark_stack, obj);
6674 }
6675}
6676
6677static void
6678allrefs_roots_i(VALUE obj, void *ptr)
6679{
6680 struct allrefs *data = (struct allrefs *)ptr;
6681 if (strlen(data->category) == 0) rb_bug("!!!");
6682 data->root_obj = MAKE_ROOTSIG(data->category);
6683
6684 if (allrefs_add(data, obj)) {
6685 push_mark_stack(&data->mark_stack, obj);
6686 }
6687}
6688
6689static st_table *
6690objspace_allrefs(rb_objspace_t *objspace)
6691{
6692 struct allrefs data;
6693 struct mark_func_data_struct mfd;
6694 VALUE obj;
6695 int prev_dont_gc = dont_gc_val();
6696 dont_gc_on();
6697
6698 data.objspace = objspace;
6699 data.references = st_init_numtable();
6700 init_mark_stack(&data.mark_stack);
6701
6702 mfd.mark_func = allrefs_roots_i;
6703 mfd.data = &data;
6704
6705 /* traverse root objects */
6706 PUSH_MARK_FUNC_DATA(&mfd);
6707 objspace->mark_func_data = &mfd;
6708 gc_mark_roots(objspace, &data.category);
6709 POP_MARK_FUNC_DATA();
6710
6711 /* traverse rest objects reachable from root objects */
6712 while (pop_mark_stack(&data.mark_stack, &obj)) {
6713 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
6714 }
6715 free_stack_chunks(&data.mark_stack);
6716
6717 dont_gc_set(prev_dont_gc);
6718 return data.references;
6719}
6720
6721static int
6722objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
6723{
6724 struct reflist *refs = (struct reflist *)value;
6725 reflist_destruct(refs);
6726 return ST_CONTINUE;
6727}
6728
6729static void
6730objspace_allrefs_destruct(struct st_table *refs)
6731{
6732 st_foreach(refs, objspace_allrefs_destruct_i, 0);
6733 st_free_table(refs);
6734}
6735
6736#if RGENGC_CHECK_MODE >= 5
6737static int
6738allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
6739{
6740 VALUE obj = (VALUE)k;
6741 struct reflist *refs = (struct reflist *)v;
6742 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
6743 reflist_dump(refs);
6744 fprintf(stderr, "\n");
6745 return ST_CONTINUE;
6746}
6747
6748static void
6749allrefs_dump(rb_objspace_t *objspace)
6750{
6751 VALUE size = objspace->rgengc.allrefs_table->num_entries;
6752 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
6753 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
6754}
6755#endif
6756
6757static int
6758gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
6759{
6760 VALUE obj = k;
6761 struct reflist *refs = (struct reflist *)v;
6762 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
6763
6764 /* object should be marked or oldgen */
6765 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
6766 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
6767 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
6768 reflist_dump(refs);
6769
6770 if (reflist_referred_from_machine_context(refs)) {
6771 fprintf(stderr, " (marked from machine stack).\n");
6772 /* marked from machine context can be false positive */
6773 }
6774 else {
6775 objspace->rgengc.error_count++;
6776 fprintf(stderr, "\n");
6777 }
6778 }
6779 return ST_CONTINUE;
6780}
6781
6782static void
6783gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
6784{
6785 size_t saved_malloc_increase = objspace->malloc_params.increase;
6786#if RGENGC_ESTIMATE_OLDMALLOC
6787 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
6788#endif
6789 VALUE already_disabled = rb_objspace_gc_disable(objspace);
6790
6791 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
6792
6793 if (checker_func) {
6794 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
6795 }
6796
6797 if (objspace->rgengc.error_count > 0) {
6798#if RGENGC_CHECK_MODE >= 5
6799 allrefs_dump(objspace);
6800#endif
6801 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
6802 }
6803
6804 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
6805 objspace->rgengc.allrefs_table = 0;
6806
6807 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
6808 objspace->malloc_params.increase = saved_malloc_increase;
6809#if RGENGC_ESTIMATE_OLDMALLOC
6810 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
6811#endif
6812}
6813#endif /* RGENGC_CHECK_MODE >= 4 */
6814
6815struct verify_internal_consistency_struct {
6816    rb_objspace_t *objspace;
6817    int err_count;
6818    size_t live_object_count;
6819    size_t zombie_object_count;
6820
6821    VALUE parent;
6822    size_t old_object_count;
6823    size_t remembered_shady_count;
6824};
6825
6826static void
6827check_generation_i(const VALUE child, void *ptr)
6828{
6829    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
6830    const VALUE parent = data->parent;
6831
6832 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
6833
6834 if (!RVALUE_OLD_P(child)) {
6835 if (!RVALUE_REMEMBERED(parent) &&
6836 !RVALUE_REMEMBERED(child) &&
6837 !RVALUE_UNCOLLECTIBLE(child)) {
6838 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
6839 data->err_count++;
6840 }
6841 }
6842}
6843
6844static void
6845check_color_i(const VALUE child, void *ptr)
6846{
6847    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
6848    const VALUE parent = data->parent;
6849
6850 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
6851 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
6852 obj_info(parent), obj_info(child));
6853 data->err_count++;
6854 }
6855}
6856
6857static void
6858check_children_i(const VALUE child, void *ptr)
6859{
6861 if (check_rvalue_consistency_force(child, FALSE) != 0) {
6862 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
6863 obj_info(child), obj_info(data->parent));
6864 rb_print_backtrace(); /* C backtrace will help to debug */
6865
6866 data->err_count++;
6867 }
6868}
6869
6870static int
6871verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
6872{
6874 VALUE obj;
6876
6877 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
6878 void *poisoned = asan_poisoned_object_p(obj);
6879 asan_unpoison_object(obj, false);
6880
6881 if (is_live_object(objspace, obj)) {
6882 /* count objects */
6883 data->live_object_count++;
6884 data->parent = obj;
6885
6886 /* Normally, we don't expect T_MOVED objects to be in the heap.
6887 * But they can stay alive on the stack, */
6888 if (!gc_object_moved_p(objspace, obj)) {
6889 /* moved slots don't have children */
6890 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
6891 }
6892
6893 /* check health of children */
6894 if (RVALUE_OLD_P(obj)) data->old_object_count++;
6895 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
6896
6897 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
6898 /* reachable objects from an oldgen object should be old or (young with remember) */
6899 data->parent = obj;
6900 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
6901 }
6902
6903            if (is_incremental_marking(objspace)) {
6904                if (RVALUE_BLACK_P(obj)) {
6905 /* reachable objects from black objects should be black or grey objects */
6906 data->parent = obj;
6907 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
6908 }
6909 }
6910 }
6911 else {
6912 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
6913 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
6914 data->zombie_object_count++;
6915 }
6916 }
6917 if (poisoned) {
6918 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6919 asan_poison_object(obj);
6920 }
6921 }
6922
6923 return 0;
6924}
6925
6926static int
6927gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
6928{
6929 int i;
6930 unsigned int has_remembered_shady = FALSE;
6931 unsigned int has_remembered_old = FALSE;
6932 int remembered_old_objects = 0;
6933 int free_objects = 0;
6934 int zombie_objects = 0;
6935
6936 for (i=0; i<page->total_slots; i++) {
6937 VALUE val = (VALUE)&page->start[i];
6938 void *poisoned = asan_poisoned_object_p(val);
6939 asan_unpoison_object(val, false);
6940
6941 if (RBASIC(val) == 0) free_objects++;
6942 if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
6943 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
6944 has_remembered_shady = TRUE;
6945 }
6946 if (RVALUE_PAGE_MARKING(page, val)) {
6947 has_remembered_old = TRUE;
6948 remembered_old_objects++;
6949 }
6950
6951 if (poisoned) {
6952 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
6953 asan_poison_object(val);
6954 }
6955 }
6956
6957    if (!is_incremental_marking(objspace) &&
6958        page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
6959
6960 for (i=0; i<page->total_slots; i++) {
6961 VALUE val = (VALUE)&page->start[i];
6962 if (RVALUE_PAGE_MARKING(page, val)) {
6963 fprintf(stderr, "marking -> %s\n", obj_info(val));
6964 }
6965 }
6966 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
6967 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
6968 }
6969
6970 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
6971 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
6972 (void *)page, obj ? obj_info(obj) : "");
6973 }
6974
6975 if (0) {
6976        /* free_slots may not be equal to free_objects */
6977 if (page->free_slots != free_objects) {
6978 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
6979 }
6980 }
6981 if (page->final_slots != zombie_objects) {
6982 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
6983 }
6984
6985 return remembered_old_objects;
6986}
6987
6988static int
6989gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
6990{
6991 int remembered_old_objects = 0;
6992 struct heap_page *page = 0;
6993
6994 list_for_each(head, page, page_node) {
6995 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
6996 RVALUE *p = page->freelist;
6997 while (p) {
6998 VALUE vp = (VALUE)p;
6999 VALUE prev = vp;
7000 asan_unpoison_object(vp, false);
7001 if (BUILTIN_TYPE(vp) != T_NONE) {
7002 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
7003 }
7004 p = p->as.free.next;
7005 asan_poison_object(prev);
7006 }
7007 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
7008
7009 if (page->flags.has_remembered_objects == FALSE) {
7010 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
7011 }
7012 }
7013
7014 return remembered_old_objects;
7015}
7016
7017static int
7018gc_verify_heap_pages(rb_objspace_t *objspace)
7019{
7020 int remembered_old_objects = 0;
7021 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
7022 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
7023 return remembered_old_objects;
7024}
7025
7026/*
7027 * call-seq:
7028 * GC.verify_internal_consistency -> nil
7029 *
7030 * Verify internal consistency.
7031 *
7032 * This method is implementation specific.
7033 * Currently, it checks generational consistency
7034 * if RGenGC is supported.
7035 */
7036static VALUE
7037gc_verify_internal_consistency_m(VALUE dummy)
7038{
7039 gc_verify_internal_consistency(&rb_objspace);
7040 return Qnil;
7041}
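The method above is also reachable from C. A minimal sketch, assuming an initialized VM and using only the public C API (rb_mGC, rb_intern, rb_funcall); the helper name is hypothetical:

#include "ruby/ruby.h"

/* Invoke the same check Ruby code reaches via GC.verify_internal_consistency.
 * Returns nil on success; the process aborts via rb_bug() if an
 * inconsistency is found. */
static void
verify_gc_from_extension(void)
{
    rb_funcall(rb_mGC, rb_intern("verify_internal_consistency"), 0);
}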
7042
7043static void
7044gc_verify_internal_consistency_(rb_objspace_t *objspace)
7045{
7046 struct verify_internal_consistency_struct data = {0};
7047
7048 data.objspace = objspace;
7049 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
7050
7051 /* check relations */
7052
7053 objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
7054
7055 if (data.err_count != 0) {
7056#if RGENGC_CHECK_MODE >= 5
7057 objspace->rgengc.error_count = data.err_count;
7058 gc_marks_check(objspace, NULL, NULL);
7059 allrefs_dump(objspace);
7060#endif
7061 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
7062 }
7063
7064 /* check heap_page status */
7065 gc_verify_heap_pages(objspace);
7066
7067 /* check counters */
7068
7069    if (!is_lazy_sweeping(heap_eden) &&
7070        !finalizing &&
7071        ruby_single_main_ractor != NULL) {
7072        if (objspace_live_slots(objspace) != data.live_object_count) {
7073            fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
7074                    "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
7075                    heap_pages_final_slots, objspace->profile.total_freed_objects);
7076            rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7077 objspace_live_slots(objspace), data.live_object_count);
7078 }
7079 }
7080
7081    if (!is_marking(objspace)) {
7082        if (objspace->rgengc.old_objects != data.old_object_count) {
7083            rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7084                   objspace->rgengc.old_objects, data.old_object_count);
7085        }
7086        if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
7087            rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
7088                   objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
7089        }
7090    }
7091
7092 if (!finalizing) {
7093 size_t list_count = 0;
7094
7095 {
7096            VALUE z = heap_pages_deferred_final;
7097            while (z) {
7098 list_count++;
7099 z = RZOMBIE(z)->next;
7100 }
7101 }
7102
7103        if (heap_pages_final_slots != data.zombie_object_count ||
7104            heap_pages_final_slots != list_count) {
7105
7106 rb_bug("inconsistent finalizing object count:\n"
7107 " expect %"PRIuSIZE"\n"
7108 " but %"PRIuSIZE" zombies\n"
7109 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
7110               heap_pages_final_slots,
7111               data.zombie_object_count,
7112               list_count);
7113 }
7114 }
7115
7116 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
7117}
7118
7119static void
7120gc_verify_internal_consistency(rb_objspace_t *objspace)
7121{
7122    RB_VM_LOCK_ENTER();
7123    {
7124 rb_vm_barrier(); // stop other ractors
7125
7126 unsigned int prev_during_gc = during_gc;
7127 during_gc = FALSE; // stop gc here
7128 {
7129 gc_verify_internal_consistency_(objspace);
7130 }
7131 during_gc = prev_during_gc;
7132 }
7133    RB_VM_LOCK_LEAVE();
7134}
7135
7136void
7137rb_gc_verify_internal_consistency(void)
7138{
7139 gc_verify_internal_consistency(&rb_objspace);
7140}
7141
7142static VALUE
7143gc_verify_transient_heap_internal_consistency(VALUE dmy)
7144{
7145    rb_transient_heap_verify();
7146    return Qnil;
7147}
7148
7149/* marks */
7150
7151static void
7152gc_marks_start(rb_objspace_t *objspace, int full_mark)
7153{
7154 /* start marking */
7155 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
7156 gc_mode_transition(objspace, gc_mode_marking);
7157
7158 if (full_mark) {
7159#if GC_ENABLE_INCREMENTAL_MARK
7160 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
7161
7162 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
7163 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
7164 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
7165 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
7166#endif
7167        objspace->flags.during_minor_gc = FALSE;
7168        if (ruby_enable_autocompact) {
7169            objspace->flags.during_compacting |= TRUE;
7170        }
7171        objspace->profile.major_gc_count++;
7172        objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
7173        objspace->rgengc.old_objects = 0;
7174        objspace->rgengc.last_major_gc = objspace->profile.count;
7175        objspace->marked_slots = 0;
7176        rgengc_mark_and_rememberset_clear(objspace, heap_eden);
7177    }
7178    else {
7179        objspace->flags.during_minor_gc = TRUE;
7180        objspace->marked_slots =
7181            objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
7182        objspace->profile.minor_gc_count++;
7183        rgengc_rememberset_mark(objspace, heap_eden);
7184 }
7185
7186 gc_mark_roots(objspace, NULL);
7187
7188 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
7189 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
7190}
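To get a feel for the incremental step budget computed in gc_marks_start, here is the same arithmetic with invented inputs (the slot counts below are illustrative only; HEAP_PAGE_OBJ_LIMIT is build-dependent, roughly 409 slots per 16 KB page on common 64-bit builds):

#include <stdio.h>

int main(void)
{
    size_t heap_page_obj_limit = 409;        /* assumed, build-dependent */
    size_t marked_slots = 10000;             /* hypothetical previous-cycle count */
    size_t pooled_slots = 2 * heap_page_obj_limit;

    /* mirrors: step_slots = (marked_slots * 2) / ((pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1) */
    size_t step_slots = (marked_slots * 2) / ((pooled_slots / heap_page_obj_limit) + 1);
    printf("step_slots = %zu\n", step_slots); /* 20000 / 3 => 6666 */
    return 0;
}

More pooled pages mean a smaller per-step budget, spreading the marking work across the allocations those pooled pages will serve.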
7191
7192#if GC_ENABLE_INCREMENTAL_MARK
7193static void
7194gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
7195{
7196 struct heap_page *page = 0;
7197
7198 list_for_each(&heap_eden->pages, page, page_node) {
7199 bits_t *mark_bits = page->mark_bits;
7200 bits_t *wbun_bits = page->wb_unprotected_bits;
7201 RVALUE *p = page->start;
7202 RVALUE *offset = p - NUM_IN_PAGE(p);
7203 size_t j;
7204
7205 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
7206 bits_t bits = mark_bits[j] & wbun_bits[j];
7207
7208 if (bits) {
7209 p = offset + j * BITS_BITLENGTH;
7210
7211 do {
7212 if (bits & 1) {
7213 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
7214 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
7215 GC_ASSERT(RVALUE_MARKED((VALUE)p));
7216 gc_mark_children(objspace, (VALUE)p);
7217 }
7218 p++;
7219 bits >>= 1;
7220 } while (bits);
7221 }
7222 }
7223 }
7224
7225 gc_mark_stacked_objects_all(objspace);
7226}
7227
7228static struct heap_page *
7229heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
7230{
7231 struct heap_page *page = heap->pooled_pages;
7232
7233 if (page) {
7234 heap->pooled_pages = page->free_next;
7235 heap_add_freepage(heap, page);
7236 }
7237
7238 return page;
7239}
7240#endif
7241
7242static int
7243gc_marks_finish(rb_objspace_t *objspace)
7244{
7245#if GC_ENABLE_INCREMENTAL_MARK
7246 /* finish incremental GC */
7247 if (is_incremental_marking(objspace)) {
7248 if (heap_eden->pooled_pages) {
7249 heap_move_pooled_pages_to_free_pages(heap_eden);
7250            gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
7251 return FALSE; /* continue marking phase */
7252 }
7253
7254 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
7255 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
7256 mark_stack_size(&objspace->mark_stack));
7257 }
7258
7259 gc_mark_roots(objspace, 0);
7260
7261 if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
7262 gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
7263 mark_stack_size(&objspace->mark_stack));
7264 return FALSE;
7265 }
7266
7267#if RGENGC_CHECK_MODE >= 2
7268 if (gc_verify_heap_pages(objspace) != 0) {
7269 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
7270 }
7271#endif
7272
7273 objspace->flags.during_incremental_marking = FALSE;
7274 /* check children of all marked wb-unprotected objects */
7275 gc_marks_wb_unprotected_objects(objspace);
7276 }
7277#endif /* GC_ENABLE_INCREMENTAL_MARK */
7278
7279#if RGENGC_CHECK_MODE >= 2
7280 gc_verify_internal_consistency(objspace);
7281#endif
7282
7283 if (is_full_marking(objspace)) {
7284 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
7285 const double r = gc_params.oldobject_limit_factor;
7286        objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
7287        objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
7288 }
7289
7290#if RGENGC_CHECK_MODE >= 4
7291 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
7292#endif
7293
7294 {
7295 /* decide full GC is needed or not */
7296 rb_heap_t *heap = heap_eden;
7297        size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots;
7298        size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
7299 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
7300 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
7301 int full_marking = is_full_marking(objspace);
7302 const int r_cnt = GET_VM()->ractor.cnt;
7303 const int r_mul = r_cnt > 8 ? 8 : r_cnt; // upto 8
7304
7305 GC_ASSERT(heap->total_slots >= objspace->marked_slots);
7306
7307 /* setup free-able page counts */
7308 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
7309 max_free_slots = gc_params.heap_init_slots * r_mul;
7310 }
7311
7312 if (sweep_slots > max_free_slots) {
7313 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
7314 }
7315 else {
7316            heap_pages_freeable_pages = 0;
7317        }
7318
7319 /* check free_min */
7320 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
7321 min_free_slots = gc_params.heap_free_slots * r_mul;
7322 }
7323
7324 if (sweep_slots < min_free_slots) {
7325 if (!full_marking) {
7326 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
7327 full_marking = TRUE;
7328 /* do not update last_major_gc, because full marking is not done. */
7329 /* goto increment; */
7330 }
7331 else {
7332                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
7333                    objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
7334                }
7335 }
7336 if (full_marking) {
7337 /* increment: */
7338 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
7339 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
7340 heap_increment(objspace, heap);
7341 }
7342 }
7343
7344 if (full_marking) {
7345 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
7346 const double r = gc_params.oldobject_limit_factor;
7347            objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
7348            objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
7349 }
7350
7351        if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
7352            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
7353        }
7354        if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
7355            objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
7356        }
7357        if (RGENGC_FORCE_MAJOR_GC) {
7358            objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
7359        }
7360
7361 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
7362 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
7363 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
7364 objspace->marked_slots, objspace->rgengc.old_objects, heap->total_slots, sweep_slots, heap_allocatable_pages,
7365 objspace->rgengc.need_major_gc ? "major" : "minor");
7366 }
7367
7368    rb_transient_heap_finish_marking();
7369    rb_ractor_finish_marking();
7370
7371    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
7372
7373 return TRUE;
7374}
7375
7376static void
7377gc_marks_step(rb_objspace_t *objspace, size_t slots)
7378{
7379#if GC_ENABLE_INCREMENTAL_MARK
7380 GC_ASSERT(is_marking(objspace));
7381
7382 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
7383 if (gc_marks_finish(objspace)) {
7384 /* finish */
7385 gc_sweep(objspace);
7386 }
7387 }
7388 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
7389#endif
7390}
7391
7392static void
7393gc_marks_rest(rb_objspace_t *objspace)
7394{
7395 gc_report(1, objspace, "gc_marks_rest\n");
7396
7397#if GC_ENABLE_INCREMENTAL_MARK
7398 heap_eden->pooled_pages = NULL;
7399#endif
7400
7401 if (is_incremental_marking(objspace)) {
7402 do {
7403 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
7404 } while (gc_marks_finish(objspace) == FALSE);
7405 }
7406 else {
7407 gc_mark_stacked_objects_all(objspace);
7408 gc_marks_finish(objspace);
7409 }
7410
7411 /* move to sweep */
7412 gc_sweep(objspace);
7413}
7414
7415static void
7416gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
7417{
7418    GC_ASSERT(dont_gc_val() == FALSE);
7419#if GC_ENABLE_INCREMENTAL_MARK
7420
7421 unsigned int lock_lev;
7422 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
7423
7424 int slots = 0;
7425 const char *from;
7426
7427 if (heap->pooled_pages) {
7428 while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
7429 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
7430 slots += page->free_slots;
7431 }
7432 from = "pooled-pages";
7433 }
7434 else if (heap_increment(objspace, heap)) {
7435 slots = heap->free_pages->free_slots;
7436 from = "incremented-pages";
7437 }
7438
7439 if (slots > 0) {
7440 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
7441 slots, from);
7442 gc_marks_step(objspace, objspace->rincgc.step_slots);
7443 }
7444 else {
7445 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
7446 mark_stack_size(&objspace->mark_stack));
7447 gc_marks_rest(objspace);
7448 }
7449
7450 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
7451#endif
7452}
7453
7454static void
7455gc_marks(rb_objspace_t *objspace, int full_mark)
7456{
7457 gc_prof_mark_timer_start(objspace);
7458
7459 /* setup marking */
7460
7461 gc_marks_start(objspace, full_mark);
7462 if (!is_incremental_marking(objspace)) {
7463 gc_marks_rest(objspace);
7464 }
7465
7466#if RGENGC_PROFILE > 0
7467 if (gc_prof_record(objspace)) {
7468 gc_profile_record *record = gc_prof_record(objspace);
7469 record->old_objects = objspace->rgengc.old_objects;
7470 }
7471#endif
7472 gc_prof_mark_timer_stop(objspace);
7473}
7474
7475/* RGENGC */
7476
7477static void
7478gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
7479{
7480 if (level <= RGENGC_DEBUG) {
7481 char buf[1024];
7482 FILE *out = stderr;
7483 va_list args;
7484 const char *status = " ";
7485
7486 if (during_gc) {
7487 status = is_full_marking(objspace) ? "+" : "-";
7488 }
7489 else {
7490            if (is_lazy_sweeping(heap_eden)) {
7491                status = "S";
7492 }
7493 if (is_incremental_marking(objspace)) {
7494 status = "M";
7495 }
7496 }
7497
7498 va_start(args, fmt);
7499 vsnprintf(buf, 1024, fmt, args);
7500 va_end(args);
7501
7502 fprintf(out, "%s|", status);
7503 fputs(buf, out);
7504 }
7505}
7506
7507/* bit operations */
7508
7509static int
7510rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
7511{
7512 return RVALUE_REMEMBERED(obj);
7513}
7514
7515static int
7516rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
7517{
7518 struct heap_page *page = GET_HEAP_PAGE(obj);
7519 bits_t *bits = &page->marking_bits[0];
7520
7522
7523 if (MARKED_IN_BITMAP(bits, obj)) {
7524 return FALSE;
7525 }
7526 else {
7527        page->flags.has_remembered_objects = TRUE;
7528        MARK_IN_BITMAP(bits, obj);
7529 return TRUE;
7530 }
7531}
7532
7533/* wb, etc */
7534
7535/* return FALSE if already remembered */
7536static int
7537rgengc_remember(rb_objspace_t *objspace, VALUE obj)
7538{
7539 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
7540 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
7541
7542 check_rvalue_consistency(obj);
7543
7544 if (RGENGC_CHECK_MODE) {
7545 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
7546 }
7547
7548#if RGENGC_PROFILE > 0
7549 if (!rgengc_remembered(objspace, obj)) {
7550 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
7551 objspace->profile.total_remembered_normal_object_count++;
7552#if RGENGC_PROFILE >= 2
7553 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
7554#endif
7555 }
7556 }
7557#endif /* RGENGC_PROFILE > 0 */
7558
7559 return rgengc_remembersetbits_set(objspace, obj);
7560}
7561
7562static int
7563rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
7564{
7565 int result = rgengc_remembersetbits_get(objspace, obj);
7566 check_rvalue_consistency(obj);
7567 return result;
7568}
7569
7570static int
7571rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
7572{
7573 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
7574 return rgengc_remembered_sweep(objspace, obj);
7575}
7576
7577#ifndef PROFILE_REMEMBERSET_MARK
7578#define PROFILE_REMEMBERSET_MARK 0
7579#endif
7580
7581static void
7582rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
7583{
7584 size_t j;
7585 struct heap_page *page = 0;
7586#if PROFILE_REMEMBERSET_MARK
7587 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
7588#endif
7589 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
7590
7591 list_for_each(&heap->pages, page, page_node) {
7592        if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
7593            RVALUE *p = page->start;
7594            RVALUE *offset = p - NUM_IN_PAGE(p);
7595            bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
7596            bits_t *marking_bits = page->marking_bits;
7597            bits_t *uncollectible_bits = page->uncollectible_bits;
7598            bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
7599#if PROFILE_REMEMBERSET_MARK
7600            if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
7601            else if (page->flags.has_remembered_objects) has_old++;
7602 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
7603#endif
7604 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
7605                bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
7606                marking_bits[j] = 0;
7607 }
7608            page->flags.has_remembered_objects = FALSE;
7609
7610 for (j=0; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
7611 bitset = bits[j];
7612
7613 if (bitset) {
7614 p = offset + j * BITS_BITLENGTH;
7615
7616 do {
7617 if (bitset & 1) {
7618 VALUE obj = (VALUE)p;
7619 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
7620 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
7621 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
7622
7623 gc_mark_children(objspace, obj);
7624 }
7625 p++;
7626 bitset >>= 1;
7627 } while (bitset);
7628 }
7629 }
7630 }
7631#if PROFILE_REMEMBERSET_MARK
7632 else {
7633 skip++;
7634 }
7635#endif
7636 }
7637
7638#if PROFILE_REMEMBERSET_MARK
7639 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
7640#endif
7641 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
7642}
7643
7644static void
7645rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
7646{
7647 struct heap_page *page = 0;
7648
7649 list_for_each(&heap->pages, page, page_node) {
7650 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
7651 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
7652 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
7653 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
7654        page->flags.has_uncollectible_shady_objects = FALSE;
7655        page->flags.has_remembered_objects = FALSE;
7656    }
7657}
7658
7659/* RGENGC: APIs */
7660
7661NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
7662
7663static void
7664gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
7665{
7666 if (RGENGC_CHECK_MODE) {
7667 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
7668 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
7669 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
7670 }
7671
7672#if 1
7673 /* mark `a' and remember (default behavior) */
7674 if (!rgengc_remembered(objspace, a)) {
7675        RB_VM_LOCK_ENTER_NO_BARRIER();
7676        {
7677 rgengc_remember(objspace, a);
7678 }
7679        RB_VM_LOCK_LEAVE_NO_BARRIER();
7680        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
7681 }
7682#else
7683 /* mark `b' and remember */
7684    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
7685    if (RVALUE_WB_UNPROTECTED(b)) {
7686 gc_remember_unprotected(objspace, b);
7687 }
7688 else {
7689 RVALUE_AGE_SET_OLD(objspace, b);
7690 rgengc_remember(objspace, b);
7691 }
7692
7693 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
7694#endif
7695
7696 check_rvalue_consistency(a);
7697 check_rvalue_consistency(b);
7698}
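For extension writers, the generational barrier above is what ultimately fires behind the public RB_OBJ_WRITE macro. A minimal sketch of the usage pattern (the struct and field names are hypothetical):

#include "ruby/ruby.h"

struct container {
    VALUE held;    /* a Ruby reference stored in extension-owned memory */
};

/* Stores must go through RB_OBJ_WRITE(owner, slot, value) so that an
 * old `self` gets remembered when it starts referencing a young `val`
 * (the O->Y case handled by gc_writebarrier_generational). */
static void
container_set(VALUE self, struct container *ptr, VALUE val)
{
    RB_OBJ_WRITE(self, &ptr->held, val);
}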
7699
7700#if GC_ENABLE_INCREMENTAL_MARK
7701static void
7702gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
7703{
7704 gc_mark_set_parent(objspace, parent);
7705 rgengc_check_relation(objspace, obj);
7706 if (gc_mark_set(objspace, obj) == FALSE) return;
7707 gc_aging(objspace, obj);
7708 gc_grey(objspace, obj);
7709}
7710
7711NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
7712
7713static void
7714gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
7715{
7716 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
7717
7718 if (RVALUE_BLACK_P(a)) {
7719 if (RVALUE_WHITE_P(b)) {
7720 if (!RVALUE_WB_UNPROTECTED(a)) {
7721 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
7722 gc_mark_from(objspace, b, a);
7723 }
7724 }
7725 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
7726 if (!RVALUE_WB_UNPROTECTED(b)) {
7727 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
7728 RVALUE_AGE_SET_OLD(objspace, b);
7729
7730 if (RVALUE_BLACK_P(b)) {
7731 gc_grey(objspace, b);
7732 }
7733 }
7734 else {
7735 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
7736 gc_remember_unprotected(objspace, b);
7737 }
7738 }
7739
7740 if (UNLIKELY(objspace->flags.during_compacting)) {
7741            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
7742        }
7743 }
7744}
7745#else
7746#define gc_writebarrier_incremental(a, b, objspace)
7747#endif
7748
7749void
7750rb_gc_writebarrier(VALUE a, VALUE b)
7751{
7752 rb_objspace_t *objspace = &rb_objspace;
7753
7754 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
7755 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
7756
7757 if (!is_incremental_marking(objspace)) {
7758 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
7759 // do nothing
7760 }
7761 else {
7762 gc_writebarrier_generational(a, b, objspace);
7763 }
7764 }
7765 else {
7766 /* slow path */
7767        RB_VM_LOCK_ENTER_NO_BARRIER();
7768        {
7769 gc_writebarrier_incremental(a, b, objspace);
7770 }
7771        RB_VM_LOCK_LEAVE_NO_BARRIER();
7772    }
7773 return;
7774}
7775
7776void
7777rb_gc_writebarrier_unprotect(VALUE obj)
7778{
7779 if (RVALUE_WB_UNPROTECTED(obj)) {
7780 return;
7781 }
7782 else {
7783 rb_objspace_t *objspace = &rb_objspace;
7784
7785 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
7786 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
7787
7788 if (RVALUE_OLD_P(obj)) {
7789 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
7790 RVALUE_DEMOTE(objspace, obj);
7791 gc_mark_set(objspace, obj);
7792 gc_remember_unprotected(objspace, obj);
7793
7794#if RGENGC_PROFILE
7795 objspace->profile.total_shade_operation_count++;
7796#if RGENGC_PROFILE >= 2
7797 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
7798#endif /* RGENGC_PROFILE >= 2 */
7799#endif /* RGENGC_PROFILE */
7800 }
7801 else {
7802 RVALUE_AGE_RESET(obj);
7803 }
7804
7805 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
7806        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
7807    }
7808}
7809
7810/*
7811 * remember `obj' if needed.
7812 */
7813void
7814rb_gc_writebarrier_remember(VALUE obj)
7815{
7816 rb_objspace_t *objspace = &rb_objspace;
7817
7818 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
7819
7820 if (is_incremental_marking(objspace)) {
7821 if (RVALUE_BLACK_P(obj)) {
7822 gc_grey(objspace, obj);
7823 }
7824 }
7825 else {
7826 if (RVALUE_OLD_P(obj)) {
7827 rgengc_remember(objspace, obj);
7828 }
7829 }
7830}
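A sketch of when C code calls this entry point directly: after rewriting many slots of a wb-protected object without a per-slot barrier, a single catch-up call re-registers the owner (the buffer and its owner below are hypothetical):

#include "ruby/ruby.h"

/* Bulk-fill an internal VALUE buffer, then remember the owning object
 * once rather than issuing one write barrier per element. */
static void
bulk_fill(VALUE owner, VALUE *buf, long len, VALUE fill)
{
    for (long i = 0; i < len; i++) {
        buf[i] = fill;
    }
    rb_gc_writebarrier_remember(owner);
}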
7831
7832static st_table *rgengc_unprotect_logging_table;
7833
7834static int
7835rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
7836{
7837 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
7838 return ST_CONTINUE;
7839}
7840
7841static void
7842rgengc_unprotect_logging_exit_func(void)
7843{
7844 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
7845}
7846
7847void
7848rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
7849{
7850 VALUE obj = (VALUE)objptr;
7851
7852 if (rgengc_unprotect_logging_table == 0) {
7853 rgengc_unprotect_logging_table = st_init_strtable();
7854 atexit(rgengc_unprotect_logging_exit_func);
7855 }
7856
7857 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
7858 char buff[0x100];
7859 st_data_t cnt = 1;
7860 char *ptr = buff;
7861
7862 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
7863
7864 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
7865 cnt++;
7866 }
7867 else {
7868 ptr = (strdup)(buff);
7869 if (!ptr) rb_memerror();
7870 }
7871 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
7872 }
7873}
7874
7875void
7876rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
7877{
7878 rb_objspace_t *objspace = &rb_objspace;
7879
7880 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
7881 if (!RVALUE_OLD_P(dest)) {
7882            MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
7883            RVALUE_AGE_RESET_RAW(dest);
7884 }
7885 else {
7886 RVALUE_DEMOTE(objspace, dest);
7887 }
7888 }
7889
7890 check_rvalue_consistency(dest);
7891}
7892
7893/* RGENGC analysis information */
7894
7895VALUE
7896rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
7897{
7898 return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
7899}
7900
7901VALUE
7902rb_obj_rgengc_promoted_p(VALUE obj)
7903{
7904 return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
7905}
7906
7907size_t
7908rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
7909{
7910 size_t n = 0;
7911 static ID ID_marked;
7912 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
7913
7914 if (!ID_marked) {
7915#define I(s) ID_##s = rb_intern(#s);
7916 I(marked);
7917 I(wb_protected);
7918 I(old);
7919 I(marking);
7920 I(uncollectible);
7921 I(pinned);
7922#undef I
7923 }
7924
7925 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
7926 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
7927 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
7928 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
7929 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
7930 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
7931 return n;
7932}
7933
7934/* GC */
7935
7936void
7937rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
7938{
7939 struct heap_page *page = newobj_cache->using_page;
7940 RVALUE *freelist = newobj_cache->freelist;
7941 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
7942
7943 if (page && freelist) {
7944 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
7945 if (page->freelist) {
7946 RVALUE *p = page->freelist;
7947 asan_unpoison_object((VALUE)p, false);
7948 while (p->as.free.next) {
7949 RVALUE *prev = p;
7950 p = p->as.free.next;
7951 asan_poison_object((VALUE)prev);
7952 asan_unpoison_object((VALUE)p, false);
7953 }
7954 p->as.free.next = freelist;
7955 asan_poison_object((VALUE)p);
7956 }
7957 else {
7958 page->freelist = freelist;
7959 }
7960 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
7961 }
7962
7963 newobj_cache->using_page = NULL;
7964 newobj_cache->freelist = NULL;
7965}
7966
7967void
7968rb_gc_force_recycle(VALUE obj)
7969{
7970 rb_objspace_t *objspace = &rb_objspace;
7971    RB_VM_LOCK_ENTER();
7972    {
7973 int is_old = RVALUE_OLD_P(obj);
7974
7975 gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
7976
7977 if (is_old) {
7978 if (RVALUE_MARKED(obj)) {
7979 objspace->rgengc.old_objects--;
7980 }
7981 }
7982        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
7983        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
7984
7985
7986#if GC_ENABLE_INCREMENTAL_MARK
7987 if (is_incremental_marking(objspace)) {
7988 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
7989 invalidate_mark_stack(&objspace->mark_stack, obj);
7990            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7991        }
7992        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
7993    }
7994 else {
7995#endif
7996 if (is_old || GET_HEAP_PAGE(obj)->flags.before_sweep) {
7997            CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
7998        }
7999        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
8000#if GC_ENABLE_INCREMENTAL_MARK
8001 }
8002#endif
8003
8004 objspace->profile.total_freed_objects++;
8005
8006 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
8007
8008    /* Disable counting swept_slots because it has no meaning.
8009 * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
8010 * objspace->heap.swept_slots++;
8011 * }
8012 */
8013 }
8014    RB_VM_LOCK_LEAVE();
8015}
8016
8017#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
8018#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
8019#endif
8020
8021void
8022rb_gc_register_mark_object(VALUE obj)
8023{
8024    RB_VM_LOCK_ENTER();
8025    {
8026 VALUE ary_ary = GET_VM()->mark_object_ary;
8027 VALUE ary = rb_ary_last(0, 0, ary_ary);
8028
8029 if (ary == Qnil || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
8030            ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
8031            rb_ary_push(ary_ary, ary);
8032 }
8033
8034 rb_ary_push(ary, obj);
8035 }
8036    RB_VM_LOCK_LEAVE();
8037}
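Typical use of rb_gc_register_mark_object from an extension's init function; a registered object stays reachable for the life of the process (there is no unregister counterpart), so it suits long-lived literals. The extension name below is hypothetical:

#include "ruby/ruby.h"

void
Init_myext(void)
{
    VALUE fmt = rb_str_new_cstr("cached format string");
    rb_gc_register_mark_object(fmt);   /* permanently reachable from the VM */
    /* fmt may now be stashed in a C global without further bookkeeping */
}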
8038
8039void
8040rb_gc_register_address(VALUE *addr)
8041{
8042 rb_objspace_t *objspace = &rb_objspace;
8043 struct gc_list *tmp;
8044
8045 tmp = ALLOC(struct gc_list);
8046 tmp->next = global_list;
8047 tmp->varptr = addr;
8048 global_list = tmp;
8049}
8050
8051void
8052rb_gc_unregister_address(VALUE *addr)
8053{
8054 rb_objspace_t *objspace = &rb_objspace;
8055 struct gc_list *tmp = global_list;
8056
8057 if (tmp->varptr == addr) {
8058 global_list = tmp->next;
8059 xfree(tmp);
8060 return;
8061 }
8062 while (tmp->next) {
8063 if (tmp->next->varptr == addr) {
8064 struct gc_list *t = tmp->next;
8065
8066 tmp->next = tmp->next->next;
8067 xfree(t);
8068 break;
8069 }
8070 tmp = tmp->next;
8071 }
8072}
8073
8074void
8075rb_global_variable(VALUE *var)
8076{
8077    rb_gc_register_address(var);
8078}
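The usual pattern for the pair above: register the address of a C-level global before storing a Ruby object into it, so the global_list walk in gc_mark_roots keeps the object alive. rb_global_variable is simply the conventional spelling of rb_gc_register_address, as defined above; the variable name is illustrative:

#include "ruby/ruby.h"

static VALUE cached_table;   /* hypothetical C global holding a Ruby object */

void
Init_cache(void)
{
    rb_global_variable(&cached_table);
    cached_table = rb_hash_new();   /* now visible to the GC via global_list */
}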
8079
8080#define GC_NOTIFY 0
8081
8082enum {
8083    gc_stress_no_major,
8084    gc_stress_no_immediate_sweep,
8085    gc_stress_full_mark_after_malloc,
8086    gc_stress_max
8087};
8088
8089#define gc_stress_full_mark_after_malloc_p() \
8090 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
8091
8092static void
8093heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
8094{
8095 if (!heap->free_pages) {
8096 if (!heap_increment(objspace, heap)) {
8097 heap_set_increment(objspace, 1);
8098 heap_increment(objspace, heap);
8099 }
8100 }
8101}
8102
8103static int
8104ready_to_gc(rb_objspace_t *objspace)
8105{
8107 heap_ready_to_gc(objspace, heap_eden);
8108 return FALSE;
8109 }
8110 else {
8111 return TRUE;
8112 }
8113}
8114
8115static void
8116gc_reset_malloc_info(rb_objspace_t *objspace)
8117{
8118 gc_prof_set_malloc_info(objspace);
8119 {
8120 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
8121 size_t old_limit = malloc_limit;
8122
8123 if (inc > malloc_limit) {
8124 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
8125 if (malloc_limit > gc_params.malloc_limit_max) {
8126 malloc_limit = gc_params.malloc_limit_max;
8127 }
8128 }
8129 else {
8130 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
8131 if (malloc_limit < gc_params.malloc_limit_min) {
8132 malloc_limit = gc_params.malloc_limit_min;
8133 }
8134 }
8135
8136 if (0) {
8137 if (old_limit != malloc_limit) {
8138 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
8139 rb_gc_count(), old_limit, malloc_limit);
8140 }
8141 else {
8142 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
8144 }
8145 }
8146 }
8147
8148 /* reset oldmalloc info */
8149#if RGENGC_ESTIMATE_OLDMALLOC
8150 if (!is_full_marking(objspace)) {
8151 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
8152 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
8153 objspace->rgengc.oldmalloc_increase_limit =
8154 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
8155
8156 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
8157 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
8158 }
8159 }
8160
8161 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
8162 rb_gc_count(),
8163 objspace->rgengc.need_major_gc,
8164 objspace->rgengc.oldmalloc_increase,
8165 objspace->rgengc.oldmalloc_increase_limit,
8166 gc_params.oldmalloc_limit_max);
8167 }
8168 else {
8169 /* major GC */
8170 objspace->rgengc.oldmalloc_increase = 0;
8171
8172 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
8173 objspace->rgengc.oldmalloc_increase_limit =
8174 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
8175 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
8176 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
8177 }
8178 }
8179 }
8180#endif
8181}
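A worked example of the malloc_limit adjustment above, with invented numbers. Assuming a current limit of 16 MB, 20 MB malloc'ed since the last GC, and the default growth factor of 1.4 (RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR), the limit grows; when allocation stays under the limit it decays by the hard-coded factor 0.98:

#include <stdio.h>

int main(void)
{
    size_t malloc_limit = 16 * 1024 * 1024;   /* current limit (example) */
    size_t inc          = 20 * 1024 * 1024;   /* malloc'ed since last GC (example) */
    double growth       = 1.4;                /* default growth factor */

    if (inc > malloc_limit)
        malloc_limit = (size_t)(inc * growth);        /* grows, then clamped to malloc_limit_max */
    else
        malloc_limit = (size_t)(malloc_limit * 0.98); /* decays toward malloc_limit_min */

    printf("new malloc_limit = %zu bytes\n", malloc_limit);  /* 29360128 (28 MiB) here */
    return 0;
}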
8182
8183static int
8184garbage_collect(rb_objspace_t *objspace, int reason)
8185{
8186 int ret;
8187
8188    RB_VM_LOCK_ENTER();
8189    {
8190#if GC_PROFILE_MORE_DETAIL
8191 objspace->profile.prepare_time = getrusage_time();
8192#endif
8193
8194 gc_rest(objspace);
8195
8196#if GC_PROFILE_MORE_DETAIL
8197 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
8198#endif
8199
8200 ret = gc_start(objspace, reason);
8201 }
8202    RB_VM_LOCK_LEAVE();
8203
8204 return ret;
8205}
8206
8207static int
8208gc_start(rb_objspace_t *objspace, int reason)
8209{
8210 unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
8211 unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
8212
8213 /* reason may be clobbered, later, so keep set immediate_sweep here */
8214 objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
8215
8216 /* Explicitly enable compaction (GC.compact) */
8217 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
8218
8219 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
8220 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
8221
8222 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
8223    GC_ASSERT(!is_lazy_sweeping(heap_eden));
8224    GC_ASSERT(!is_incremental_marking(objspace));
8225
8226 unsigned int lock_lev;
8227 gc_enter(objspace, gc_enter_event_start, &lock_lev);
8228
8229#if RGENGC_CHECK_MODE >= 2
8230 gc_verify_internal_consistency(objspace);
8231#endif
8232
8233 if (ruby_gc_stressful) {
8234        int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
8235
8236 if ((flag & (1<<gc_stress_no_major)) == 0) {
8237 do_full_mark = TRUE;
8238 }
8239
8240 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
8241 }
8242 else {
8243 if (objspace->rgengc.need_major_gc) {
8244 reason |= objspace->rgengc.need_major_gc;
8245 do_full_mark = TRUE;
8246 }
8247 else if (RGENGC_FORCE_MAJOR_GC) {
8248 reason = GPR_FLAG_MAJOR_BY_FORCE;
8249 do_full_mark = TRUE;
8250 }
8251
8252        objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
8253    }
8254
8255 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
8256 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
8257 }
8258
8259#if GC_ENABLE_INCREMENTAL_MARK
8260 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
8261 objspace->flags.during_incremental_marking = FALSE;
8262 }
8263 else {
8264 objspace->flags.during_incremental_marking = do_full_mark;
8265 }
8266#endif
8267
8268 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
8269 objspace->flags.immediate_sweep = TRUE;
8270 }
8271
8272 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
8273
8274 gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
8275 reason,
8276 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
8277
8278#if USE_DEBUG_COUNTER
8279 RB_DEBUG_COUNTER_INC(gc_count);
8280
8281 if (reason & GPR_FLAG_MAJOR_MASK) {
8282 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
8283 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
8284 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
8285 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
8286#if RGENGC_ESTIMATE_OLDMALLOC
8287 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
8288#endif
8289 }
8290 else {
8291 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
8292 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
8293 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
8294 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
8295 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
8296 }
8297#endif
8298
8299 objspace->profile.count++;
8300 objspace->profile.latest_gc_info = reason;
8301    objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
8302    objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
8303    gc_prof_setup_new_record(objspace, reason);
8304 gc_reset_malloc_info(objspace);
8305 rb_transient_heap_start_marking(do_full_mark);
8306
8307 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
8308    GC_ASSERT(during_gc);
8309
8310 gc_prof_timer_start(objspace);
8311 {
8312 gc_marks(objspace, do_full_mark);
8313 }
8314 gc_prof_timer_stop(objspace);
8315
8316 gc_exit(objspace, gc_enter_event_start, &lock_lev);
8317 return TRUE;
8318}
8319
8320static void
8321gc_rest(rb_objspace_t *objspace)
8322{
8323 int marking = is_incremental_marking(objspace);
8324 int sweeping = is_lazy_sweeping(heap_eden);
8325
8326 if (marking || sweeping) {
8327 unsigned int lock_lev;
8328 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
8329
8330 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
8331
8332 if (is_incremental_marking(objspace)) {
8333 gc_marks_rest(objspace);
8334 }
8335        if (is_lazy_sweeping(heap_eden)) {
8336            gc_sweep_rest(objspace);
8337 }
8338 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
8339 }
8340}
8341
8342struct objspace_and_reason {
8343    rb_objspace_t *objspace;
8344    int reason;
8345};
8346
8347static void
8348gc_current_status_fill(rb_objspace_t *objspace, char *buff)
8349{
8350 int i = 0;
8351 if (is_marking(objspace)) {
8352 buff[i++] = 'M';
8353 if (is_full_marking(objspace)) buff[i++] = 'F';
8354#if GC_ENABLE_INCREMENTAL_MARK
8355 if (is_incremental_marking(objspace)) buff[i++] = 'I';
8356#endif
8357 }
8358 else if (is_sweeping(objspace)) {
8359 buff[i++] = 'S';
8360 if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L';
8361 }
8362 else {
8363 buff[i++] = 'N';
8364 }
8365 buff[i] = '\0';
8366}
8367
8368static const char *
8369gc_current_status(rb_objspace_t *objspace)
8370{
8371 static char buff[0x10];
8372 gc_current_status_fill(objspace, buff);
8373 return buff;
8374}
8375
8376#if PRINT_ENTER_EXIT_TICK
8377
8378static tick_t last_exit_tick;
8379static tick_t enter_tick;
8380static int enter_count = 0;
8381static char last_gc_status[0x10];
8382
8383static inline void
8384gc_record(rb_objspace_t *objspace, int direction, const char *event)
8385{
8386 if (direction == 0) { /* enter */
8387 enter_count++;
8388 enter_tick = tick();
8389 gc_current_status_fill(objspace, last_gc_status);
8390 }
8391 else { /* exit */
8392 tick_t exit_tick = tick();
8393 char current_gc_status[0x10];
8394 gc_current_status_fill(objspace, current_gc_status);
8395#if 1
8396 /* [last mutator time] [gc time] [event] */
8397 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
8398 enter_tick - last_exit_tick,
8399 exit_tick - enter_tick,
8400 event,
8401 last_gc_status, current_gc_status,
8402 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
8403 last_exit_tick = exit_tick;
8404#else
8405 /* [enter_tick] [gc time] [event] */
8406 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
8407 enter_tick,
8408 exit_tick - enter_tick,
8409 event,
8410 last_gc_status, current_gc_status,
8411 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
8412#endif
8413 }
8414}
8415#else /* PRINT_ENTER_EXIT_TICK */
8416static inline void
8417gc_record(rb_objspace_t *objspace, int direction, const char *event)
8418{
8419 /* null */
8420}
8421#endif /* PRINT_ENTER_EXIT_TICK */
8422
8423static const char *
8424gc_enter_event_cstr(enum gc_enter_event event)
8425{
8426 switch (event) {
8427 case gc_enter_event_start: return "start";
8428 case gc_enter_event_mark_continue: return "mark_continue";
8429 case gc_enter_event_sweep_continue: return "sweep_continue";
8430 case gc_enter_event_rest: return "rest";
8431 case gc_enter_event_finalizer: return "finalizer";
8432 case gc_enter_event_rb_memerror: return "rb_memerror";
8433 }
8434 return NULL;
8435}
8436
8437static void
8438gc_enter_count(enum gc_enter_event event)
8439{
8440 switch (event) {
8441 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
8442 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
8443 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
8444 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
8445 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
8446 case gc_enter_event_rb_memerror: /* nothing */ break;
8447 }
8448}
8449
8450static inline void
8451gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
8452{
8453 RB_VM_LOCK_ENTER_LEV(lock_lev);
8454
8455 switch (event) {
8456      case gc_enter_event_rest:
8457        if (!is_marking(objspace)) break;
8458 // fall through
8459      case gc_enter_event_start:
8460      case gc_enter_event_mark_continue:
8461        // stop other ractors
8462 rb_vm_barrier();
8463 break;
8464 default:
8465 break;
8466 }
8467
8468 gc_enter_count(event);
8469 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
8470 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
8471
8472 mjit_gc_start_hook();
8473
8474 during_gc = TRUE;
8475 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
8476 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
8477 gc_record(objspace, 0, gc_enter_event_cstr(event));
8478 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
8479}
8480
8481static inline void
8482gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
8483{
8484 GC_ASSERT(during_gc != 0);
8485
8486    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
8487 gc_record(objspace, 1, gc_enter_event_cstr(event));
8488 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
8489 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
8490 during_gc = FALSE;
8491
8492 mjit_gc_exit_hook();
8493 RB_VM_LOCK_LEAVE_LEV(lock_lev);
8494}
8495
8496static void *
8497gc_with_gvl(void *ptr)
8498{
8499 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
8500 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
8501}
8502
8503static int
8504garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
8505{
8506 if (dont_gc_val()) return TRUE;
8507 if (ruby_thread_has_gvl_p()) {
8508 return garbage_collect(objspace, reason);
8509 }
8510 else {
8511 if (ruby_native_thread_p()) {
8512 struct objspace_and_reason oar;
8513 oar.objspace = objspace;
8514 oar.reason = reason;
8515 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
8516 }
8517 else {
8518 /* no ruby thread */
8519 fprintf(stderr, "[FATAL] failed to allocate memory\n");
8520 exit(EXIT_FAILURE);
8521 }
8522 }
8523}
8524
8525static VALUE
8526gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
8527{
8528    rb_objspace_t *objspace = &rb_objspace;
8529    int reason = GPR_FLAG_FULL_MARK |
8530                 GPR_FLAG_IMMEDIATE_MARK |
8531                 GPR_FLAG_IMMEDIATE_SWEEP |
8532                 GPR_FLAG_METHOD;
8533
8534 /* For now, compact implies full mark / sweep, so ignore other flags */
8535 if (RTEST(compact)) {
8536        reason |= GPR_FLAG_COMPACT;
8537    } else {
8538 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
8539 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
8540 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
8541 }
8542
8543 garbage_collect(objspace, reason);
8544 gc_finalize_deferred(objspace);
8545
8546 return Qnil;
8547}
8548
8549static int
8550gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
8551{
8552    GC_ASSERT(!SPECIAL_CONST_P(obj));
8553
8554 switch (BUILTIN_TYPE(obj)) {
8555 case T_NONE:
8556 case T_NIL:
8557 case T_MOVED:
8558 case T_ZOMBIE:
8559 return FALSE;
8560 case T_SYMBOL:
8561 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
8562 return FALSE;
8563 }
8564 /* fall through */
8565 case T_STRING:
8566 case T_OBJECT:
8567 case T_FLOAT:
8568 case T_IMEMO:
8569 case T_ARRAY:
8570 case T_BIGNUM:
8571 case T_ICLASS:
8572 case T_MODULE:
8573 case T_REGEXP:
8574 case T_DATA:
8575 case T_MATCH:
8576 case T_STRUCT:
8577 case T_HASH:
8578 case T_FILE:
8579 case T_COMPLEX:
8580 case T_RATIONAL:
8581 case T_NODE:
8582 case T_CLASS:
8583 if (FL_TEST(obj, FL_FINALIZE)) {
8584 /* The finalizer table is a numtable. It looks up objects by address.
8585 * We can't mark the keys in the finalizer table because that would
8586 * prevent the objects from being collected. This check prevents
8587 * objects that are keys in the finalizer table from being moved
8588 * without directly pinning them. */
8589 if (st_is_member(finalizer_table, obj)) {
8590 return FALSE;
8591 }
8592 }
8593 GC_ASSERT(RVALUE_MARKED(obj));
8594 GC_ASSERT(!RVALUE_PINNED(obj));
8595
8596 return TRUE;
8597
8598 default:
8599 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
8600 break;
8601 }
8602
8603 return FALSE;
8604}
8605
8606static VALUE
8607gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free)
8608{
8609 int marked;
8610 int wb_unprotected;
8611 int uncollectible;
8612 int marking;
8613 RVALUE *dest = (RVALUE *)free;
8614 RVALUE *src = (RVALUE *)scan;
8615
8616 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
8617
8618 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
8619    GC_ASSERT(BUILTIN_TYPE(free) == T_NONE);
8620
8621 /* Save off bits for current object. */
8622 marked = rb_objspace_marked_object_p((VALUE)src);
8623 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
8624 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
8625 marking = RVALUE_MARKING((VALUE)src);
8626
8627 /* Clear bits for eventual T_MOVED */
8628    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
8629    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
8630    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
8631    CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
8632
8633 if (FL_TEST((VALUE)src, FL_EXIVAR)) {
8634 /* Same deal as below. Generic ivars are held in st tables.
8635 * Resizing the table could cause a GC to happen and we can't allow it */
8636 VALUE already_disabled = rb_gc_disable_no_rest();
8637 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
8638 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
8639 }
8640
8641 st_data_t srcid = (st_data_t)src, id;
8642
8643 /* If the source object's object_id has been seen, we need to update
8644 * the object to object id mapping. */
8645 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
8646 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
8647 /* inserting in the st table can cause the GC to run. We need to
8648 * prevent re-entry in to the GC since `gc_move` is running in the GC,
8649 * so temporarily disable the GC around the st table mutation */
8650 VALUE already_disabled = rb_gc_disable_no_rest();
8651 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
8652        st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
8653        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
8654 }
8655
8656 /* Move the object */
8657 memcpy(dest, src, sizeof(RVALUE));
8658 memset(src, 0, sizeof(RVALUE));
8659
8660 /* Set bits for object in new location */
8661    if (marking) {
8662        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
8663    }
8664    else {
8665        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
8666    }
8667
8668    if (marked) {
8669        MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
8670    }
8671    else {
8672        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
8673    }
8674
8675    if (wb_unprotected) {
8676        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
8677    }
8678    else {
8679        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
8680    }
8681
8682    if (uncollectible) {
8683        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
8684    }
8685    else {
8686        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
8687    }
8688
8689 /* Assign forwarding address */
8690 src->as.moved.flags = T_MOVED;
8691 src->as.moved.dummy = Qundef;
8692 src->as.moved.destination = (VALUE)dest;
8693    GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
8694
8695 return (VALUE)src;
8696}
8697
8698static int
8699compare_free_slots(const void *left, const void *right, void *dummy)
8700{
8701 struct heap_page *left_page;
8702 struct heap_page *right_page;
8703
8704 left_page = *(struct heap_page * const *)left;
8705 right_page = *(struct heap_page * const *)right;
8706
8707 return left_page->free_slots - right_page->free_slots;
8708}
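/* Editorial note: ruby_qsort() with this comparator orders pages ascending
 * by free_slots, e.g. free-slot counts {12, 0, 7} sort to {0, 7, 12}, so
 * the fullest pages come first in the sorted array. */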
8709
8710static void
8711gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
8712{
8713 size_t total_pages = heap_eden->total_pages;
8714 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
8715 struct heap_page *page = 0, **page_list = malloc(size);
8716 size_t i = 0;
8717
8718 list_for_each(&heap_eden->pages, page, page_node) {
8719 page_list[i++] = page;
8720 assert(page != NULL);
8721 }
8722 assert(total_pages > 0);
8723 assert((size_t)i == total_pages);
8724
8725 /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
8726 * head of the list, so empty pages will end up at the start of the heap */
8727 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
8728
8729 /* Reset the eden heap */
8730 list_head_init(&objspace->eden_heap.pages);
8731
8732 for (i = 0; i < total_pages; i++) {
8733 list_add(&heap_eden->pages, &page_list[i]->page_node);
8734 if (page_list[i]->free_slots != 0) {
8735 heap_add_freepage(heap_eden, page_list[i]);
8736 }
8737 }
8738
8739 free(page_list);
8740}
8741
8742static void
8743gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
8744{
8745 long i, len;
8746
8747 if (FL_TEST(v, ELTS_SHARED))
8748 return;
8749
8750 len = RARRAY_LEN(v);
8751 if (len > 0) {
8752 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
8753 for (i = 0; i < len; i++) {
8754 UPDATE_IF_MOVED(objspace, ptr[i]);
8755 }
8756 }
8757}
8758
8759static void
8760gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
8761{
8762 VALUE *ptr = ROBJECT_IVPTR(v);
8763
8764 uint32_t i, len = ROBJECT_NUMIV(v);
8765 for (i = 0; i < len; i++) {
8766 UPDATE_IF_MOVED(objspace, ptr[i]);
8767 }
8768}
8769
8770static int
8771hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
8772{
8773 rb_objspace_t *objspace = (rb_objspace_t *)argp;
8774
8775 if (gc_object_moved_p(objspace, (VALUE)*key)) {
8776 *key = rb_gc_location((VALUE)*key);
8777 }
8778
8779 if (gc_object_moved_p(objspace, (VALUE)*value)) {
8780 *value = rb_gc_location((VALUE)*value);
8781 }
8782
8783 return ST_CONTINUE;
8784}
8785
8786static int
8787hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
8788{
8789 rb_objspace_t *objspace;
8790
8791 objspace = (rb_objspace_t *)argp;
8792
8793 if (gc_object_moved_p(objspace, (VALUE)key)) {
8794 return ST_REPLACE;
8795 }
8796
8797 if (gc_object_moved_p(objspace, (VALUE)value)) {
8798 return ST_REPLACE;
8799 }
8800 return ST_CONTINUE;
8801}
8802
8803static int
8804hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
8805{
8806 rb_objspace_t *objspace = (rb_objspace_t *)argp;
8807
8808 if (gc_object_moved_p(objspace, (VALUE)*value)) {
8809 *value = rb_gc_location((VALUE)*value);
8810 }
8811
8812 return ST_CONTINUE;
8813}
8814
8815static int
8816hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
8817{
8818 rb_objspace_t *objspace;
8819
8820 objspace = (rb_objspace_t *)argp;
8821
8822 if (gc_object_moved_p(objspace, (VALUE)value)) {
8823 return ST_REPLACE;
8824 }
8825 return ST_CONTINUE;
8826}
8827
8828static void
8829gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
8830{
8831 if (!tbl || tbl->num_entries == 0) return;
8832
8833 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
8834 rb_raise(rb_eRuntimeError, "hash modified during iteration");
8835 }
8836}
8837
8838static void
8839gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
8840{
8841 if (!tbl || tbl->num_entries == 0) return;
8842
8843 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
8844 rb_raise(rb_eRuntimeError, "hash modified during iteration");
8845 }
8846}
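/* Editorial sketch of the st_foreach_with_replace() protocol assumed above
 * (simplified; see st.c for the real iteration and safety logic):
 *
 *     for each (key, value) in tbl:
 *         if (foreach_cb(key, value, arg, 0) == ST_REPLACE)
 *             replace_cb(&key, &value, arg, TRUE);  // rewrite in place
 *
 * The check callback flags entries that reference moved objects; the
 * replace callback then receives pointers so it can swap in the new
 * addresses without rehashing mid-iteration. */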
8847
8848/* Update MOVED references in an st_table */
8849void
8850rb_gc_update_tbl_refs(st_table *ptr)
8851{
8852 rb_objspace_t *objspace = &rb_objspace;
8853 gc_update_table_refs(objspace, ptr);
8854}
8855
8856static void
8857gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
8858{
8859 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
8860}
8861
8862static void
8863gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
8864{
8865 rb_method_definition_t *def = me->def;
8866
8867 UPDATE_IF_MOVED(objspace, me->owner);
8868 UPDATE_IF_MOVED(objspace, me->defined_class);
8869
8870 if (def) {
8871 switch (def->type) {
8872 case VM_METHOD_TYPE_ISEQ:
8873 if (def->body.iseq.iseqptr) {
8874 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
8875 }
8876 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
8877 break;
8878 case VM_METHOD_TYPE_ATTRSET:
8879 case VM_METHOD_TYPE_IVAR:
8880 UPDATE_IF_MOVED(objspace, def->body.attr.location);
8881 break;
8882 case VM_METHOD_TYPE_BMETHOD:
8883 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
8884 break;
8885 case VM_METHOD_TYPE_ALIAS:
8886 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
8887 return;
8888 case VM_METHOD_TYPE_REFINED:
8889 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
8890 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
8891 break;
8892 case VM_METHOD_TYPE_CFUNC:
8893 case VM_METHOD_TYPE_ZSUPER:
8894 case VM_METHOD_TYPE_MISSING:
8895 case VM_METHOD_TYPE_OPTIMIZED:
8896 case VM_METHOD_TYPE_UNDEF:
8897 case VM_METHOD_TYPE_NOTIMPLEMENTED:
8898 break;
8899 }
8900 }
8901}
8902
8903static void
8904gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
8905{
8906 long i;
8907
8908 for (i=0; i<n; i++) {
8909 UPDATE_IF_MOVED(objspace, values[i]);
8910 }
8911}
8912
8913static void
8914gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
8915{
8916 switch (imemo_type(obj)) {
8917 case imemo_env:
8918 {
8919 rb_env_t *env = (rb_env_t *)obj;
8920 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
8921 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
8922 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
8923 }
8924 break;
8925 case imemo_cref:
8926 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass);
8927 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
8928 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
8929 break;
8930 case imemo_svar:
8931 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
8932 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
8933 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
8934 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
8935 break;
8936 case imemo_throw_data:
8937 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
8938 break;
8939 case imemo_ifunc:
8940 break;
8941 case imemo_memo:
8942 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
8943 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
8944 break;
8945 case imemo_ment:
8946 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
8947 break;
8948 case imemo_iseq:
8949 rb_iseq_update_references((rb_iseq_t *)obj);
8950 break;
8951 case imemo_ast:
8952 rb_ast_update_references((rb_ast_t *)obj);
8953 break;
8954 case imemo_callcache:
8955 {
8956 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
8957 if (cc->klass) {
8958 UPDATE_IF_MOVED(objspace, cc->klass);
8959 if (!is_live_object(objspace, cc->klass)) {
8960 *((VALUE *)(&cc->klass)) = (VALUE)0;
8961 }
8962 }
8963
8964 if (cc->cme_) {
8965 TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
8966 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
8967 *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
8968 }
8969 }
8970 }
8971 break;
8972 case imemo_constcache:
8973 {
8974 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
8975 UPDATE_IF_MOVED(objspace, ice->value);
8976 }
8977 break;
8978 case imemo_parser_strterm:
8979 case imemo_tmpbuf:
8980 case imemo_callinfo:
8981 break;
8982 default:
8983 rb_bug("not reachable %d", imemo_type(obj));
8984 break;
8985 }
8986}
8987
8988static enum rb_id_table_iterator_result
8989check_id_table_move(ID id, VALUE value, void *data)
8990{
8991 rb_objspace_t *objspace = (rb_objspace_t *)data;
8992
8993 if (gc_object_moved_p(objspace, (VALUE)value)) {
8994 return ID_TABLE_REPLACE;
8995 }
8996
8997 return ID_TABLE_CONTINUE;
8998}
8999
9000/* Returns the new location of an object, if it moved. Otherwise returns
9001 * the existing location. */
9002VALUE
9003rb_gc_location(VALUE value)
9004{
9005
9006 VALUE destination;
9007
9008 if (!SPECIAL_CONST_P(value)) {
9009 void *poisoned = asan_poisoned_object_p(value);
9010 asan_unpoison_object(value, false);
9011
9012 if (BUILTIN_TYPE(value) == T_MOVED) {
9013 destination = (VALUE)RMOVED(value)->destination;
9014 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
9015 }
9016 else {
9017 destination = value;
9018 }
9019
9020 /* Re-poison slot if it's not the one we want */
9021 if (poisoned) {
9022 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
9023 asan_poison_object(value);
9024 }
9025 }
9026 else {
9027 destination = value;
9028 }
9029
9030 return destination;
9031}
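/* Editorial sketch: a typical caller of rb_gc_location() is a C
 * extension's dcompact hook (struct and function names here are
 * hypothetical, for illustration only):
 *
 *     static void my_compact(void *ptr)
 *     {
 *         struct my_wrapper *w = ptr;
 *         w->obj = rb_gc_location(w->obj);  // refresh possibly-moved VALUE
 *     }
 *
 * The T_DATA branch of gc_update_object_references() below is where such
 * hooks are invoked. */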
9032
9033static enum rb_id_table_iterator_result
9034update_id_table(ID *key, VALUE * value, void *data, int existing)
9035{
9036 rb_objspace_t *objspace = (rb_objspace_t *)data;
9037
9038 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9039 *value = rb_gc_location((VALUE)*value);
9040 }
9041
9042 return ID_TABLE_CONTINUE;
9043}
9044
9045static void
9046update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
9047{
9048 if (tbl) {
9049 rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
9050 }
9051}
9052
9053static enum rb_id_table_iterator_result
9054update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
9055{
9056 rb_objspace_t *objspace = (rb_objspace_t *)data;
9057 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
9058 VM_ASSERT(vm_ccs_p(ccs));
9059
9060 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
9061 ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
9062 }
9063
9064 for (int i=0; i<ccs->len; i++) {
9065 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
9066 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
9067 }
9068 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
9069 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
9070 }
9071 }
9072
9073 // do not replace
9074 return ID_TABLE_CONTINUE;
9075}
9076
9077static void
9078update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
9079{
9080 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
9081 if (tbl) {
9082 rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
9083 }
9084}
9085
9086static enum rb_id_table_iterator_result
9087update_const_table(VALUE value, void *data)
9088{
9089 rb_const_entry_t *ce = (rb_const_entry_t *)value;
9090 rb_objspace_t * objspace = (rb_objspace_t *)data;
9091
9092 if (gc_object_moved_p(objspace, ce->value)) {
9093 ce->value = rb_gc_location(ce->value);
9094 }
9095
9096 if (gc_object_moved_p(objspace, ce->file)) {
9097 ce->file = rb_gc_location(ce->file);
9098 }
9099
9100 return ID_TABLE_CONTINUE;
9101}
9102
9103static void
9104update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
9105{
9106 if (!tbl) return;
9107 rb_id_table_foreach_values(tbl, update_const_table, objspace);
9108}
9109
9110static void
9111update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
9112{
9113 while (entry) {
9114 UPDATE_IF_MOVED(objspace, entry->klass);
9115 entry = entry->next;
9116 }
9117}
9118
9119static int
9120update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
9121{
9122 rb_objspace_t *objspace = (rb_objspace_t *)arg;
9123 struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
9124 UPDATE_IF_MOVED(objspace, ent->class_value);
9125 return ST_CONTINUE;
9126}
9127
9128static void
9129update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
9130{
9131 UPDATE_IF_MOVED(objspace, ext->origin_);
9132 UPDATE_IF_MOVED(objspace, ext->refined_class);
9133 update_subclass_entries(objspace, ext->subclasses);
9134
9135 // ext->iv_index_tbl
9136 if (ext->iv_index_tbl) {
9137 st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
9138 }
9139}
9140
9141static void
9142gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
9143{
9144 RVALUE *any = RANY(obj);
9145
9146 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
9147
9148 switch (BUILTIN_TYPE(obj)) {
9149 case T_CLASS:
9150 case T_MODULE:
9151 if (RCLASS_SUPER((VALUE)obj)) {
9152 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9153 }
9154 if (!RCLASS_EXT(obj)) break;
9155 update_m_tbl(objspace, RCLASS_M_TBL(obj));
9156 update_cc_tbl(objspace, obj);
9157
9158 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9159
9160 update_class_ext(objspace, RCLASS_EXT(obj));
9161 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
9162 break;
9163
9164 case T_ICLASS:
9165 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
9166 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
9167 update_m_tbl(objspace, RCLASS_M_TBL(obj));
9168 }
9169 if (RCLASS_SUPER((VALUE)obj)) {
9170 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9171 }
9172 if (!RCLASS_EXT(obj)) break;
9173 if (RCLASS_IV_TBL(obj)) {
9174 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9175 }
9176 update_class_ext(objspace, RCLASS_EXT(obj));
9177 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
9178 update_cc_tbl(objspace, obj);
9179 break;
9180
9181 case T_IMEMO:
9182 gc_ref_update_imemo(objspace, obj);
9183 return;
9184
9185 case T_NIL:
9186 case T_FIXNUM:
9187 case T_NODE:
9188 case T_MOVED:
9189 case T_NONE:
9190 /* These can't move */
9191 return;
9192
9193 case T_ARRAY:
9194 if (FL_TEST(obj, ELTS_SHARED)) {
9195 UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
9196 }
9197 else {
9198 gc_ref_update_array(objspace, obj);
9199 }
9200 break;
9201
9202 case T_HASH:
9203 gc_ref_update_hash(objspace, obj);
9204 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
9205 break;
9206
9207 case T_STRING:
9208 if (STR_SHARED_P(obj)) {
9209 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
9210 }
9211 break;
9212
9213 case T_DATA:
9214 /* Call the compaction callback, if it exists */
9215 {
9216 void *const ptr = DATA_PTR(obj);
9217 if (ptr) {
9218 if (RTYPEDDATA_P(obj)) {
9219 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
9220 if (compact_func) (*compact_func)(ptr);
9221 }
9222 }
9223 }
9224 break;
9225
9226 case T_OBJECT:
9227 gc_ref_update_object(objspace, obj);
9228 break;
9229
9230 case T_FILE:
9231 if (any->as.file.fptr) {
9232 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
9233 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
9234 UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
9235 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
9236 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
9237 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
9238 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
9239 }
9240 break;
9241 case T_REGEXP:
9242 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
9243 break;
9244
9245 case T_SYMBOL:
9246 if (DYNAMIC_SYM_P((VALUE)any)) {
9247 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
9248 }
9249 break;
9250
9251 case T_FLOAT:
9252 case T_BIGNUM:
9253 break;
9254
9255 case T_MATCH:
9256 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
9257
9258 if (any->as.match.str) {
9259 UPDATE_IF_MOVED(objspace, any->as.match.str);
9260 }
9261 break;
9262
9263 case T_RATIONAL:
9264 UPDATE_IF_MOVED(objspace, any->as.rational.num);
9265 UPDATE_IF_MOVED(objspace, any->as.rational.den);
9266 break;
9267
9268 case T_COMPLEX:
9269 UPDATE_IF_MOVED(objspace, any->as.complex.real);
9270 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
9271
9272 break;
9273
9274 case T_STRUCT:
9275 {
9276 long i, len = RSTRUCT_LEN(obj);
9277 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
9278
9279 for (i = 0; i < len; i++) {
9280 UPDATE_IF_MOVED(objspace, ptr[i]);
9281 }
9282 }
9283 break;
9284 default:
9285#if GC_DEBUG
9286 rb_gcdebug_print_obj_condition((VALUE)obj);
9287 rb_obj_info_dump(obj);
9288 rb_bug("unreachable");
9289#endif
9290 break;
9291
9292 }
9293
9294 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
9295
9296 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
9297}
9298
9299static int
9300gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
9301{
9302 VALUE v = (VALUE)vstart;
9303 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
9304 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
9305 page->flags.has_uncollectible_shady_objects = FALSE;
9306 page->flags.has_remembered_objects = FALSE;
9307
9308 /* For each object on the page */
9309 for (; v != (VALUE)vend; v += stride) {
9310 void *poisoned = asan_poisoned_object_p(v);
9311 asan_unpoison_object(v, false);
9312
9313 switch (BUILTIN_TYPE(v)) {
9314 case T_NONE:
9315 case T_MOVED:
9316 case T_ZOMBIE:
9317 break;
9318 default:
9319 if (RVALUE_WB_UNPROTECTED(v)) {
9320 page->flags.has_uncollectible_shady_objects = TRUE;
9321 }
9322 if (RVALUE_PAGE_MARKING(page, v)) {
9323 page->flags.has_remembered_objects = TRUE;
9324 }
9325 if (page->flags.before_sweep) {
9326 if (RVALUE_MARKED(v)) {
9327 gc_update_object_references(objspace, v);
9328 }
9329 } else {
9330 gc_update_object_references(objspace, v);
9331 }
9332 }
9333
9334 if (poisoned) {
9335 asan_poison_object(v);
9336 }
9337 }
9338
9339 return 0;
9340}
9341
9342extern rb_symbols_t ruby_global_symbols;
9343#define global_symbols ruby_global_symbols
9344
9345static void
9346gc_update_references(rb_objspace_t * objspace, rb_heap_t *heap)
9347{
9348 rb_execution_context_t *ec = GET_EC();
9349 rb_vm_t *vm = rb_ec_vm_ptr(ec);
9350 short should_set_mark_bits = 1;
9351
9352 struct heap_page *page = NULL;
9353
9354 list_for_each(&heap->pages, page, page_node) {
9355 gc_ref_update(page->start, page->start + page->total_slots, sizeof(RVALUE), objspace, page);
9356 if (page == heap->sweeping_page) {
9357 should_set_mark_bits = 0;
9358 }
9359 if (should_set_mark_bits) {
9360 gc_setup_mark_bits(page);
9361 }
9362 }
9363 rb_vm_update_references(vm);
9364 rb_transient_heap_update_references();
9365 rb_gc_update_global_tbl();
9366 global_symbols.ids = rb_gc_location(global_symbols.ids);
9367 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
9368 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
9369 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
9370 gc_update_table_refs(objspace, global_symbols.str_sym);
9371 gc_update_table_refs(objspace, finalizer_table);
9372}
9373
9374static VALUE type_sym(size_t type);
9375
9376static VALUE
9377gc_compact_stats(rb_execution_context_t *ec, VALUE self)
9378{
9379 size_t i;
9380 rb_objspace_t *objspace = &rb_objspace;
9381 VALUE h = rb_hash_new();
9382 VALUE considered = rb_hash_new();
9383 VALUE moved = rb_hash_new();
9384
9385 for (i=0; i<T_MASK; i++) {
9386 if(objspace->rcompactor.considered_count_table[i]) {
9387 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
9388 }
9389
9390 if(objspace->rcompactor.moved_count_table[i]) {
9391 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
9392 }
9393 }
9394
9395 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
9396 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
9397
9398 return h;
9399}
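/* Editorial note: at the Ruby level this hash is what GC.compact returns,
 * shaped like { :considered => { :T_STRING => n, ... },
 *               :moved => { :T_STRING => m, ... } } (counts illustrative;
 * they vary per process). */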
9400
9401static void
9402root_obj_check_moved_i(const char *category, VALUE obj, void *data)
9403{
9404 if (gc_object_moved_p(&rb_objspace, obj)) {
9405 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
9406 }
9407}
9408
9409static void
9410reachable_object_check_moved_i(VALUE ref, void *data)
9411{
9412 VALUE parent = (VALUE)data;
9413 if (gc_object_moved_p(&rb_objspace, ref)) {
9414 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
9415 }
9416}
9417
9418static int
9419heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
9420{
9421 VALUE v = (VALUE)vstart;
9422 for (; v != (VALUE)vend; v += stride) {
9423 if (gc_object_moved_p(&rb_objspace, v)) {
9424 /* Moved object still on the heap, something may have a reference. */
9425 }
9426 else {
9427 void *poisoned = asan_poisoned_object_p(v);
9428 asan_unpoison_object(v, false);
9429
9430 switch (BUILTIN_TYPE(v)) {
9431 case T_NONE:
9432 case T_ZOMBIE:
9433 break;
9434 default:
9435 if (!rb_objspace_garbage_object_p(v)) {
9436 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
9437 }
9438 }
9439
9440 if (poisoned) {
9441 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
9442 asan_poison_object(v);
9443 }
9444 }
9445 }
9446
9447 return 0;
9448}
9449
9450static VALUE
9451gc_compact(rb_execution_context_t *ec, VALUE self)
9452{
9453 /* Run GC with compaction enabled */
9454 gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
9455
9456 return gc_compact_stats(ec, self);
9457}
9458
9459static VALUE
9460gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
9461{
9462 rb_objspace_t *objspace = &rb_objspace;
9463
9464 /* Clear the heap. */
9465 gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
9466
9467 RB_VM_LOCK_ENTER();
9468 {
9469 gc_rest(objspace);
9470
9471 if (RTEST(double_heap)) {
9472 heap_add_pages(objspace, heap_eden, heap_allocated_pages);
9473 }
9474
9475 if (RTEST(toward_empty)) {
9476 gc_sort_heap_by_empty_slots(objspace);
9477 }
9478 }
9479 RB_VM_LOCK_LEAVE();
9480
9481 gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
9482
9483 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
9484 objspace_each_objects(objspace, heap_check_moved_i, NULL);
9485
9486 return gc_compact_stats(ec, self);
9487}
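/* Editorial note: this backs GC.verify_compaction_references. Doubling the
 * heap and sorting toward empty pages maximizes object movement, so a
 * reference somebody forgot to update is far more likely to be caught by
 * the moved-object checks that follow the compacting GC. */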
9488
9489VALUE
9490rb_gc_start(void)
9491{
9492 rb_gc();
9493 return Qnil;
9494}
9495
9496void
9497rb_gc(void)
9498{
9499 rb_objspace_t *objspace = &rb_objspace;
9500 int reason = GPR_DEFAULT_REASON;
9501 garbage_collect(objspace, reason);
9502}
9503
9504int
9505rb_during_gc(void)
9506{
9507 rb_objspace_t *objspace = &rb_objspace;
9508 return during_gc;
9509}
9510
9511#if RGENGC_PROFILE >= 2
9512
9513static const char *type_name(int type, VALUE obj);
9514
9515static void
9516gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
9517{
9518 VALUE result = rb_hash_new_with_size(T_MASK);
9519 int i;
9520 for (i=0; i<T_MASK; i++) {
9521 const char *type = type_name(i, 0);
9522 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
9523 }
9524 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
9525}
9526#endif
9527
9528size_t
9529rb_gc_count(void)
9530{
9531 return rb_objspace.profile.count;
9532}
9533
9534static VALUE
9535gc_count(rb_execution_context_t *ec, VALUE self)
9536{
9537 return SIZET2NUM(rb_gc_count());
9538}
9539
9540static VALUE
9541gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
9542{
9543 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
9544 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
9545#if RGENGC_ESTIMATE_OLDMALLOC
9546 static VALUE sym_oldmalloc;
9547#endif
9548 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
9549 static VALUE sym_none, sym_marking, sym_sweeping;
9550 VALUE hash = Qnil, key = Qnil;
9551 VALUE major_by;
9552 VALUE flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
9553
9554 if (SYMBOL_P(hash_or_key)) {
9555 key = hash_or_key;
9556 }
9557 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
9558 hash = hash_or_key;
9559 }
9560 else {
9561 rb_raise(rb_eTypeError, "non-hash or symbol given");
9562 }
9563
9564 if (sym_major_by == Qnil) {
9565#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
9566 S(major_by);
9567 S(gc_by);
9568 S(immediate_sweep);
9569 S(have_finalizer);
9570 S(state);
9571
9572 S(stress);
9573 S(nofree);
9574 S(oldgen);
9575 S(shady);
9576 S(force);
9577#if RGENGC_ESTIMATE_OLDMALLOC
9578 S(oldmalloc);
9579#endif
9580 S(newobj);
9581 S(malloc);
9582 S(method);
9583 S(capi);
9584
9585 S(none);
9586 S(marking);
9587 S(sweeping);
9588#undef S
9589 }
9590
9591#define SET(name, attr) \
9592 if (key == sym_##name) \
9593 return (attr); \
9594 else if (hash != Qnil) \
9595 rb_hash_aset(hash, sym_##name, (attr));
9596
9597 major_by =
9598 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
9599 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
9600 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
9601 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
9602#if RGENGC_ESTIMATE_OLDMALLOC
9603 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
9604#endif
9605 Qnil;
9606 SET(major_by, major_by);
9607
9608 SET(gc_by,
9609 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
9610 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
9611 (flags & GPR_FLAG_METHOD) ? sym_method :
9612 (flags & GPR_FLAG_CAPI) ? sym_capi :
9613 (flags & GPR_FLAG_STRESS) ? sym_stress :
9614 Qnil
9615 );
9616
9617 SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
9618 SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
9619
9620 if (orig_flags == 0) {
9621 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
9622 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
9623 }
9624#undef SET
9625
9626 if (!NIL_P(key)) {/* matched key should return above */
9627 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
9628 }
9629
9630 return hash;
9631}
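/* Editorial note: this decoder serves GC.latest_gc_info. A possible
 * (illustrative) Ruby-level result is
 * { :major_by => nil, :gc_by => :newobj, :have_finalizer => false,
 *   :immediate_sweep => false, :state => :none }; passing a Symbol key
 * instead returns just that one entry. */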
9632
9633VALUE
9635{
9636 rb_objspace_t *objspace = &rb_objspace;
9637 return gc_info_decode(objspace, key, 0);
9638}
9639
9640static VALUE
9641gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
9642{
9643 rb_objspace_t *objspace = &rb_objspace;
9644
9645 if (NIL_P(arg)) {
9646 arg = rb_hash_new();
9647 }
9648 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
9649 rb_raise(rb_eTypeError, "non-hash or symbol given");
9650 }
9651
9652 return gc_info_decode(objspace, arg, 0);
9653}
9654
9655enum gc_stat_sym {
9656 gc_stat_sym_count,
9657 gc_stat_sym_heap_allocated_pages,
9658 gc_stat_sym_heap_sorted_length,
9659 gc_stat_sym_heap_allocatable_pages,
9660 gc_stat_sym_heap_available_slots,
9661 gc_stat_sym_heap_live_slots,
9662 gc_stat_sym_heap_free_slots,
9663 gc_stat_sym_heap_final_slots,
9664 gc_stat_sym_heap_marked_slots,
9665 gc_stat_sym_heap_eden_pages,
9666 gc_stat_sym_heap_tomb_pages,
9667 gc_stat_sym_total_allocated_pages,
9668 gc_stat_sym_total_freed_pages,
9669 gc_stat_sym_total_allocated_objects,
9670 gc_stat_sym_total_freed_objects,
9671 gc_stat_sym_malloc_increase_bytes,
9672 gc_stat_sym_malloc_increase_bytes_limit,
9673 gc_stat_sym_minor_gc_count,
9674 gc_stat_sym_major_gc_count,
9675 gc_stat_sym_compact_count,
9676 gc_stat_sym_read_barrier_faults,
9677 gc_stat_sym_total_moved_objects,
9678 gc_stat_sym_remembered_wb_unprotected_objects,
9679 gc_stat_sym_remembered_wb_unprotected_objects_limit,
9680 gc_stat_sym_old_objects,
9681 gc_stat_sym_old_objects_limit,
9682#if RGENGC_ESTIMATE_OLDMALLOC
9683 gc_stat_sym_oldmalloc_increase_bytes,
9684 gc_stat_sym_oldmalloc_increase_bytes_limit,
9685#endif
9686#if RGENGC_PROFILE
9687 gc_stat_sym_total_generated_normal_object_count,
9688 gc_stat_sym_total_generated_shady_object_count,
9689 gc_stat_sym_total_shade_operation_count,
9690 gc_stat_sym_total_promoted_count,
9691 gc_stat_sym_total_remembered_normal_object_count,
9692 gc_stat_sym_total_remembered_shady_object_count,
9693#endif
9694 gc_stat_sym_last
9695};
9696
9697static VALUE gc_stat_symbols[gc_stat_sym_last];
9698
9699static void
9700setup_gc_stat_symbols(void)
9701{
9702 if (gc_stat_symbols[0] == 0) {
9703#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
9704 S(count);
9705 S(heap_allocated_pages);
9706 S(heap_sorted_length);
9707 S(heap_allocatable_pages);
9708 S(heap_available_slots);
9709 S(heap_live_slots);
9710 S(heap_free_slots);
9711 S(heap_final_slots);
9712 S(heap_marked_slots);
9713 S(heap_eden_pages);
9714 S(heap_tomb_pages);
9715 S(total_allocated_pages);
9716 S(total_freed_pages);
9717 S(total_allocated_objects);
9718 S(total_freed_objects);
9719 S(malloc_increase_bytes);
9720 S(malloc_increase_bytes_limit);
9721 S(minor_gc_count);
9722 S(major_gc_count);
9723 S(compact_count);
9724 S(read_barrier_faults);
9725 S(total_moved_objects);
9726 S(remembered_wb_unprotected_objects);
9727 S(remembered_wb_unprotected_objects_limit);
9728 S(old_objects);
9729 S(old_objects_limit);
9730#if RGENGC_ESTIMATE_OLDMALLOC
9731 S(oldmalloc_increase_bytes);
9732 S(oldmalloc_increase_bytes_limit);
9733#endif
9734#if RGENGC_PROFILE
9735 S(total_generated_normal_object_count);
9736 S(total_generated_shady_object_count);
9737 S(total_shade_operation_count);
9738 S(total_promoted_count);
9739 S(total_remembered_normal_object_count);
9740 S(total_remembered_shady_object_count);
9741#endif /* RGENGC_PROFILE */
9742#undef S
9743 }
9744}
9745
9746static size_t
9747gc_stat_internal(VALUE hash_or_sym)
9748{
9749 rb_objspace_t *objspace = &rb_objspace;
9750 VALUE hash = Qnil, key = Qnil;
9751
9752 setup_gc_stat_symbols();
9753
9754 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
9755 hash = hash_or_sym;
9756 }
9757 else if (SYMBOL_P(hash_or_sym)) {
9758 key = hash_or_sym;
9759 }
9760 else {
9761 rb_raise(rb_eTypeError, "non-hash or symbol argument");
9762 }
9763
9764#define SET(name, attr) \
9765 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
9766 return attr; \
9767 else if (hash != Qnil) \
9768 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
9769
9770 SET(count, objspace->profile.count);
9771
9772 /* implementation dependent counters */
9773 SET(heap_allocated_pages, heap_allocated_pages);
9774 SET(heap_sorted_length, heap_pages_sorted_length);
9775 SET(heap_allocatable_pages, heap_allocatable_pages);
9776 SET(heap_available_slots, objspace_available_slots(objspace));
9777 SET(heap_live_slots, objspace_live_slots(objspace));
9778 SET(heap_free_slots, objspace_free_slots(objspace));
9779 SET(heap_final_slots, heap_pages_final_slots);
9780 SET(heap_marked_slots, objspace->marked_slots);
9781 SET(heap_eden_pages, heap_eden->total_pages);
9782 SET(heap_tomb_pages, heap_tomb->total_pages);
9783 SET(total_allocated_pages, objspace->profile.total_allocated_pages);
9784 SET(total_freed_pages, objspace->profile.total_freed_pages);
9785 SET(total_allocated_objects, objspace->total_allocated_objects);
9786 SET(total_freed_objects, objspace->profile.total_freed_objects);
9787 SET(malloc_increase_bytes, malloc_increase);
9788 SET(malloc_increase_bytes_limit, malloc_limit);
9789 SET(minor_gc_count, objspace->profile.minor_gc_count);
9790 SET(major_gc_count, objspace->profile.major_gc_count);
9791 SET(compact_count, objspace->profile.compact_count);
9792 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
9793 SET(total_moved_objects, objspace->rcompactor.total_moved);
9794 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
9795 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
9796 SET(old_objects, objspace->rgengc.old_objects);
9797 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
9798#if RGENGC_ESTIMATE_OLDMALLOC
9799 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
9800 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
9801#endif
9802
9803#if RGENGC_PROFILE
9804 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
9805 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
9806 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
9807 SET(total_promoted_count, objspace->profile.total_promoted_count);
9808 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
9809 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
9810#endif /* RGENGC_PROFILE */
9811#undef SET
9812
9813 if (!NIL_P(key)) { /* matched key should return above */
9814 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
9815 }
9816
9817#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
9818 if (hash != Qnil) {
9819 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
9820 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
9821 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
9822 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
9823 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
9824 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
9825 }
9826#endif
9827
9828 return 0;
9829}
9830
9831static VALUE
9832gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
9833{
9834 if (NIL_P(arg)) {
9835 arg = rb_hash_new();
9836 }
9837 else if (SYMBOL_P(arg)) {
9838 size_t value = gc_stat_internal(arg);
9839 return SIZET2NUM(value);
9840 }
9841 else if (RB_TYPE_P(arg, T_HASH)) {
9842 // ok
9843 }
9844 else {
9845 rb_raise(rb_eTypeError, "non-hash or symbol given");
9846 }
9847
9848 gc_stat_internal(arg);
9849 return arg;
9850}
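/* Editorial note: this implements GC.stat. Passing a Symbol short-circuits
 * inside gc_stat_internal() and returns a single counter, e.g.
 * GC.stat(:count); passing nil (or a Hash) fills and returns the whole
 * table in one pass. */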
9851
9852size_t
9853rb_gc_stat(VALUE key)
9854{
9855 if (SYMBOL_P(key)) {
9856 size_t value = gc_stat_internal(key);
9857 return value;
9858 }
9859 else {
9860 gc_stat_internal(key);
9861 return 0;
9862 }
9863}
9864
9865static VALUE
9866gc_stress_get(rb_execution_context_t *ec, VALUE self)
9867{
9868 rb_objspace_t *objspace = &rb_objspace;
9869 return ruby_gc_stress_mode;
9870}
9871
9872static void
9873gc_stress_set(rb_objspace_t *objspace, VALUE flag)
9874{
9875 objspace->flags.gc_stressful = RTEST(flag);
9876 objspace->gc_stress_mode = flag;
9877}
9878
9879static VALUE
9880gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
9881{
9882 rb_objspace_t *objspace = &rb_objspace;
9883 gc_stress_set(objspace, flag);
9884 return flag;
9885}
9886
9887VALUE
9888rb_gc_enable(void)
9889{
9890 rb_objspace_t *objspace = &rb_objspace;
9891 return rb_objspace_gc_enable(objspace);
9892}
9893
9894VALUE
9895rb_objspace_gc_enable(rb_objspace_t *objspace)
9896{
9897 int old = dont_gc_val();
9898
9899 dont_gc_off();
9900 return old ? Qtrue : Qfalse;
9901}
9902
9903static VALUE
9904gc_enable(rb_execution_context_t *ec, VALUE _)
9905{
9906 return rb_gc_enable();
9907}
9908
9909VALUE
9910rb_gc_disable_no_rest(void)
9911{
9912 rb_objspace_t *objspace = &rb_objspace;
9913 return gc_disable_no_rest(objspace);
9914}
9915
9916static VALUE
9917gc_disable_no_rest(rb_objspace_t *objspace)
9918{
9919 int old = dont_gc_val();
9920 dont_gc_on();
9921 return old ? Qtrue : Qfalse;
9922}
9923
9924VALUE
9925rb_gc_disable(void)
9926{
9927 rb_objspace_t *objspace = &rb_objspace;
9928 return rb_objspace_gc_disable(objspace);
9929}
9930
9931VALUE
9932rb_objspace_gc_disable(rb_objspace_t *objspace)
9933{
9934 gc_rest(objspace);
9935 return gc_disable_no_rest(objspace);
9936}
9937
9938static VALUE
9939gc_disable(rb_execution_context_t *ec, VALUE _)
9940{
9941 return rb_gc_disable();
9942}
9943
9944static VALUE
9945gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
9946{
9947#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
9948 /* If Ruby's heap pages are not a multiple of the system page size, we
9949 * cannot use mprotect for the read barrier, so we must disable automatic
9950 * compaction. */
9951 int pagesize;
9952 pagesize = (int)sysconf(_SC_PAGE_SIZE);
9953 if ((HEAP_PAGE_SIZE % pagesize) != 0) {
9954 rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
9955 }
9956#endif
9957 ruby_enable_autocompact = RTEST(v);
9958 return v;
9959}
9960
9961static VALUE
9962gc_get_auto_compact(rb_execution_context_t *ec, VALUE _)
9963{
9964 return ruby_enable_autocompact ? Qtrue : Qfalse;
9965}
9966
9967static int
9968get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
9969{
9970 char *ptr = getenv(name);
9971 ssize_t val;
9972
9973 if (ptr != NULL && *ptr) {
9974 size_t unit = 0;
9975 char *end;
9976#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
9977 val = strtoll(ptr, &end, 0);
9978#else
9979 val = strtol(ptr, &end, 0);
9980#endif
9981 switch (*end) {
9982 case 'k': case 'K':
9983 unit = 1024;
9984 ++end;
9985 break;
9986 case 'm': case 'M':
9987 unit = 1024*1024;
9988 ++end;
9989 break;
9990 case 'g': case 'G':
9991 unit = 1024*1024*1024;
9992 ++end;
9993 break;
9994 }
9995 while (*end && isspace((unsigned char)*end)) end++;
9996 if (*end) {
9997 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9998 return 0;
9999 }
10000 if (unit > 0) {
10001 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
10002 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
10003 return 0;
10004 }
10005 val *= unit;
10006 }
10007 if (val > 0 && (size_t)val > lower_bound) {
10008 if (RTEST(ruby_verbose)) {
10009 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
10010 }
10011 *default_value = (size_t)val;
10012 return 1;
10013 }
10014 else {
10015 if (RTEST(ruby_verbose)) {
10016 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
10017 name, val, *default_value, lower_bound);
10018 }
10019 return 0;
10020 }
10021 }
10022 return 0;
10023}
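/* Editorial worked example (illustrative): RUBY_GC_MALLOC_LIMIT=16m parses
 * as val = 16 with unit = 1024*1024, giving 16777216 bytes. The value is
 * accepted only if it is positive and exceeds lower_bound; otherwise the
 * default is kept, with a note printed when $VERBOSE is on. */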
10024
10025static int
10026get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
10027{
10028 char *ptr = getenv(name);
10029 double val;
10030
10031 if (ptr != NULL && *ptr) {
10032 char *end;
10033 val = strtod(ptr, &end);
10034 if (!*ptr || *end) {
10035 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10036 return 0;
10037 }
10038
10039 if (accept_zero && val == 0.0) {
10040 goto accept;
10041 }
10042 else if (val <= lower_bound) {
10043 if (RTEST(ruby_verbose)) {
10044 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
10045 name, val, *default_value, lower_bound);
10046 }
10047 }
10048 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
10049 val > upper_bound) {
10050 if (RTEST(ruby_verbose)) {
10051 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
10052 name, val, *default_value, upper_bound);
10053 }
10054 }
10055 else {
10056 goto accept;
10057 }
10058 }
10059 return 0;
10060
10061 accept:
10062 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
10063 *default_value = val;
10064 return 1;
10065}
10066
10067static void
10068gc_set_initial_pages(void)
10069{
10070 size_t min_pages;
10071 rb_objspace_t *objspace = &rb_objspace;
10072
10073 min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
10074 if (min_pages > heap_eden->total_pages) {
10075 heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
10076 }
10077}
10078
10079/*
10080 * GC tuning environment variables
10081 *
10082 * * RUBY_GC_HEAP_INIT_SLOTS
10083 * - Initial allocation slots.
10084 * * RUBY_GC_HEAP_FREE_SLOTS
10085 * - Prepare at least this amount of slots after GC.
10086 * - Allocate slots if there are not enough slots.
10087 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
10088 * - Allocate slots by this factor.
10089 * - (next slots number) = (current slots number) * (this factor)
10090 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
10091 * - Allocation rate is limited to this number of slots.
10092 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
10093 * - Allocate additional pages when the number of free slots is
10094 * lower than the value (total_slots * (this ratio)).
10095 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
10096 * - Allocate slots to satisfy this formula:
10097 * free_slots = total_slots * goal_ratio
10098 * - In other words, prepare (total_slots * goal_ratio) free slots.
10099 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
10100 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
10101 * - Allow to free pages when the number of free slots is
10102 * greater than the value (total_slots * (this ratio)).
10103 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
10104 * - Do full GC when the number of old objects is more than R * N
10105 * where R is this factor and
10106 * N is the number of old objects just after last full GC.
10107 *
10108 * * obsolete
10109 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
10110 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
10111 *
10112 * * RUBY_GC_MALLOC_LIMIT
10113 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
10114 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
10115 *
10116 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
10117 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
10118 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
10119 */
10120
10121void
10122ruby_gc_set_params(void)
10123{
10124 /* RUBY_GC_HEAP_FREE_SLOTS */
10125 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
10126 /* ok */
10127 }
10128
10129 /* RUBY_GC_HEAP_INIT_SLOTS */
10130 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
10131 gc_set_initial_pages();
10132 }
10133
10134 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
10135 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
10136 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
10137 0.0, 1.0, FALSE);
10138 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
10139 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
10140 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
10141 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
10142 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
10143
10144 get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
10145 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
10146 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
10147 gc_params.malloc_limit_max = SIZE_MAX;
10148 }
10149 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
10150
10151#if RGENGC_ESTIMATE_OLDMALLOC
10152 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
10153 rb_objspace_t *objspace = &rb_objspace;
10154 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
10155 }
10156 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
10157 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
10158#endif
10159}
10160
10161static void
10162reachable_objects_from_callback(VALUE obj)
10163{
10164 rb_ractor_t *cr = GET_RACTOR();
10165 cr->mfd->mark_func(obj, cr->mfd->data);
10166}
10167
10168void
10169rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
10170{
10171 rb_objspace_t *objspace = &rb_objspace;
10172
10173 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
10174
10175 if (is_markable_object(objspace, obj)) {
10176 rb_ractor_t *cr = GET_RACTOR();
10177 struct gc_mark_func_data_struct mfd = {
10178 .mark_func = func,
10179 .data = data,
10180 }, *prev_mfd = cr->mfd;
10181
10182 cr->mfd = &mfd;
10183 gc_mark_children(objspace, obj);
10184 cr->mfd = prev_mfd;
10185 }
10186}
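/* Editorial usage sketch (callback name hypothetical): count the objects
 * directly reachable from obj:
 *
 *     static void count_child(VALUE child, void *data) { ++*(long *)data; }
 *
 *     long n = 0;
 *     rb_objspace_reachable_objects_from(obj, count_child, &n);
 *     // n now holds the number of objects obj directly references
 */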
10187
10188struct root_objects_data {
10189 const char *category;
10190 void (*func)(const char *category, VALUE, void *);
10191 void *data;
10192};
10193
10194static void
10195root_objects_from(VALUE obj, void *ptr)
10196{
10197 const struct root_objects_data *data = (struct root_objects_data *)ptr;
10198 (*data->func)(data->category, obj, data->data);
10199}
10200
10201void
10202rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
10203{
10204 rb_objspace_t *objspace = &rb_objspace;
10205 objspace_reachable_objects_from_root(objspace, func, passing_data);
10206}
10207
10208static void
10209objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
10210{
10211 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
10212
10213 rb_ractor_t *cr = GET_RACTOR();
10214 struct root_objects_data data = {
10215 .func = func,
10216 .data = passing_data,
10217 };
10218 struct gc_mark_func_data_struct mfd = {
10219 .mark_func = root_objects_from,
10220 .data = &data,
10221 }, *prev_mfd = cr->mfd;
10222
10223 cr->mfd = &mfd;
10224 gc_mark_roots(objspace, &data.category);
10225 cr->mfd = prev_mfd;
10226}
10227
10228/*
10229 ------------------------ Extended allocator ------------------------
10230*/
10231
10232struct gc_raise_tag {
10233 VALUE exc;
10234 const char *fmt;
10235 va_list *ap;
10236};
10237
10238static void *
10239gc_vraise(void *ptr)
10240{
10241 struct gc_raise_tag *argv = ptr;
10242 rb_vraise(argv->exc, argv->fmt, *argv->ap);
10243 UNREACHABLE_RETURN(NULL);
10244}
10245
10246static void
10247gc_raise(VALUE exc, const char *fmt, ...)
10248{
10249 va_list ap;
10250 va_start(ap, fmt);
10251 struct gc_raise_tag argv = {
10252 exc, fmt, &ap,
10253 };
10254
10255 if (ruby_thread_has_gvl_p()) {
10256 gc_vraise(&argv);
10257 UNREACHABLE;
10258 }
10259 else if (ruby_native_thread_p()) {
10260 rb_thread_call_with_gvl(gc_vraise, &argv);
10261 UNREACHABLE;
10262 }
10263 else {
10264 /* Not in a ruby thread */
10265 fprintf(stderr, "%s", "[FATAL] ");
10266 vfprintf(stderr, fmt, ap);
10267 }
10268
10269 va_end(ap);
10270 abort();
10271}
10272
10273static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
10274
10275static void
10276negative_size_allocation_error(const char *msg)
10277{
10278 gc_raise(rb_eNoMemError, "%s", msg);
10279}
10280
10281static void *
10282ruby_memerror_body(void *dummy)
10283{
10284 rb_memerror();
10285 return 0;
10286}
10287
10288NORETURN(static void ruby_memerror(void));
10289RBIMPL_ATTR_MAYBE_UNUSED()
10290static void
10291ruby_memerror(void)
10292{
10293 if (ruby_thread_has_gvl_p()) {
10294 rb_memerror();
10295 }
10296 else {
10297 if (ruby_native_thread_p()) {
10298 rb_thread_call_with_gvl(ruby_memerror_body, 0);
10299 }
10300 else {
10301 /* no ruby thread */
10302 fprintf(stderr, "[FATAL] failed to allocate memory\n");
10303 }
10304 }
10305 exit(EXIT_FAILURE);
10306}
10307
10308void
10309rb_memerror(void)
10310{
10311 rb_execution_context_t *ec = GET_EC();
10312 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
10313 VALUE exc;
10314
10315 if (0) {
10316 // Print out pid, sleep, so you can attach debugger to see what went wrong:
10317 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
10318 sleep(60);
10319 }
10320
10321 if (during_gc) {
10322 // TODO: OMG!! How to implement it?
10323 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
10324 }
10325
10326 exc = nomem_error;
10327 if (!exc ||
10328 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
10329 fprintf(stderr, "[FATAL] failed to allocate memory\n");
10330 exit(EXIT_FAILURE);
10331 }
10332 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
10333 rb_ec_raised_clear(ec);
10334 }
10335 else {
10336 rb_ec_raised_set(ec, RAISED_NOMEMORY);
10337 exc = ruby_vm_special_exception_copy(exc);
10338 }
10339 ec->errinfo = exc;
10340 EC_JUMP_TAG(ec, TAG_RAISE);
10341}
10342
10343void *
10344rb_aligned_malloc(size_t alignment, size_t size)
10345{
10346 void *res;
10347
10348#if defined __MINGW32__
10349 res = __mingw_aligned_malloc(size, alignment);
10350#elif defined _WIN32
10351 void *_aligned_malloc(size_t, size_t);
10352 res = _aligned_malloc(size, alignment);
10353#elif defined(HAVE_POSIX_MEMALIGN)
10354 if (posix_memalign(&res, alignment, size) == 0) {
10355 return res;
10356 }
10357 else {
10358 return NULL;
10359 }
10360#elif defined(HAVE_MEMALIGN)
10361 res = memalign(alignment, size);
10362#else
10363 char* aligned;
10364 res = malloc(alignment + size + sizeof(void*));
10365 aligned = (char*)res + alignment + sizeof(void*);
10366 aligned -= ((VALUE)aligned & (alignment - 1));
10367 ((void**)aligned)[-1] = res;
10368 res = (void*)aligned;
10369#endif
10370
10371 /* alignment must be a power of 2 */
10372 GC_ASSERT(((alignment - 1) & alignment) == 0);
10373 GC_ASSERT(alignment % sizeof(void*) == 0);
10374 return res;
10375}
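/* Editorial worked example for the generic fallback branch (illustrative
 * numbers): with alignment = 16 and size = 100, malloc() receives
 * 16+100+8 bytes; `aligned` is rounded down to a 16-byte boundary past the
 * stashed-pointer slot, and the raw malloc() result is stored at
 * aligned[-1] so rb_aligned_free() can recover and free it. */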
10376
10377static void
10378rb_aligned_free(void *ptr)
10379{
10380#if defined __MINGW32__
10381 __mingw_aligned_free(ptr);
10382#elif defined _WIN32
10383 _aligned_free(ptr);
10384#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
10385 free(ptr);
10386#else
10387 free(((void**)ptr)[-1]);
10388#endif
10389}
10390
10391static inline size_t
10392objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
10393{
10394#ifdef HAVE_MALLOC_USABLE_SIZE
10395 return malloc_usable_size(ptr);
10396#else
10397 return hint;
10398#endif
10399}
10400
10401enum memop_type {
10402 MEMOP_TYPE_MALLOC = 0,
10403 MEMOP_TYPE_FREE,
10404 MEMOP_TYPE_REALLOC
10405};
10406
10407static inline void
10408atomic_sub_nounderflow(size_t *var, size_t sub)
10409{
10410 if (sub == 0) return;
10411
10412 while (1) {
10413 size_t val = *var;
10414 if (val < sub) sub = val;
10415 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
10416 }
10417}
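/* Editorial note: the clamp makes the subtraction saturate at zero, e.g.
 * *var == 10 with sub == 25 stores 0 rather than wrapping around; if
 * another thread changes *var between the read and the CAS, the
 * compare-and-swap fails and the loop simply retries. */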
10418
10419static void
10420objspace_malloc_gc_stress(rb_objspace_t *objspace)
10421{
10422 if (ruby_gc_stressful && ruby_native_thread_p()) {
10423 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
10424 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
10425
10426 if (gc_stress_full_mark_after_malloc_p()) {
10427 reason |= GPR_FLAG_FULL_MARK;
10428 }
10429 garbage_collect_with_gvl(objspace, reason);
10430 }
10431}
10432
10433static void
10434objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
10435{
10436 if (new_size > old_size) {
10437 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
10438#if RGENGC_ESTIMATE_OLDMALLOC
10439 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
10440#endif
10441 }
10442 else {
10443 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
10444#if RGENGC_ESTIMATE_OLDMALLOC
10445 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
10446#endif
10447 }
10448
10449 if (type == MEMOP_TYPE_MALLOC) {
10450 retry:
10451 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
10452 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
10453 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
10454 goto retry;
10455 }
10456 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
10457 }
10458 }
10459
10460#if MALLOC_ALLOCATED_SIZE
10461 if (new_size >= old_size) {
10462 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
10463 }
10464 else {
10465 size_t dec_size = old_size - new_size;
10466 size_t allocated_size = objspace->malloc_params.allocated_size;
10467
10468#if MALLOC_ALLOCATED_SIZE_CHECK
10469 if (allocated_size < dec_size) {
10470 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
10471 }
10472#endif
10473 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
10474 }
10475
10476 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
10477 mem,
10478 type == MEMOP_TYPE_MALLOC ? "malloc" :
10479 type == MEMOP_TYPE_FREE ? "free " :
10480 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
10481 new_size, old_size);
10482
10483 switch (type) {
10484 case MEMOP_TYPE_MALLOC:
10485 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
10486 break;
10487 case MEMOP_TYPE_FREE:
10488 {
10489 size_t allocations = objspace->malloc_params.allocations;
10490 if (allocations > 0) {
10491 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
10492 }
10493#if MALLOC_ALLOCATED_SIZE_CHECK
10494 else {
10495 GC_ASSERT(objspace->malloc_params.allocations > 0);
10496 }
10497#endif
10498 }
10499 break;
10500 case MEMOP_TYPE_REALLOC: /* ignore */ break;
10501 }
10502#endif
10503}
10504
10505struct malloc_obj_info { /* 4 words */
10506 size_t size;
10507#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10508 size_t gen;
10509 const char *file;
10510 size_t line;
10511#endif
10512};
10513
10514#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10515const char *ruby_malloc_info_file;
10516int ruby_malloc_info_line;
10517#endif
10518
10519static inline size_t
10520objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
10521{
10522 if (size == 0) size = 1;
10523
10524#if CALC_EXACT_MALLOC_SIZE
10525 size += sizeof(struct malloc_obj_info);
10526#endif
10527
10528 return size;
10529}
10530
10531static inline void *
10532objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
10533{
10534 size = objspace_malloc_size(objspace, mem, size);
10535 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
10536
10537#if CALC_EXACT_MALLOC_SIZE
10538 {
10539 struct malloc_obj_info *info = mem;
10540 info->size = size;
10541#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10542 info->gen = objspace->profile.count;
10543 info->file = ruby_malloc_info_file;
10544 info->line = info->file ? ruby_malloc_info_line : 0;
10545#endif
10546 mem = info + 1;
10547 }
10548#endif
10549
10550 return mem;
10551}
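/* Editorial sketch of the CALC_EXACT_MALLOC_SIZE layout assumed above:
 *
 *     [struct malloc_obj_info | user data ...]
 *     ^ raw malloc() result    ^ pointer returned to caller (info + 1)
 *
 * objspace_xfree() and objspace_xrealloc() step back one header with
 * (info - 1)-style arithmetic to recover the recorded size. */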
10552
10553#if defined(__GNUC__) && RUBY_DEBUG
10554#define RB_BUG_INSTEAD_OF_RB_MEMERROR
10555#endif
10556
10557#ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
10558#define TRY_WITH_GC(siz, expr) do { \
10559 const gc_profile_record_flag gpr = \
10560 GPR_FLAG_FULL_MARK | \
10561 GPR_FLAG_IMMEDIATE_MARK | \
10562 GPR_FLAG_IMMEDIATE_SWEEP | \
10563 GPR_FLAG_MALLOC; \
10564 objspace_malloc_gc_stress(objspace); \
10565 \
10566 if (LIKELY((expr))) { \
10567 /* Success on 1st try */ \
10568 } \
10569 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
10570 /* @shyouhei thinks this doesn't happen */ \
10571 rb_bug("TRY_WITH_GC: could not GC"); \
10572 } \
10573 else if ((expr)) { \
10574 /* Success on 2nd try */ \
10575 } \
10576 else { \
10577 rb_bug("TRY_WITH_GC: could not allocate:" \
10578 "%"PRIdSIZE" bytes for %s", \
10579 siz, # expr); \
10580 } \
10581 } while (0)
10582#else
10583#define TRY_WITH_GC(siz, alloc) do { \
10584 objspace_malloc_gc_stress(objspace); \
10585 if (!(alloc) && \
10586 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
10587 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
10588 GPR_FLAG_MALLOC) || \
10589 !(alloc))) { \
10590 ruby_memerror(); \
10591 } \
10592 } while (0)
10593#endif
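/* Editorial sketch of the allocate-GC-retry pattern both TRY_WITH_GC
 * variants encode (simplified control flow, not the actual expansion):
 *
 *     mem = malloc(size);
 *     if (mem == NULL) {
 *         if (!garbage_collect_with_gvl(objspace, ...)) die();
 *         mem = malloc(size);            // one retry after a full GC
 *         if (mem == NULL) die();        // then report out-of-memory
 *     }
 */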
10594
10595/* these shouldn't be called directly.
10596 * objspace_* functions do not check allocation size.
10597 */
10598static void *
10599objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
10600{
10601 void *mem;
10602
10603 size = objspace_malloc_prepare(objspace, size);
10604 TRY_WITH_GC(size, mem = malloc(size));
10605 RB_DEBUG_COUNTER_INC(heap_xmalloc);
10606 return objspace_malloc_fixup(objspace, mem, size);
10607}
10608
10609static inline size_t
10610xmalloc2_size(const size_t count, const size_t elsize)
10611{
10612 return size_mul_or_raise(count, elsize, rb_eArgError);
10613}
10614
10615static void *
10616objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
10617{
10618 void *mem;
10619
10620 if (!ptr) return objspace_xmalloc0(objspace, new_size);
10621
10622 /*
10623 * The behavior of realloc(ptr, 0) is implementation defined.
10624 * Therefore we don't use realloc(ptr, 0), for portability reasons.
10625 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
10626 */
10627 if (new_size == 0) {
10628 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
10629 /*
10630 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
10631 * returns a non-NULL pointer to an access-protected memory page.
10632 * The returned pointer cannot be read / written at all, but
10633 * still be a valid argument of free().
10634 *
10635 * https://man.openbsd.org/malloc.3
10636 *
10637 * - Linux's malloc(3) man page says that it _might_ perhaps return
10638 * a non-NULL pointer when its argument is 0. That return value
10639 * is safe (and is expected) to be passed to free().
10640 *
10641 * http://man7.org/linux/man-pages/man3/malloc.3.html
10642 *
10643 * - As I read the implementation, jemalloc's malloc() returns a
10644 * fully normal 16-byte memory region when its argument is 0.
10645 *
10646 * - As I read the implementation, musl libc's malloc() returns a
10647 * fully normal 32-byte memory region when its argument is 0.
10648 *
10649 * - Other malloc implementations can also return non-NULL.
10650 */
10651 objspace_xfree(objspace, ptr, old_size);
10652 return mem;
10653 }
10654 else {
10655 /*
10656 * It is dangerous to return NULL here, because that could lead to
10657 * RCE. Fallback to 1 byte instead of zero.
10658 *
10659 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
10660 */
10661 new_size = 1;
10662 }
10663 }
10664
10665#if CALC_EXACT_MALLOC_SIZE
10666 {
10667 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10668 new_size += sizeof(struct malloc_obj_info);
10669 ptr = info;
10670 old_size = info->size;
10671 }
10672#endif
10673
10674 old_size = objspace_malloc_size(objspace, ptr, old_size);
10675 TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
10676 new_size = objspace_malloc_size(objspace, mem, new_size);
10677
10678#if CALC_EXACT_MALLOC_SIZE
10679 {
10680 struct malloc_obj_info *info = mem;
10681 info->size = new_size;
10682 mem = info + 1;
10683 }
10684#endif
10685
10686 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
10687
10688 RB_DEBUG_COUNTER_INC(heap_xrealloc);
10689 return mem;
10690}
10691
10692#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
10693
10694#define MALLOC_INFO_GEN_SIZE 100
10695#define MALLOC_INFO_SIZE_SIZE 10
10696static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
10697static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
10698static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
10699static st_table *malloc_info_file_table;
10700
10701static int
10702mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
10703{
10704 const char *file = (void *)key;
10705 const size_t *data = (void *)val;
10706
10707 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
10708
10709 return ST_CONTINUE;
10710}
10711
10712__attribute__((destructor))
10713void
10714ruby_malloc_info_show_results(void)
10715{
10716 int i;
10717
10718 fprintf(stderr, "* malloc_info gen statistics\n");
10719 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
10720 if (i == MALLOC_INFO_GEN_SIZE-1) {
10721 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
10722 }
10723 else {
10724 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
10725 }
10726 }
10727
10728 fprintf(stderr, "* malloc_info size statistics\n");
10729 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
10730 int s = 16 << i;
10731 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
10732 }
10733 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
10734
10735 if (malloc_info_file_table) {
10736 fprintf(stderr, "* malloc_info file statistics\n");
10737 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
10738 }
10739}
10740#else
10741void
10742ruby_malloc_info_show_results(void)
10743{
10744}
10745#endif
10746
10747static void
10748objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
10749{
10750 if (!ptr) {
10751 /*
10752 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
10753 * its first version. We had better follow suit.
10754 */
10755 return;
10756 }
10757#if CALC_EXACT_MALLOC_SIZE
10758 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10759 ptr = info;
10760 old_size = info->size;
10761
10762#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10763 {
10764 int gen = (int)(objspace->profile.count - info->gen);
10765 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
10766 int i;
10767
10768 malloc_info_gen_cnt[gen_index]++;
10769 malloc_info_gen_size[gen_index] += info->size;
10770
10771 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
10772 size_t s = 16 << i;
10773 if (info->size <= s) {
10774 malloc_info_size[i]++;
10775 goto found;
10776 }
10777 }
10778 malloc_info_size[i]++;
10779 found:;
10780
10781 {
10782 st_data_t key = (st_data_t)info->file, d;
10783 size_t *data;
10784
10785 if (malloc_info_file_table == NULL) {
10786 malloc_info_file_table = st_init_numtable_with_size(1024);
10787 }
10788 if (st_lookup(malloc_info_file_table, key, &d)) {
10789 /* hit */
10790 data = (size_t *)d;
10791 }
10792 else {
10793 data = malloc(xmalloc2_size(2, sizeof(size_t)));
10794 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
10795 data[0] = data[1] = 0;
10796 st_insert(malloc_info_file_table, key, (st_data_t)data);
10797 }
10798 data[0] ++;
10799 data[1] += info->size;
10800 };
10801 if (0 && gen >= 2) { /* verbose output */
10802 if (info->file) {
10803 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
10804 info->size, gen, info->file, info->line);
10805 }
10806 else {
10807 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
10808 info->size, gen);
10809 }
10810 }
10811 }
10812#endif
10813#endif
10814 old_size = objspace_malloc_size(objspace, ptr, old_size);
10815
10816 free(ptr);
10817 RB_DEBUG_COUNTER_INC(heap_xfree);
10818
10819 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
10820}
10821
10822static void *
10823ruby_xmalloc0(size_t size)
10824{
10825 return objspace_xmalloc0(&rb_objspace, size);
10826}
10827
10828void *
10829ruby_xmalloc_body(size_t size)
10830{
10831 if ((ssize_t)size < 0) {
10832 negative_size_allocation_error("too large allocation size");
10833 }
10834 return ruby_xmalloc0(size);
10835}
10836
10837void
10838ruby_malloc_size_overflow(size_t count, size_t elsize)
10839{
10840 rb_raise(rb_eArgError,
10839{
10841 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
10842 count, elsize);
10843}
10844
10845void *
10846ruby_xmalloc2_body(size_t n, size_t size)
10847{
10848 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
10849}
10850
10851static void *
10852objspace_xcalloc(rb_objspace_t *objspace, size_t size)
10853{
10854 void *mem;
10855
10856 size = objspace_malloc_prepare(objspace, size);
10857 TRY_WITH_GC(size, mem = calloc1(size));
10858 return objspace_malloc_fixup(objspace, mem, size);
10859}
10860
10861void *
10862ruby_xcalloc_body(size_t n, size_t size)
10863{
10864 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
10865}
10866
10867#ifdef ruby_sized_xrealloc
10868#undef ruby_sized_xrealloc
10869#endif
10870void *
10871ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
10872{
10873 if ((ssize_t)new_size < 0) {
10874 negative_size_allocation_error("too large allocation size");
10875 }
10876
10877 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
10878}
10879
10880void *
10881ruby_xrealloc_body(void *ptr, size_t new_size)
10882{
10883 return ruby_sized_xrealloc(ptr, new_size, 0);
10884}
10885
10886#ifdef ruby_sized_xrealloc2
10887#undef ruby_sized_xrealloc2
10888#endif
10889void *
10890ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
10891{
10892 size_t len = xmalloc2_size(n, size);
10893 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
10894}
10895
10896void *
10897ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
10898{
10899 return ruby_sized_xrealloc2(ptr, n, size, 0);
10900}
10901
10902#ifdef ruby_sized_xfree
10903#undef ruby_sized_xfree
10904#endif
10905void
10906ruby_sized_xfree(void *x, size_t size)
10907{
10908 if (x) {
10909 objspace_xfree(&rb_objspace, x, size);
10910 }
10911}
10912
10913void
10914ruby_xfree(void *x)
10915{
10916 ruby_sized_xfree(x, 0);
10917}
10918
10919void *
10920rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
10921{
10922 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10923 return ruby_xmalloc(w);
10924}
10925
10926void *
10927rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
10928{
10929 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10930 return ruby_xrealloc((void *)p, w);
10931}
10932
10933void *
10934rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
10935{
10936 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10937 return ruby_xmalloc(u);
10938}
10939
10940void *
10941rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
10942{
10943 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10944 return ruby_xcalloc(u, 1);
10945}
10946
10947/* Mimic ruby_xmalloc, but does not need rb_objspace.
10948 * It should return a pointer suitable for ruby_xfree.
10949 */
10950void *
10951ruby_mimmalloc(size_t size)
10952{
10953 void *mem;
10954#if CALC_EXACT_MALLOC_SIZE
10955 size += sizeof(struct malloc_obj_info);
10956#endif
10957 mem = malloc(size);
10958#if CALC_EXACT_MALLOC_SIZE
10959 if (!mem) {
10960 return NULL;
10961 }
10962 else
10963 /* set 0 for consistency of allocated_size/allocations */
10964 {
10965 struct malloc_obj_info *info = mem;
10966 info->size = 0;
10967#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10968 info->gen = 0;
10969 info->file = NULL;
10970 info->line = 0;
10971#endif
10972 mem = info + 1;
10973 }
10974#endif
10975 return mem;
10976}
10977
10978void
10979ruby_mimfree(void *ptr)
10980{
10981#if CALC_EXACT_MALLOC_SIZE
10982 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10983 ptr = info;
10984#endif
10985 free(ptr);
10986}
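
/*
 * Editorial sketch (not part of gc.c) of the header-prefix pattern
 * ruby_mimmalloc/ruby_mimfree use when CALC_EXACT_MALLOC_SIZE is set:
 * over-allocate by one bookkeeping struct, fill it in, and hand the caller
 * the address just past it; free() steps back to the real block.
 * Names here are illustrative, not the gc.c ones.
 */
#include <stdlib.h>

struct hdr { size_t size; };

static void *
tracked_malloc(size_t size)
{
    struct hdr *h = malloc(sizeof(*h) + size);
    if (!h) return NULL;
    h->size = size;    /* bookkeeping lives just in front of the payload */
    return h + 1;      /* caller only ever sees the payload */
}

static void
tracked_free(void *ptr)
{
    if (ptr) free((struct hdr *)ptr - 1); /* recover the real block */
}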
10987
10988void *
10989rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
10990{
10991 void *ptr;
10992 VALUE imemo;
10993 rb_imemo_tmpbuf_t *tmpbuf;
10994
10995 /* Keep the order: allocate an empty imemo first, then xmalloc, to
10996 * avoid a potential memory leak */
10997 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
10998 *store = imemo;
10999 ptr = ruby_xmalloc0(size);
11000 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
11001 tmpbuf->ptr = ptr;
11002 tmpbuf->cnt = cnt;
11003 return ptr;
11004}
11005
11006void *
11007rb_alloc_tmp_buffer(volatile VALUE *store, long len)
11008{
11009 long cnt;
11010
11011 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
11012 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
11013 }
11014
11015 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
11016}
11017
11018void
11019rb_free_tmp_buffer(volatile VALUE *store)
11020{
11021 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
11022 if (s) {
11023 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
11024 s->cnt = 0;
11025 ruby_xfree(ptr);
11026 }
11027}
11028
11029#if MALLOC_ALLOCATED_SIZE
11030/*
11031 * call-seq:
11032 * GC.malloc_allocated_size -> Integer
11033 *
11034 * Returns the size of memory allocated by malloc().
11035 *
11036 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
11037 */
11038
11039static VALUE
11040gc_malloc_allocated_size(VALUE self)
11041{
11042 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
11043}
11044
11045/*
11046 * call-seq:
11047 * GC.malloc_allocations -> Integer
11048 *
11049 * Returns the number of malloc() allocations.
11050 *
11051 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
11052 */
11053
11054static VALUE
11055gc_malloc_allocations(VALUE self)
11056{
11057 return UINT2NUM(rb_objspace.malloc_params.allocations);
11058}
11059#endif
11060
11061void
11062rb_gc_adjust_memory_usage(ssize_t diff)
11063{
11064 rb_objspace_t *objspace = &rb_objspace;
11065 if (diff > 0) {
11066 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
11067 }
11068 else if (diff < 0) {
11069 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
11070 }
11071}
11072
11073/*
11074 ------------------------------ WeakMap ------------------------------
11075*/
11076
11077struct weakmap {
11078 st_table *obj2wmap; /* obj -> [ref,...] */
11079 st_table *wmap2obj; /* ref -> obj */
11080 VALUE final;
11081};
11082
11083#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
11084
11085#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11086static int
11087wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
11088{
11089 rb_objspace_t *objspace = (rb_objspace_t *)arg;
11090 VALUE obj = (VALUE)val;
11091 if (!is_live_object(objspace, obj)) return ST_DELETE;
11092 return ST_CONTINUE;
11093}
11094#endif
11095
11096static void
11097wmap_compact(void *ptr)
11098{
11099 struct weakmap *w = ptr;
11100 if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
11101 if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
11102 w->final = rb_gc_location(w->final);
11103}
11104
11105static void
11106wmap_mark(void *ptr)
11107{
11108 struct weakmap *w = ptr;
11109#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11110 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
11111#endif
11112 rb_gc_mark_movable(w->final);
11113}
11114
11115static int
11116wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
11117{
11118 VALUE *ptr = (VALUE *)val;
11119 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
11120 return ST_CONTINUE;
11121}
11122
11123static void
11124wmap_free(void *ptr)
11125{
11126 struct weakmap *w = ptr;
11127 st_foreach(w->obj2wmap, wmap_free_map, 0);
11128 st_free_table(w->obj2wmap);
11129 st_free_table(w->wmap2obj);
11130}
11131
11132static int
11133wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
11134{
11135 VALUE *ptr = (VALUE *)val;
11136 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
11137 return ST_CONTINUE;
11138}
11139
11140static size_t
11141wmap_memsize(const void *ptr)
11142{
11143 size_t size;
11144 const struct weakmap *w = ptr;
11145 size = sizeof(*w);
11146 size += st_memsize(w->obj2wmap);
11147 size += st_memsize(w->wmap2obj);
11148 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
11149 return size;
11150}
11151
11152static const rb_data_type_t weakmap_type = {
11153 "weakmap",
11154 {
11155 wmap_mark,
11156 wmap_free,
11157 wmap_memsize,
11158 wmap_compact,
11159 },
11160 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
11161};
11162
11163static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
11164
11165static VALUE
11166wmap_allocate(VALUE klass)
11167{
11168 struct weakmap *w;
11169 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
11170 w->obj2wmap = rb_init_identtable();
11171 w->wmap2obj = rb_init_identtable();
11172 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
11173 return obj;
11174}
11175
11176static int
11177wmap_live_p(rb_objspace_t *objspace, VALUE obj)
11178{
11179 if (SPECIAL_CONST_P(obj)) return TRUE;
11180 if (is_pointer_to_heap(objspace, (void *)obj)) {
11181 void *poisoned = asan_unpoison_object_temporary(obj);
11182
11183 enum ruby_value_type t = BUILTIN_TYPE(obj);
11184 int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
11185 is_live_object(objspace, obj));
11186
11187 if (poisoned) {
11188 asan_poison_object(obj);
11189 }
11190
11191 return ret;
11192 }
11193 return TRUE;
11194}
11195
11196static int
11197wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
11198{
11199 VALUE wmap, *ptr, size, i, j;
11200 if (!existing) return ST_STOP;
11201 wmap = (VALUE)arg, ptr = (VALUE *)*value;
11202 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
11203 if (ptr[i] != wmap) {
11204 ptr[j++] = ptr[i];
11205 }
11206 }
11207 if (j == 1) {
11208 ruby_sized_xfree(ptr, i * sizeof(VALUE));
11209 return ST_DELETE;
11210 }
11211 if (j < i) {
11212 SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
11213 ptr[0] = j;
11214 *value = (st_data_t)ptr;
11215 }
11216 return ST_CONTINUE;
11217}
11218
11219/* :nodoc: */
11220static VALUE
11221wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
11222{
11223 st_data_t orig, wmap, data;
11224 VALUE obj, *rids, i, size;
11225 struct weakmap *w;
11226
11227 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11228 /* Get reference from object id. */
11229 if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
11230 rb_bug("wmap_finalize: objid is not found.");
11231 }
11232
11233 /* obj is original referenced object and/or weak reference. */
11234 orig = (st_data_t)obj;
11235 if (st_delete(w->obj2wmap, &orig, &data)) {
11236 rids = (VALUE *)data;
11237 size = *rids++;
11238 for (i = 0; i < size; ++i) {
11239 wmap = (st_data_t)rids[i];
11240 st_delete(w->wmap2obj, &wmap, NULL);
11241 }
11242 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
11243 }
11244
11245 wmap = (st_data_t)obj;
11246 if (st_delete(w->wmap2obj, &wmap, &orig)) {
11247 wmap = (st_data_t)obj;
11248 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
11249 }
11250 return self;
11251}
11252
11253struct wmap_iter_arg {
11254 rb_objspace_t *objspace;
11255 VALUE value;
11256};
11257
11258static VALUE
11259wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
11260{
11261 if (SPECIAL_CONST_P(obj)) {
11262 return rb_str_append(str, rb_inspect(obj));
11263 }
11264 else if (wmap_live_p(objspace, obj)) {
11265 return rb_str_append(str, rb_any_to_s(obj));
11266 }
11267 else {
11268 return rb_str_catf(str, "#<collected:%p>", (void*)obj);
11269 }
11270}
11271
11272static int
11273wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
11274{
11275 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
11276 rb_objspace_t *objspace = argp->objspace;
11277 VALUE str = argp->value;
11278 VALUE k = (VALUE)key, v = (VALUE)val;
11279
11280 if (RSTRING_PTR(str)[0] == '#') {
11281 rb_str_cat2(str, ", ");
11282 }
11283 else {
11284 rb_str_cat2(str, ": ");
11285 RSTRING_PTR(str)[0] = '#';
11286 }
11287 wmap_inspect_append(objspace, str, k);
11288 rb_str_cat2(str, " => ");
11289 wmap_inspect_append(objspace, str, v);
11290
11291 return ST_CONTINUE;
11292}
11293
11294static VALUE
11295wmap_inspect(VALUE self)
11296{
11297 VALUE str;
11298 VALUE c = rb_class_name(CLASS_OF(self));
11299 struct weakmap *w;
11300 struct wmap_iter_arg args;
11301
11302 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11303 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
11304 if (w->wmap2obj) {
11305 args.objspace = &rb_objspace;
11306 args.value = str;
11307 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
11308 }
11309 RSTRING_PTR(str)[0] = '#';
11310 rb_str_cat2(str, ">");
11311 return str;
11312}
11313
11314static int
11315wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
11316{
11317 rb_objspace_t *objspace = (rb_objspace_t *)arg;
11318 VALUE obj = (VALUE)val;
11319 if (wmap_live_p(objspace, obj)) {
11320 rb_yield_values(2, (VALUE)key, obj);
11321 }
11322 return ST_CONTINUE;
11323}
11324
11325/* Iterates over keys and values in a weakly referenced object */
11326static VALUE
11327wmap_each(VALUE self)
11328{
11329 struct weakmap *w;
11330 rb_objspace_t *objspace = &rb_objspace;
11331
11332 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11333 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
11334 return self;
11335}
11336
11337static int
11338wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
11339{
11340 rb_objspace_t *objspace = (rb_objspace_t *)arg;
11341 VALUE obj = (VALUE)val;
11342 if (wmap_live_p(objspace, obj)) {
11343 rb_yield((VALUE)key);
11344 }
11345 return ST_CONTINUE;
11346}
11347
11348/* Iterates over keys in a weakly referenced object */
11349static VALUE
11350wmap_each_key(VALUE self)
11351{
11352 struct weakmap *w;
11353 rb_objspace_t *objspace = &rb_objspace;
11354
11355 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11356 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
11357 return self;
11358}
11359
11360static int
11361wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
11362{
11363 rb_objspace_t *objspace = (rb_objspace_t *)arg;
11364 VALUE obj = (VALUE)val;
11365 if (wmap_live_p(objspace, obj)) {
11366 rb_yield(obj);
11367 }
11368 return ST_CONTINUE;
11369}
11370
11371/* Iterates over values in a weakly referenced object */
11372static VALUE
11373wmap_each_value(VALUE self)
11374{
11375 struct weakmap *w;
11376 rb_objspace_t *objspace = &rb_objspace;
11377
11378 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11379 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
11380 return self;
11381}
11382
11383static int
11384wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
11385{
11386 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
11387 rb_objspace_t *objspace = argp->objspace;
11388 VALUE ary = argp->value;
11389 VALUE obj = (VALUE)val;
11390 if (wmap_live_p(objspace, obj)) {
11391 rb_ary_push(ary, (VALUE)key);
11392 }
11393 return ST_CONTINUE;
11394}
11395
11396/* Returns an array of keys in a weakly referenced object */
11397static VALUE
11398wmap_keys(VALUE self)
11399{
11400 struct weakmap *w;
11401 struct wmap_iter_arg args;
11402
11403 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11404 args.objspace = &rb_objspace;
11405 args.value = rb_ary_new();
11406 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
11407 return args.value;
11408}
11409
11410static int
11411wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
11412{
11413 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
11414 rb_objspace_t *objspace = argp->objspace;
11415 VALUE ary = argp->value;
11416 VALUE obj = (VALUE)val;
11417 if (wmap_live_p(objspace, obj)) {
11418 rb_ary_push(ary, obj);
11419 }
11420 return ST_CONTINUE;
11421}
11422
11423/* Returns an array of values in a weakly referenced object */
11424static VALUE
11425wmap_values(VALUE self)
11426{
11427 struct weakmap *w;
11428 struct wmap_iter_arg args;
11429
11430 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11431 args.objspace = &rb_objspace;
11432 args.value = rb_ary_new();
11433 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
11434 return args.value;
11435}
11436
11437static int
11438wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
11439{
11440 VALUE size, *ptr, *optr;
11441 if (existing) {
11442 size = (ptr = optr = (VALUE *)*val)[0];
11443 ++size;
11444 SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
11445 }
11446 else {
11447 optr = 0;
11448 size = 1;
11449 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
11450 }
11451 ptr[0] = size;
11452 ptr[size] = (VALUE)arg;
11453 if (ptr == optr) return ST_STOP;
11454 *val = (st_data_t)ptr;
11455 return ST_CONTINUE;
11456}
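
/*
 * Editorial sketch (not part of gc.c) of the counted-array layout that
 * wmap_aset_update maintains: slot 0 stores the element count and the
 * payload starts at slot 1, so a single realloc grows count and data
 * together. Plain size_t stands in for VALUE here.
 */
#include <stdlib.h>

static size_t *
counted_append(size_t *arr, size_t elem)
{
    size_t n = arr ? arr[0] + 1 : 1;
    size_t *p = realloc(arr, (n + 1) * sizeof(*p));
    if (!p) return NULL;  /* caller keeps the old block on failure */
    p[0] = n;             /* slot 0: element count */
    p[n] = elem;          /* payload occupies slots 1..n */
    return p;
}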
11457
11458/* Creates a weak reference from the given key to the given value */
11459static VALUE
11460wmap_aset(VALUE self, VALUE key, VALUE value)
11461{
11462 struct weakmap *w;
11463
11464 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11465 if (FL_ABLE(value)) {
11466 define_final0(value, w->final);
11467 }
11468 if (FL_ABLE(key)) {
11469 define_final0(key, w->final);
11470 }
11471
11472 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
11473 st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
11474 return nonspecial_obj_id(value);
11475}
11476
11477/* Retrieves a weakly referenced object with the given key */
11478static VALUE
11479wmap_lookup(VALUE self, VALUE key)
11480{
11481 st_data_t data;
11482 VALUE obj;
11483 struct weakmap *w;
11484 rb_objspace_t *objspace = &rb_objspace;
11485
11486 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11487 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
11488 obj = (VALUE)data;
11489 if (!wmap_live_p(objspace, obj)) return Qundef;
11490 return obj;
11491}
11492
11493/* Retrieves a weakly referenced object with the given key */
11494static VALUE
11495wmap_aref(VALUE self, VALUE key)
11496{
11497 VALUE obj = wmap_lookup(self, key);
11498 return obj != Qundef ? obj : Qnil;
11499}
11500
11501/* Returns +true+ if +key+ is registered */
11502static VALUE
11503wmap_has_key(VALUE self, VALUE key)
11504{
11505 return wmap_lookup(self, key) == Qundef ? Qfalse : Qtrue;
11506}
11507
11508/* Returns the number of referenced objects */
11509static VALUE
11510wmap_size(VALUE self)
11511{
11512 struct weakmap *w;
11513 st_index_t n;
11514
11515 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
11516 n = w->wmap2obj->num_entries;
11517#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
11518 return ULONG2NUM(n);
11519#else
11520 return ULL2NUM(n);
11521#endif
11522}
11523
11524/*
11525 ------------------------------ GC profiler ------------------------------
11526*/
11527
11528#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
11529
11530/* return sec in user time */
11531static double
11532getrusage_time(void)
11533{
11534#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
11535 {
11536 static int try_clock_gettime = 1;
11537 struct timespec ts;
11538 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
11539 return ts.tv_sec + ts.tv_nsec * 1e-9;
11540 }
11541 else {
11542 try_clock_gettime = 0;
11543 }
11544 }
11545#endif
11546
11547#ifdef RUSAGE_SELF
11548 {
11549 struct rusage usage;
11550 struct timeval time;
11551 if (getrusage(RUSAGE_SELF, &usage) == 0) {
11552 time = usage.ru_utime;
11553 return time.tv_sec + time.tv_usec * 1e-6;
11554 }
11555 }
11556#endif
11557
11558#ifdef _WIN32
11559 {
11560 FILETIME creation_time, exit_time, kernel_time, user_time;
11561 ULARGE_INTEGER ui;
11562 LONG_LONG q;
11563 double t;
11564
11565 if (GetProcessTimes(GetCurrentProcess(),
11566 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
11567 memcpy(&ui, &user_time, sizeof(FILETIME));
11568 q = ui.QuadPart / 10L;
11569 t = (DWORD)(q % 1000000L) * 1e-6;
11570 q /= 1000000L;
11571#ifdef __GNUC__
11572 t += q;
11573#else
11574 t += (double)(DWORD)(q >> 16) * (1 << 16);
11575 t += (DWORD)q & ~(~0 << 16);
11576#endif
11577 return t;
11578 }
11579 }
11580#endif
11581
11582 return 0.0;
11583}
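
/*
 * Editorial sketch (not part of gc.c) of the same fallback strategy as
 * getrusage_time, on POSIX only: prefer the per-process CPU-time clock and
 * drop to getrusage() when it is unavailable. (gc.c above additionally
 * covers Windows via GetProcessTimes.)
 */
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>

static double
cpu_seconds(void)
{
#if defined(CLOCK_PROCESS_CPUTIME_ID)
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0)
        return ts.tv_sec + ts.tv_nsec * 1e-9;
#endif
    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) == 0)
        return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
    return 0.0; /* no usable clock on this platform */
}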
11584
11585static inline void
11586gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
11587{
11588 if (objspace->profile.run) {
11589 size_t index = objspace->profile.next_index;
11590 gc_profile_record *record;
11591
11592 /* create new record */
11593 objspace->profile.next_index++;
11594
11595 if (!objspace->profile.records) {
11596 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
11597 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
11598 }
11599 if (index >= objspace->profile.size) {
11600 void *ptr;
11601 objspace->profile.size += 1000;
11602 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
11603 if (!ptr) rb_memerror();
11604 objspace->profile.records = ptr;
11605 }
11606 if (!objspace->profile.records) {
11607 rb_bug("gc_profile malloc or realloc miss");
11608 }
11609 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
11610 MEMZERO(record, gc_profile_record, 1);
11611
11612 /* setup before-GC parameter */
11613 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
11614#if MALLOC_ALLOCATED_SIZE
11615 record->allocated_size = malloc_allocated_size;
11616#endif
11617#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
11618#ifdef RUSAGE_SELF
11619 {
11620 struct rusage usage;
11621 if (getrusage(RUSAGE_SELF, &usage) == 0) {
11622 record->maxrss = usage.ru_maxrss;
11623 record->minflt = usage.ru_minflt;
11624 record->majflt = usage.ru_majflt;
11625 }
11626 }
11627#endif
11628#endif
11629 }
11630}
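
/*
 * Editorial sketch (not part of gc.c) of the grow-by-fixed-chunk strategy
 * gc_prof_setup_new_record uses for its record array: extend the capacity
 * by a 1000-entry chunk whenever the next index would run off the end,
 * keeping realloc traffic proportional to the records actually kept.
 */
#include <stdlib.h>

#define PROF_CHUNK 1000

static void *
ensure_record_capacity(void *records, size_t elem_size,
                       size_t *capa, size_t index)
{
    if (index >= *capa) {
        size_t new_capa = *capa + PROF_CHUNK;
        void *p = realloc(records, new_capa * elem_size);
        if (!p) return NULL;  /* gc.c treats this as fatal (rb_memerror) */
        *capa = new_capa;
        records = p;
    }
    return records;
}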
11631
11632static inline void
11633gc_prof_timer_start(rb_objspace_t *objspace)
11634{
11635 if (gc_prof_enabled(objspace)) {
11636 gc_profile_record *record = gc_prof_record(objspace);
11637#if GC_PROFILE_MORE_DETAIL
11638 record->prepare_time = objspace->profile.prepare_time;
11639#endif
11640 record->gc_time = 0;
11641 record->gc_invoke_time = getrusage_time();
11642 }
11643}
11644
11645static double
11646elapsed_time_from(double time)
11647{
11648 double now = getrusage_time();
11649 if (now > time) {
11650 return now - time;
11651 }
11652 else {
11653 return 0;
11654 }
11655}
11656
11657static inline void
11658gc_prof_timer_stop(rb_objspace_t *objspace)
11659{
11660 if (gc_prof_enabled(objspace)) {
11661 gc_profile_record *record = gc_prof_record(objspace);
11662 record->gc_time = elapsed_time_from(record->gc_invoke_time);
11663 record->gc_invoke_time -= objspace->profile.invoke_time;
11664 }
11665}
11666
11667#define RUBY_DTRACE_GC_HOOK(name) \
11668 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
11669static inline void
11670gc_prof_mark_timer_start(rb_objspace_t *objspace)
11671{
11672 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
11673#if GC_PROFILE_MORE_DETAIL
11674 if (gc_prof_enabled(objspace)) {
11675 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
11676 }
11677#endif
11678}
11679
11680static inline void
11681gc_prof_mark_timer_stop(rb_objspace_t *objspace)
11682{
11683 RUBY_DTRACE_GC_HOOK(MARK_END);
11684#if GC_PROFILE_MORE_DETAIL
11685 if (gc_prof_enabled(objspace)) {
11686 gc_profile_record *record = gc_prof_record(objspace);
11687 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
11688 }
11689#endif
11690}
11691
11692static inline void
11693gc_prof_sweep_timer_start(rb_objspace_t *objspace)
11694{
11695 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
11696 if (gc_prof_enabled(objspace)) {
11697 gc_profile_record *record = gc_prof_record(objspace);
11698
11699 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
11700 objspace->profile.gc_sweep_start_time = getrusage_time();
11701 }
11702 }
11703}
11704
11705static inline void
11706gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
11707{
11708 RUBY_DTRACE_GC_HOOK(SWEEP_END);
11709
11710 if (gc_prof_enabled(objspace)) {
11711 double sweep_time;
11712 gc_profile_record *record = gc_prof_record(objspace);
11713
11714 if (record->gc_time > 0) {
11715 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
11716 /* need to accumulate GC time for lazy sweep after gc() */
11717 record->gc_time += sweep_time;
11718 }
11719 else if (GC_PROFILE_MORE_DETAIL) {
11720 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
11721 }
11722
11723#if GC_PROFILE_MORE_DETAIL
11724 record->gc_sweep_time += sweep_time;
11725 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
11726#endif
11727 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
11728 }
11729}
11730
11731static inline void
11732gc_prof_set_malloc_info(rb_objspace_t *objspace)
11733{
11734#if GC_PROFILE_MORE_DETAIL
11735 if (gc_prof_enabled(objspace)) {
11736 gc_profile_record *record = gc_prof_record(objspace);
11737 record->allocate_increase = malloc_increase;
11738 record->allocate_limit = malloc_limit;
11739 }
11740#endif
11741}
11742
11743static inline void
11744gc_prof_set_heap_info(rb_objspace_t *objspace)
11745{
11746 if (gc_prof_enabled(objspace)) {
11747 gc_profile_record *record = gc_prof_record(objspace);
11748 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
11749 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
11750
11751#if GC_PROFILE_MORE_DETAIL
11752 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
11753 record->heap_live_objects = live;
11754 record->heap_free_objects = total - live;
11755#endif
11756
11757 record->heap_total_objects = total;
11758 record->heap_use_size = live * sizeof(RVALUE);
11759 record->heap_total_size = total * sizeof(RVALUE);
11760 }
11761}
11762
11763/*
11764 * call-seq:
11765 * GC::Profiler.clear -> nil
11766 *
11767 * Clears the GC profiler data.
11768 *
11769 */
11770
11771static VALUE
11772gc_profile_clear(VALUE _)
11773{
11774 rb_objspace_t *objspace = &rb_objspace;
11775 void *p = objspace->profile.records;
11776 objspace->profile.records = NULL;
11777 objspace->profile.size = 0;
11778 objspace->profile.next_index = 0;
11779 objspace->profile.current_record = 0;
11780 if (p) {
11781 free(p);
11782 }
11783 return Qnil;
11784}
11785
11786/*
11787 * call-seq:
11788 * GC::Profiler.raw_data -> [Hash, ...]
11789 *
11790 * Returns an Array of individual raw profile data Hashes ordered
11791 * from earliest to latest by +:GC_INVOKE_TIME+.
11792 *
11793 * For example:
11794 *
11795 * [
11796 * {
11797 * :GC_TIME=>1.3000000000000858e-05,
11798 * :GC_INVOKE_TIME=>0.010634999999999999,
11799 * :HEAP_USE_SIZE=>289640,
11800 * :HEAP_TOTAL_SIZE=>588960,
11801 * :HEAP_TOTAL_OBJECTS=>14724,
11802 * :GC_IS_MARKED=>false
11803 * },
11804 * # ...
11805 * ]
11806 *
11807 * The keys mean:
11808 *
11809 * +:GC_TIME+::
11810 * Time elapsed in seconds for this GC run
11811 * +:GC_INVOKE_TIME+::
11812 * Time elapsed in seconds from startup to when the GC was invoked
11813 * +:HEAP_USE_SIZE+::
11814 * Total bytes of heap used
11815 * +:HEAP_TOTAL_SIZE+::
11816 * Total size of heap in bytes
11817 * +:HEAP_TOTAL_OBJECTS+::
11818 * Total number of objects
11819 * +:GC_IS_MARKED+::
11820 * Returns +true+ if the GC is in mark phase
11821 *
11822 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
11823 * to the following hash keys:
11824 *
11825 * +:GC_MARK_TIME+::
11826 * +:GC_SWEEP_TIME+::
11827 * +:ALLOCATE_INCREASE+::
11828 * +:ALLOCATE_LIMIT+::
11829 * +:HEAP_USE_PAGES+::
11830 * +:HEAP_LIVE_OBJECTS+::
11831 * +:HEAP_FREE_OBJECTS+::
11832 * +:HAVE_FINALIZE+::
11833 *
11834 */
11835
11836static VALUE
11837gc_profile_record_get(VALUE _)
11838{
11839 VALUE prof;
11840 VALUE gc_profile = rb_ary_new();
11841 size_t i;
11842 rb_objspace_t *objspace = (&rb_objspace);
11843
11844 if (!objspace->profile.run) {
11845 return Qnil;
11846 }
11847
11848 for (i =0; i < objspace->profile.next_index; i++) {
11849 gc_profile_record *record = &objspace->profile.records[i];
11850
11851 prof = rb_hash_new();
11852 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
11853 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
11854 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
11855 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
11856 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
11857 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
11858 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
11859 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
11860#if GC_PROFILE_MORE_DETAIL
11861 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
11862 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
11863 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
11864 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
11865 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
11866 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
11867 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
11868
11869 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
11870 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
11871
11872 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
11873#endif
11874
11875#if RGENGC_PROFILE > 0
11876 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
11877 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
11878 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
11879#endif
11880 rb_ary_push(gc_profile, prof);
11881 }
11882
11883 return gc_profile;
11884}
11885
11886#if GC_PROFILE_MORE_DETAIL
11887#define MAJOR_REASON_MAX 0x10
11888
11889static char *
11890gc_profile_dump_major_reason(int flags, char *buff)
11891{
11892 int reason = flags & GPR_FLAG_MAJOR_MASK;
11893 int i = 0;
11894
11895 if (reason == GPR_FLAG_NONE) {
11896 buff[0] = '-';
11897 buff[1] = 0;
11898 }
11899 else {
11900#define C(x, s) \
11901 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
11902 buff[i++] = #x[0]; \
11903 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
11904 buff[i] = 0; \
11905 }
11906 C(NOFREE, N);
11907 C(OLDGEN, O);
11908 C(SHADY, S);
11909#if RGENGC_ESTIMATE_OLDMALLOC
11910 C(OLDMALLOC, M);
11911#endif
11912#undef C
11913 }
11914 return buff;
11915}
11916#endif
11917
11918static void
11919gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
11920{
11921 rb_objspace_t *objspace = &rb_objspace;
11922 size_t count = objspace->profile.next_index;
11923#ifdef MAJOR_REASON_MAX
11924 char reason_str[MAJOR_REASON_MAX];
11925#endif
11926
11927 if (objspace->profile.run && count /* > 1 */) {
11928 size_t i;
11929 const gc_profile_record *record;
11930
11931 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
11932 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
11933
11934 for (i = 0; i < count; i++) {
11935 record = &objspace->profile.records[i];
11936 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
11937 i+1, record->gc_invoke_time, record->heap_use_size,
11938 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
11939 }
11940
11941#if GC_PROFILE_MORE_DETAIL
11942 const char *str = "\n\n" \
11943 "More detail.\n" \
11944 "Prepare Time = Previously GC's rest sweep time\n"
11945 "Index Flags Allocate Inc. Allocate Limit"
11946#if CALC_EXACT_MALLOC_SIZE
11947 " Allocated Size"
11948#endif
11949 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
11950#if RGENGC_PROFILE
11951 " OldgenObj RemNormObj RemShadObj"
11952#endif
11953#if GC_PROFILE_DETAIL_MEMORY
11954 " MaxRSS(KB) MinorFLT MajorFLT"
11955#endif
11956 "\n";
11957 append(out, rb_str_new_cstr(str));
11958
11959 for (i = 0; i < count; i++) {
11960 record = &objspace->profile.records[i];
11961 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
11962#if CALC_EXACT_MALLOC_SIZE
11963 " %15"PRIuSIZE
11964#endif
11965 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
11967 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
11968#endif
11969#if GC_PROFILE_DETAIL_MEMORY
11970 "%11ld %8ld %8ld"
11971#endif
11972
11973 "\n",
11974 i+1,
11975 gc_profile_dump_major_reason(record->flags, reason_str),
11976 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
11977 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
11978 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
11979 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
11980 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
11981 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
11982 record->allocate_increase, record->allocate_limit,
11983#if CALC_EXACT_MALLOC_SIZE
11984 record->allocated_size,
11985#endif
11986 record->heap_use_pages,
11987 record->gc_mark_time*1000,
11988 record->gc_sweep_time*1000,
11989 record->prepare_time*1000,
11990
11991 record->heap_live_objects,
11992 record->heap_free_objects,
11993 record->removing_objects,
11994 record->empty_objects
11995#if RGENGC_PROFILE
11996 ,
11997 record->old_objects,
11998 record->remembered_normal_objects,
11999 record->remembered_shady_objects
12000#endif
12001#if GC_PROFILE_DETAIL_MEMORY
12002 ,
12003 record->maxrss / 1024,
12004 record->minflt,
12005 record->majflt
12006#endif
12007
12008 ));
12009 }
12010#endif
12011 }
12012}
12013
12014/*
12015 * call-seq:
12016 * GC::Profiler.result -> String
12017 *
12018 * Returns a profile data report such as:
12019 *
12020 * GC 1 invokes.
12021 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
12022 * 1 0.012 159240 212940 10647 0.00000000000001530000
12023 */
12024
12025static VALUE
12026gc_profile_result(VALUE _)
12027{
12028 VALUE str = rb_str_buf_new(0);
12029 gc_profile_dump_on(str, rb_str_buf_append);
12030 return str;
12031}
12032
12033/*
12034 * call-seq:
12035 * GC::Profiler.report
12036 * GC::Profiler.report(io)
12037 *
12038 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
12039 *
12040 */
12041
12042static VALUE
12043gc_profile_report(int argc, VALUE *argv, VALUE self)
12044{
12045 VALUE out;
12046
12047 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
12048 gc_profile_dump_on(out, rb_io_write);
12049
12050 return Qnil;
12051}
12052
12053/*
12054 * call-seq:
12055 * GC::Profiler.total_time -> float
12056 *
12057 * The total time used for garbage collection in seconds
12058 */
12059
12060static VALUE
12061gc_profile_total_time(VALUE self)
12062{
12063 double time = 0;
12064 rb_objspace_t *objspace = &rb_objspace;
12065
12066 if (objspace->profile.run && objspace->profile.next_index > 0) {
12067 size_t i;
12068 size_t count = objspace->profile.next_index;
12069
12070 for (i = 0; i < count; i++) {
12071 time += objspace->profile.records[i].gc_time;
12072 }
12073 }
12074 return DBL2NUM(time);
12075}
12076
12077/*
12078 * call-seq:
12079 * GC::Profiler.enabled? -> true or false
12080 *
12081 * The current status of GC profile mode.
12082 */
12083
12084static VALUE
12085gc_profile_enable_get(VALUE self)
12086{
12087 rb_objspace_t *objspace = &rb_objspace;
12088 return objspace->profile.run ? Qtrue : Qfalse;
12089}
12090
12091/*
12092 * call-seq:
12093 * GC::Profiler.enable -> nil
12094 *
12095 * Starts the GC profiler.
12096 *
12097 */
12098
12099static VALUE
12100gc_profile_enable(VALUE _)
12101{
12102 rb_objspace_t *objspace = &rb_objspace;
12103 objspace->profile.run = TRUE;
12104 objspace->profile.current_record = 0;
12105 return Qnil;
12106}
12107
12108/*
12109 * call-seq:
12110 * GC::Profiler.disable -> nil
12111 *
12112 * Stops the GC profiler.
12113 *
12114 */
12115
12116static VALUE
12117gc_profile_disable(VALUE _)
12118{
12119 rb_objspace_t *objspace = &rb_objspace;
12120
12121 objspace->profile.run = FALSE;
12122 objspace->profile.current_record = 0;
12123 return Qnil;
12124}
12125
12126/*
12127 ------------------------------ DEBUG ------------------------------
12128*/
12129
12130static const char *
12131type_name(int type, VALUE obj)
12132{
12133 switch (type) {
12134#define TYPE_NAME(t) case (t): return #t;
12135 TYPE_NAME(T_NONE);
12136 TYPE_NAME(T_OBJECT);
12137 TYPE_NAME(T_CLASS);
12138 TYPE_NAME(T_MODULE);
12139 TYPE_NAME(T_FLOAT);
12140 TYPE_NAME(T_STRING);
12141 TYPE_NAME(T_REGEXP);
12142 TYPE_NAME(T_ARRAY);
12143 TYPE_NAME(T_HASH);
12144 TYPE_NAME(T_STRUCT);
12145 TYPE_NAME(T_BIGNUM);
12146 TYPE_NAME(T_FILE);
12147 TYPE_NAME(T_MATCH);
12148 TYPE_NAME(T_COMPLEX);
12149 TYPE_NAME(T_RATIONAL);
12150 TYPE_NAME(T_NIL);
12151 TYPE_NAME(T_TRUE);
12152 TYPE_NAME(T_FALSE);
12153 TYPE_NAME(T_SYMBOL);
12154 TYPE_NAME(T_FIXNUM);
12155 TYPE_NAME(T_UNDEF);
12156 TYPE_NAME(T_IMEMO);
12157 TYPE_NAME(T_ICLASS);
12158 TYPE_NAME(T_MOVED);
12159 TYPE_NAME(T_ZOMBIE);
12160 case T_DATA:
12161 if (obj && rb_objspace_data_type_name(obj)) {
12162 return rb_objspace_data_type_name(obj);
12163 }
12164 return "T_DATA";
12165#undef TYPE_NAME
12166 }
12167 return "unknown";
12168}
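
/*
 * Editorial sketch (not part of gc.c) of the stringizing trick behind
 * TYPE_NAME above: the preprocessor's # operator turns a macro argument
 * into its literal spelling, so each case both matches the enumerator and
 * returns its name without duplicating the identifier.
 */
enum fruit { APPLE, PEAR, PLUM };

static const char *
fruit_name(enum fruit f)
{
    switch (f) {
#define NAME(x) case (x): return #x;
        NAME(APPLE)
        NAME(PEAR)
        NAME(PLUM)
#undef NAME
    }
    return "unknown";
}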
12169
12170static const char *
12171obj_type_name(VALUE obj)
12172{
12173 return type_name(TYPE(obj), obj);
12174}
12175
12176const char *
12177rb_method_type_name(rb_method_type_t type)
12178{
12179 switch (type) {
12180 case VM_METHOD_TYPE_ISEQ: return "iseq";
12181 case VM_METHOD_TYPE_ATTRSET: return "attrset";
12182 case VM_METHOD_TYPE_IVAR: return "ivar";
12183 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
12184 case VM_METHOD_TYPE_ALIAS: return "alias";
12185 case VM_METHOD_TYPE_REFINED: return "refined";
12186 case VM_METHOD_TYPE_CFUNC: return "cfunc";
12187 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
12188 case VM_METHOD_TYPE_MISSING: return "missing";
12189 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
12190 case VM_METHOD_TYPE_UNDEF: return "undef";
12191 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
12192 }
12193 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
12194}
12195
12196/* from array.c */
12197# define ARY_SHARED_P(ary) \
12198 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
12199 FL_TEST((ary),ELTS_SHARED)!=0)
12200# define ARY_EMBED_P(ary) \
12201 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
12202 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
12203
12204static void
12205rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
12206{
12207 if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
12208 VALUE path = rb_iseq_path(iseq);
12209 VALUE n = iseq->body->location.first_lineno;
12210 snprintf(buff, buff_size, " %s@%s:%d",
12211 RSTRING_PTR(iseq->body->location.label),
12212 RSTRING_PTR(path),
12213 n ? FIX2INT(n) : 0 );
12214 }
12215}
12216
12217bool rb_ractor_p(VALUE rv);
12218
12219static int
12220str_len_no_raise(VALUE str)
12221{
12222 long len = RSTRING_LEN(str);
12223 if (len < 0) return 0;
12224 if (len > INT_MAX) return INT_MAX;
12225 return (int)len;
12226}
12227
12228const char *
12229rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
12230{
12231 int pos = 0;
12232 void *poisoned = asan_poisoned_object_p(obj);
12233 asan_unpoison_object(obj, false);
12234
12235#define BUFF_ARGS buff + pos, buff_size - pos
12236#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
12237 if (SPECIAL_CONST_P(obj)) {
12238 APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));
12239
12240 if (FIXNUM_P(obj)) {
12241 APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
12242 }
12243 else if (SYMBOL_P(obj)) {
12244 APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
12245 }
12246 }
12247 else {
12248#define TF(c) ((c) != 0 ? "true" : "false")
12249#define C(c, s) ((c) != 0 ? (s) : " ")
12250 const int type = BUILTIN_TYPE(obj);
12251 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
12252
12253 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
12254 APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s] %s ",
12255 (void *)obj, age,
12256 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
12257 C(RVALUE_MARK_BITMAP(obj), "M"),
12258 C(RVALUE_PIN_BITMAP(obj), "P"),
12259 C(RVALUE_MARKING_BITMAP(obj), "R"),
12260 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
12261 obj_type_name(obj)));
12262 }
12263 else {
12264 /* fake */
12265 APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
12266 (void *)obj, age,
12267 obj_type_name(obj)));
12268 }
12269
12270 if (internal_object_p(obj)) {
12271 /* ignore */
12272 }
12273 else if (RBASIC(obj)->klass == 0) {
12274 APPENDF((BUFF_ARGS, "(temporary internal)"));
12275 }
12276 else {
12277 if (RTEST(RBASIC(obj)->klass)) {
12278 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
12279 if (!NIL_P(class_path)) {
12280 APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
12281 }
12282 }
12283 }
12284
12285#if GC_DEBUG
12286 APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
12287#endif
12288
12289 switch (type) {
12290 case T_NODE:
12291 UNEXPECTED_NODE(rb_raw_obj_info);
12292 break;
12293 case T_ARRAY:
12294 if (FL_TEST(obj, ELTS_SHARED)) {
12295 APPENDF((BUFF_ARGS, "shared -> %s",
12296 rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
12297 }
12298 else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
12299 APPENDF((BUFF_ARGS, "[%s%s] len: %ld (embed)",
12300 C(ARY_EMBED_P(obj), "E"),
12301 C(ARY_SHARED_P(obj), "S"),
12302 RARRAY_LEN(obj)));
12303 }
12304 else {
12305 APPENDF((BUFF_ARGS, "[%s%s%s] len: %ld, capa:%ld ptr:%p",
12306 C(ARY_EMBED_P(obj), "E"),
12307 C(ARY_SHARED_P(obj), "S"),
12308 C(RARRAY_TRANSIENT_P(obj), "T"),
12309 RARRAY_LEN(obj),
12310 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
12311 (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
12312 }
12313 break;
12314 case T_STRING: {
12315 if (STR_SHARED_P(obj)) APPENDF((BUFF_ARGS, " [shared] "));
12316 APPENDF((BUFF_ARGS, "%.*s", str_len_no_raise(obj), RSTRING_PTR(obj)));
12317 break;
12318 }
12319 case T_SYMBOL: {
12320 VALUE fstr = RSYMBOL(obj)->fstr;
12321 ID id = RSYMBOL(obj)->id;
12322 if (RB_TYPE_P(fstr, T_STRING)) {
12323 APPENDF((BUFF_ARGS, ":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id));
12324 }
12325 else {
12326 APPENDF((BUFF_ARGS, "(%p) id:%d", (void *)fstr, (unsigned int)id));
12327 }
12328 break;
12329 }
12330 case T_MOVED: {
12331 APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
12332 break;
12333 }
12334 case T_HASH: {
12335 APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
12336 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
12337 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
12338 RHASH_SIZE(obj)));
12339 break;
12340 }
12341 case T_CLASS:
12342 case T_MODULE:
12343 {
12344 VALUE class_path = rb_class_path_cached(obj);
12345 if (!NIL_P(class_path)) {
12346 APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
12347 }
12348 else {
12349 APPENDF((BUFF_ARGS, "(anon)"));
12350 }
12351 break;
12352 }
12353 case T_ICLASS:
12354 {
12355 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
12356 if (!NIL_P(class_path)) {
12357 APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
12358 }
12359 break;
12360 }
12361 case T_OBJECT:
12362 {
12363 uint32_t len = ROBJECT_NUMIV(obj);
12364
12365 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
12366 APPENDF((BUFF_ARGS, "(embed) len:%d", len));
12367 }
12368 else {
12369 VALUE *ptr = ROBJECT_IVPTR(obj);
12370 APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
12371 }
12372 }
12373 break;
12374 case T_DATA: {
12375 const struct rb_block *block;
12376 const rb_iseq_t *iseq;
12377 if (rb_obj_is_proc(obj) &&
12378 (block = vm_proc_block(obj)) != NULL &&
12379 (vm_block_type(block) == block_type_iseq) &&
12380 (iseq = vm_block_iseq(block)) != NULL) {
12381 rb_raw_iseq_info(BUFF_ARGS, iseq);
12382 }
12383 else if (rb_ractor_p(obj)) {
12384 rb_ractor_t *r = (void *)DATA_PTR(obj);
12385 if (r) {
12386 APPENDF((BUFF_ARGS, "r:%d", r->pub.id));
12387 }
12388 }
12389 else {
12390 const char * const type_name = rb_objspace_data_type_name(obj);
12391 if (type_name) {
12392 APPENDF((BUFF_ARGS, "%s", type_name));
12393 }
12394 }
12395 break;
12396 }
12397 case T_IMEMO: {
12398 APPENDF((BUFF_ARGS, "<%s> ", rb_imemo_name(imemo_type(obj))));
12399
12400 switch (imemo_type(obj)) {
12401 case imemo_ment: {
12402 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
12403 if (me->def) {
12404 APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
12405 rb_id2name(me->called_id),
12406 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
12407 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
12408 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
12409 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
12410 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
12411 rb_method_type_name(me->def->type),
12412 me->def->alias_count,
12413 (void *)me->owner, // obj_info(me->owner),
12414 (void *)me->defined_class)); //obj_info(me->defined_class)));
12415
12416 if (me->def->type == VM_METHOD_TYPE_ISEQ) {
12417 // APPENDF((BUFF_ARGS, " (iseq:%p)", (void *)me->def->body.iseq.iseqptr));
12418 APPENDF((BUFF_ARGS, " (iseq:%s)", obj_info((VALUE)me->def->body.iseq.iseqptr)));
12419 }
12420 }
12421 else {
12422 APPENDF((BUFF_ARGS, "%s", rb_id2name(me->called_id)));
12423 }
12424 break;
12425 }
12426 case imemo_iseq: {
12427 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
12428 rb_raw_iseq_info(BUFF_ARGS, iseq);
12429 break;
12430 }
12431 case imemo_callinfo:
12432 {
12433 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
12434 APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
12435 rb_id2name(vm_ci_mid(ci)),
12436 vm_ci_flag(ci),
12437 vm_ci_argc(ci),
12438 vm_ci_kwarg(ci) ? "available" : "NULL"));
12439 break;
12440 }
12441 case imemo_callcache:
12442 {
12443 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
12444 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
12445
12446 APPENDF((BUFF_ARGS, "(klass:%s, cme:%s (%p) call:%p",
12447 NIL_P(class_path) ? "??" : RSTRING_PTR(class_path),
12448 vm_cc_cme(cc) ? rb_id2name(vm_cc_cme(cc)->called_id) : "<NULL>",
12449 (void *)vm_cc_cme(cc), (void *)vm_cc_call(cc)));
12450 break;
12451 }
12452 default:
12453 break;
12454 }
12455 }
12456 default:
12457 break;
12458 }
12459#undef TF
12460#undef C
12461 }
12462 end:
12463 if (poisoned) {
12464 asan_poison_object(obj);
12465 }
12466
12467 return buff;
12468#undef APPENDF
12469#undef BUFF_ARGS
12470}
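
/*
 * Editorial sketch (not part of gc.c) of the bounded-append idiom behind
 * APPENDF above: accumulate snprintf's would-be lengths into one offset
 * and stop as soon as it reaches the buffer capacity, so the output is
 * truncated instead of overflowing.
 */
#include <stdio.h>

static const char *
describe(char *buff, int size, int id, const char *name)
{
    int pos = snprintf(buff, size, "obj %d", id);
    if (pos >= size) return buff;  /* buffer full: later appends are skipped */
    snprintf(buff + pos, size - pos, " (%s)", name);
    return buff;
}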
12471
12472#if RGENGC_OBJ_INFO
12473#define OBJ_INFO_BUFFERS_NUM 10
12474#define OBJ_INFO_BUFFERS_SIZE 0x100
12475static int obj_info_buffers_index = 0;
12476static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
12477
12478static const char *
12479obj_info(VALUE obj)
12480{
12481 const int index = obj_info_buffers_index++;
12482 char *const buff = &obj_info_buffers[index][0];
12483
12484 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
12485 obj_info_buffers_index = 0;
12486 }
12487
12488 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
12489}
12490#else
12491static const char *
12492obj_info(VALUE obj)
12493{
12494 return obj_type_name(obj);
12495}
12496#endif
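
/*
 * Editorial sketch (not part of gc.c) of the rotating-buffer idea in
 * obj_info above: a small ring of static buffers served round-robin lets
 * several recent results stay valid at once (e.g. two obj_info calls in
 * one fprintf) without any heap allocation.
 */
#define RING_NUM 4
#define RING_SIZE 0x100

static char *
next_ring_buffer(void)
{
    static char ring[RING_NUM][RING_SIZE];
    static int index = 0;
    char *buff = ring[index++];
    if (index >= RING_NUM) index = 0; /* wrap around, reusing the oldest */
    return buff;
}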
12497
12498MJIT_FUNC_EXPORTED const char *
12499rb_obj_info(VALUE obj)
12500{
12501 return obj_info(obj);
12502}
12503
12504void
12505rb_obj_info_dump(VALUE obj)
12506{
12507 char buff[0x100];
12508 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
12509}
12510
12511MJIT_FUNC_EXPORTED void
12512rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
12513{
12514 char buff[0x100];
12515 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
12516}
12517
12518#if GC_DEBUG
12519
12520void
12521rb_gcdebug_print_obj_condition(VALUE obj)
12522{
12523 rb_objspace_t *objspace = &rb_objspace;
12524
12525 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
12526
12527 if (BUILTIN_TYPE(obj) == T_MOVED) {
12528 fprintf(stderr, "moved?: true\n");
12529 }
12530 else {
12531 fprintf(stderr, "moved?: false\n");
12532 }
12533 if (is_pointer_to_heap(objspace, (void *)obj)) {
12534 fprintf(stderr, "pointer to heap?: true\n");
12535 }
12536 else {
12537 fprintf(stderr, "pointer to heap?: false\n");
12538 return;
12539 }
12540
12541 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
12542 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
12543 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
12544 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
12545 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
12546 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
12547
12548 if (is_lazy_sweeping(heap_eden)) {
12549 fprintf(stderr, "lazy sweeping?: true\n");
12550 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
12551 }
12552 else {
12553 fprintf(stderr, "lazy sweeping?: false\n");
12554 }
12555}
12556
12557static VALUE
12558gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
12559{
12560 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
12561 return Qnil;
12562}
12563
12564void
12565rb_gcdebug_sentinel(VALUE obj, const char *name)
12566{
12567 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
12568}
12569
12570#endif /* GC_DEBUG */
12571
12572#if GC_DEBUG_STRESS_TO_CLASS
12573/*
12574 * call-seq:
12575 * GC.add_stress_to_class(class[, ...])
12576 *
12577 * Raises NoMemoryError when allocating an instance of the given classes.
12578 *
12579 */
12580static VALUE
12581rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
12582{
12583 rb_objspace_t *objspace = &rb_objspace;
12584
12585 if (!stress_to_class) {
12586 stress_to_class = rb_ary_tmp_new(argc);
12587 }
12588 rb_ary_cat(stress_to_class, argv, argc);
12589 return self;
12590}
12591
12592/*
12593 * call-seq:
12594 * GC.remove_stress_to_class(class[, ...])
12595 *
12596 * No longer raises NoMemoryError when allocating an instance of the
12597 * given classes.
12598 *
12599 */
12600static VALUE
12601rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
12602{
12603 rb_objspace_t *objspace = &rb_objspace;
12604 int i;
12605
12606 if (stress_to_class) {
12607 for (i = 0; i < argc; ++i) {
12608 rb_ary_delete_same(stress_to_class, argv[i]);
12609 }
12610 if (RARRAY_LEN(stress_to_class) == 0) {
12611 stress_to_class = 0;
12612 }
12613 }
12614 return Qnil;
12615}
12616#endif
12617
12618/*
12619 * Document-module: ObjectSpace
12620 *
12621 * The ObjectSpace module contains a number of routines
12622 * that interact with the garbage collection facility and allow you to
12623 * traverse all living objects with an iterator.
12624 *
12625 * ObjectSpace also provides support for object finalizers, procs that will be
12626 * called when a specific object is about to be destroyed by garbage
12627 * collection. See the documentation for
12628 * <code>ObjectSpace.define_finalizer</code> for important information on
12629 * how to use this method correctly.
12630 *
12631 * a = "A"
12632 * b = "B"
12633 *
12634 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
12635 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
12636 *
12637 * a = nil
12638 * b = nil
12639 *
12640 * _produces:_
12641 *
12642 * Finalizer two on 537763470
12643 * Finalizer one on 537763480
12644 */
12645
12646/*
12647 * Document-class: ObjectSpace::WeakMap
12648 *
12649 * An ObjectSpace::WeakMap object holds references to
12650 * objects without preventing them from being garbage collected.
12651 *
12652 * This class is mostly used internally by WeakRef; please use
12653 * +lib/weakref.rb+ for the public interface.
12654 */
12655
12656/* Document-class: GC::Profiler
12657 *
12658 * The GC profiler provides access to information on GC runs including time,
12659 * length and object space size.
12660 *
12661 * Example:
12662 *
12663 * GC::Profiler.enable
12664 *
12665 * require 'rdoc/rdoc'
12666 *
12667 * GC::Profiler.report
12668 *
12669 * GC::Profiler.disable
12670 *
12671 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
12672 */
12673
12674#include "gc.rbinc"
12675
12676void
12677Init_GC(void)
12678{
12679#undef rb_intern
12680 VALUE rb_mObjSpace;
12681 VALUE rb_mProfiler;
12682 VALUE gc_constants;
12683
12684 rb_mGC = rb_define_module("GC");
12685
12686 gc_constants = rb_hash_new();
12687 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), GC_DEBUG ? Qtrue : Qfalse);
12688 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
12689 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
12690 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
12691 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_PLANES")), SIZET2NUM(HEAP_PAGE_BITMAP_PLANES));
12692 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
12693 OBJ_FREEZE(gc_constants);
12694 /* internal constants */
12695 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
12696
12697 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
12698 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
12699 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
12700 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
12701 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
12702 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
12703 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
12704 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
12705 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
12706
12707 rb_mObjSpace = rb_define_module("ObjectSpace");
12708
12709 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
12710
12711 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
12712 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
12713
12714 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
12715
12716 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
12717
12718 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
12719 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
12720
12721 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
12722
12723 {
12724 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
12725 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
12726 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
12727 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
12728 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
12729 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
12730 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
12731 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
12732 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
12733 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
12734 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
12735 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
12736 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
12737 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
12738 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
12739 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
12740 rb_include_module(rb_cWeakMap, rb_mEnumerable);
12741 }
12742
12743 /* internal methods */
12744 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
12745 rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
12746#if MALLOC_ALLOCATED_SIZE
12747 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
12748 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
12749#endif
12750
12751#if GC_DEBUG_STRESS_TO_CLASS
12752 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
12753 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
12754#endif
12755
12756 {
12757 VALUE opts;
12758 /* GC build options */
12759 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
12760#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
12761 OPT(GC_DEBUG);
12762 OPT(USE_RGENGC);
12763 OPT(RGENGC_DEBUG);
12764 OPT(RGENGC_CHECK_MODE);
12765 OPT(RGENGC_PROFILE);
12766 OPT(RGENGC_ESTIMATE_OLDMALLOC);
12767 OPT(GC_PROFILE_MORE_DETAIL);
12768 OPT(GC_ENABLE_LAZY_SWEEP);
12769 OPT(CALC_EXACT_MALLOC_SIZE);
12770 OPT(MALLOC_ALLOCATED_SIZE);
12771 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
12772 OPT(GC_PROFILE_DETAIL_MEMORY);
12773#undef OPT
12774 OBJ_FREEZE(opts);
12775 }
12776}
12777
12778#ifdef ruby_xmalloc
12779#undef ruby_xmalloc
12780#endif
12781#ifdef ruby_xmalloc2
12782#undef ruby_xmalloc2
12783#endif
12784#ifdef ruby_xcalloc
12785#undef ruby_xcalloc
12786#endif
12787#ifdef ruby_xrealloc
12788#undef ruby_xrealloc
12789#endif
12790#ifdef ruby_xrealloc2
12791#undef ruby_xrealloc2
12792#endif
12793
12794 void *
12795 ruby_xmalloc(size_t size)
12796 {
12797#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12798 ruby_malloc_info_file = __FILE__;
12799 ruby_malloc_info_line = __LINE__;
12800#endif
12801 return ruby_xmalloc_body(size);
12802}
12803
12804void *
12805ruby_xmalloc2(size_t n, size_t size)
12806{
12807#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12808 ruby_malloc_info_file = __FILE__;
12809 ruby_malloc_info_line = __LINE__;
12810#endif
12811 return ruby_xmalloc2_body(n, size);
12812}
12813
12814void *
12815ruby_xcalloc(size_t n, size_t size)
12816{
12817#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12818 ruby_malloc_info_file = __FILE__;
12819 ruby_malloc_info_line = __LINE__;
12820#endif
12821 return ruby_xcalloc_body(n, size);
12822}
12823
12824void *
12825ruby_xrealloc(void *ptr, size_t new_size)
12826{
12827#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12828 ruby_malloc_info_file = __FILE__;
12829 ruby_malloc_info_line = __LINE__;
12830#endif
12831 return ruby_xrealloc_body(ptr, new_size);
12832}
12833
12834void *
12835ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
12836{
12837#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12838 ruby_malloc_info_file = __FILE__;
12839 ruby_malloc_info_line = __LINE__;
12840#endif
12841 return ruby_xrealloc2_body(ptr, n, new_size);
12842}
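Each wrapper above captures the caller's __FILE__/__LINE__ when USE_GC_MALLOC_OBJ_INFO_DETAILS is compiled in, then delegates to its *_body counterpart. From a caller's point of view the family behaves like malloc/realloc, except that failure raises instead of returning NULL and the two-size forms guard the n * size multiplication against overflow. A hedged usage sketch (alloc_value_buffer is a hypothetical helper):

static VALUE *
alloc_value_buffer(size_t n)
{
    VALUE *buf = ruby_xmalloc2(n, sizeof(VALUE)); /* overflow-checked; raises on OOM */
    for (size_t i = 0; i < n; i++) buf[i] = Qnil; /* no NULL check needed */
    return buf; /* release with ruby_xfree(buf), never plain free() */
}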
size_t rb_ary_memsize(VALUE ary)
Definition: array.c:894
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1301
void rb_ary_free(VALUE ary)
Definition: array.c:864
VALUE rb_ary_last(int argc, const VALUE *argv, VALUE ary)
Definition: array.c:1932
VALUE rb_ary_new(void)
Definition: array.c:749
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:846
void rb_ary_delete_same(VALUE ary, VALUE item)
Definition: array.c:3987
VALUE rb_ary_cat(VALUE ary, const VALUE *argv, long len)
Definition: array.c:1314
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition: assert.h:167
#define ALWAYS_INLINE(x)
Definition: attributes.h:86
#define NOINLINE(x)
Definition: attributes.h:82
#define PUREFUNC(x)
Definition: attributes.h:54
#define NORETURN(x)
Definition: attributes.h:152
#define PRINTF_ARGS(decl, string_index, first_to_check)
Definition: attributes.h:112
#define UNREACHABLE
Definition: assume.h:30
#define UNREACHABLE_RETURN
Definition: assume.h:31
#define BDIGIT
Definition: bigdecimal.h:48
size_t rb_big_size(VALUE big)
Definition: bignum.c:6775
VALUE rb_big_eql(VALUE x, VALUE y)
Definition: bignum.c:5541
VALUE rb_big_hash(VALUE x)
Definition: bignum.c:6723
int bits(struct state *s, int need)
Definition: blast.c:72
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1935
#define CHECK(sub)
Definition: compile.c:429
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:1060
#define N
Definition: crc32.c:57
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
Definition: cxxanyargs.hpp:653
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
Definition: cxxanyargs.hpp:668
#define rb_define_module_function(klass, mid, func, arity)
Defines klass#mid and makes it a module function.
Definition: cxxanyargs.hpp:672
#define sub(x, y)
Definition: date_strftime.c:24
#define add(x, y)
Definition: date_strftime.c:23
#define range(low, item, hi)
Definition: date_strftime.c:21
union @11::@13 imemo
struct RIMemo * ptr
Definition: debug.c:88
enum imemo_type types
Definition: debug.c:86
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1627
#define RB_DEBUG_COUNTER_INC_IF(type, cond)
#define RB_DEBUG_COUNTER_INC(type)
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:106
#define RB_GNUC_EXTENSION
Definition: defines.h:85
#define MJIT_FUNC_EXPORTED
Definition: dllexport.h:55
#define assert(x)
Definition: dlmalloc.c:1176
#define free(x)
Definition: dln.c:52
#define DBL2NUM
Definition: double.h:29
int root
Definition: enough.c:226
string_t out
Definition: enough.c:230
int max
Definition: enough.c:225
VALUE rb_mEnumerable
Definition: enum.c:27
uint8_t len
Definition: escape.c:17
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
#define numberof(array)
Definition: etc.c:649
#define rb_ec_raised_p(ec, f)
Definition: eval_intern.h:272
#define rb_ec_raised_set(ec, f)
Definition: eval_intern.h:270
#define EC_EXEC_TAG()
Definition: eval_intern.h:193
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:196
#define EXIT_FAILURE
Definition: eval_intern.h:32
#define rb_ec_raised_clear(ec)
Definition: eval_intern.h:273
#define EC_POP_TAG()
Definition: eval_intern.h:138
@ RAISED_NOMEMORY
Definition: eval_intern.h:268
void rb_mark_end_proc(void)
Definition: eval_jump.c:78
#define RUBY_INTERNAL_EVENT_GC_EXIT
Definition: event.h:62
#define RUBY_INTERNAL_EVENT_GC_ENTER
Definition: event.h:61
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
Definition: event.h:60
#define RUBY_INTERNAL_EVENT_GC_END_MARK
Definition: event.h:59
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
Definition: event.h:63
#define RUBY_INTERNAL_EVENT_FREEOBJ
Definition: event.h:57
#define RUBY_INTERNAL_EVENT_GC_START
Definition: event.h:58
uint32_t rb_event_flag_t
Definition: event.h:66
#define RUBY_INTERNAL_EVENT_NEWOBJ
Definition: event.h:56
#define O(member)
#define RSTRING_LEN(string)
Definition: fbuffer.h:22
#define RSTRING_PTR(string)
Definition: fbuffer.h:19
#define UNLIKELY(x)
Definition: ffi_common.h:126
#define memcpy(d, s, n)
Definition: ffi_common.h:55
#define LIKELY(x)
Definition: ffi_common.h:125
#define FL_SINGLETON
Definition: fl_type.h:49
#define FL_EXIVAR
Definition: fl_type.h:58
#define ELTS_SHARED
Definition: fl_type.h:84
#define FL_SEEN_OBJ_ID
Definition: fl_type.h:57
#define FL_PROMOTED0
Definition: fl_type.h:51
#define FL_FINALIZE
Definition: fl_type.h:53
#define FL_WB_PROTECTED
Definition: fl_type.h:50
#define FL_PROMOTED1
Definition: fl_type.h:52
#define FL_USHIFT
Definition: fl_type.h:61
@ RUBY_FL_WB_PROTECTED
Definition: fl_type.h:163
#define PRIsVALUE
Definition: function.c:10
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing)
Definition: gc.c:1072
VALUE rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2349
#define RGENGC_PROFILE
Definition: gc.c:416
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR
Definition: gc.c:267
#define GC_OLDMALLOC_LIMIT_MAX
Definition: gc.c:297
int rb_objspace_internal_object_p(VALUE obj)
Definition: gc.c:3355
#define I(s)
VALUE * ruby_initial_gc_stress_ptr
Definition: gc.c:879
bool rb_obj_is_main_ractor(VALUE gv)
Definition: ractor.c:1707
#define GC_MALLOC_LIMIT_MAX
Definition: gc.c:284
#define stack_check(ec, water_mark)
Definition: gc.c:5540
void ruby_xfree(void *x)
Deallocates a storage instance.
Definition: gc.c:10914
#define STACK_END
Definition: gc.c:5482
#define CEILDIV(i, mod)
Definition: gc.c:814
#define APPENDF(f)
#define OBJ_ID_INITIAL
Definition: gc.c:3131
#define TRY_WITH_GC(siz, alloc)
Definition: gc.c:10583
void rb_memerror(void)
Definition: gc.c:10309
#define GC_HEAP_FREE_SLOTS_MIN_RATIO
Definition: gc.c:271
#define MALLOC_ALLOCATED_SIZE
Definition: gc.c:456
#define GC_ENABLE_INCREMENTAL_MARK
Definition: gc.c:443
#define STACK_START
Definition: gc.c:5481
#define heap_eden
Definition: gc.c:893
#define ARY_SHARED_P(ary)
Definition: gc.c:12197
#define obj_id_to_ref(objid)
Definition: gc.c:956
#define stress_to_class
Definition: gc.c:904
#define NUM2PTR(x)
#define CALC_EXACT_MALLOC_SIZE
Definition: gc.c:449
gc_stat_sym
Definition: gc.c:9655
@ gc_stat_sym_total_freed_objects
Definition: gc.c:9670
@ gc_stat_sym_old_objects
Definition: gc.c:9680
@ gc_stat_sym_total_allocated_objects
Definition: gc.c:9669
@ gc_stat_sym_compact_count
Definition: gc.c:9675
@ gc_stat_sym_total_moved_objects
Definition: gc.c:9677
@ gc_stat_sym_heap_allocatable_pages
Definition: gc.c:9659
@ gc_stat_sym_read_barrier_faults
Definition: gc.c:9676
@ gc_stat_sym_old_objects_limit
Definition: gc.c:9681
@ gc_stat_sym_heap_live_slots
Definition: gc.c:9661
@ gc_stat_sym_heap_free_slots
Definition: gc.c:9662
@ gc_stat_sym_heap_marked_slots
Definition: gc.c:9664
@ gc_stat_sym_total_allocated_pages
Definition: gc.c:9667
@ gc_stat_sym_count
Definition: gc.c:9656
@ gc_stat_sym_heap_available_slots
Definition: gc.c:9660
@ gc_stat_sym_remembered_wb_unprotected_objects_limit
Definition: gc.c:9679
@ gc_stat_sym_last
Definition: gc.c:9694
@ gc_stat_sym_malloc_increase_bytes_limit
Definition: gc.c:9672
@ gc_stat_sym_heap_final_slots
Definition: gc.c:9663
@ gc_stat_sym_total_freed_pages
Definition: gc.c:9668
@ gc_stat_sym_heap_sorted_length
Definition: gc.c:9658
@ gc_stat_sym_heap_tomb_pages
Definition: gc.c:9666
@ gc_stat_sym_heap_allocated_pages
Definition: gc.c:9657
@ gc_stat_sym_heap_eden_pages
Definition: gc.c:9665
@ gc_stat_sym_remembered_wb_unprotected_objects
Definition: gc.c:9678
@ gc_stat_sym_minor_gc_count
Definition: gc.c:9673
@ gc_stat_sym_malloc_increase_bytes
Definition: gc.c:9671
@ gc_stat_sym_major_gc_count
Definition: gc.c:9674
#define RGENGC_ESTIMATE_OLDMALLOC
Definition: gc.c:426
#define RGENGC_CHECK_MODE
Definition: gc.c:394
#define GC_HEAP_GROWTH_MAX_SLOTS
Definition: gc.c:264
int rb_objspace_garbage_object_p(VALUE obj)
Definition: gc.c:3933
#define GC_PROFILE_DETAIL_MEMORY
Definition: gc.c:440
#define ARY_EMBED_P(ary)
Definition: gc.c:12200
#define RVALUE_PIN_BITMAP(obj)
Definition: gc.c:1221
VALUE rb_gc_location(VALUE value)
Definition: gc.c:9003
#define heap_pages_final_slots
Definition: gc.c:891
struct stack_chunk stack_chunk_t
#define GC_MALLOC_LIMIT_MIN
Definition: gc.c:281
VALUE rb_newobj(void)
Definition: gc.c:2365
VALUE rb_gc_disable(void)
Definition: gc.c:9925
size_t rb_objspace_data_type_memsize(VALUE obj)
Definition: gc.c:2521
#define is_marking(objspace)
Definition: gc.c:938
#define gc_mode(objspace)
Definition: gc.c:935
#define gc_prof_enabled(objspace)
Definition: gc.c:1081
void Init_heap(void)
Definition: gc.c:3158
VALUE rb_gc_start(void)
Definition: gc.c:9490
#define gc_report(level, objspace,...)
Definition: gc.c:1084
void * rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
Definition: gc.c:10989
#define UNEXPECTED_NODE(func)
Definition: gc.c:2380
void * rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
Definition: gc.c:10927
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
Definition: gc.c:7876
void rb_mark_tbl_no_pin(st_table *tbl)
Definition: gc.c:5899
#define heap_pages_freeable_pages
Definition: gc.c:890
void ruby_mimfree(void *ptr)
Definition: gc.c:10979
rb_imemo_tmpbuf_t * rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
Definition: gc.c:2432
#define dont_gc_val()
Definition: gc.c:916
#define heap_pages_deferred_final
Definition: gc.c:892
#define dont_gc_set(b)
Definition: gc.c:915
VALUE rb_gc_enable(void)
Definition: gc.c:9888
void * ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
Identical to ruby_xrealloc(), except it resizes the given storage instance to newelems * newsiz bytes...
Definition: gc.c:12835
void rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
Definition: gc.c:12512
const char * rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
Definition: gc.c:12229
VALUE rb_undefine_finalizer(VALUE obj)
Definition: gc.c:3453
#define ruby_gc_stress_mode
Definition: gc.c:900
#define finalizing
Definition: gc.c:896
#define global_list
Definition: gc.c:898
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:5869
rb_symbols_t ruby_global_symbols
Definition: symbol.c:76
#define COUNT_TYPE(t)
int rb_during_gc(void)
Definition: gc.c:9505
#define MALLOC_ALLOCATED_SIZE_CHECK
Definition: gc.c:459
struct mark_stack mark_stack_t
@ HEAP_PAGE_BITMAP_LIMIT
Definition: gc.c:820
@ HEAP_PAGE_OBJ_LIMIT
Definition: gc.c:819
@ HEAP_PAGE_BITMAP_SIZE
Definition: gc.c:821
@ HEAP_PAGE_BITMAP_PLANES
Definition: gc.c:822
@ HEAP_PAGE_ALIGN_MASK
Definition: gc.c:817
@ HEAP_PAGE_SIZE
Definition: gc.c:818
@ HEAP_PAGE_ALIGN
Definition: gc.c:816
void rb_mark_set(st_table *tbl)
Definition: gc.c:5686
void * ruby_xcalloc(size_t n, size_t size)
Identical to ruby_xmalloc2(), except it zero-fills the region before it returns.
Definition: gc.c:12815
void * ruby_xmalloc2_body(size_t n, size_t size)
Definition: gc.c:10846
#define CLEAR_IN_BITMAP(bits, p)
Definition: gc.c:864
#define HEAP_PAGE_ALIGN_LOG
Definition: gc.c:813
#define GET_HEAP_MARKING_BITS(x)
Definition: gc.c:871
void rb_mark_hash(st_table *tbl)
Definition: gc.c:5750
#define heap_pages_lomem
Definition: gc.c:887
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:4110
void rb_gc_mark_movable(VALUE ptr)
Definition: gc.c:6106
#define GET_HEAP_WB_UNPROTECTED_BITS(x)
Definition: gc.c:870
void rb_gc_mark_maybe(VALUE obj)
Definition: gc.c:5931
#define nomem_error
Definition: gc.c:976
#define global_symbols
Definition: gc.c:9343
VALUE rb_class_allocate_instance(VALUE klass)
Definition: gc.c:2482
#define GC_ENABLE_LAZY_SWEEP
Definition: gc.c:446
#define OBJ_ID_INCREMENT
Definition: gc.c:3130
#define MARK_IN_BITMAP(bits, p)
Definition: gc.c:863
#define BUFF_ARGS
#define GC_PROFILE_RECORD_DEFAULT_SIZE
Definition: gc.c:11528
#define RVALUE_PAGE_MARKING(page, obj)
Definition: gc.c:1230
int rb_objspace_marked_object_p(VALUE obj)
Definition: gc.c:6122
void * rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
Definition: gc.c:10920
VALUE rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
Definition: gc.c:7896
int rb_ec_stack_check(rb_execution_context_t *ec)
Definition: gc.c:5546
#define GET_PAGE_BODY(x)
Definition: gc.c:852
void * rb_aligned_malloc(size_t alignment, size_t size)
Definition: gc.c:10344
void rb_mark_tbl(st_table *tbl)
Definition: gc.c:5893
#define SET_STACK_END
Definition: gc.c:5479
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
Definition: gc.c:2505
void rb_gc_writebarrier_unprotect(VALUE obj)
Definition: gc.c:7777
#define NUM_IN_PAGE(p)
Definition: gc.c:856
#define malloc_allocated_size
Definition: gc.c:883
uintptr_t bits_t
Definition: gc.c:609
VALUE rb_mGC
Definition: gc.c:981
#define dont_gc_off()
Definition: gc.c:914
#define STACK_LENGTH
Definition: gc.c:5495
@ BITS_BITLENGTH
Definition: gc.c:612
@ BITS_SIZE
Definition: gc.c:611
void * ruby_mimmalloc(size_t size)
Definition: gc.c:10951
#define GC_HEAP_INIT_SLOTS
Definition: gc.c:255
VALUE rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags)
Definition: gc.c:2356
#define heap_pages_sorted_length
Definition: gc.c:886
#define RVALUE_WB_UNPROTECTED_BITMAP(obj)
Definition: gc.c:1224
size_t rb_obj_memsize_of(VALUE obj)
Definition: gc.c:4296
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:5580
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
Definition: gc.c:10169
#define IMEMO_NAME(x)
const char * rb_method_type_name(rb_method_type_t type)
Definition: gc.c:12177
#define STACKFRAME_FOR_CALL_CFUNC
Definition: gc.c:5543
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
Definition: gc.c:2513
VALUE rb_memory_id(VALUE obj)
Definition: gc.c:4077
void rb_gc(void)
Definition: gc.c:9497
#define RGENGC_OLD_NEWOBJ_CHECK
Definition: gc.c:407
#define GC_ASSERT(expr)
Definition: gc.c:398
#define SET(name, attr)
void * rb_alloc_tmp_buffer(volatile VALUE *store, long len)
Definition: gc.c:11007
#define TYPE_NAME(t)
const char * rb_objspace_data_type_name(VALUE obj)
Definition: gc.c:2534
void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
Definition: gc.c:7937
rb_objspace_t * rb_objspace_alloc(void)
Definition: gc.c:1595
#define gc_mode_set(objspace, mode)
Definition: gc.c:936
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:294
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:7968
#define GET_STACK_BOUNDS(start, end, appendix)
Definition: gc.c:5835
void * rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10941
void ruby_gc_set_params(void)
Definition: gc.c:10122
void rb_objspace_set_event_hook(const rb_event_flag_t event)
Definition: gc.c:2052
#define malloc_limit
Definition: gc.c:881
#define BITMAP_INDEX(p)
Definition: gc.c:857
void rb_gcdebug_print_obj_condition(VALUE obj)
gc_enter_event
Definition: gc.c:1008
@ gc_enter_event_finalizer
Definition: gc.c:1013
@ gc_enter_event_start
Definition: gc.c:1009
@ gc_enter_event_sweep_continue
Definition: gc.c:1011
@ gc_enter_event_rb_memerror
Definition: gc.c:1014
@ gc_enter_event_rest
Definition: gc.c:1012
@ gc_enter_event_mark_continue
Definition: gc.c:1010
#define S(s)
int ruby_get_stack_grow_direction(volatile VALUE *addr)
Definition: gc.c:5501
void * ruby_xrealloc_body(void *ptr, size_t new_size)
Definition: gc.c:10881
void rb_gc_copy_finalizer(VALUE dest, VALUE obj)
Definition: gc.c:3612
size_t rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
Definition: gc.c:215
void rb_iseq_update_references(rb_iseq_t *iseq)
Definition: iseq.c:257
#define GET_HEAP_MARK_BITS(x)
Definition: gc.c:867
#define RANY(o)
Definition: gc.c:965
void rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
Definition: gc.c:3308
int rb_objspace_markable_object_p(VALUE obj)
Definition: gc.c:3926
void rb_gc_update_tbl_refs(st_table *ptr)
Definition: gc.c:8850
bool rb_ractor_p(VALUE rv)
Definition: ractor.c:273
VALUE rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Definition: gc.c:2497
#define RGENGC_DEBUG
Definition: gc.c:375
void rb_gc_mark(VALUE ptr)
Definition: gc.c:6112
void rb_gc_verify_internal_consistency(void)
Definition: gc.c:7137
void rb_cc_table_free(VALUE klass)
Definition: gc.c:2727
#define gc_event_hook_available_p(objspace)
Definition: gc.c:2071
void ruby_malloc_size_overflow(size_t count, size_t elsize)
Definition: gc.c:10838
const char * rb_imemo_name(enum imemo_type type)
Definition: gc.c:2385
int ruby_enable_autocompact
Definition: gc.c:983
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO
Definition: gc.c:274
#define RVALUE_MARK_BITMAP(obj)
Definition: gc.c:1220
#define RZOMBIE(o)
Definition: gc.c:974
void rb_gc_mark_values(long n, const VALUE *values)
Definition: gc.c:5596
void * ruby_xmalloc(size_t size)
Allocates a storage instance.
Definition: gc.c:12795
@ gc_stress_full_mark_after_malloc
Definition: gc.c:8085
@ gc_stress_no_immediate_sweep
Definition: gc.c:8084
@ gc_stress_max
Definition: gc.c:8086
@ gc_stress_no_major
Definition: gc.c:8083
void rb_obj_info_dump(VALUE obj)
Definition: gc.c:12505
#define STACK_CHUNK_SIZE
Definition: gc.c:631
void rb_gc_writebarrier(VALUE a, VALUE b)
Definition: gc.c:7750
#define gc_prof_record(objspace)
Definition: gc.c:1080
#define RVALUE_OLD_AGE
Definition: gc.c:1232
void Init_GC(void)
Definition: gc.c:12677
#define heap_tomb
Definition: gc.c:894
#define MARK_CHECKPOINT(category)
VALUE rb_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2371
#define UPDATE_IF_MOVED(_objspace, _thing)
Definition: gc.c:1078
#define is_incremental_marking(objspace)
Definition: gc.c:944
void rb_vm_update_references(void *ptr)
Definition: vm.c:2474
memop_type
Definition: gc.c:10401
@ MEMOP_TYPE_FREE
Definition: gc.c:10403
@ MEMOP_TYPE_MALLOC
Definition: gc.c:10402
@ MEMOP_TYPE_REALLOC
Definition: gc.c:10404
#define GC_MALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:287
#define is_sweeping(objspace)
Definition: gc.c:939
int ruby_stack_grow_direction
Definition: gc.c:5499
const char * rb_obj_info(VALUE obj)
Definition: gc.c:12499
int ruby_disable_gc
Definition: gc.c:982
#define heap_pages_himem
Definition: gc.c:888
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
Definition: gc.c:2649
#define RVALUE_AGE_SHIFT
Definition: gc.c:1233
VALUE rb_define_finalizer(VALUE obj, VALUE block)
Definition: gc.c:3604
#define MARK_OBJECT_ARY_BUCKET_SIZE
Definition: gc.c:8018
#define has_sweeping_pages(heap)
Definition: gc.c:951
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
Definition: gc.c:9895
#define RGENGC_FORCE_MAJOR_GC
Definition: gc.c:433
void * ruby_xmalloc2(size_t n, size_t size)
Identical to ruby_xmalloc(), except it allocates nelems * elemsiz bytes.
Definition: gc.c:12805
gc_profile_record_flag
Definition: gc.c:470
@ GPR_FLAG_MAJOR_BY_FORCE
Definition: gc.c:476
@ GPR_FLAG_HAVE_FINALIZE
Definition: gc.c:491
@ GPR_DEFAULT_REASON
Definition: gc.c:496
@ GPR_FLAG_COMPACT
Definition: gc.c:494
@ GPR_FLAG_IMMEDIATE_SWEEP
Definition: gc.c:490
@ GPR_FLAG_MAJOR_MASK
Definition: gc.c:480
@ GPR_FLAG_NEWOBJ
Definition: gc.c:483
@ GPR_FLAG_MAJOR_BY_SHADY
Definition: gc.c:475
@ GPR_FLAG_NONE
Definition: gc.c:471
@ GPR_FLAG_FULL_MARK
Definition: gc.c:493
@ GPR_FLAG_MAJOR_BY_NOFREE
Definition: gc.c:473
@ GPR_FLAG_IMMEDIATE_MARK
Definition: gc.c:492
@ GPR_FLAG_MAJOR_BY_OLDGEN
Definition: gc.c:474
@ GPR_FLAG_CAPI
Definition: gc.c:486
@ GPR_FLAG_METHOD
Definition: gc.c:485
@ GPR_FLAG_STRESS
Definition: gc.c:487
@ GPR_FLAG_MALLOC
Definition: gc.c:484
struct rb_objspace rb_objspace_t
#define GC_OLDMALLOC_LIMIT_MIN
Definition: gc.c:291
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)
Definition: gc.c:1229
void * ruby_xrealloc(void *ptr, size_t new_size)
Resize the storage instance.
Definition: gc.c:12825
void rb_gc_unregister_address(VALUE *addr)
Inform the garbage collector that a pointer previously passed to rb_gc_register_address() no longer p...
Definition: gc.c:8052
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)
Definition: gc.c:1225
#define rb_objspace
Definition: gc.c:874
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2412
void rb_objspace_each_objects(each_obj_callback *callback, void *data)
Definition: gc.c:3285
void * ruby_xmalloc_body(size_t size)
Definition: gc.c:10829
void * ruby_xcalloc_body(size_t n, size_t size)
Definition: gc.c:10862
void rb_objspace_reachable_objects_from_root(void(func)(const char *category, VALUE, void *), void *passing_data)
Definition: gc.c:10202
#define gc_event_hook_prep(objspace, event, data, prep)
Definition: gc.c:2074
VALUE rb_gc_disable_no_rest(void)
Definition: gc.c:9910
#define nonspecial_obj_id(obj)
Definition: gc.c:955
void rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
Definition: gc.c:7848
VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2342
void rb_objspace_call_finalizer(rb_objspace_t *objspace)
Definition: gc.c:3771
gc_mode
Definition: gc.c:660
@ gc_mode_sweeping
Definition: gc.c:663
@ gc_mode_marking
Definition: gc.c:662
@ gc_mode_none
Definition: gc.c:661
#define gc_event_hook(objspace, event, data)
Definition: gc.c:2081
size_t rb_gc_stat(VALUE key)
Definition: gc.c:9853
#define GC_HEAP_GROWTH_FACTOR
Definition: gc.c:261
#define BITMAP_BIT(p)
Definition: gc.c:859
void rb_iseq_mark(const rb_iseq_t *iseq)
Definition: iseq.c:332
void rb_gc_register_address(VALUE *addr)
Inform the garbage collector that valptr points to a live Ruby object that should not be moved.
Definition: gc.c:8040
void rb_iseq_free(const rb_iseq_t *iseq)
Definition: iseq.c:105
void rb_objspace_free(rb_objspace_t *objspace)
Definition: gc.c:1610
#define heap_pages_sorted
Definition: gc.c:884
void rb_gc_writebarrier_remember(VALUE obj)
Definition: gc.c:7814
void rb_free_const_table(struct rb_id_table *tbl)
Definition: gc.c:2595
#define MARKED_IN_BITMAP(bits, p)
Definition: gc.c:862
#define finalizer_table
Definition: gc.c:897
#define gc_stress_full_mark_after_malloc_p()
Definition: gc.c:8089
#define FL_FROM_FREELIST
Definition: gc.c:543
int ruby_rgengc_debug
Definition: gc.c:383
size_t rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
Definition: gc.c:188
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
Definition: gc.c:5619
VALUE rb_objspace_gc_disable(rb_objspace_t *objspace)
Definition: gc.c:9932
void rb_malloc_info_show_results(void)
Definition: gc.c:10742
#define GET_HEAP_PINNED_BITS(x)
Definition: gc.c:868
#define heap_allocated_pages
Definition: gc.c:885
void rb_gc_register_mark_object(VALUE obj)
Inform the garbage collector that object is a live Ruby object that should not be moved.
Definition: gc.c:8022
#define RVALUE_MARKING_BITMAP(obj)
Definition: gc.c:1226
#define GC_HEAP_FREE_SLOTS_MAX_RATIO
Definition: gc.c:277
#define OPT(o)
#define ruby_gc_stressful
Definition: gc.c:899
#define C(c, s)
void Init_gc_stress(void)
Definition: gc.c:3189
void rb_gc_adjust_memory_usage(ssize_t diff)
Definition: gc.c:11062
#define GET_HEAP_PAGE(x)
Definition: gc.c:854
#define will_be_incremental_marking(objspace)
Definition: gc.c:949
#define gc_writebarrier_incremental(a, b, objspace)
Definition: gc.c:7746
#define during_gc
Definition: gc.c:895
#define dont_gc_on()
Definition: gc.c:913
VALUE rb_gc_latest_gc_info(VALUE key)
Definition: gc.c:9634
void * rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10934
#define RUBY_DTRACE_GC_HOOK(name)
Definition: gc.c:11667
#define heap_allocatable_pages
Definition: gc.c:889
void rb_free_tmp_buffer(volatile VALUE *store)
Definition: gc.c:11019
#define RESTORE_FINALIZER()
#define GC_HEAP_FREE_SLOTS
Definition: gc.c:258
struct rb_heap_struct rb_heap_t
size_t rb_obj_gc_flags(VALUE obj, ID *flags, size_t max)
Definition: gc.c:7908
#define malloc_increase
Definition: gc.c:882
size_t rb_iseq_memsize(const rb_iseq_t *iseq)
Definition: iseq.c:442
#define STACK_LEVEL_MAX
Definition: gc.c:5483
#define RMOVED(obj)
Definition: gc.c:551
size_t rb_gc_count(void)
Definition: gc.c:9529
#define is_lazy_sweeping(heap)
Definition: gc.c:952
void * ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
Definition: gc.c:10897
#define rb_jmp_buf
Definition: gc.c:120
#define rb_objspace_of(vm)
Definition: gc.c:875
#define GC_DEBUG
Definition: gc.c:361
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
Definition: gc.c:7902
int each_obj_callback(void *, void *, size_t, void *)
Definition: gc.c:3196
#define ruby_initial_gc_stress
Definition: gc.c:877
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)
Definition: gc.c:869
#define is_full_marking(objspace)
Definition: gc.c:940
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj)
Definition: gc.c:1228
struct RVALUE RVALUE
#define GC_PROFILE_MORE_DETAIL
Definition: gc.c:437
#define rb_setjmp(env)
Definition: gc.c:119
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:15
#define STACK_UPPER(x, a, b)
Definition: gc.h:92
#define CLASS_OF
Definition: globals.h:153
VALUE rb_stdout
Definition: globals.h:118
void rb_include_module(VALUE klass, VALUE module)
Definition: class.c:962
void rb_class_detach_subclasses(VALUE klass)
Definition: class.c:145
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition: class.c:797
int rb_singleton_class_internal_p(VALUE sklass)
Definition: class.c:554
VALUE rb_define_module(const char *name)
Definition: class.c:871
void rb_class_detach_module_subclasses(VALUE klass)
Definition: class.c:157
void rb_class_remove_from_module_subclasses(VALUE klass)
Definition: class.c:106
VALUE rb_define_module_under(VALUE outer, const char *name)
Definition: class.c:895
void rb_class_remove_from_super_subclasses(VALUE klass)
Definition: class.c:88
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:2296
#define OBJ_FREEZE
Definition: fl_type.h:134
#define FL_ABLE
Definition: fl_type.h:121
#define FL_TEST_RAW
Definition: fl_type.h:131
#define FL_SET
Definition: fl_type.h:128
#define FL_TEST
Definition: fl_type.h:130
#define FL_UNSET
Definition: fl_type.h:132
int ruby_stack_check(void)
Definition: gc.c:5552
size_t ruby_stack_length(VALUE **p)
Definition: gc.c:5512
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2917
VALUE rb_eNotImpError
Definition: error.c:1067
void rb_bug(const char *fmt,...)
Definition: error.c:768
VALUE rb_eNoMemError
Definition: error.c:1068
VALUE rb_eRangeError
Definition: error.c:1061
VALUE rb_eTypeError
Definition: error.c:1057
void rb_vraise(VALUE exc, const char *fmt, va_list ap)
Definition: error.c:2911
VALUE rb_eRuntimeError
Definition: error.c:1055
void rb_warn(const char *fmt,...)
Definition: error.c:408
VALUE rb_eArgError
Definition: error.c:1058
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1148
VALUE rb_errinfo(void)
The current exception in the current thread.
Definition: eval.c:1911
VALUE rb_mKernel
Kernel module.
Definition: object.c:48
VALUE rb_cObject
Object class.
Definition: object.c:49
VALUE rb_any_to_s(VALUE)
Default implementation of #to_s.
Definition: object.c:561
VALUE rb_obj_class(VALUE)
Definition: object.c:245
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
Definition: object.c:585
VALUE rb_cBasicObject
BasicObject class.
Definition: object.c:47
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
Definition: object.c:724
VALUE rb_to_int(VALUE)
Converts val into Integer.
Definition: object.c:3051
void skip(file *in, unsigned n)
Definition: gzappend.c:202
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: hash.c:1468
size_t rb_hash_ar_table_size(void)
Definition: hash.c:369
VALUE rb_hash_new_with_size(st_index_t size)
Definition: hash.c:1544
VALUE rb_hash_compare_by_id_p(VALUE hash)
Definition: hash.c:4432
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg)
Definition: hash.c:1457
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2901
VALUE rb_hash_new(void)
Definition: hash.c:1538
st_table * rb_init_identtable(void)
Definition: hash.c:4451
void *PTR64 __attribute__((mode(DI)))
Definition: ffi.c:41
@ idEq
Definition: id.h:96
#define ID_SCOPE_MASK
Definition: id.h:32
void rb_id_table_foreach_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, rb_id_table_update_callback_func_t *replace, void *data)
Definition: id_table.c:271
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
Definition: id_table.c:124
void rb_id_table_free(struct rb_id_table *tbl)
Definition: id_table.c:103
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
Definition: id_table.c:311
void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
Definition: id_table.c:292
rb_id_table_iterator_result
Definition: id_table.h:10
@ ID_TABLE_DELETE
Definition: id_table.h:13
@ ID_TABLE_REPLACE
Definition: id_table.h:14
@ ID_TABLE_CONTINUE
Definition: id_table.h:11
IMEMO: Internal memo object.
#define IMEMO_TYPE_P(v, t)
Definition: imemo.h:178
imemo_type
Definition: imemo.h:34
@ imemo_ment
Definition: imemo.h:41
@ imemo_ifunc
iterator function
Definition: imemo.h:39
@ imemo_callinfo
Definition: imemo.h:46
@ imemo_parser_strterm
Definition: imemo.h:45
@ imemo_env
Definition: imemo.h:35
@ imemo_iseq
Definition: imemo.h:42
@ imemo_callcache
Definition: imemo.h:47
@ imemo_tmpbuf
Definition: imemo.h:43
@ imemo_cref
class reference
Definition: imemo.h:36
@ imemo_svar
special variable
Definition: imemo.h:37
@ imemo_memo
Definition: imemo.h:40
@ imemo_throw_data
Definition: imemo.h:38
@ imemo_constcache
Definition: imemo.h:48
@ imemo_ast
Definition: imemo.h:44
Thin wrapper to ruby/config.h.
#define ruby_verbose
Definition: error.h:68
VALUE rb_funcall(VALUE, ID, int,...)
Calls a method.
Definition: vm_eval.c:1077
Defines RBIMPL_HAS_BUILTIN.
#define rb_ary_new3
Definition: array.h:73
#define RETURN_ENUMERATOR(obj, argc, argv)
Definition: enumerator.h:74
#define rb_check_frozen
Definition: error.h:72
#define rb_check_arity
Definition: error.h:34
VALUE rb_io_write(VALUE, VALUE)
Definition: io.c:1953
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
VALUE rb_block_proc(void)
Definition: proc.c:826
#define rb_str_cat2
Definition: string.h:285
void rb_str_free(VALUE)
Definition: string.c:1433
VALUE rb_str_buf_new(long)
Definition: string.c:1398
VALUE rb_str_append(VALUE, VALUE)
Definition: string.c:3118
VALUE rb_str_buf_append(VALUE, VALUE)
Definition: string.c:3103
#define rb_str_new_cstr(str)
Definition: string.h:219
VALUE rb_class_name(VALUE)
Definition: variable.c:293
VALUE rb_class_path_cached(VALUE)
Definition: variable.c:178
void rb_free_generic_ivar(VALUE)
Definition: variable.c:1157
int rb_obj_respond_to(VALUE, ID, int)
Definition: vm_method.c:2545
void rb_clear_constant_cache(void)
Definition: vm_method.c:127
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
Definition: vm_eval.c:619
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
#define ID2SYM
Definition: symbol.h:44
const char * rb_id2name(ID)
Definition: symbol.c:944
#define SYM2ID
Definition: symbol.h:45
VALUE rb_sym2str(VALUE)
Definition: symbol.c:927
ID rb_intern(const char *)
Definition: symbol.c:785
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:3150
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1892
#define strtod(s, e)
Definition: util.h:45
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
#define strdup(s)
Definition: util.h:39
#define FIX2INT
Definition: int.h:41
#define UINT2NUM
Definition: int.h:46
#define LL2NUM
Definition: long_long.h:30
#define ULL2NUM
Definition: long_long.h:31
#define BIGNUM_EMBED_FLAG
Definition: bignum.h:79
Internal header for Class.
#define RCLASS_CALLABLE_M_TBL(c)
Definition: class.h:84
#define RCLASS_SERIAL(c)
Definition: class.h:90
#define RCLASS_CC_TBL(c)
Definition: class.h:85
#define RCLASS_IV_TBL(c)
Definition: class.h:77
#define RCLASS_CONST_TBL(c)
Definition: class.h:78
#define RCLASS_EXT(c)
Definition: class.h:76
struct rb_classext_struct rb_classext_t
Definition: class.h:74
#define RCLASS_M_TBL(c)
Definition: class.h:80
#define RICLASS_ORIGIN_SHARED_MTBL
Definition: class.h:98
#define RICLASS_IS_ORIGIN
Definition: class.h:96
#define RCLASS_IV_INDEX_TBL(c)
Definition: class.h:86
Internal header for Complex.
Internal header for Fiber.
Internal header for GC.
#define ruby_sized_xrealloc
Definition: gc.h:164
#define ruby_sized_xrealloc2
Definition: gc.h:165
#define SIZED_REALLOC_N(v, T, m, n)
Definition: gc.h:159
#define ruby_sized_xfree
Definition: gc.h:166
Internal header for Hash.
#define RHASH(obj)
Definition: hash.h:57
@ RHASH_ST_TABLE_FLAG
Definition: hash.h:26
Internal header for IO.
void rb_io_fptr_finalize_internal(void *ptr)
Definition: io.c:4845
size_t rb_io_memsize(const rb_io_t *)
Definition: io.c:4875
Internal header for Numeric.
VALUE rb_int2str(VALUE num, int base)
Definition: numeric.c:3549
VALUE rb_int_ge(VALUE x, VALUE y)
Definition: numeric.c:4276
VALUE rb_int_plus(VALUE x, VALUE y)
Definition: numeric.c:3597
Internal header for Object.
Internal header for Proc.
VALUE rb_callable_receiver(VALUE)
Definition: proc.c:2787
VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc)
Definition: proc.c:742
Internal header for Rational.
size_t rb_str_memsize(VALUE)
Definition: string.c:1460
Internal header for Struct.
#define RSTRUCT_LEN
Definition: struct.h:52
#define RSTRUCT(obj)
Definition: struct.h:34
@ RSTRUCT_EMBED_LEN_MASK
Definition: struct.h:18
void rb_gc_free_dsymbol(VALUE)
Definition: symbol.c:803
Internal header for Thread.
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread_sync.c:142
void rb_gc_mark_global_tbl(void)
Definition: variable.c:497
void rb_mark_generic_ivar(VALUE)
Definition: variable.c:1137
size_t rb_generic_ivar_memsize(VALUE)
Definition: variable.c:1167
void rb_gc_update_global_tbl(void)
Definition: variable.c:515
void rb_mv_generic_ivar(VALUE src, VALUE dst)
Definition: variable.c:1147
void rb_print_backtrace(void)
Definition: vm_dump.c:753
void rb_vm_each_stack_value(void *ptr, void(*cb)(VALUE, void *), void *ctx)
Definition: vm.c:2501
VALUE ruby_vm_special_exception_copy(VALUE)
Definition: vm_insnhelper.c:48
VALUE rb_obj_is_thread(VALUE obj)
Definition: vm.c:3003
void rb_vm_mark(void *ptr)
Definition: vm.c:2538
const char * rb_source_location_cstr(int *pline)
Definition: vm.c:1616
#define bp()
Definition: internal.h:105
#define rp(obj)
Definition: internal.h:95
#define roomof(x, y)
Definition: internal.h:23
#define PRIuSIZE
Definition: inttypes.h:127
#define PRIxVALUE
Definition: inttypes.h:75
#define PRIdSIZE
Definition: inttypes.h:124
#define PRIuVALUE
Definition: inttypes.h:74
voidpf void uLong size
Definition: ioapi.h:138
const char * filename
Definition: ioapi.h:137
typedef long(ZCALLBACK *tell_file_func) OF((voidpf opaque
voidpf uLong offset
Definition: ioapi.h:144
typedef int(ZCALLBACK *close_file_func) OF((voidpf opaque
const char int mode
Definition: ioapi.h:137
voidpf void * buf
Definition: ioapi.h:138
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1087
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: iterator.h:31
VALUE rb_yield_values(int n,...)
Definition: vm_eval.c:1353
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1341
#define CHAR_BIT
Definition: limits.h:44
#define SIZE_MAX
Definition: limits.h:71
#define INT2FIX
Definition: long.h:48
#define ULONG2NUM
Definition: long.h:60
#define LONG2NUM
Definition: long.h:50
#define FIX2LONG
Definition: long.h:46
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]]
Definition: maybe_unused.h:35
#define T_MASK
Definition: md5.c:131
#define MEMZERO(p, type, n)
Definition: memory.h:128
#define ALLOC_N
Definition: memory.h:133
#define MEMMOVE(p1, p2, type, n)
Definition: memory.h:130
#define DSIZE_T
Definition: memory.h:81
#define METHOD_ENTRY_CACHED(me)
Definition: method.h:74
#define METHOD_ENTRY_COMPLEMENTED(me)
Definition: method.h:72
void rb_free_method_entry(const rb_method_entry_t *me)
Definition: vm_method.c:358
@ METHOD_VISI_PRIVATE
Definition: method.h:32
@ METHOD_VISI_PUBLIC
Definition: method.h:31
rb_method_type_t
Definition: method.h:109
@ VM_METHOD_TYPE_ISEQ
Ruby method.
Definition: method.h:110
@ VM_METHOD_TYPE_ATTRSET
attr_writer or attr_accessor
Definition: method.h:112
@ VM_METHOD_TYPE_CFUNC
C method.
Definition: method.h:111
@ VM_METHOD_TYPE_OPTIMIZED
Kernel::send, Proc::call, etc.
Definition: method.h:119
@ VM_METHOD_TYPE_REFINED
refinement
Definition: method.h:121
@ VM_METHOD_TYPE_NOTIMPLEMENTED
Definition: method.h:118
@ VM_METHOD_TYPE_MISSING
wrapper for method_missing(id)
Definition: method.h:120
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:114
@ VM_METHOD_TYPE_IVAR
attr_reader or attr_accessor
Definition: method.h:113
@ VM_METHOD_TYPE_ZSUPER
Definition: method.h:115
@ VM_METHOD_TYPE_ALIAS
Definition: method.h:116
@ VM_METHOD_TYPE_UNDEF
Definition: method.h:117
#define METHOD_ENTRY_INVALIDATED(me)
Definition: method.h:76
#define METHOD_ENTRY_VISI(me)
Definition: method.h:70
#define M
Definition: mt19937.c:53
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
Definition: cxxanyargs.hpp:392
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:56
const int id
Definition: nkf.c:209
const char * name
Definition: nkf.c:208
int count
Definition: nkf.c:5055
#define TRUE
Definition: nkf.h:175
#define FALSE
Definition: nkf.h:174
void rb_ast_update_references(rb_ast_t *ast)
Definition: node.c:1391
void rb_ast_mark(rb_ast_t *ast)
Definition: node.c:1401
size_t rb_ast_memsize(const rb_ast_t *ast)
Definition: node.c:1434
void rb_ast_free(rb_ast_t *ast)
Definition: node.c:1413
ONIG_EXTERN void onig_region_free(OnigRegion *region, int free_self)
Definition: regexec.c:343
ONIG_EXTERN void onig_free(OnigRegex)
#define RARRAY_AREF(a, i)
Definition: psych_emitter.c:7
void rb_ractor_finish_marking(void)
Definition: ractor.c:3215
#define RARRAY_EMBED_FLAG
Definition: rarray.h:43
#define RARRAY_LEN
Definition: rarray.h:52
#define RARRAY_CONST_PTR_TRANSIENT
Definition: rarray.h:54
#define RARRAY(obj)
Definition: rarray.h:42
#define RBASIC(obj)
Definition: rbasic.h:34
#define RBASIC_CLASS
Definition: rbasic.h:35
#define RCLASS_SUPER
Definition: rclass.h:33
#define RCLASS(obj)
Definition: rclass.h:31
#define DATA_PTR(obj)
Definition: rdata.h:56
#define RDATA(obj)
Definition: rdata.h:55
#define RUBY_DEFAULT_FREE
Definition: rdata.h:58
void(* RUBY_DATA_FUNC)(void *)
Definition: rdata.h:65
#define rb_data_object_wrap
Definition: rdata.h:177
size_t onig_region_memsize(const OnigRegion *regs)
Definition: regcomp.c:5693
size_t onig_memsize(const regex_t *reg)
Definition: regcomp.c:5678
#define NULL
Definition: regenc.h:69
#define RFILE(obj)
Definition: rfile.h:35
#define OBJ_PROMOTED
Definition: rgengc.h:119
#define USE_RGENGC
Definition: rgengc.h:37
#define RGENGC_WB_PROTECTED_OBJECT
Definition: rgengc.h:64
#define RHASH_SIZE(h)
Definition: rhash.h:50
#define RHASH_EMPTY_P(h)
Definition: rhash.h:51
void rb_strterm_mark(VALUE obj)
Definition: ripper.c:828
#define RMATCH(obj)
Definition: rmatch.h:32
#define ROBJECT_EMBED
Definition: robject.h:39
#define RREGEXP_PTR(obj)
Definition: rregexp.h:32
#define RTYPEDDATA_DATA(v)
Definition: rtypeddata.h:47
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: rtypeddata.h:130
@ RUBY_TYPED_FREE_IMMEDIATELY
Definition: rtypeddata.h:62
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: rtypeddata.h:122
const char * rb_obj_classname(VALUE)
Definition: variable.c:308
int ruby_native_thread_p(void)
Definition: thread.c:5564
int argc
Definition: ruby.c:240
char ** argv
Definition: ruby.c:241
#define ATOMIC_VALUE_EXCHANGE(var, val)
Definition: ruby_atomic.h:23
#define ATOMIC_EXCHANGE(var, val)
Definition: ruby_atomic.h:7
#define ATOMIC_SIZE_INC(var)
Definition: ruby_atomic.h:19
#define ATOMIC_PTR_EXCHANGE(var, val)
Definition: ruby_atomic.h:13
#define ATOMIC_SET(var, val)
Definition: ruby_atomic.h:14
#define ATOMIC_SIZE_CAS(var, oldval, newval)
Definition: ruby_atomic.h:16
#define ATOMIC_SIZE_ADD(var, val)
Definition: ruby_atomic.h:15
#define ATOMIC_SIZE_EXCHANGE(var, val)
Definition: ruby_atomic.h:18
Internal header for ASAN / MSAN / etc.
#define NO_SANITIZE(x, y)
Definition: sanitizers.h:61
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(x)
Definition: sanitizers.h:34
unsigned int uint32_t
Definition: sha2.h:101
rb_atomic_t cnt[RUBY_NSIG]
Definition: signal.c:508
#define lo
Definition: siphash.c:21
#define hi
Definition: siphash.c:22
#define SIZET2NUM
Definition: size_t.h:52
#define Qundef
#define SPECIAL_CONST_P
#define STATIC_SYM_P
#define Qtrue
#define RTEST
#define Qnil
#define Qfalse
#define NIL_P
#define FIXNUM_P
#define f
VALUE rb_str_catf(VALUE, const char *,...)
Definition: sprintf.c:1243
VALUE rb_sprintf(const char *,...)
Definition: sprintf.c:1203
#define realloc
Definition: st.c:172
#define calloc
Definition: st.c:171
#define malloc
Definition: st.c:170
@ ST_STOP
Definition: st.h:99
@ ST_DELETE
Definition: st.h:99
@ ST_REPLACE
Definition: st.h:99
@ ST_CONTINUE
Definition: st.h:99
unsigned long st_data_t
Definition: st.h:22
#define st_init_numtable_with_size
Definition: st.h:108
#define st_is_member(table, key)
Definition: st.h:97
#define st_numhash
Definition: st.h:166
#define st_foreach
Definition: st.h:142
#define st_init_numtable
Definition: st.h:106
#define st_lookup
Definition: st.h:128
#define st_add_direct
Definition: st.h:154
#define st_delete
Definition: st.h:118
#define st_memsize
Definition: st.h:174
#define st_insert
Definition: st.h:124
st_data_t st_index_t
Definition: st.h:50
int st_foreach_callback_func(st_data_t, st_data_t, st_data_t)
Definition: st.h:137
#define st_foreach_with_replace
Definition: st.h:140
#define st_init_table
Definition: st.h:102
#define st_free_table
Definition: st.h:156
#define st_init_strtable
Definition: st.h:110
#define st_update
Definition: st.h:136
Defines old _.
#define _(args)
Definition: stdarg.h:31
size_t strlen(const char *)
MEMO.
Definition: imemo.h:105
Definition: rarray.h:87
const VALUE shared_root
Definition: rarray.h:99
struct RArray::@95::@96 heap
union RArray::@95::@96::@97 aux
union RArray::@95 as
Definition: rbasic.h:47
const VALUE klass
Definition: rbasic.h:49
VALUE flags
Definition: rbasic.h:48
Definition: class.h:60
VALUE imag
Definition: complex.h:17
VALUE real
Definition: complex.h:16
Definition: rdata.h:67
RUBY_DATA_FUNC dmark
Definition: rdata.h:69
Definition: rfile.h:30
struct rb_io_t * fptr
Definition: rfile.h:32
Definition: numeric.h:39
Definition: hash.h:44
const VALUE ifnone
Definition: hash.h:50
Definition: rmatch.h:55
VALUE regexp
Definition: rmatch.h:59
VALUE str
Definition: rmatch.h:57
Definition: gc.c:545
VALUE dummy
Definition: gc.c:547
VALUE destination
Definition: gc.c:548
VALUE flags
Definition: gc.c:546
VALUE num
Definition: rational.h:20
VALUE den
Definition: rational.h:21
const VALUE src
Definition: rregexp.h:45
VALUE shared
Definition: rstring.h:81
union RString::@100 as
union RString::@100::@101::@102 aux
struct RString::@100::@101 heap
Definition: struct.h:23
const rb_data_type_t * type
Definition: rtypeddata.h:88
Definition: gc.c:557
rb_env_t env
Definition: gc.c:588
struct RHash hash
Definition: gc.c:571
struct RData data
Definition: gc.c:572
struct RClass klass
Definition: gc.c:566
struct RBasic basic
Definition: gc.c:564
VALUE flags
Definition: gc.c:560
rb_ast_t ast
Definition: gc.c:590
VALUE v3
Definition: gc.c:596
VALUE v1
Definition: gc.c:594
struct RMoved moved
Definition: gc.c:563
struct RBignum bignum
Definition: gc.c:575
struct RObject object
Definition: gc.c:565
union RVALUE::@82::@84 imemo
struct RComplex complex
Definition: gc.c:579
struct RArray array
Definition: gc.c:569
struct RFile file
Definition: gc.c:576
struct RFloat flonum
Definition: gc.c:567
struct RRegexp regexp
Definition: gc.c:570
VALUE v2
Definition: gc.c:595
const rb_iseq_t iseq
Definition: gc.c:587
struct RTypedData typeddata
Definition: gc.c:573
struct RVALUE::@82::@83 free
struct RVALUE::@82::@85 values
struct RString string
Definition: gc.c:568
struct RRational rational
Definition: gc.c:578
rb_cref_t cref
Definition: gc.c:581
struct RStruct rstruct
Definition: gc.c:574
union RVALUE::@82 as
struct RMatch match
Definition: gc.c:577
struct RVALUE * next
Definition: gc.c:561
Definition: gc.c:967
void(* dfree)(void *)
Definition: gc.c:970
VALUE next
Definition: gc.c:969
struct RBasic basic
Definition: gc.c:968
void * data
Definition: gc.c:971
rb_objspace_t * objspace
Definition: gc.c:2656
VALUE klass
Definition: gc.c:2657
bool alive
Definition: gc.c:2658
rb_objspace_t * objspace
Definition: gc.c:3202
each_obj_callback * callback
Definition: gc.c:3203
void * data
Definition: gc.c:3204
Definition: gzappend.c:170
struct force_finalize_list * next
Definition: gc.c:3753
Definition: gc.c:626
struct gc_list * next
Definition: gc.c:628
VALUE * varptr
Definition: gc.c:627
double gc_invoke_time
Definition: gc.c:505
size_t heap_use_size
Definition: gc.c:508
size_t moved_objects
Definition: gc.c:510
size_t heap_total_objects
Definition: gc.c:507
size_t heap_total_size
Definition: gc.c:509
double gc_time
Definition: gc.c:504
VALUE exc
Definition: gc.c:10233
va_list * ap
Definition: gc.c:10235
const char * fmt
Definition: gc.c:10234
struct heap_page_header header
Definition: gc.c:621
struct heap_page * page
Definition: gc.c:617
Definition: gc.c:825
short final_slots
Definition: gc.c:829
short total_slots
Definition: gc.c:826
bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:845
short pinned_slots
Definition: gc.c:828
unsigned int has_remembered_objects
Definition: gc.c:832
bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:844
unsigned int before_sweep
Definition: gc.c:831
RVALUE * freelist
Definition: gc.c:839
bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:846
bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:849
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:842
struct list_node page_node
Definition: gc.c:840
RVALUE * start
Definition: gc.c:838
unsigned int has_uncollectible_shady_objects
Definition: gc.c:833
struct heap_page * free_next
Definition: gc.c:837
unsigned int in_tomb
Definition: gc.c:834
struct heap_page::@93 flags
short free_slots
Definition: gc.c:827
Definition: vm_core.h:222
VALUE value
Definition: vm_core.h:225
size_t size
Definition: gc.c:10506
Definition: gc.c:638
stack_chunk_t * chunk
Definition: gc.c:639
size_t unused_cache_size
Definition: gc.c:644
int index
Definition: gc.c:641
stack_chunk_t * cache
Definition: gc.c:640
size_t cache_size
Definition: gc.c:643
int limit
Definition: gc.c:642
rb_objspace_t * objspace
Definition: gc.c:8343
VALUE of
Definition: gc.c:3315
size_t num
Definition: gc.c:3314
struct heap_page * using_page
Definition: gc.h:66
struct RVALUE * freelist
Definition: gc.h:65
Definition: method.h:62
ID called_id
Definition: method.h:66
const VALUE klass
Definition: vm_callinfo.h:278
const struct rb_callable_method_entry_struct *const cme_
Definition: vm_callinfo.h:283
const struct rb_callcache * cc
Definition: vm_callinfo.h:440
const struct rb_callinfo * ci
Definition: vm_callinfo.h:439
const struct rb_callable_method_entry_struct * cme
Definition: vm_callinfo.h:437
struct rb_class_cc_entries::rb_class_cc_entries_entry * entries
struct st_table * iv_index_tbl
Definition: class.h:35
struct rb_subclass_entry * subclasses
Definition: class.h:43
const VALUE origin_
Definition: class.h:54
const VALUE refined_class
Definition: class.h:55
Definition: constant.h:33
VALUE value
Definition: constant.h:36
VALUE file
Definition: constant.h:37
const VALUE * pc
Definition: vm_core.h:770
CREF (Class REFerence)
Definition: method.h:44
struct rb_data_type_struct::@103 function
RUBY_DATA_FUNC dcompact
Definition: rtypeddata.h:76
RUBY_DATA_FUNC dmark
Definition: rtypeddata.h:73
rb_control_frame_t * cfp
Definition: vm_core.h:858
size_t total_pages
Definition: gc.c:656
struct heap_page * free_pages
Definition: gc.c:648
struct heap_page * sweeping_page
Definition: gc.c:650
struct list_head pages
Definition: gc.c:649
struct heap_page * compact_cursor
Definition: gc.c:651
size_t total_slots
Definition: gc.c:657
size_t compact_cursor_index
Definition: gc.c:652
struct rb_imemo_tmpbuf_struct * next
Definition: imemo.h:97
VALUE ecopts
Definition: io.h:86
Definition: io.h:61
struct rb_io_t::rb_io_enc_t encs
VALUE writeconv_asciicompat
Definition: io.h:93
VALUE pathv
Definition: io.h:69
VALUE write_lock
Definition: io.h:98
VALUE self
Definition: io.h:62
VALUE writeconv_pre_ecopts
Definition: io.h:96
VALUE tied_io_for_writing
Definition: io.h:74
rb_iseq_location_t location
Definition: vm_core.h:393
struct rb_iseq_constant_body * body
Definition: vm_core.h:448
Definition: class.h:28
VALUE class_value
Definition: class.h:31
rb_method_iseq_t iseq
Definition: method.h:179
union rb_method_definition_struct::@123 body
Definition: method.h:54
ID called_id
Definition: method.h:58
struct rb_method_definition_struct *const def
Definition: method.h:57
VALUE defined_class
Definition: method.h:56
VALUE owner
Definition: method.h:59
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition: method.h:135
size_t total_freed_objects
Definition: gc.c:762
size_t uncollectible_wb_unprotected_objects_limit
Definition: gc.c:775
size_t heap_used_at_gc_start
Definition: gc.c:758
size_t last_major_gc
Definition: gc.c:773
st_table * finalizer_table
Definition: gc.c:718
struct rb_objspace::@86 malloc_params
size_t major_gc_count
Definition: gc.c:734
rb_event_flag_t hook_events
Definition: gc.c:691
double invoke_time
Definition: gc.c:731
VALUE parent_object
Definition: gc.c:771
size_t marked_slots
Definition: gc.c:703
size_t moved_count_table[T_MASK]
Definition: gc.c:792
gc_profile_record * records
Definition: gc.c:723
unsigned int during_minor_gc
Definition: gc.c:685
struct gc_list * global_list
Definition: gc.c:766
st_table * obj_to_id_tbl
Definition: gc.c:804
size_t uncollectible_wb_unprotected_objects
Definition: gc.c:774
unsigned int gc_stressful
Definition: gc.c:683
size_t old_objects
Definition: gc.c:776
gc_profile_record * current_record
Definition: gc.c:724
struct rb_objspace::@92 rcompactor
rb_heap_t tomb_heap
Definition: gc.c:696
VALUE deferred_final
Definition: gc.c:715
mark_stack_t mark_stack
Definition: gc.c:702
struct rb_objspace::@87 flags
double gc_sweep_start_time
Definition: gc.c:756
size_t total_freed_pages
Definition: gc.c:764
struct heap_page ** sorted
Definition: gc.c:706
size_t read_barrier_faults
Definition: gc.c:736
struct rb_objspace::@91 rgengc
size_t sorted_length
Definition: gc.c:709
unsigned int dont_gc
Definition: gc.c:679
unsigned int immediate_sweep
Definition: gc.c:678
size_t old_objects_limit
Definition: gc.c:777
int latest_gc_info
Definition: gc.c:722
VALUE next_object_id
Definition: gc.c:693
unsigned int has_hook
Definition: gc.c:684
VALUE gc_stress_mode
Definition: gc.c:768
rb_atomic_t finalizing
Definition: gc.c:699
size_t total_moved
Definition: gc.c:793
unsigned int during_compacting
Definition: gc.c:682
size_t count
Definition: gc.c:761
size_t allocatable_pages
Definition: gc.c:708
size_t allocated_pages
Definition: gc.c:707
rb_heap_t eden_heap
Definition: gc.c:695
size_t considered_count_table[T_MASK]
Definition: gc.c:791
size_t total_allocated_objects_at_gc_start
Definition: gc.c:757
size_t freeable_pages
Definition: gc.c:711
int need_major_gc
Definition: gc.c:772
size_t final_slots
Definition: gc.c:714
unsigned int during_gc
Definition: gc.c:681
size_t size
Definition: gc.c:726
size_t minor_gc_count
Definition: gc.c:733
size_t limit
Definition: gc.c:668
size_t total_allocated_pages
Definition: gc.c:763
st_table * id_to_obj_tbl
Definition: gc.c:803
size_t next_index
Definition: gc.c:725
size_t compact_count
Definition: gc.c:735
unsigned int mode
Definition: gc.c:677
size_t increase
Definition: gc.c:669
size_t total_allocated_objects
Definition: gc.c:692
struct rb_objspace::@90 profile
unsigned int dont_incremental
Definition: gc.c:680
int run
Definition: gc.c:721
uint32_t id
Definition: vm_core.h:1990
void(* mark_func)(VALUE v, void *data)
Definition: ractor_core.h:147
struct rb_ractor_pub pub
Definition: ractor_core.h:83
struct rb_ractor_struct::gc_mark_func_data_struct * mfd
rb_ractor_newobj_cache_t newobj_cache
Definition: ractor_core.h:142
Definition: class.h:23
VALUE klass
Definition: class.h:24
struct rb_subclass_entry * next
Definition: class.h:25
VALUE self
Definition: vm_core.h:565
int num_regs
Definition: onigmo.h:718
Definition: rmatch.h:48
int char_offset_num_allocated
Definition: rmatch.h:52
struct rmatch_offset * char_offset
Definition: rmatch.h:51
struct re_registers regs
Definition: rmatch.h:49
void(* func)(const char *category, VALUE, void *)
Definition: gc.c:10190
const char * category
Definition: gc.c:10189
void * data
Definition: gc.c:10191
double oldmalloc_limit_growth_factor
Definition: gc.c:330
VALUE gc_stress
Definition: gc.c:332
size_t malloc_limit_max
Definition: gc.c:325
size_t growth_max_slots
Definition: gc.c:317
size_t malloc_limit_min
Definition: gc.c:324
double growth_factor
Definition: gc.c:316
size_t heap_init_slots
Definition: gc.c:314
double heap_free_slots_max_ratio
Definition: gc.c:321
double heap_free_slots_goal_ratio
Definition: gc.c:320
double malloc_limit_growth_factor
Definition: gc.c:326
double oldobject_limit_factor
Definition: gc.c:322
double heap_free_slots_min_ratio
Definition: gc.c:319
size_t heap_free_slots
Definition: gc.c:315
size_t oldmalloc_limit_min
Definition: gc.c:328
size_t oldmalloc_limit_max
Definition: gc.c:329
Definition: st.h:79
st_index_t num_entries
Definition: st.h:86
struct stack_chunk * next
Definition: gc.c:635
VALUE data[STACK_CHUNK_SIZE]
Definition: gc.c:634
Definition: blast.c:41
Definition: enough.c:118
long tv_nsec
Definition: missing.h:64
time_t tv_sec
Definition: missing.h:63
long tv_usec
Definition: missing.h:53
time_t tv_sec
Definition: missing.h:52
rb_objspace_t * objspace
Definition: gc.c:6816
IFUNC (Internal FUNCtion)
Definition: imemo.h:85
SVAR (Special VARiable)
Definition: imemo.h:54
THROW_DATA.
Definition: imemo.h:63
Definition: gc.c:11077
st_table * obj2wmap
Definition: gc.c:11078
st_table * wmap2obj
Definition: gc.c:11079
VALUE final
Definition: gc.c:11080
VALUE value
Definition: gc.c:11255
rb_objspace_t * objspace
Definition: gc.c:11254
#define vsnprintf
Definition: subst.h:15
#define snprintf
Definition: subst.h:14
#define t
Definition: symbol.c:253
#define RSYMBOL(obj)
Definition: symbol.h:33
#define rb_transient_heap_finish_marking()
#define rb_transient_heap_promote(obj)
#define rb_transient_heap_verify()
#define rb_transient_heap_mark(obj, ptr)
#define rb_transient_heap_update_references()
#define rb_transient_heap_start_marking(full_marking)
void error(const char *msg)
Definition: untgz.c:593
#define ALLOC(size)
Definition: unzip.c:112
unsigned long VALUE
Definition: value.h:38
#define SIGNED_VALUE
Definition: value.h:40
#define SIZEOF_VALUE
Definition: value.h:41
unsigned long ID
Definition: value.h:39
#define T_COMPLEX
Definition: value_type.h:58
#define TYPE(_)
Definition: value_type.h:105
#define T_FILE
Definition: value_type.h:61
#define T_STRING
Definition: value_type.h:77
#define T_NIL
Definition: value_type.h:71
#define T_FLOAT
Definition: value_type.h:63
#define T_IMEMO
Definition: value_type.h:66
#define T_BIGNUM
Definition: value_type.h:56
#define T_STRUCT
Definition: value_type.h:78
#define T_FIXNUM
Definition: value_type.h:62
#define T_DATA
Definition: value_type.h:59
#define T_NONE
Definition: value_type.h:73
#define T_NODE
Definition: value_type.h:72
#define T_MODULE
Definition: value_type.h:69
#define T_TRUE
Definition: value_type.h:80
#define T_RATIONAL
Definition: value_type.h:75
#define T_ICLASS
Definition: value_type.h:65
#define T_HASH
Definition: value_type.h:64
#define T_FALSE
Definition: value_type.h:60
#define T_UNDEF
Definition: value_type.h:81
#define DYNAMIC_SYM_P
Definition: value_type.h:85
#define T_ZOMBIE
Definition: value_type.h:82
#define T_ARRAY
Definition: value_type.h:55
#define T_OBJECT
Definition: value_type.h:74
#define T_SYMBOL
Definition: value_type.h:79
#define T_MATCH
Definition: value_type.h:68
#define T_CLASS
Definition: value_type.h:57
#define BUILTIN_TYPE
Definition: value_type.h:84
#define T_MOVED
Definition: value_type.h:70
ruby_value_type
C-level type of an object.
Definition: value_type.h:110
@ RUBY_T_MASK
Definition: value_type.h:142
#define SYMBOL_P
Definition: value_type.h:87
#define T_REGEXP
Definition: value_type.h:76
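The T_* tags and BUILTIN_TYPE above are the raw type codes gc.c switches over for every heap slot during marking, sweeping, and compaction. A small dispatch sketch in that style; the returned labels are illustrative:

#include "ruby/ruby.h"

/* Dispatch on the type tag, in the style of gc.c's mark/free switches.
 * `obj` must be a heap object: BUILTIN_TYPE is invalid on immediates
 * such as Fixnums, so real callers check SPECIAL_CONST_P first. */
static const char *
builtin_type_name_sketch(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_NONE:   return "empty slot";
      case T_ZOMBIE: return "zombie (awaiting finalizer)";
      case T_MOVED:  return "moved by compaction";
      case T_ARRAY:  return "array";
      case T_STRING: return "string";
      default:       return "other";
    }
}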
rb_ractor_t * ruby_single_main_ractor
Definition: vm.c:381
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:830
#define rb_id2str(id)
Definition: vm_backtrace.c:30
#define TAG_RAISE
Definition: vm_core.h:204
#define TAG_NONE
Definition: vm_core.h:198
ruby_tag_type
Definition: vm_core.h:185
#define rb_vm_register_special_exception(sp, e, m)
Definition: vm_core.h:1720
@ block_type_iseq
Definition: vm_core.h:754
#define VM_ASSERT(expr)
Definition: vm_core.h:61
#define VM_ENV_DATA_INDEX_ENV
Definition: vm_core.h:1211
@ ruby_error_nomemory
Definition: vm_core.h:496
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2001
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:56
#define VM_UNREACHABLE(func)
Definition: vm_core.h:62
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1202
#define RUBY_DEBUG_LOG(fmt,...)
Definition: vm_debug.h:112
void rb_vm_barrier(void)
Definition: vm_sync.c:229
#define RB_VM_LOCK_LEAVE_CR_LEV(cr, levp)
Definition: vm_sync.h:117
#define RB_VM_LOCK_ENTER_CR_LEV(cr, levp)
Definition: vm_sync.h:116
#define RB_VM_LOCK_ENTER_NO_BARRIER()
Definition: vm_sync.h:125
#define ASSERT_vm_locking()
Definition: vm_sync.h:134
#define RB_VM_LOCK_LEAVE_NO_BARRIER()
Definition: vm_sync.h:126
#define RB_VM_LOCK_ENTER()
Definition: vm_sync.h:121
#define RB_VM_LOCK_ENTER_LEV(levp)
Definition: vm_sync.h:118
#define RB_VM_LOCK_LEAVE()
Definition: vm_sync.h:122
#define RB_VM_LOCK_LEAVE_LEV(levp)
Definition: vm_sync.h:119
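The RB_VM_LOCK_ENTER/RB_VM_LOCK_LEAVE macros above always appear as a bracket pair: gc.c takes the VM lock to stop racing ractors before mutating global GC state. A minimal usage sketch; the body comment stands in for real work:

#include "vm_sync.h"   /* RB_VM_LOCK_ENTER / RB_VM_LOCK_LEAVE */

static void
mutate_global_gc_state_sketch(void)
{
    RB_VM_LOCK_ENTER();
    {
        /* critical section: code here cannot run concurrently with
         * other ractors; gc.c wraps heap-wide mutations this way */
    }
    RB_VM_LOCK_LEAVE();
}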
Internal header to suppress / mandate warnings.
#define getenv(name)
Definition: win32.c:80
int err
Definition: win32.c:142
#define env
int intptr_t
Definition: win32.h:90
unsigned int uintptr_t
Definition: win32.h:106
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4668
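gc.c's timing helpers sample clocks via clock_gettime where the platform provides it; the win32.c entry above is Ruby's Windows fallback implementation. A minimal monotonic-interval sketch:

#include <time.h>

/* Return nanoseconds elapsed across the measured region, using the
 * monotonic clock so wall-clock adjustments cannot skew the result. */
static long long
elapsed_ns_sketch(void)
{
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    /* ... work being measured ... */
    clock_gettime(CLOCK_MONOTONIC, &t1);
    return (t1.tv_sec - t0.tv_sec) * 1000000000LL + (t1.tv_nsec - t0.tv_nsec);
}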
if ((ID)(DISPID)nameid != nameid)
Definition: win32ole.c:357
IUnknown DWORD
Definition: win32ole.c:33
#define xfree
Definition: xmalloc.h:49
#define xmalloc
Definition: xmalloc.h:44
#define xcalloc
Definition: xmalloc.h:46
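xmalloc/xcalloc/xfree above are Ruby's GC-aware allocators: sizes are accounted against malloc_limit (so heavy allocation can trigger GC) and failure raises NoMemoryError instead of returning NULL. A hedged usage sketch for extension-style code; the payload struct is hypothetical:

#include "ruby/ruby.h"

struct payload_sketch {    /* hypothetical extension-local struct */
    long *items;
    long len;
};

static struct payload_sketch *
payload_new_sketch(long len)
{
    struct payload_sketch *p = xmalloc(sizeof(*p));
    p->items = xcalloc(len, sizeof(long));  /* zeroed, overflow-checked */
    p->len = len;
    return p;              /* no NULL check needed: xmalloc raises instead */
}

static void
payload_free_sketch(struct payload_sketch *p)
{
    xfree(p->items);
    xfree(p);
}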
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
Definition: zlib.c:24
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
Definition: zlib.c:25
int def(FILE *source, FILE *dest, int level)
Definition: zpipe.c:36