Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
vm_core.h
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
20// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
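// With the default of 0, VM_CHECK_MODE therefore follows RUBY_DEBUG; building with
// VM_CHECK_MODE defined to 1 (e.g. via CFLAGS) forces the extra VM assertions on
// regardless of RUBY_DEBUG.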
24#endif
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#if VM_CHECK_MODE > 0
57#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
58#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
59
60#else
61#define VM_ASSERT(expr) ((void)0)
62#define VM_UNREACHABLE(func) UNREACHABLE
63#endif
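// Usage sketch: VM_ASSERT guards internal invariants only in check-mode builds and compiles
// away otherwise; VM_UNREACHABLE marks impossible code paths. Both appear later in this file:
//
//   VM_ASSERT(FIXNUM_P(flags));      /* no-op unless VM_CHECK_MODE > 0 */
//   VM_UNREACHABLE(vm_block_iseq);   /* rb_bug("vm_block_iseq: unreachable") in check-mode builds */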
64
65#include <setjmp.h>
66
68#include "ccan/list/list.h"
69#include "id.h"
70#include "internal.h"
71#include "internal/array.h"
72#include "internal/serial.h"
73#include "internal/vm.h"
74#include "method.h"
75#include "node.h"
76#include "ruby/ruby.h"
77#include "ruby/st.h"
78#include "ruby_atomic.h"
79#include "vm_opts.h"
80
81#include "ruby/thread_native.h"
82#if defined(_WIN32)
83#include "thread_win32.h"
84#elif defined(HAVE_PTHREAD_H)
85#include "thread_pthread.h"
86#endif
87
88#define RUBY_VM_THREAD_MODEL 2
89
90/*
91 * implementation selector of get_insn_info algorithm
92 * 0: linear search
93 * 1: binary search
94 * 2: succinct bitvector
95 */
96#ifndef VM_INSN_INFO_TABLE_IMPL
97# define VM_INSN_INFO_TABLE_IMPL 2
98#endif
99
100#if defined(NSIG_MAX) /* POSIX issue 8 */
101# undef NSIG
102# define NSIG NSIG_MAX
103#elif defined(_SIG_MAXSIG) /* FreeBSD */
104# undef NSIG
105# define NSIG _SIG_MAXSIG
106#elif defined(_SIGMAX) /* QNX */
107# define NSIG (_SIGMAX + 1)
108#elif defined(NSIG) /* 99% of everything else */
109# /* take it */
110#else /* Last resort */
111# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
112#endif
113
114#define RUBY_NSIG NSIG
115
116#if defined(SIGCLD)
117# define RUBY_SIGCHLD (SIGCLD)
118#elif defined(SIGCHLD)
119# define RUBY_SIGCHLD (SIGCHLD)
120#else
121# define RUBY_SIGCHLD (0)
122#endif
123
124/* platforms with broken or non-existent SIGCHLD work by polling */
125#if defined(__APPLE__)
126# define SIGCHLD_LOSSY (1)
127#else
128# define SIGCHLD_LOSSY (0)
129#endif
130
131/* define to 0 to test old code path */
132#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
133
134#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
135# define USE_SIGALTSTACK
136void *rb_allocate_sigaltstack(void);
137void *rb_register_sigaltstack(void *);
138# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
139# define RB_ALTSTACK_FREE(var) free(var)
140# define RB_ALTSTACK(var) var
141#else /* noop */
142# define RB_ALTSTACK_INIT(var, altstack)
143# define RB_ALTSTACK_FREE(var)
144# define RB_ALTSTACK(var) (0)
145#endif
146
147/*****************/
148/* configuration */
149/*****************/
150
151/* gcc ver. check */
152#if defined(__GNUC__) && __GNUC__ >= 2
153
154#if OPT_TOKEN_THREADED_CODE
155#if OPT_DIRECT_THREADED_CODE
156#undef OPT_DIRECT_THREADED_CODE
157#endif
158#endif
159
160#else /* defined(__GNUC__) && __GNUC__ >= 2 */
161
162/* disable threaded code options */
163#if OPT_DIRECT_THREADED_CODE
164#undef OPT_DIRECT_THREADED_CODE
165#endif
166#if OPT_TOKEN_THREADED_CODE
167#undef OPT_TOKEN_THREADED_CODE
168#endif
169#endif
170
171/* call threaded code */
172#if OPT_CALL_THREADED_CODE
173#if OPT_DIRECT_THREADED_CODE
174#undef OPT_DIRECT_THREADED_CODE
175#endif /* OPT_DIRECT_THREADED_CODE */
176#if OPT_STACK_CACHING
177#undef OPT_STACK_CACHING
178#endif /* OPT_STACK_CACHING */
179#endif /* OPT_CALL_THREADED_CODE */
180
182typedef unsigned long rb_num_t;
183typedef signed long rb_snum_t;
184
195 RUBY_TAG_MASK = 0xf
197
198#define TAG_NONE RUBY_TAG_NONE
199#define TAG_RETURN RUBY_TAG_RETURN
200#define TAG_BREAK RUBY_TAG_BREAK
201#define TAG_NEXT RUBY_TAG_NEXT
202#define TAG_RETRY RUBY_TAG_RETRY
203#define TAG_REDO RUBY_TAG_REDO
204#define TAG_RAISE RUBY_TAG_RAISE
205#define TAG_THROW RUBY_TAG_THROW
206#define TAG_FATAL RUBY_TAG_FATAL
207#define TAG_MASK RUBY_TAG_MASK
208
213
214/* forward declarations */
215struct rb_thread_struct;
217
218/* iseq data type */
220
221// imemo_constcache
224
226 const rb_cref_t *ic_cref; // v1
228 // v3
229};
230
233};
234
237};
238
240 struct {
246};
247
249 const struct rb_callinfo *ci;
250 const struct rb_callcache *cc;
253 int argc;
255};
256
258
259#if 1
260#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
261#else
262#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
263#endif
264#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
265
267 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
268 VALUE base_label; /* String */
269 VALUE label; /* String */
270 VALUE first_lineno; /* TODO: may be unsigned short */
274
275#define PATHOBJ_PATH 0
276#define PATHOBJ_REALPATH 1
277
278static inline VALUE
279pathobj_path(VALUE pathobj)
280{
281 if (RB_TYPE_P(pathobj, T_STRING)) {
282 return pathobj;
283 }
284 else {
285 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
286 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
287 }
288}
289
290static inline VALUE
291pathobj_realpath(VALUE pathobj)
292{
293 if (RB_TYPE_P(pathobj, T_STRING)) {
294 return pathobj;
295 }
296 else {
297 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
298 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
299 }
300}
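/* Sketch of how the two representations normalize through the accessors above
 * (rb_iseq_pathobj_new is declared further down in this file):
 *
 *   VALUE pathobj = rb_iseq_pathobj_new(path, realpath); // String, or frozen [path, realpath]
 *   pathobj_path(pathobj);     // => path in either form
 *   pathobj_realpath(pathobj); // => realpath (the String form is its own realpath)
 */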
301
302/* Forward declarations */
303struct rb_mjit_unit;
304
316 } type; /* instruction sequence type */
317
318 unsigned int iseq_size;
319 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
320
344 struct {
345 struct {
346 unsigned int has_lead : 1;
347 unsigned int has_opt : 1;
348 unsigned int has_rest : 1;
349 unsigned int has_post : 1;
350 unsigned int has_kw : 1;
351 unsigned int has_kwrest : 1;
352 unsigned int has_block : 1;
353
354 unsigned int ambiguous_param0 : 1; /* {|a|} */
355 unsigned int accepts_no_kwarg : 1;
356 unsigned int ruby2_keywords: 1;
358
359 unsigned int size;
360
367
368 const VALUE *opt_table; /* (opt_num + 1) entries. */
369 /* opt_num and opt_table:
370 *
371 * def foo o1=e1, o2=e2, ..., oN=eN
372 * #=>
373 * # prologue code
374 * A1: e1
375 * A2: e2
376 * ...
377 * AN: eN
378 * AL: body
379 * opt_num = N
380 * opt_table = [A1, A2, ..., AN, AL]
381 */
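 /* In other words, when k of the N optional arguments are supplied, evaluation
  * starts at opt_table[k]; opt_table[N] (== AL) jumps straight to the body. */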
382
383 const struct rb_iseq_param_keyword {
384 int num;
387 int rest_start;
388 const ID *table;
392
394
395 /* insn info, must be freed */
398 unsigned int *positions;
399 unsigned int size;
400#if VM_INSN_INFO_TABLE_IMPL == 2
401 struct succ_index_table *succ_index_table;
402#endif
404
405 const ID *local_table; /* must free */
406
407 /* catch table */
408 struct iseq_catch_table *catch_table;
409
410 /* for child iseq */
412 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
413
415 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
416
417 struct {
423
424 unsigned int local_table_size;
425 unsigned int is_size;
426 unsigned int ci_size;
427 unsigned int stack_max; /* for stack overflow check */
428
429 char catch_except_p; /* set to TRUE if a frame of this ISeq may catch an exception */
430 bool builtin_inline_p; // This ISeq's builtin function is safe for MJIT to inline
432
433#if USE_MJIT
434 /* The following fields are MJIT related info. */
435 VALUE (*jit_func)(struct rb_execution_context_struct *,
436 struct rb_control_frame_struct *); /* function pointer for loaded native code */
437 long unsigned total_calls; /* total number of calls made via `mjit_exec()` */
438 struct rb_mjit_unit *jit_unit;
439#endif
440};
441
442/* T_IMEMO/iseq */
443/* typedef rb_iseq_t is in method.h */
445 VALUE flags; /* 1 */
446 VALUE wrapper; /* 2 */
447
449
450 union { /* 4, 5 words */
451 struct iseq_compile_data *compile_data; /* used at compile time */
452
453 struct {
455 int index;
456 } loader;
457
458 struct {
461 } exec;
463};
464
465#ifndef USE_LAZY_LOAD
466#define USE_LAZY_LOAD 0
467#endif
468
469#if USE_LAZY_LOAD
470const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
471#endif
472
473static inline const rb_iseq_t *
474rb_iseq_check(const rb_iseq_t *iseq)
475{
476#if USE_LAZY_LOAD
477 if (iseq->body == NULL) {
478 rb_iseq_complete((rb_iseq_t *)iseq);
479 }
480#endif
481 return iseq;
482}
483
484static inline const rb_iseq_t *
485def_iseq_ptr(rb_method_definition_t *def)
486{
487// TODO: revisit. To check for the bug, enable this assertion.
488#if VM_CHECK_MODE > 0
489 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
490#endif
491 return rb_iseq_check(def->body.iseq.iseqptr);
492}
493
502
533
536
537#define GetVMPtr(obj, ptr) \
538 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
539
540struct rb_vm_struct;
541typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
542
543typedef struct rb_at_exit_list {
547
548struct rb_objspace;
549struct rb_objspace *rb_objspace_alloc(void);
550void rb_objspace_free(struct rb_objspace *);
552
553typedef struct rb_hook_list_struct {
556 unsigned int need_clean;
557 unsigned int running;
559
560
561// see builtin.h for definition
562typedef const struct rb_builtin_function *RB_BUILTIN;
563
564typedef struct rb_vm_struct {
566
567 struct {
568 struct list_head set;
569 unsigned int cnt;
570 unsigned int blocking_cnt;
571
573 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
574
575 struct {
576 // monitor
577 rb_nativethread_lock_t lock;
579 unsigned int lock_rec;
580
581 // barrier
583 unsigned int barrier_cnt;
585
586 // join at exit
591
592#ifdef USE_SIGALTSTACK
593 void *main_altstack;
594#endif
595
597 rb_nativethread_lock_t waitpid_lock;
598 struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
599 struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
600 struct list_head waiting_fds; /* <=> struct waiting_fd */
601
602 /* set in single-threaded processes only: */
603 volatile int ubf_async_safe;
604
605 unsigned int running: 1;
608 unsigned int thread_ignore_deadlock: 1;
609
610 /* object management */
613
614 /* load */
624
625 /* signal */
626 struct {
629
630 /* relation table of ensure - rollback for callcc */
632
633 /* postponed_job (async-signal-safe, NOT thread-safe) */
636
638
639 /* workqueue (thread-safe, NOT async-signal-safe) */
640 struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
641 rb_nativethread_lock_t workqueue_lock;
642
646
648
650
652
655
658
660
661#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
662#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
663#endif
665
666#if USE_VM_CLOCK
667 uint32_t clock;
668#endif
669
670 /* params */
671 struct { /* size in byte */
677
680
681/* default values */
682
683#define RUBY_VM_SIZE_ALIGN 4096
684
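/* In the size comments below, the two figures correspond to sizeof(VALUE) == 4 and == 8. */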
685#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
686#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
687#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
688#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
689
690#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
691#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
692#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
693#if defined(__powerpc64__)
694#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
695#else
696#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
697#endif
698
699#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
700/* Sanitizers seem to consume A LOT of machine stack space */
701#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
702#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
703#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
704#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
705#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
706#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
707#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
708#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
709#endif
710
711/* optimize insn */
712#define INTEGER_REDEFINED_OP_FLAG (1 << 0)
713#define FLOAT_REDEFINED_OP_FLAG (1 << 1)
714#define STRING_REDEFINED_OP_FLAG (1 << 2)
715#define ARRAY_REDEFINED_OP_FLAG (1 << 3)
716#define HASH_REDEFINED_OP_FLAG (1 << 4)
717/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
718#define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
719#define TIME_REDEFINED_OP_FLAG (1 << 7)
720#define REGEXP_REDEFINED_OP_FLAG (1 << 8)
721#define NIL_REDEFINED_OP_FLAG (1 << 9)
722#define TRUE_REDEFINED_OP_FLAG (1 << 10)
723#define FALSE_REDEFINED_OP_FLAG (1 << 11)
724#define PROC_REDEFINED_OP_FLAG (1 << 12)
725
726#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
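/* e.g. BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG) stays true as long as
 * Integer#+ has not been redefined, letting specialized instructions keep their fast path. */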
727
728#ifndef VM_DEBUG_BP_CHECK
729#define VM_DEBUG_BP_CHECK 0
730#endif
731
732#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
733#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
734#endif
735
738 const VALUE *ep;
739 union {
741 const struct vm_ifunc *ifunc;
744};
745
752
759
760struct rb_block {
761 union {
765 } as;
767};
768
770 const VALUE *pc; /* cfp[0] */
771 VALUE *sp; /* cfp[1] */
772 const rb_iseq_t *iseq; /* cfp[2] */
773 VALUE self; /* cfp[3] / block[0] */
774 const VALUE *ep; /* cfp[4] / block[1] */
775 const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
776 VALUE *__bp__; /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
777
778#if VM_DEBUG_BP_CHECK
779 VALUE *bp_check; /* cfp[7] */
780#endif
782
784
785static inline struct rb_thread_struct *
786rb_thread_ptr(VALUE thval)
787{
789}
790
797
798#ifdef RUBY_JMP_BUF
799typedef RUBY_JMP_BUF rb_jmpbuf_t;
800#else
801typedef void *rb_jmpbuf_t[5];
802#endif
803
804/*
805 the members that are written in EC_PUSH_TAG() should be placed at
806 the beginning and the end, so that the entire region is accessible.
807*/
808struct rb_vm_tag {
814 unsigned int lock_rec;
815};
816
817STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
818STATIC_ASSERT(rb_vm_tag_buf_end,
819 offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
820 sizeof(struct rb_vm_tag));
821
824};
825
828 void *arg;
829};
830
831struct rb_mutex_struct;
832
833typedef struct rb_ensure_entry {
838
839typedef struct rb_ensure_list {
843
844typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
845
847
852};
853
855 /* execution information */
856 VALUE *vm_stack; /* must free, must mark */
857 size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
859
860 struct rb_vm_tag *tag;
862
863 /* interrupt flags */
864 rb_atomic_t interrupt_flag;
865 rb_atomic_t interrupt_mask; /* size should match flag */
866#if USE_VM_CLOCK
867 uint32_t checked_clock;
868#endif
869
872
873 /* storage (ec (fiber) local) */
877
878 /* eval env */
881
882 /* ensure & callcc */
884
885 /* trace information */
887
888 /* temporary places */
890 VALUE passed_block_handler; /* for rb_iterate */
891
892 uint8_t raised_flag; /* only 3 bits needed */
893
894 /* n.b. only 7 bits needed, really: */
896
898
899 /* for GC */
900 struct {
904 RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
906};
907
908#ifndef rb_execution_context_t
910#define rb_execution_context_t rb_execution_context_t
911#endif
912
913// for builtin.h
914#define VM_CORE_H_EC_DEFINED 1
915
916// Set the vm_stack pointer in the execution context.
917void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
918
919// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
920// @param ec the execution context to update.
921// @param stack a pointer to the stack to use.
922// @param size the size of the stack, as in `VALUE stack[size]`.
924
925// Clear (set to `NULL`) the vm_stack pointer.
926// @param ec the execution context to update.
928
931};
932
934
935typedef struct rb_thread_struct {
936 struct list_node lt_node; // managed by a ractor
940
942
944
945 /* for cfunc */
947
948 /* for load(true) */
951
952 /* thread control */
953 rb_nativethread_id_t thread_id;
954#ifdef NON_SCALAR_THREAD_ID
955 rb_thread_id_string_t thread_id_string;
956#endif
957 BITFIELD(enum rb_thread_status, status, 2);
958 /* bit flags */
959 unsigned int to_kill : 1;
960 unsigned int abort_on_exception: 1;
961 unsigned int report_on_exception: 1;
963 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
964 uint32_t running_time_us; /* 12500..800000 */
965
968
971
972 /* temporary place of retval on OPT_CALL_THREADED_CODE */
973#if OPT_CALL_THREADED_CODE
974 VALUE retval;
975#endif
976
977 /* async errinfo queue */
980
981 /* interrupt management */
982 rb_nativethread_lock_t interrupt_lock;
986
988
989 union {
990 struct {
994 } proc;
995 struct {
996 VALUE (*func)(void *);
997 void *arg;
998 } func;
1000
1007
1008 /* statistics data for profiler */
1010
1011 /* fiber */
1014
1016 unsigned blocking;
1017
1018 /* misc */
1020
1022
1023#ifdef USE_SIGALTSTACK
1024 void *altstack;
1025#endif
1027
1028typedef enum {
1032 /* 0x03..0x06 is reserved */
1035
1036#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1037#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1038#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1039#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1040#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1041 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1042
1043/* iseq.c */
1044RUBY_SYMBOL_EXPORT_BEGIN
1045
1046/* node -> iseq */
1047rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
1048rb_iseq_t *rb_iseq_new_top (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1049rb_iseq_t *rb_iseq_new_main (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1050rb_iseq_t *rb_iseq_new_eval (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth);
1051rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth,
1052 enum iseq_type, const rb_compile_option_t*);
1053
1054struct iseq_link_anchor;
1058 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1059 const void *data;
1060};
1061static inline struct rb_iseq_new_with_callback_callback_func *
1062rb_iseq_new_with_callback_new_callback(
1063 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1064{
1066 return (struct rb_iseq_new_with_callback_callback_func *)memo;
1067}
1069 VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
1070 const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1071
1072VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1073int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1074
1075VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1076
1081RUBY_SYMBOL_EXPORT_END
1082
1083#define GetProcPtr(obj, ptr) \
1084 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1085
1086typedef struct {
1087 const struct rb_block block;
1088 unsigned int is_from_method: 1; /* bool */
1089 unsigned int is_lambda: 1; /* bool */
1090 unsigned int is_isolated: 1; /* bool */
1091} rb_proc_t;
1092
1093RUBY_SYMBOL_EXPORT_BEGIN
1097RUBY_SYMBOL_EXPORT_END
1098
1099typedef struct {
1100 VALUE flags; /* imemo header */
1102 const VALUE *ep;
1103 const VALUE *env;
1104 unsigned int env_size;
1105} rb_env_t;
1106
1108
1109#define GetBindingPtr(obj, ptr) \
1110 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1111
1112typedef struct {
1113 const struct rb_block block;
1115 unsigned short first_lineno;
1116} rb_binding_t;
1117
1118/* used by compile time and send insn */
1119
1125
1126#define VM_CHECKMATCH_TYPE_MASK 0x03
1127#define VM_CHECKMATCH_ARRAY 0x04
1128
1134
1136 VM_SVAR_LASTLINE = 0, /* $_ */
1137 VM_SVAR_BACKREF = 1, /* $~ */
1138
1140 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1142
1143/* inline cache */
1147typedef const struct rb_callinfo *CALL_INFO;
1148typedef const struct rb_callcache *CALL_CACHE;
1149typedef struct rb_call_data *CALL_DATA;
1150
1152
1153#ifndef FUNC_FASTCALL
1154#define FUNC_FASTCALL(x) x
1155#endif
1156
1157typedef rb_control_frame_t *
1159
1160#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1161#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1162
1163#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1164#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1165#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1166
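/* Sketch of the tagging round trip built from the macros above:
 *
 *   VALUE guarded = GC_GUARDED_PTR(ep);               // sets the low bit; looks like a Fixnum to the GC
 *   GC_GUARDED_PTR_P(guarded);                        // => non-zero
 *   const VALUE *plain = GC_GUARDED_PTR_REF(guarded); // masks the low tag bits off again
 */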
1167enum {
1168 /* Frame/Environment flag bits:
1169 * MMMM MMMM MMMM MMMM ____ _FFF FFFF EEEX (LSB)
1170 *
1171 * X : tag for GC marking (makes the word look like a Fixnum)
1172 * EEE : 3 bits of Env flags
1173 * FF..: 7 bits of Frame flags
1174 * MM..: 15 bits of frame magic (to detect frame corruption)
1175 */
1176
1177 /* frame types */
1187
1189
1190 /* frame flag */
1198
1199 /* env flag */
1204};
1205
1206#define VM_ENV_DATA_SIZE ( 3)
1207
1208#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1209#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1210#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1211#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1212
1213#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
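/* Layout relative to ep (sketch): local variables sit below these slots, the last of them at
 * ep[VM_ENV_INDEX_LAST_LVAR] (== ep[-3]); ep[-2] holds me/cref, ep[-1] the special value
 * (block handler or guarded prev ep), ep[0] the flags, and ep[1] the escaped env object, if any. */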
1214
1215static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1216
1217static inline void
1218VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1219{
1220 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1221 VM_ASSERT(FIXNUM_P(flags));
1222 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1223}
1224
1225static inline void
1226VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1227{
1228 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1229 VM_ASSERT(FIXNUM_P(flags));
1230 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1231}
1232
1233static inline unsigned long
1234VM_ENV_FLAGS(const VALUE *ep, long flag)
1235{
1236 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1237 VM_ASSERT(FIXNUM_P(flags));
1238 return flags & flag;
1239}
1240
1241static inline unsigned long
1242VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1243{
1244 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1245}
1246
1247static inline int
1248VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1249{
1250 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1251}
1252
1253static inline int
1254VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1255{
1256 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1257}
1258
1259static inline int
1260VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1261{
1262 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1263}
1264
1265static inline int
1266VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1267{
1268 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1269}
1270
1271static inline int
1272rb_obj_is_iseq(VALUE iseq)
1273{
1274 return imemo_type_p(iseq, imemo_iseq);
1275}
1276
1277#if VM_CHECK_MODE > 0
1278#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1279#endif
1280
1281static inline int
1282VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1283{
1284 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1285 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
1286 return cframe_p;
1287}
1288
1289static inline int
1290VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1291{
1292 return !VM_FRAME_CFRAME_P(cfp);
1293}
1294
1295#define RUBYVM_CFUNC_FRAME_P(cfp) \
1296 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1297
1298#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1299#define VM_BLOCK_HANDLER_NONE 0
1300
1301static inline int
1302VM_ENV_LOCAL_P(const VALUE *ep)
1303{
1304 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1305}
1306
1307static inline const VALUE *
1308VM_ENV_PREV_EP(const VALUE *ep)
1309{
1310 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1312}
1313
1314static inline VALUE
1315VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1316{
1317 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1318 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1319}
1320
1321#if VM_CHECK_MODE > 0
1322int rb_vm_ep_in_heap_p(const VALUE *ep);
1323#endif
1324
1325static inline int
1326VM_ENV_ESCAPED_P(const VALUE *ep)
1327{
1328 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1329 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1330}
1331
1332#if VM_CHECK_MODE > 0
1333static inline int
1334vm_assert_env(VALUE obj)
1335{
1336 VM_ASSERT(imemo_type_p(obj, imemo_env));
1337 return 1;
1338}
1339#endif
1340
1341static inline VALUE
1342VM_ENV_ENVVAL(const VALUE *ep)
1343{
1344 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1345 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1346 VM_ASSERT(vm_assert_env(envval));
1347 return envval;
1348}
1349
1350static inline const rb_env_t *
1351VM_ENV_ENVVAL_PTR(const VALUE *ep)
1352{
1353 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1354}
1355
1356static inline const rb_env_t *
1357vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1358{
1359 rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
1360 env->env_size = env_size;
1361 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1362 return env;
1363}
1364
1365static inline void
1366VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1367{
1368 *((VALUE *)ptr) = v;
1369}
1370
1371static inline void
1372VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1373{
1374 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1375 VM_FORCE_WRITE(ptr, special_const_value);
1376}
1377
1378static inline void
1379VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1380{
1381 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1382 VM_FORCE_WRITE(&ep[index], v);
1383}
1384
1385const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1386const VALUE *rb_vm_proc_local_ep(VALUE proc);
1387void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1388void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1389
1391
1392#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1393#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
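/* Control frames are stacked downward from the top of ec->vm_stack (see
 * RUBY_VM_END_CONTROL_FRAME below), so the caller's ("previous") frame is at cfp+1
 * and a callee's ("next") frame is at cfp-1. */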
1394
1395#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1396 ((void *)(ecfp) > (void *)(cfp))
1397
1398static inline const rb_control_frame_t *
1399RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1400{
1401 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1402}
1403
1404static inline int
1405RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1406{
1407 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1408}
1409
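/* Block handlers come in four encodings, distinguished by the predicates below: a captured
 * block pointer tagged with 0x01 (iseq block) or 0x03 (ifunc block), a Symbol, or a Proc
 * object; VM_BLOCK_HANDLER_NONE (0) means no block was given. */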
1410static inline int
1411VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1412{
1413 if ((block_handler & 0x03) == 0x01) {
1414#if VM_CHECK_MODE > 0
1415 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1416 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1417#endif
1418 return 1;
1419 }
1420 else {
1421 return 0;
1422 }
1423}
1424
1425static inline VALUE
1426VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1427{
1428 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1429 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1430 return block_handler;
1431}
1432
1433static inline const struct rb_captured_block *
1434VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1435{
1436 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1437 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1438 return captured;
1439}
1440
1441static inline int
1442VM_BH_IFUNC_P(VALUE block_handler)
1443{
1444 if ((block_handler & 0x03) == 0x03) {
1445#if VM_CHECK_MODE > 0
1446 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1447 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1448#endif
1449 return 1;
1450 }
1451 else {
1452 return 0;
1453 }
1454}
1455
1456static inline VALUE
1457VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1458{
1459 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1460 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1461 return block_handler;
1462}
1463
1464static inline const struct rb_captured_block *
1465VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1466{
1467 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1468 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1469 return captured;
1470}
1471
1472static inline const struct rb_captured_block *
1473VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1474{
1475 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1476 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1477 return captured;
1478}
1479
1480static inline enum rb_block_handler_type
1481vm_block_handler_type(VALUE block_handler)
1482{
1483 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1485 }
1486 else if (VM_BH_IFUNC_P(block_handler)) {
1488 }
1489 else if (SYMBOL_P(block_handler)) {
1491 }
1492 else {
1493 VM_ASSERT(rb_obj_is_proc(block_handler));
1495 }
1496}
1497
1498static inline void
1499vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1500{
1501 VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1502 (vm_block_handler_type(block_handler), 1));
1503}
1504
1505static inline int
1506vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
1507{
1508 return ((VALUE) cfp->block_code) == block_handler;
1509}
1510
1511static inline enum rb_block_type
1512vm_block_type(const struct rb_block *block)
1513{
1514#if VM_CHECK_MODE > 0
1515 switch (block->type) {
1516 case block_type_iseq:
1517 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1518 break;
1519 case block_type_ifunc:
1520 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1521 break;
1522 case block_type_symbol:
1523 VM_ASSERT(SYMBOL_P(block->as.symbol));
1524 break;
1525 case block_type_proc:
1527 break;
1528 }
1529#endif
1530 return block->type;
1531}
1532
1533static inline void
1534vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1535{
1536 struct rb_block *mb = (struct rb_block *)block;
1537 mb->type = type;
1538}
1539
1540static inline const struct rb_block *
1541vm_proc_block(VALUE procval)
1542{
1543 VM_ASSERT(rb_obj_is_proc(procval));
1544 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1545}
1546
1547static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1548static inline const VALUE *vm_block_ep(const struct rb_block *block);
1549
1550static inline const rb_iseq_t *
1551vm_proc_iseq(VALUE procval)
1552{
1553 return vm_block_iseq(vm_proc_block(procval));
1554}
1555
1556static inline const VALUE *
1557vm_proc_ep(VALUE procval)
1558{
1559 return vm_block_ep(vm_proc_block(procval));
1560}
1561
1562static inline const rb_iseq_t *
1563vm_block_iseq(const struct rb_block *block)
1564{
1565 switch (vm_block_type(block)) {
1566 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1567 case block_type_proc: return vm_proc_iseq(block->as.proc);
1568 case block_type_ifunc:
1569 case block_type_symbol: return NULL;
1570 }
1571 VM_UNREACHABLE(vm_block_iseq);
1572 return NULL;
1573}
1574
1575static inline const VALUE *
1576vm_block_ep(const struct rb_block *block)
1577{
1578 switch (vm_block_type(block)) {
1579 case block_type_iseq:
1580 case block_type_ifunc: return block->as.captured.ep;
1581 case block_type_proc: return vm_proc_ep(block->as.proc);
1582 case block_type_symbol: return NULL;
1583 }
1584 VM_UNREACHABLE(vm_block_ep);
1585 return NULL;
1586}
1587
1588static inline VALUE
1589vm_block_self(const struct rb_block *block)
1590{
1591 switch (vm_block_type(block)) {
1592 case block_type_iseq:
1593 case block_type_ifunc:
1594 return block->as.captured.self;
1595 case block_type_proc:
1596 return vm_block_self(vm_proc_block(block->as.proc));
1597 case block_type_symbol:
1598 return Qundef;
1599 }
1600 VM_UNREACHABLE(vm_block_self);
1601 return Qundef;
1602}
1603
1604static inline VALUE
1605VM_BH_TO_SYMBOL(VALUE block_handler)
1606{
1607 VM_ASSERT(SYMBOL_P(block_handler));
1608 return block_handler;
1609}
1610
1611static inline VALUE
1612VM_BH_FROM_SYMBOL(VALUE symbol)
1613{
1615 return symbol;
1616}
1617
1618static inline VALUE
1619VM_BH_TO_PROC(VALUE block_handler)
1620{
1621 VM_ASSERT(rb_obj_is_proc(block_handler));
1622 return block_handler;
1623}
1624
1625static inline VALUE
1626VM_BH_FROM_PROC(VALUE procval)
1627{
1628 VM_ASSERT(rb_obj_is_proc(procval));
1629 return procval;
1630}
1631
1632/* VM related object allocate functions */
1636VALUE rb_proc_dup(VALUE self);
1637
1638/* for debug */
1640extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
1643 , VALUE reg_a, VALUE reg_b
1644#endif
1645);
1646
1647#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1648#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1649void rb_vm_bugreport(const void *);
1650typedef void (*ruby_sighandler_t)(int);
1651NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1652
1653/* functions about thread/vm execution */
1654RUBY_SYMBOL_EXPORT_BEGIN
1655VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1656VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1657VALUE rb_iseq_path(const rb_iseq_t *iseq);
1658VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1659RUBY_SYMBOL_EXPORT_END
1660
1661VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1662void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1663
1664int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1665void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1666
1667VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1668
1669VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1670static inline VALUE
1671rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1672{
1673 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1674}
1675
1676static inline VALUE
1677rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1678{
1679 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1680}
1681
1685const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1688 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1690
1692
1697
1698static inline void
1699rb_vm_living_threads_init(rb_vm_t *vm)
1700{
1701 list_head_init(&vm->waiting_fds);
1702 list_head_init(&vm->waiting_pids);
1703 list_head_init(&vm->workqueue);
1704 list_head_init(&vm->waiting_grps);
1705 list_head_init(&vm->ractor.set);
1706}
1707
1708typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1714int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1717
1718void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1719
1720#define rb_vm_register_special_exception(sp, e, m) \
1721 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1722
1724
1725void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1726
1728
1729#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1730
1731#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1732 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1733 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1734 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1735 if (UNLIKELY((cfp) <= &bound[1])) { \
1736 vm_stackoverflow(); \
1737 } \
1738} while (0)
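/* i.e. there must still be room for `margin` VALUEs plus at least one control frame
 * between sp and cfp; otherwise vm_stackoverflow() raises a stack overflow error. */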
1739
1740#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1741 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
1742
1743VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1744
1746
1747/* for thread */
1748
1749#if RUBY_VM_THREAD_MODEL == 2
1750RUBY_SYMBOL_EXPORT_BEGIN
1751
1757
1758RUBY_SYMBOL_EXPORT_END
1759
1760#define GET_VM() rb_current_vm()
1761#define GET_RACTOR() rb_current_ractor()
1762#define GET_THREAD() rb_current_thread()
1763#define GET_EC() rb_current_execution_context(true)
1764
1765static inline rb_thread_t *
1766rb_ec_thread_ptr(const rb_execution_context_t *ec)
1767{
1768 return ec->thread_ptr;
1769}
1770
1771static inline rb_ractor_t *
1772rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1773{
1774 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1775 if (th) {
1776 VM_ASSERT(th->ractor != NULL);
1777 return th->ractor;
1778 }
1779 else {
1780 return NULL;
1781 }
1782}
1783
1784static inline rb_vm_t *
1785rb_ec_vm_ptr(const rb_execution_context_t *ec)
1786{
1787 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1788 if (th) {
1789 return th->vm;
1790 }
1791 else {
1792 return NULL;
1793 }
1794}
1795
1796static inline rb_execution_context_t *
1797rb_current_execution_context(bool expect_ec)
1798{
1799#ifdef RB_THREAD_LOCAL_SPECIFIER
1800 #if __APPLE__
1801 rb_execution_context_t *ec = rb_current_ec();
1802 #else
1804 #endif
1805#else
1806 rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
1807#endif
1808 VM_ASSERT(!expect_ec || ec != NULL);
1809 return ec;
1810}
1811
1812static inline rb_thread_t *
1813rb_current_thread(void)
1814{
1815 const rb_execution_context_t *ec = GET_EC();
1816 return rb_ec_thread_ptr(ec);
1817}
1818
1819static inline rb_ractor_t *
1820rb_current_ractor(void)
1821{
1824 }
1825 else {
1826 const rb_execution_context_t *ec = GET_EC();
1827 return rb_ec_ractor_ptr(ec);
1828 }
1829}
1830
1831static inline rb_vm_t *
1832rb_current_vm(void)
1833{
1834#if 0 // TODO: reconsider the assertions
1836 ruby_current_execution_context_ptr == NULL ||
1837 rb_ec_thread_ptr(GET_EC()) == NULL ||
1838 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
1839 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
1840#endif
1841
1842 return ruby_current_vm_ptr;
1843}
1844
1846 unsigned int recorded_lock_rec,
1847 unsigned int current_lock_rec);
1848
1849static inline unsigned int
1850rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
1851{
1852 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1853
1854 if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
1855 return 0;
1856 }
1857 else {
1858 return vm->ractor.sync.lock_rec;
1859 }
1860}
1861
1862#else
1863#error "unsupported thread model"
1864#endif
1865
1866enum {
1873};
1874
1875#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
1876#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
1877#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
1878#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
1879#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
1880#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
1881#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
1882 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
1883
1884static inline bool
1885RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
1886{
1887#if USE_VM_CLOCK
1888 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
1889
1890 if (current_clock != ec->checked_clock) {
1891 ec->checked_clock = current_clock;
1893 }
1894#endif
1895 return ec->interrupt_flag & ~(ec)->interrupt_mask;
1896}
1897
1899int rb_signal_buff_size(void);
1900int rb_signal_exec(rb_thread_t *th, int sig);
1902void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
1910void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
1913void rb_fiber_close(rb_fiber_t *fib);
1916
1917// vm_sync.h
1919void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
1920
1921#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
1922static inline void
1923rb_vm_check_ints(rb_execution_context_t *ec)
1924{
1925 VM_ASSERT(ec == GET_EC());
1926 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
1927 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
1928 }
1929}
1930
1931/* tracer */
1932
1942
1944
1945 /* calc from cfp */
1948};
1949
1952void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
1954
1955void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
1956
1957#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
1958 const rb_event_flag_t flag_arg_ = (flag_); \
1959 rb_hook_list_t *hooks_arg_ = (hooks_); \
1960 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
1961 /* defer evaluating the other arguments */ \
1962 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
1963 } \
1964} while (0)
1965
1966static inline void
1967rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
1968 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
1969{
1970 struct rb_trace_arg_struct trace_arg;
1971
1972 VM_ASSERT((hooks->events & flag) != 0);
1973
1974 trace_arg.event = flag;
1975 trace_arg.ec = ec;
1976 trace_arg.cfp = ec->cfp;
1977 trace_arg.self = self;
1978 trace_arg.id = id;
1979 trace_arg.called_id = called_id;
1980 trace_arg.klass = klass;
1981 trace_arg.data = data;
1982 trace_arg.path = Qundef;
1983 trace_arg.klass_solved = 0;
1984
1985 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
1986}
1987
1992};
1993
1994static inline rb_hook_list_t *
1995rb_ec_ractor_hooks(const rb_execution_context_t *ec)
1996{
1997 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
1998 return &cr_pub->hooks;
1999}
2000
2001#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2002 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2003
2004#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2005 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2006
2007static inline void
2008rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2009{
2011 NIL_P(eval_script) ? (VALUE)iseq :
2012 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2013}
2014
2015void rb_vm_trap_exit(rb_vm_t *vm);
2016
2017RUBY_SYMBOL_EXPORT_BEGIN
2018
2020
2021/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2022#define RUBY_EVENT_COVERAGE_LINE 0x010000
2023#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2024
2025extern VALUE rb_get_coverages(void);
2026extern void rb_set_coverages(VALUE, int, VALUE);
2027extern void rb_clear_coverages(void);
2028extern void rb_reset_coverages(void);
2029
2031
2032// ractor.c
2035
2036RUBY_SYMBOL_EXPORT_END
2037
2038#endif /* RUBY_VM_CORE_H */
Definition: vm_sync.c:205
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1087
VALUE rb_get_coverages(void)
Definition: thread.c:5773
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3031
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:93
void(* ruby_sighandler_t)(int)
Definition: vm_core.h:1650
rb_iseq_t * rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t *)
Definition: iseq.c:888
ruby_basic_operators
Definition: vm_core.h:503
@ BOP_EQ
Definition: vm_core.h:509
@ BOP_LENGTH
Definition: vm_core.h:516
@ BOP_DIV
Definition: vm_core.h:507
@ BOP_LAST_
Definition: vm_core.h:534
@ BOP_LE
Definition: vm_core.h:512
@ BOP_LTLT
Definition: vm_core.h:513
@ BOP_GE
Definition: vm_core.h:522
@ BOP_SIZE
Definition: vm_core.h:517
@ BOP_CALL
Definition: vm_core.h:530
@ BOP_AND
Definition: vm_core.h:531
@ BOP_NEQ
Definition: vm_core.h:524
@ BOP_NOT
Definition: vm_core.h:523
@ BOP_NIL_P
Definition: vm_core.h:519
@ BOP_SUCC
Definition: vm_core.h:520
@ BOP_EQQ
Definition: vm_core.h:510
@ BOP_ASET
Definition: vm_core.h:515
@ BOP_MAX
Definition: vm_core.h:528
@ BOP_AREF
Definition: vm_core.h:514
@ BOP_FREEZE
Definition: vm_core.h:526
@ BOP_PLUS
Definition: vm_core.h:504
@ BOP_MOD
Definition: vm_core.h:508
@ BOP_MINUS
Definition: vm_core.h:505
@ BOP_LT
Definition: vm_core.h:511
@ BOP_MATCH
Definition: vm_core.h:525
@ BOP_MULT
Definition: vm_core.h:506
@ BOP_EMPTY_P
Definition: vm_core.h:518
@ BOP_OR
Definition: vm_core.h:532
@ BOP_MIN
Definition: vm_core.h:529
@ BOP_GT
Definition: vm_core.h:521
@ BOP_UMINUS
Definition: vm_core.h:527
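The ruby_basic_operators values above index rb_vm_struct::redefined_flag: for each basic operator (Integer#+, Array#[], String#freeze, ...) the VM records, per built-in class, whether the method has been redefined, so specialized instructions know whether their fast path is still valid. A minimal, self-contained sketch of that check follows; the SKETCH_* identifiers and the per-class flag bit are illustrative stand-ins, not the interpreter's real names.

/* Sketch: gate an optimized basic-operator path on a redefined_flag table. */
#include <stdbool.h>
#include <stdio.h>

enum sketch_bop { SKETCH_BOP_PLUS, SKETCH_BOP_AREF, SKETCH_BOP_LAST_ };
enum { SKETCH_INTEGER_REDEFINED_OP_FLAG = 1 << 0 };     /* assumed per-class bit */

static short sketch_redefined_flag[SKETCH_BOP_LAST_];   /* stands in for vm->redefined_flag */

/* The fast path is allowed only while the relevant bit is still clear. */
static bool
sketch_basic_op_unredefined_p(enum sketch_bop op, int klass_flag)
{
    return (sketch_redefined_flag[op] & klass_flag) == 0;
}

int
main(void)
{
    printf("fast Integer#+ allowed: %d\n",
           sketch_basic_op_unredefined_p(SKETCH_BOP_PLUS, SKETCH_INTEGER_REDEFINED_OP_FLAG));

    /* Simulate a redefinition of Integer#+: the bit is set once, and the
     * interpreter falls back to a regular method call from then on. */
    sketch_redefined_flag[SKETCH_BOP_PLUS] |= SKETCH_INTEGER_REDEFINED_OP_FLAG;
    printf("fast Integer#+ allowed: %d\n",
           sketch_basic_op_unredefined_p(SKETCH_BOP_PLUS, SKETCH_INTEGER_REDEFINED_OP_FLAG));
    return 0;
}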
const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath)
Definition: iseq.c:502
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq)
Definition: vm.c:2413
VALUE rb_mRubyVMFrozenCore
Definition: vm.c:375
ruby_vm_throw_flags
Definition: vm_core.h:209
@ VM_THROW_STATE_MASK
Definition: vm_core.h:211
@ VM_THROW_NO_ESCAPE_FLAG
Definition: vm_core.h:210
void rb_ec_error_print(rb_execution_context_t *volatile ec, volatile VALUE errinfo)
Definition: eval_error.c:360
void rb_vm_pop_frame(rb_execution_context_t *ec)
VALUE rb_iseq_eval(const rb_iseq_t *iseq)
Definition: vm.c:2403
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4598
#define RUBY_VM_SET_TIMER_INTERRUPT(ec)
Definition: vm_core.h:1875
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:669
void Init_native_thread(rb_thread_t *th)
void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc)
Definition: vm_dump.c:388
rb_iseq_t * rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth, enum iseq_type, const rb_compile_option_t *)
Definition: iseq.c:862
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath)
Definition: iseq.c:521
rb_execution_context_t * rb_vm_main_ractor_ec(rb_vm_t *vm)
Definition: ractor.c:1978
VALUE rb_proc_ractor_make_shareable(VALUE self)
Definition: vm.c:1105
int rb_threadptr_execute_interrupts(rb_thread_t *, int)
Definition: thread.c:2424
struct rb_thread_struct rb_thread_t
ruby_tag_type
Definition: vm_core.h:185
@ RUBY_TAG_NEXT
Definition: vm_core.h:189
@ RUBY_TAG_NONE
Definition: vm_core.h:186
@ RUBY_TAG_FATAL
Definition: vm_core.h:194
@ RUBY_TAG_MASK
Definition: vm_core.h:195
@ RUBY_TAG_THROW
Definition: vm_core.h:193
@ RUBY_TAG_BREAK
Definition: vm_core.h:188
@ RUBY_TAG_RETRY
Definition: vm_core.h:190
@ RUBY_TAG_RAISE
Definition: vm_core.h:192
@ RUBY_TAG_RETURN
Definition: vm_core.h:187
@ RUBY_TAG_REDO
Definition: vm_core.h:191
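These ruby_tag_type values are the states recorded in rb_vm_tag::state when a non-local exit (raise, break, retry, throw, ...) unwinds to the innermost pushed tag: each protected region pushes an rb_vm_tag carrying an rb_jmpbuf_t onto a per-execution-context chain, and the exit longjmps back with the corresponding RUBY_TAG_* value. Below is a simplified, self-contained sketch of that setjmp/longjmp protocol built only from the fields listed on this page (buf, prev, state); the helper names are assumptions for illustration, not the VM's actual tag macros.

/* Sketch: a per-context chain of setjmp tags, unwound with a tag state. */
#include <setjmp.h>
#include <stdio.h>

enum sketch_tag_type { SKETCH_TAG_NONE = 0, SKETCH_TAG_RAISE = 6 };

struct sketch_vm_tag {
    jmp_buf buf;                   /* counterpart of rb_vm_tag::buf (rb_jmpbuf_t) */
    struct sketch_vm_tag *prev;    /* counterpart of rb_vm_tag::prev */
    enum sketch_tag_type state;    /* counterpart of rb_vm_tag::state */
};

static struct sketch_vm_tag *tag_chain;   /* stands in for the execution context's tag list */

static void
sketch_throw(enum sketch_tag_type state)
{
    tag_chain->state = state;      /* record why we are unwinding ...           */
    longjmp(tag_chain->buf, 1);    /* ... and jump back to the innermost region */
}

int
main(void)
{
    struct sketch_vm_tag tag = { .prev = tag_chain, .state = SKETCH_TAG_NONE };
    tag_chain = &tag;                        /* push the tag */

    if (setjmp(tag.buf) == 0) {              /* normal entry into the protected region */
        sketch_throw(SKETCH_TAG_RAISE);      /* e.g. an exception is raised inside it */
    }
    else {
        printf("unwound with state %d\n", (int)tag.state);
    }
    tag_chain = tag.prev;                    /* pop the tag */
    return 0;
}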
const rb_data_type_t ruby_binding_data_type
Definition: proc.c:319
VALUE rb_cISeq
Definition: iseq.c:46
void rb_vm_encoded_insn_data_table_init(void)
Definition: iseq.c:3139
void rb_vm_stack_to_heap(rb_execution_context_t *ec)
Definition: vm.c:820
#define GC_GUARDED_PTR_REF(p)
Definition: vm_core.h:1164
#define PATHOBJ_PATH
Definition: vm_core.h:275
void rb_clear_coverages(void)
Definition: thread.c:4739
vm_special_object_type
Definition: vm_core.h:1129
@ VM_SPECIAL_OBJECT_CBASE
Definition: vm_core.h:1131
@ VM_SPECIAL_OBJECT_VMCORE
Definition: vm_core.h:1130
@ VM_SPECIAL_OBJECT_CONST_BASE
Definition: vm_core.h:1132
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
Definition: vm.c:126
void rb_objspace_call_finalizer(struct rb_objspace *)
Definition: gc.c:3771
VALUE rb_proc_isolate_bang(VALUE self)
Definition: vm.c:1055
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:5869
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec)
Definition: eval.c:1881
#define VM_ENV_DATA_INDEX_FLAGS
Definition: vm_core.h:1210
rb_block_type
Definition: vm_core.h:753
@ block_type_symbol
Definition: vm_core.h:756
@ block_type_iseq
Definition: vm_core.h:754
@ block_type_ifunc
Definition: vm_core.h:755
@ block_type_proc
Definition: vm_core.h:757
void rb_vm_bugreport(const void *)
Definition: vm_dump.c:962
#define VM_ASSERT(expr)
Definition: vm_core.h:61
VALUE rb_proc_dup(VALUE self)
Definition: vm.c:956
rb_block_handler_type
Definition: vm_core.h:746
@ block_handler_type_ifunc
Definition: vm_core.h:748
@ block_handler_type_proc
Definition: vm_core.h:750
@ block_handler_type_symbol
Definition: vm_core.h:749
@ block_handler_type_iseq
Definition: vm_core.h:747
VALUE rb_binding_alloc(VALUE klass)
Definition: proc.c:331
void rb_vm_at_exit_func(struct rb_vm_struct *)
Definition: vm_core.h:541
#define VM_ENV_DATA_INDEX_ENV
Definition: vm_core.h:1211
ruby_special_exceptions
Definition: vm_core.h:494
@ ruby_error_stackfatal
Definition: vm_core.h:498
@ ruby_error_nomemory
Definition: vm_core.h:496
@ ruby_error_reenter
Definition: vm_core.h:495
@ ruby_error_sysstack
Definition: vm_core.h:497
@ ruby_special_error_count
Definition: vm_core.h:500
@ ruby_error_stream_closed
Definition: vm_core.h:499
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
Definition: vm.c:1172
#define PATHOBJ_REALPATH
Definition: vm_core.h:276
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3024
VALUE rb_proc_isolate(VALUE self)
Definition: vm.c:1097
VALUE CDHASH
Definition: vm_core.h:1151
VALUE rb_cRubyVM
Definition: vm.c:373
void rb_hook_list_free(rb_hook_list_t *hooks)
Definition: vm_trace.c:69
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1987
int rb_vm_get_sourceline(const rb_control_frame_t *)
Definition: vm_backtrace.c:71
union iseq_inline_storage_entry * ISE
Definition: vm_core.h:1146
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1981
rb_iseq_t * rb_iseq_new(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type)
Definition: iseq.c:809
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2001
const struct rb_builtin_function * RB_BUILTIN
Definition: vm_core.h:562
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE
Definition: vm_core.h:662
void * rb_jmpbuf_t[5]
Definition: vm_core.h:801
void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm_dump.c:195
signed long rb_snum_t
Definition: vm_core.h:183
char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) *2+3]
Definition: vm_core.h:844
rb_iseq_t * rb_iseq_new_main(const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent)
Definition: iseq.c:833
struct rb_vm_struct rb_vm_t
VALUE rb_iseq_coverage(const rb_iseq_t *iseq)
Definition: iseq.c:1146
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2565
const struct rb_callinfo * CALL_INFO
Definition: vm_core.h:1147
int rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:1099
@ TERMINATE_INTERRUPT_MASK
Definition: vm_core.h:1871
@ POSTPONED_JOB_INTERRUPT_MASK
Definition: vm_core.h:1869
@ VM_BARRIER_INTERRUPT_MASK
Definition: vm_core.h:1872
@ TRAP_INTERRUPT_MASK
Definition: vm_core.h:1870
@ TIMER_INTERRUPT_MASK
Definition: vm_core.h:1867
@ PENDING_INTERRUPT_MASK
Definition: vm_core.h:1868
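These masks name the bits that asynchronous event sources (the timer thread, signal delivery, pending-interrupt queues, ractor barriers) OR into an execution context's interrupt word, which the running thread polls and clears at safe points; RUBY_VM_SET_TIMER_INTERRUPT and rb_threadptr_execute_interrupts above are the two ends of that protocol. A minimal sketch of the idea follows; the atomic field, mask values, and helper names are simplified stand-ins rather than the VM's own definitions.

/* Sketch: a producer sets interrupt bits atomically, the consumer drains them. */
#include <stdatomic.h>
#include <stdio.h>

enum {
    SKETCH_TIMER_INTERRUPT_MASK = 0x01,
    SKETCH_TRAP_INTERRUPT_MASK  = 0x08,
};

static _Atomic unsigned int sketch_interrupt_flag;   /* stands in for the ec's interrupt word */

static void
sketch_set_timer_interrupt(void)        /* role of RUBY_VM_SET_TIMER_INTERRUPT(ec) */
{
    atomic_fetch_or(&sketch_interrupt_flag, SKETCH_TIMER_INTERRUPT_MASK);
}

static void
sketch_check_ints(void)                 /* polled at safe points in the running thread */
{
    unsigned int pending = atomic_exchange(&sketch_interrupt_flag, 0);
    if (pending & SKETCH_TIMER_INTERRUPT_MASK)
        printf("timer interrupt: time slice expired, consider switching threads\n");
    if (pending & SKETCH_TRAP_INTERRUPT_MASK)
        printf("trap interrupt: run registered signal handlers\n");
}

int
main(void)
{
    sketch_set_timer_interrupt();
    sketch_check_ints();
    return 0;
}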
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
Definition: vm.c:1152
VALUE rb_vm_env_local_variables(const rb_env_t *env)
Definition: vm.c:876
#define FUNC_FASTCALL(x)
Definition: vm_core.h:1154
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2424
struct iseq_inline_constant_cache * IC
Definition: vm_core.h:1144
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:3114
const struct rb_callcache * CALL_CACHE
Definition: vm_core.h:1148
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr)
Definition: vm_eval.c:2421
void rb_gvl_destroy(rb_global_vm_lock_t *gvl)
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:589
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:56
struct rb_ensure_entry rb_ensure_entry_t
const VALUE * rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
Definition: vm.c:1203
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
Definition: vm_sync.c:211
void rb_thread_reset_timer_thread(void)
Definition: thread.c:4696
VALUE rb_eRactorUnsafeError
Definition: ractor.c:22
vm_svar_index
Definition: vm_core.h:1135
@ VM_SVAR_FLIPFLOP_START
Definition: vm_core.h:1140
@ VM_SVAR_EXTRA_START
Definition: vm_core.h:1139
@ VM_SVAR_BACKREF
Definition: vm_core.h:1137
@ VM_SVAR_LASTLINE
Definition: vm_core.h:1136
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:567
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:577
struct rb_ensure_list rb_ensure_list_t
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp)
Definition: vm_core.h:1395
#define VM_TAGGED_PTR_REF(v, mask)
Definition: vm_core.h:1161
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
Definition: vm.c:639
struct rb_control_frame_struct rb_control_frame_t
VALUE rb_block_param_proxy
Definition: vm.c:376
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:830
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
Definition: vm.c:1475
void rb_thread_stop_timer_thread(void)
Definition: thread.c:4688
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2575
struct rb_hook_list_struct rb_hook_list_t
void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm_dump.c:417
rb_iseq_t * rb_iseq_new_eval(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth)
Definition: iseq.c:841
void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p)
Definition: vm_trace.c:365
rb_control_frame_t *FUNC_FASTCALL rb_insn_func_t(rb_execution_context_t *, rb_control_frame_t *)
Definition: vm_core.h:1158
struct rb_call_data * CALL_DATA
Definition: vm_core.h:1149
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
Definition: vm.c:326
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:508
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child)
Disassemble an instruction. Iseq -> Iseq inspect object.
Definition: iseq.c:2078
void rb_thread_wakeup_timer_thread(int)
void rb_set_coverages(VALUE, int, VALUE)
Definition: thread.c:5785
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1299
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:675
#define VM_ENV_DATA_INDEX_SPECVAL
Definition: vm_core.h:1209
VALUE rb_iseq_disasm(const rb_iseq_t *iseq)
Definition: iseq.c:2335
VALUE rb_eRactorIsolationError
Definition: ractor.c:23
void rb_fiber_close(rb_fiber_t *fib)
Definition: cont.c:2365
#define VM_TAGGED_PTR_SET(p, tag)
Definition: vm_core.h:1160
void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2440
rb_vm_defineclass_type_t
Definition: vm_core.h:1028
@ VM_DEFINECLASS_TYPE_CLASS
Definition: vm_core.h:1029
@ VM_DEFINECLASS_TYPE_MASK
Definition: vm_core.h:1033
@ VM_DEFINECLASS_TYPE_MODULE
Definition: vm_core.h:1031
@ VM_DEFINECLASS_TYPE_SINGLETON_CLASS
Definition: vm_core.h:1030
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
Definition: vm_trace.c:1268
int rb_vm_check_ints_blocking(rb_execution_context_t *ec)
Definition: thread.c:222
rb_iseq_t * rb_iseq_new_top(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent)
Definition: iseq.c:817
#define RUBY_NSIG
Definition: vm_core.h:114
void rb_vm_trap_exit(rb_vm_t *vm)
Definition: signal.c:1077
int rb_signal_buff_size(void)
Definition: signal.c:747
struct rb_iseq_location_struct rb_iseq_location_t
void rb_objspace_free(struct rb_objspace *)
Definition: gc.c:1610
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg)
Definition: vm.c:2612
struct iseq_inline_iv_cache_entry * IVC
Definition: vm_core.h:1145
VALUE rb_iseq_realpath(const rb_iseq_t *iseq)
Definition: iseq.c:1093
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
Definition: vm_trace.c:1258
int rb_backtrace_iter_func(void *, VALUE, int, VALUE)
Definition: vm_core.h:1708
void rb_execution_context_update(const rb_execution_context_t *ec)
Definition: vm.c:2785
#define VM_UNREACHABLE(func)
Definition: vm_core.h:62
vm_check_match_type
Definition: vm_core.h:1120
@ VM_CHECKMATCH_TYPE_RESCUE
Definition: vm_core.h:1123
@ VM_CHECKMATCH_TYPE_CASE
Definition: vm_core.h:1122
@ VM_CHECKMATCH_TYPE_WHEN
Definition: vm_core.h:1121
void rb_execution_context_mark(const rb_execution_context_t *ec)
Definition: vm.c:2835
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1676
void rb_reset_coverages(void)
Definition: thread.c:5800
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat)
Definition: vm_eval.c:260
@ VM_FRAME_FLAG_LAMBDA
Definition: vm_core.h:1194
@ VM_FRAME_FLAG_CFRAME_KW
Definition: vm_core.h:1196
@ VM_FRAME_MAGIC_IFUNC
Definition: vm_core.h:1183
@ VM_FRAME_MAGIC_METHOD
Definition: vm_core.h:1178
@ VM_FRAME_MAGIC_TOP
Definition: vm_core.h:1181
@ VM_FRAME_FLAG_CFRAME
Definition: vm_core.h:1193
@ VM_FRAME_MAGIC_DUMMY
Definition: vm_core.h:1186
@ VM_FRAME_FLAG_PASSED
Definition: vm_core.h:1197
@ VM_FRAME_FLAG_BMETHOD
Definition: vm_core.h:1192
@ VM_FRAME_MAGIC_BLOCK
Definition: vm_core.h:1179
@ VM_ENV_FLAG_LOCAL
Definition: vm_core.h:1200
@ VM_FRAME_MAGIC_MASK
Definition: vm_core.h:1188
@ VM_FRAME_MAGIC_CFUNC
Definition: vm_core.h:1182
@ VM_FRAME_MAGIC_EVAL
Definition: vm_core.h:1184
@ VM_FRAME_MAGIC_CLASS
Definition: vm_core.h:1180
@ VM_ENV_FLAG_ESCAPED
Definition: vm_core.h:1201
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1202
@ VM_ENV_FLAG_ISOLATED
Definition: vm_core.h:1203
@ VM_FRAME_FLAG_FINISH
Definition: vm_core.h:1191
@ VM_FRAME_MAGIC_RESCUE
Definition: vm_core.h:1185
@ VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM
Definition: vm_core.h:1195
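The frame magic values, VM_FRAME_FLAG_* bits, and VM_ENV_FLAG_* bits above are packed into the single VALUE stored at ep[VM_ENV_DATA_INDEX_FLAGS]; the frame type is recovered by masking with VM_FRAME_MAGIC_MASK, while the remaining bits carry per-frame and per-environment state. The sketch below shows that packing and the mask-based test; the concrete constant values and the slot index are assumptions chosen only for illustration.

/* Sketch: one word per frame holds a magic tag plus flag bits. */
#include <stdio.h>

enum {
    SKETCH_ENV_DATA_INDEX_FLAGS = 0,            /* assumed slot index        */
    SKETCH_FRAME_MAGIC_METHOD   = 0x11110001,   /* illustrative magic values */
    SKETCH_FRAME_MAGIC_BLOCK    = 0x22220001,
    SKETCH_FRAME_MAGIC_MASK     = 0x7fff0001,
    SKETCH_FRAME_FLAG_FINISH    = 0x00000020,   /* illustrative flag bit     */
};

static unsigned long
sketch_frame_type(const unsigned long *ep)      /* mask off the flag bits */
{
    return ep[SKETCH_ENV_DATA_INDEX_FLAGS] & SKETCH_FRAME_MAGIC_MASK;
}

int
main(void)
{
    /* A method frame that is also a "finish" frame. */
    unsigned long ep_storage[1] = { SKETCH_FRAME_MAGIC_METHOD | SKETCH_FRAME_FLAG_FINISH };

    printf("is method frame: %d\n", sketch_frame_type(ep_storage) == SKETCH_FRAME_MAGIC_METHOD);
    printf("finish frame:    %d\n", !!(ep_storage[0] & SKETCH_FRAME_FLAG_FINISH));
    return 0;
}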
unsigned long rb_num_t
Definition: vm_core.h:182
void rb_thread_start_timer_thread(void)
Definition: thread.c:4702
VALUE rb_proc_alloc(VALUE klass)
Definition: proc.c:145
void rb_vm_inc_const_missing_count(void)
Definition: vm.c:425
struct rb_objspace * rb_objspace_alloc(void)
Definition: gc.c:1595
#define OPT_STACK_CACHING
Definition: vm_opts.h:58
void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec, unsigned int recorded_lock_rec, unsigned int current_lock_rec)
Definition: vm_sync.c:282
#define env
int def(FILE *source, FILE *dest, int level)
Definition: zpipe.c:36