11#define vm_exec rb_vm_exec
18#include "internal/error.h"
19#include "internal/eval.h"
25#include "internal/symbol.h"
63static inline const VALUE *
64VM_EP_LEP(
const VALUE *ep)
66 while (!VM_ENV_LOCAL_P(ep)) {
67 ep = VM_ENV_PREV_EP(ep);
99static inline const VALUE *
102 return VM_EP_LEP(cfp->
ep);
105static inline const VALUE *
108 return VM_ENV_PREV_EP(cfp->
ep);
115 const VALUE *ep = VM_CF_LEP(cfp);
116 return VM_ENV_BLOCK_HANDLER(ep);
122 return VM_FRAME_CFRAME_KW_P(cfp);
128 return VM_CF_BLOCK_HANDLER(cfp);
139 if (start <= (
VALUE *)cfp && (
VALUE *)cfp < end) {
154 if (start <= ep && ep < end) {
165 if (VM_EP_IN_HEAP_P(ec, ep)) {
183rb_vm_ep_in_heap_p(
const VALUE *ep)
187 return vm_ep_in_heap_p_(ec, ep);
194 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
202 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
219 vm_block_handler_verify(block_handler);
220 return block_handler;
227 int omod_shared =
FALSE;
236 scope_visi.visi.method_visi = visi;
240 if (prev_cref !=
NULL && prev_cref != (
void *)1 ) {
241 refinements = CREF_REFINEMENTS(prev_cref);
243 if (!
NIL_P(refinements)) {
245 CREF_OMOD_SHARED_SET(prev_cref);
251 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
252 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
260 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval,
FALSE);
266 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval,
TRUE);
278 VALUE klass = CREF_CLASS(cref);
280 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
281 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
283 new_cref = vm_cref_new(klass, visi->method_visi, visi->
module_func, next_cref, pushed_by_eval);
285 if (!
NIL_P(CREF_REFINEMENTS(cref))) {
288 CREF_REFINEMENTS_SET(new_cref, ref);
289 CREF_OMOD_SHARED_UNSET(new_cref);
299 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
311 return vm_cref_new_toplevel(GET_EC());
315vm_cref_dump(
const char *mesg,
const rb_cref_t *cref)
317 fprintf(stderr,
"vm_cref_dump: %s (%p)\n", mesg, (
void *)cref);
321 cref = CREF_NEXT(cref);
340#if VM_COLLECT_USAGE_DETAILS
341static void vm_collect_usage_operand(
int insn,
int n,
VALUE op);
342static void vm_collect_usage_insn(
int insn);
343static void vm_collect_usage_register(
int reg,
int isset);
369 mjit_add_class_serial(class_serial);
378#define ruby_vm_redefined_flag GET_VM()->redefined_flag
383#ifdef RB_THREAD_LOCAL_SPECIFIER
414 .
call_ = vm_call_general,
420static void thread_free(
void *
ptr);
436 if (!ec) ec = GET_EC();
454 classname =
"<unknown>";
490 static VALUE sym_global_constant_state, sym_class_serial;
498 else if (RB_TYPE_P(arg,
T_HASH))
507 if (sym_global_constant_state == 0) {
508#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
509 S(global_constant_state);
514#define SET(name, attr) \
515 if (key == sym_##name) \
516 return SERIALT2NUM(attr); \
517 else if (hash != Qnil) \
518 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
536 if (iseq->
body->
type != ISEQ_TYPE_TOP) {
543 (
VALUE)vm_cref_new_toplevel(ec),
568 vm_set_eval_stack(ec, iseq, 0, &bind->
block);
572 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->
cfp));
579 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
591 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
592 if (VM_FRAME_RUBYFRAME_P(cfp)) {
605 if (VM_FRAME_RUBYFRAME_P(cfp)) {
611 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
612 if (VM_FRAME_RUBYFRAME_P(cfp)) {
633 vm_pop_frame(ec, cfp, cfp->
ep);
642 while (ec->
cfp != cfp) {
644 printf(
"skipped frame: %s\n", vm_frametype_name(ec->
cfp));
668ruby_vm_run_at_exit_hooks(
rb_vm_t *vm)
688 fprintf(stderr,
"---\n");
689 fprintf(stderr,
"envptr: %p\n", (
void *)&
env->ep[0]);
690 fprintf(stderr,
"envval: %10p ", (
void *)
env->ep[1]);
692 fprintf(stderr,
"ep: %10p\n", (
void *)
env->ep);
694 fprintf(stderr,
">>\n");
696 fprintf(stderr,
"<<\n");
704 if (check_env(
env)) {
714 switch (vm_block_handler_type(block_handler)) {
717 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler),
rb_cProc);
721 return block_handler;
730 const VALUE *
const ep = cfp->
ep;
733 VALUE *env_body, *env_ep;
734 int local_size, env_size;
736 if (VM_ENV_ESCAPED_P(ep)) {
737 return VM_ENV_ENVVAL(ep);
740 if (!VM_ENV_LOCAL_P(ep)) {
741 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
742 if (!VM_ENV_ESCAPED_P(prev_ep)) {
745 while (prev_cfp->
ep != prev_ep) {
750 vm_make_env_each(ec, prev_cfp);
755 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
758 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
763 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
782 env_size = local_size +
785 MEMCPY(env_body, ep - (local_size - 1 ),
VALUE, local_size);
788 for (i = 0; i < local_size; i++) {
789 if (VM_FRAME_RUBYFRAME_P(cfp)) {
791 ep[-local_size + i] = 0;
796 env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->
iseq :
NULL;
797 env_ep = &env_body[local_size - 1 ];
799 env = vm_env_new(env_ep, env_body, env_size, env_iseq);
803 VM_STACK_ENV_WRITE(ep, 0, (
VALUE)
env);
810 VALUE envval = vm_make_env_each(ec, cfp);
813 check_env_value((
const rb_env_t *)envval);
824 vm_make_env_object(ec, cfp);
834 if (VM_ENV_LOCAL_P(ep)) {
838 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
839 return VM_ENV_ENVVAL_PTR(prev_ep);
859 collect_local_variables_in_iseq(
env->iseq, vars);
866 if (VM_ENV_ESCAPED_P(ep)) {
867 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
879 local_var_list_init(&vars);
880 collect_local_variables_in_env(
env, &vars);
881 return local_var_list_finish(&vars);
888 local_var_list_init(&vars);
889 while (collect_local_variables_in_iseq(iseq, &vars)) {
892 return local_var_list_finish(&vars);
898vm_proc_create_from_captured(
VALUE klass,
901 int8_t is_from_method, int8_t is_lambda)
906 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->
ep));
913 vm_block_type_set(&proc->
block, block_type);
924 switch (vm_block_type(src)) {
941proc_create(
VALUE klass,
const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
946 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
948 vm_block_type_set(&proc->
block, block->
type);
976collect_outer_variable_names(
ID id,
VALUE val,
void *
ptr)
998env_copy(
const VALUE *src_ep,
VALUE read_only_variables)
1007 if (read_only_variables) {
1008 for (
int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1014 if (!rb_ractor_shareable_p(v)) {
1016 "can not make shareable Proc because it can refer unshareable object %"
1030 if (!VM_ENV_LOCAL_P(src_ep)) {
1031 const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->
ep);
1032 const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1033 prev_env = (
VALUE)new_prev_env;
1049 const rb_env_t *
env = env_copy(captured->
ep, read_only_variables);
1074 rb_raise(
rb_eArgError,
"can not isolate a Proc because it accesses outer variables (%s) and uses `yield'.",
1088 proc_isolate_env(self, proc,
Qfalse);
1107 const rb_iseq_t *iseq = vm_proc_iseq(self);
1128 rb_raise(
rb_eArgError,
"can not make a Proc shareable because it accesses outer variables (%s) and uses `yield'.",
1132 rb_raise(
rb_eArgError,
"can not make a Proc shareable because it accesses outer variables (%s).",
1136 else if (data.
yield) {
1143 proc_isolate_env(self, proc, read_only_variables);
1156 if (!VM_ENV_ESCAPED_P(captured->
ep)) {
1158 vm_make_env_object(ec, cfp);
1164 procval = vm_proc_create_from_captured(klass, captured,
1176 VALUE bindval, envval;
1179 if (cfp == 0 || ruby_level_cfp == 0) {
1184 envval = vm_make_env_object(ec, cfp);
1185 if (cfp == ruby_level_cfp) {
1193 vm_bind_update_env(bindval, bind, envval);
1206 VALUE path = pathobj_path(pathobj);
1207 VALUE realpath = pathobj_realpath(pathobj);
1214 ID minibuf[4], *dyns = minibuf;
1217 if (dyncount < 0)
return 0;
1219 base_block = &bind->
block;
1220 base_iseq = vm_block_iseq(base_block);
1225 MEMCPY(dyns + 1, dynvars,
ID, dyncount);
1227 ast.
root = &tmp_node;
1238 tmp_node.nd_tbl = 0;
1241 vm_set_eval_stack(ec, iseq, 0, base_block);
1242 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->
cfp));
1260 ec->
cfp->
sp + arg_size,
1280 ec->
cfp->
sp + arg_size,
1326 for (i=0; i<
argc; i++) {
1330 opt_pc = vm_yield_setup_args(ec, iseq,
argc, sp, kw_splat, passed_block_handler,
1335 return invoke_block(ec, iseq, self,
captured, cref,
type, opt_pc);
1338 return invoke_bmethod(ec, iseq, self,
captured, me,
type, opt_pc);
1346 int is_lambda,
int force_blockarg)
1349 switch (vm_block_handler_type(block_handler)) {
1353 return invoke_iseq_block_from_c(ec, captured, captured->
self,
1354 argc,
argv, kw_splat, passed_block_handler,
1355 cref, is_lambda,
NULL);
1358 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1359 VM_BH_TO_IFUNC_BLOCK(block_handler)->
self,
1362 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1363 argc,
argv, kw_splat, passed_block_handler);
1365 if (force_blockarg ==
FALSE) {
1366 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1368 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1378 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->
cfp);
1379 vm_block_handler_verify(block_handler);
1384 return block_handler;
1390 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1392 cref, is_lambda,
FALSE);
1398 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1406 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1407 argc,
argv, kw_splat, block_handler,
1414 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1421 int kw_splat,
VALUE passed_block_handler,
int is_lambda,
1427 int kw_splat,
VALUE passed_block_handler,
int is_lambda,
1433 switch (vm_block_type(block)) {
1435 return invoke_iseq_block_from_c(ec, &block->
as.
captured, self,
argc,
argv, kw_splat, passed_block_handler,
NULL, is_lambda, me);
1437 if (kw_splat == 1) {
1439 if (!RB_TYPE_P(keyword_hash,
T_HASH)) {
1448 return vm_yield_with_cfunc(ec, &block->
as.
captured, self,
argc,
argv, kw_splat, passed_block_handler, me);
1450 return vm_yield_with_symbol(ec, block->
as.
symbol,
argc,
argv, kw_splat, passed_block_handler);
1452 is_lambda = block_proc_is_lambda(block->
as.
proc);
1453 block = vm_proc_block(block->
as.
proc);
1464 return invoke_block_from_c_proc(ec,
proc, self,
argc,
argv, kw_splat, passed_block_handler,
proc->is_lambda,
NULL);
1471 return invoke_block_from_c_proc(ec,
proc, self,
argc,
argv, kw_splat, block_handler,
TRUE, me);
1478 VALUE self = vm_block_self(&
proc->block);
1479 vm_block_handler_verify(passed_block_handler);
1481 if (
proc->is_from_method) {
1485 return vm_invoke_proc(ec,
proc, self,
argc,
argv, kw_splat, passed_block_handler);
1493 vm_block_handler_verify(passed_block_handler);
1495 if (
proc->is_from_method) {
1499 return vm_invoke_proc(ec,
proc, self,
argc,
argv, kw_splat, passed_block_handler);
1508 while (cfp->
pc == 0) {
1510 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1520 cfp = vm_normal_frame(ec, cfp);
1521 return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0,
key);
1527 cfp = vm_normal_frame(ec, cfp);
1528 lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0,
key, val);
1534 return vm_cfp_svar_get(ec, ec->
cfp,
key);
1540 vm_cfp_svar_set(ec, ec->
cfp,
key, val);
1605 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
1610 if (pline) *pline = 0;
1627 return vm_ec_cref(ec);
1635 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->
ep);
1645 if (!cfp || cfp->
self != self)
return NULL;
1646 if (!vm_env_cref_by_cref(cfp->
ep))
return NULL;
1647 cref = vm_get_cref(cfp->
ep);
1648 if (CREF_CLASS(cref) != cbase)
return NULL;
1657 dp(CREF_CLASS(cref));
1658 printf(
"%ld\n", CREF_VISI(cref));
1659 cref = CREF_NEXT(cref);
1673 return vm_get_cbase(cfp->
ep);
1679make_localjump_error(
const char *mesg,
VALUE value,
int reason)
1713 VALUE exc = make_localjump_error(mesg, value, reason);
1724 mesg =
"unexpected return";
1727 mesg =
"unexpected break";
1730 mesg =
"unexpected next";
1733 mesg =
"unexpected redo";
1737 mesg =
"retry outside of rescue clause";
1744 val = GET_EC()->tag->retval;
1746 return make_localjump_error(mesg, val,
state);
1760 while (VM_ENV_LOCAL_P(cfp->
ep)) {
1772 const VALUE *ep = VM_CF_PREV_EP(cfp);
1788 vm_iter_break(GET_EC(),
Qnil);
1794 vm_iter_break(GET_EC(), val);
1799static st_table *vm_opt_method_def_table = 0;
1800static st_table *vm_opt_mid_table = 0;
1803vm_redefinition_check_flag(
VALUE klass)
1825 if (!vm_opt_mid_table) {
1835 switch (
def->type) {
1852 if (vm_redefinition_check_method_type(me->
def)) {
1854 int flag = vm_redefinition_check_flag(klass);
1861check_redefined_method(
ID mid,
VALUE value,
void *data)
1867 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->
owner);
1875 if (!vm_redefinition_check_flag(klass))
return;
1884 if (me && vm_redefinition_check_method_type(me->
def)) {
1894vm_init_redefined_flag(
void)
1902#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
1903#define C(k) add_opt_method(rb_c##k, mid, bop)
1904 OP(PLUS, PLUS), (
C(Integer),
C(Float),
C(String),
C(Array));
1905 OP(MINUS, MINUS), (
C(Integer),
C(Float));
1906 OP(MULT, MULT), (
C(Integer),
C(Float));
1909 OP(Eq, EQ), (
C(Integer),
C(Float),
C(String),
C(Symbol));
1910 OP(Eqq, EQQ), (
C(Integer),
C(Float),
C(Symbol),
C(String),
1911 C(NilClass),
C(TrueClass),
C(FalseClass));
1912 OP(LT, LT), (
C(Integer),
C(Float));
1913 OP(LE, LE), (
C(Integer),
C(Float));
1914 OP(GT, GT), (
C(Integer),
C(Float));
1915 OP(GE, GE), (
C(Integer),
C(Float));
1916 OP(LTLT, LTLT), (
C(String),
C(Array));
1918 OP(ASET, ASET), (
C(Array),
C(Hash));
1919 OP(Length,
LENGTH), (
C(Array),
C(String),
C(Hash));
1921 OP(EmptyP, EMPTY_P), (
C(Array),
C(String),
C(Hash));
1922 OP(Succ, SUCC), (
C(Integer),
C(String));
1924 OP(Freeze, FREEZE), (
C(String));
1925 OP(UMinus, UMINUS), (
C(String));
1929 OP(And, AND), (
C(Integer));
1930 OP(Or,
OR), (
C(Integer));
1942 switch (VM_FRAME_TYPE(cfp)) {
1962 THROW_DATA_CONSUMED_P(
err) ==
FALSE) {
1963 return THROW_DATA_VAL(
err);
1975 unsigned long type = VM_FRAME_TYPE(cfp);
1976#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2004 switch (VM_FRAME_TYPE(ec->
cfp)) {
2014 THROW_DATA_CONSUMED_SET(
err);
2017 if (VM_FRAME_BMETHOD_P(ec->
cfp)) {
2024 if (!will_finish_vm_exec) {
2032 frame_return_value(
err));
2042 frame_return_value(
err),
TRUE);
2045 THROW_DATA_CONSUMED_SET(
err);
2053 THROW_DATA_CONSUMED_SET(
err);
2161 if (!mjit_enable_p || (result = mjit_exec(ec)) ==
Qundef) {
2162 result = vm_exec_core(ec, initial);
2169 while ((result = vm_exec_handle_exception(ec,
state, result, &initial)) ==
Qundef) {
2171 result = vm_exec_core(ec, initial);
2192 const struct iseq_catch_table *ct;
2193 unsigned long epc, cont_pc, cont_sp;
2199 cont_pc = cont_sp = 0;
2220 escape_cfp = THROW_DATA_CATCH_FRAME(
err);
2222 if (cfp == escape_cfp) {
2224 if (!VM_FRAME_FINISHED_P(cfp)) {
2225 THROW_DATA_CATCH_FRAME_SET(
err, cfp + 1);
2230 if (ct)
for (i = 0; i < ct->size; i++) {
2232 if (entry->
start < epc && entry->
end >= epc) {
2233 if (entry->
type == CATCH_TYPE_ENSURE) {
2234 catch_iseq = entry->
iseq;
2235 cont_pc = entry->
cont;
2236 cont_sp = entry->
sp;
2241 if (catch_iseq ==
NULL) {
2243 THROW_DATA_CATCH_FRAME_SET(
err, cfp + 1);
2246 return THROW_DATA_VAL(
err);
2253#if OPT_STACK_CACHING
2254 *initial = THROW_DATA_VAL(
err);
2256 *ec->
cfp->
sp++ = THROW_DATA_VAL(
err);
2266 if (ct)
for (i = 0; i < ct->size; i++) {
2268 if (entry->
start < epc && entry->
end >= epc) {
2270 if (entry->
type == CATCH_TYPE_RESCUE ||
2271 entry->
type == CATCH_TYPE_ENSURE) {
2272 catch_iseq = entry->
iseq;
2273 cont_pc = entry->
cont;
2274 cont_sp = entry->
sp;
2282 if (ct)
for (i = 0; i < ct->size; i++) {
2284 if (entry->
start < epc && entry->
end >= epc) {
2286 if (entry->
type == CATCH_TYPE_ENSURE) {
2287 catch_iseq = entry->
iseq;
2288 cont_pc = entry->
cont;
2289 cont_sp = entry->
sp;
2292 else if (entry->
type == CATCH_TYPE_RETRY) {
2294 escape_cfp = THROW_DATA_CATCH_FRAME(
err);
2295 if (cfp == escape_cfp) {
2315 if (ct)
for (i = 0; i < ct->size; i++) {
2318 if (entry->
start < epc && entry->
end >= epc) {
2319 if (entry->
type == CATCH_TYPE_ENSURE) {
2320 catch_iseq = entry->
iseq;
2321 cont_pc = entry->
cont;
2322 cont_sp = entry->
sp;
2327 cfp->
sp = vm_base_ptr(cfp) + entry->
sp;
2330#if OPT_STACK_CACHING
2331 *initial = THROW_DATA_VAL(
err);
2333 *ec->
cfp->
sp++ = THROW_DATA_VAL(
err);
2345 if (ct)
for (i = 0; i < ct->size; i++) {
2347 if (entry->
start < epc && entry->
end >= epc) {
2349 if (entry->
type == CATCH_TYPE_ENSURE) {
2350 catch_iseq = entry->
iseq;
2351 cont_pc = entry->
cont;
2352 cont_sp = entry->
sp;
2359 if (catch_iseq !=
NULL) {
2361 const int arg_size = 1;
2363 rb_iseq_check(catch_iseq);
2364 cfp->
sp = vm_base_ptr(cfp) + cont_sp;
2374 cfp->
sp + arg_size ,
2387 if (VM_FRAME_FINISHED_P(ec->
cfp)) {
2407 vm_set_top_stack(ec, iseq);
2418 vm_set_main_stack(ec, iseq);
2430 if (called_idp) *called_idp = me->
called_id;
2431 if (klassp) *klassp = me->
owner;
2461 recv, block_handler,
2462 (
VALUE)vm_cref_new_toplevel(ec),
2463 0, reg_cfp->
sp, 0, 0);
2506 list_for_each(&vm->
ractor.
set, r, vmlr_node) {
2507 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
2508 rb_ractor_status_p(r, ractor_running));
2511 list_for_each(&r->
threads.
set, th, lt_node) {
2518 if (!rb_special_const_p(*p)) {
2531vm_mark_negative_cme(
VALUE val,
void *dmy)
2541 RUBY_GC_INFO(
"-------------------------------------------------\n");
2546 const VALUE *obj_ary;
2548 list_for_each(&vm->
ractor.
set, r, vmlr_node) {
2550 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
2551 rb_ractor_status_p(r, ractor_running));
2559 for (i=0; i <
len; i++) {
2566 for (j=0; j < jlen; j++) {
2595 if (!vm_cc_invalidated_p(cc)) {
2610#undef rb_vm_register_special_exception
2652 rb_vm_living_threads_init(vm);
2653 ruby_vm_run_at_exit_hooks(vm);
2678vm_memsize(
const void *
ptr)
2694 {0, 0, vm_memsize,},
2700vm_default_params(
void)
2704#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2705 SET(thread_vm_stack_size);
2706 SET(thread_machine_stack_size);
2707 SET(fiber_vm_stack_size);
2708 SET(fiber_machine_stack_size);
2715get_param(
const char *
name,
size_t default_value,
size_t min_value)
2718 size_t result = default_value;
2720 long val = atol(envval);
2721 if (val < (
long)min_value) {
2722 val = (
long)min_value;
2726 if (0) fprintf(stderr,
"%s: %"PRIuSIZE"\n",
name, result);
2732check_machine_stack_size(
size_t *sizep)
2734#ifdef PTHREAD_STACK_MIN
2735 size_t size = *sizep;
2738#ifdef PTHREAD_STACK_MIN
2739 if (
size < PTHREAD_STACK_MIN) {
2740 *sizep = PTHREAD_STACK_MIN * 2;
2746vm_default_params_setup(
rb_vm_t *vm)
2749 get_param(
"RUBY_THREAD_VM_STACK_SIZE",
2754 get_param(
"RUBY_THREAD_MACHINE_STACK_SIZE",
2759 get_param(
"RUBY_FIBER_VM_STACK_SIZE",
2764 get_param(
"RUBY_FIBER_MACHINE_STACK_SIZE",
2777 rb_vm_living_threads_init(vm);
2781 vm_default_params_setup(vm);
2796 for (i = 0; i < (
long)(sp - p); i++) {
2799 if (ref != update) {
2804 while (cfp != limit_cfp) {
2810 if (!VM_ENV_LOCAL_P(ep)) {
2811 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2848 while (cfp != limit_cfp) {
2855 if (!VM_ENV_LOCAL_P(ep)) {
2856 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2897thread_compact(
void *
ptr)
2909thread_mark(
void *
ptr)
2917 case thread_invoke_type_proc:
2918 case thread_invoke_type_ractor_proc:
2922 case thread_invoke_type_func:
2951thread_free(
void *
ptr)
2957 rb_bug(
"thread_free: locking_mutex must be NULL (%p:%p)", (
void *)th, (
void *)th->locking_mutex);
2960 rb_bug(
"thread_free: keeping_mutexes must be NULL (%p:%p)", (
void *)th, (
void *)th->keeping_mutexes);
2976thread_memsize(
const void *
ptr)
2990#define thread_data_type ruby_threadptr_data_type
3014thread_alloc(
VALUE klass)
3081#ifdef NON_SCALAR_THREAD_ID
3082 th->thread_id_string[0] =
'\0';
3087#if OPT_CALL_THREADED_CODE
3096ruby_thread_init(
VALUE self)
3103 th_init(target_th, self);
3116 VALUE self = thread_alloc(klass);
3117 ruby_thread_init(self);
3121#define REWIND_CFP(expr) do { \
3122 rb_execution_context_t *ec__ = GET_EC(); \
3123 VALUE *const curr_sp = (ec__->cfp++)->sp; \
3124 VALUE *const saved_sp = ec__->cfp->sp; \
3125 ec__->cfp->sp = curr_sp; \
3127 (ec__->cfp--)->sp = saved_sp; \
3160m_core_set_postexe(
VALUE self)
3171 Check_Type(hash,
T_HASH);
3197 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
3208m_core_make_shareable_copy(
VALUE recv,
VALUE obj)
3240 if (!
NIL_P(options)) {
3241 static ID keyword_ids[1];
3242 if (!keyword_ids[0])
3253 return mjit_resume();
3275#include <execinfo.h>
3276#define MAX_NATIVE_TRACE 1024
3277 static void *trace[MAX_NATIVE_TRACE];
3278 int n = (
int)backtrace(trace, MAX_NATIVE_TRACE);
3279 char **syms = backtrace_symbols(trace, n);
3286 for (i=0; i<n; i++) {
3294#if VM_COLLECT_USAGE_DETAILS
3295static VALUE usage_analysis_insn_start(
VALUE self);
3296static VALUE usage_analysis_operand_start(
VALUE self);
3297static VALUE usage_analysis_register_start(
VALUE self);
3298static VALUE usage_analysis_insn_stop(
VALUE self);
3299static VALUE usage_analysis_operand_stop(
VALUE self);
3300static VALUE usage_analysis_register_stop(
VALUE self);
3301static VALUE usage_analysis_insn_running(
VALUE self);
3302static VALUE usage_analysis_operand_running(
VALUE self);
3303static VALUE usage_analysis_register_running(
VALUE self);
3304static VALUE usage_analysis_insn_clear(
VALUE self);
3305static VALUE usage_analysis_operand_clear(
VALUE self);
3306static VALUE usage_analysis_register_clear(
VALUE self);
3364#if USE_DEBUG_COUNTER
3384 rb_define_method(klass,
"make_shareable_copy", m_core_make_shareable_copy, 1);
3387 RBASIC_CLEAR_CLASS(klass);
3562#if VM_COLLECT_USAGE_DETAILS
3564#define define_usage_analysis_hash(name) \
3565 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3566 define_usage_analysis_hash(
INSN);
3567 define_usage_analysis_hash(REGS);
3568 define_usage_analysis_hash(INSN_BIGRAM);
3590#if OPT_DIRECT_THREADED_CODE
3592#elif OPT_TOKEN_THREADED_CODE
3594#elif OPT_CALL_THREADED_CODE
3598#if OPT_STACK_CACHING
3601#if OPT_OPERANDS_UNIFICATION
3604#if OPT_INSTRUCTIONS_UNIFICATION
3607#if OPT_INLINE_METHOD_CACHE
3610#if OPT_BLOCKINLINING
3677 vm_init_redefined_flag();
3708 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
3722 rb_ractor_set_current_ec(th->
ractor, th->
ec);
3754 return GET_VM()->top_self;
3789 return GET_VM()->frozen_strings;
3792#if VM_COLLECT_USAGE_DETAILS
3794#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3808vm_analysis_insn(
int insn)
3812 static int prev_insn = -1;
3818 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
3819 CONST_ID(bigram_hash,
"USAGE_ANALYSIS_INSN_BIGRAM");
3823 HASH_ASET(uh,
INT2FIX(insn), ihash);
3831 if (prev_insn != -1) {
3850vm_analysis_operand(
int insn,
int n,
VALUE op)
3860 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
3865 HASH_ASET(uh,
INT2FIX(insn), ihash);
3869 HASH_ASET(ihash,
INT2FIX(n), ophash);
3882vm_analysis_register(
int reg,
int isset)
3887 static const char regstrs[][5] = {
3895 static const char getsetstr[][4] = {
3899 static VALUE syms[
sizeof(regstrs) /
sizeof(regstrs[0])][2];
3903 CONST_ID(usage_hash,
"USAGE_ANALYSIS_REGS");
3908 for (i = 0; i < (
int)(
sizeof(regstrs) /
sizeof(regstrs[0])); i++) {
3910 for (j = 0; j < 2; j++) {
3911 snprintf(buff, 0x10,
"%d %s %-4s", i, getsetstr[j], regstrs[i]);
3916 valstr = syms[reg][isset];
3927static void (*ruby_vm_collect_usage_func_insn)(
int insn) =
NULL;
3928static void (*ruby_vm_collect_usage_func_operand)(
int insn,
int n,
VALUE op) =
NULL;
3929static void (*ruby_vm_collect_usage_func_register)(
int reg,
int isset) =
NULL;
3933usage_analysis_insn_start(
VALUE self)
3935 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
3941usage_analysis_operand_start(
VALUE self)
3943 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
3949usage_analysis_register_start(
VALUE self)
3951 ruby_vm_collect_usage_func_register = vm_analysis_register;
3957usage_analysis_insn_stop(
VALUE self)
3959 ruby_vm_collect_usage_func_insn = 0;
3965usage_analysis_operand_stop(
VALUE self)
3967 ruby_vm_collect_usage_func_operand = 0;
3973usage_analysis_register_stop(
VALUE self)
3975 ruby_vm_collect_usage_func_register = 0;
3981usage_analysis_insn_running(
VALUE self)
3983 if (ruby_vm_collect_usage_func_insn == 0)
return Qfalse;
3989usage_analysis_operand_running(
VALUE self)
3991 if (ruby_vm_collect_usage_func_operand == 0)
return Qfalse;
3997usage_analysis_register_running(
VALUE self)
3999 if (ruby_vm_collect_usage_func_register == 0)
return Qfalse;
4005usage_analysis_insn_clear(
VALUE self)
4012 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
4013 CONST_ID(bigram_hash,
"USAGE_ANALYSIS_INSN_BIGRAM");
4024usage_analysis_operand_clear(
VALUE self)
4029 CONST_ID(usage_hash,
"USAGE_ANALYSIS_INSN");
4038usage_analysis_register_clear(
VALUE self)
4043 CONST_ID(usage_hash,
"USAGE_ANALYSIS_REGS");
4054MAYBE_UNUSED(
static void (*ruby_vm_collect_usage_func_register)(
int reg,
int isset)) = 0;
4058#if VM_COLLECT_USAGE_DETAILS
4061vm_collect_usage_insn(
int insn)
4063 if (RUBY_DTRACE_INSN_ENABLED()) {
4066 if (ruby_vm_collect_usage_func_insn)
4067 (*ruby_vm_collect_usage_func_insn)(insn);
4075vm_collect_usage_operand(
int insn,
int n,
VALUE op)
4077 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
4085 if (ruby_vm_collect_usage_func_operand)
4086 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
4092vm_collect_usage_register(
int reg,
int isset)
4094 if (ruby_vm_collect_usage_func_register)
4095 (*ruby_vm_collect_usage_func_register)(reg, isset);
4102 return &vm_empty_cc;
4107#include "vm_call_iseq_optimized.inc"
VALUE rb_ary_push(VALUE ary, VALUE item)
VALUE rb_ary_delete_at(VALUE ary, long pos)
VALUE rb_ary_tmp_new(long capa)
VALUE rb_ary_join(VALUE ary, VALUE sep)
#define RUBY_ASSERT_MESG(expr, mesg)
Asserts that the expression is truthy.
VALUE rb_insns_name_array(void)
const char * rb_insns_name(int i)
Internal header for the compiler.
void rb_fiber_reset_root_local_storage(rb_thread_t *th)
#define OR(d, d0, d1, bl)
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_method_id(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define MJIT_FUNC_EXPORTED
char str[HTML_ESCAPE_MAX_LEN+1]
VALUE rb_f_raise(int argc, VALUE *argv)
#define EC_JUMP_TAG(ec, st)
#define rb_ec_raised_reset(ec, f)
void rb_set_end_proc(void(*func)(VALUE), VALUE data)
void rb_call_end_proc(VALUE data)
#define RUBY_EVENT_B_RETURN
#define RUBY_EVENT_RETURN
#define RUBY_EVENT_C_RETURN
#define RSTRING_PTR(string)
#define stack_check(ec, water_mark)
void ruby_xfree(void *x)
Deallocates a storage instance.
VALUE rb_gc_location(VALUE value)
void ruby_mimfree(void *ptr)
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
void rb_gc_mark_movable(VALUE ptr)
void rb_gc_mark_maybe(VALUE obj)
void rb_mark_tbl(st_table *tbl)
void * ruby_mimmalloc(size_t size)
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
rb_objspace_t * rb_objspace_alloc(void)
void rb_gc_update_tbl_refs(st_table *ptr)
void rb_gc_mark(VALUE ptr)
void rb_gc_mark_values(long n, const VALUE *values)
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
void rb_objspace_free(rb_objspace_t *objspace)
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
void rb_gc_register_mark_object(VALUE obj)
Inform the garbage collector that object is a live Ruby object that should not be moved.
#define RUBY_MARK_LEAVE(msg)
#define RUBY_MARK_ENTER(msg)
#define RUBY_MARK_MOVABLE_UNLESS_NULL(ptr)
#define RUBY_FREE_ENTER(msg)
#define RUBY_FREE_LEAVE(msg)
#define RUBY_MARK_UNLESS_NULL(ptr)
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
VALUE rb_class_new(VALUE super)
Creates a new class.
VALUE rb_singleton_class(VALUE obj)
Returns the singleton class of obj.
VALUE rb_define_module_under(VALUE outer, const char *name)
void rb_undef_method(VALUE klass, const char *name)
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
void rb_raise(VALUE exc, const char *fmt,...)
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
void rb_bug(const char *fmt,...)
VALUE rb_cObject
Object class.
VALUE rb_obj_alloc(VALUE)
Allocates an instance of klass.
VALUE rb_cNilClass
NilClass class.
VALUE rb_cFalseClass
FalseClass class.
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
VALUE rb_cBasicObject
BasicObject class.
VALUE rb_obj_freeze(VALUE)
Make the object unmodifiable.
VALUE rb_cTrueClass
TrueClass class.
VALUE rb_to_hash_type(VALUE hash)
void rb_hash_bulk_insert(long argc, const VALUE *argv, VALUE hash)
void rb_hash_foreach(VALUE hash, rb_foreach_func *func, VALUE farg)
VALUE rb_hash_new_with_size(st_index_t size)
VALUE rb_hash_aref(VALUE hash, VALUE key)
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
VALUE rb_hash_dup(VALUE hash)
VALUE rb_hash_clear(VALUE hash)
@ id_core_set_method_alias
@ id_core_set_variable_alias
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
struct rb_id_table * rb_id_table_create(size_t capa)
void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
rb_id_table_iterator_result
#define THROW_DATA_P(err)
@ imemo_ifunc
iterator function
@ imemo_cref
class reference
Defines RBIMPL_HAS_BUILTIN.
VALUE rb_block_proc(void)
VALUE rb_block_lambda(void)
VALUE rb_binding_new(void)
VALUE rb_str_intern(VALUE)
VALUE rb_const_get(VALUE, ID)
void rb_set_class_path(VALUE, VALUE, const char *)
VALUE rb_class_path(VALUE)
VALUE rb_attr_get(VALUE, ID)
void rb_alias_variable(ID, ID)
void rb_alias(VALUE, ID, ID)
void rb_undef_alloc_func(VALUE)
const char * rb_id2name(ID)
ID rb_intern(const char *)
void rb_define_global_const(const char *, VALUE)
VALUE rb_iv_set(VALUE, const char *, VALUE)
void rb_define_const(VALUE, const char *, VALUE)
Internal header aggregating init functions.
void Init_vm_backtrace(void)
#define RICLASS_IS_ORIGIN
Internal header for Fiber.
#define UNALIGNED_MEMBER_PTR(ptr, mem)
Internal header for Object.
Internal header for the parser.
Internal header for Proc.
Internal header for Regexp.
#define rb_fstring_lit(str)
Internal header for RubyVM.
typedef long(ZCALLBACK *tell_file_func) OF((voidpf opaque
typedef int(ZCALLBACK *close_file_func) OF((voidpf opaque
VALUE rb_iseq_path(const rb_iseq_t *iseq)
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath)
rb_iseq_t * rb_iseq_new(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type type)
rb_iseq_t * rb_iseq_new_top(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent)
VALUE rb_iseq_realpath(const rb_iseq_t *iseq)
#define MEMCPY(p1, p2, type, n)
#define MEMZERO(p, type, n)
const rb_method_entry_t * rb_method_entry(VALUE klass, ID id)
void rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *option, rb_method_visibility_t visi)
@ VM_METHOD_TYPE_CFUNC
C method.
@ VM_METHOD_TYPE_OPTIMIZED
Kernel::send, Proc::call, etc.
@ OPTIMIZED_METHOD_TYPE_BLOCK_CALL
void rb_clear_method_cache(VALUE klass_or_module, ID mid)
const rb_method_entry_t * rb_method_entry_at(VALUE obj, ID id)
VALUE type(ANYARGS)
ANYARGS-ed function type.
void rb_node_init(NODE *n, enum node_type type, VALUE a0, VALUE a1, VALUE a2)
#define RUBY_DTRACE_METHOD_RETURN_HOOK(ec, klass, id)
#define RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, klass, id)
#define RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, klass, id)
VALUE rb_binding_alloc(VALUE klass)
VALUE rb_proc_alloc(VALUE klass)
#define RARRAY_CONST_PTR(s)
#define RARRAY_AREF(a, i)
rb_ractor_t * rb_ractor_main_alloc(void)
VALUE rb_ractor_ensure_shareable(VALUE obj, VALUE name)
VALUE rb_eRactorIsolationError
void rb_ractor_main_setup(rb_vm_t *vm, rb_ractor_t *r, rb_thread_t *th)
VALUE rb_ractor_make_shareable_copy(VALUE obj)
#define RB_OBJ_SHAREABLE_P(obj)
VALUE rb_ractor_make_shareable(VALUE obj)
#define RB_OBJ_WRITE(a, slot, b)
Write barrier (WB) for a new reference from ‘a’ to ‘b’.
#define RB_OBJ_WRITTEN(a, oldv, b)
Write barrier (WB) for a new reference from ‘a’ to ‘b’.
#define StringValuePtr(v)
#define StringValueCStr(v)
#define RTYPEDDATA_DATA(v)
#define TypedData_Wrap_Struct(klass, data_type, sval)
@ RUBY_TYPED_FREE_IMMEDIATELY
#define TypedData_Make_Struct(klass, type, data_type, sval)
Internal header for ASAN / MSAN / etc.
unsigned LONG_LONG rb_serial_t
#define st_init_table_with_size
enum iseq_catch_table_entry::catch_type type
struct rb_at_exit_list * next
rb_vm_at_exit_func * func
unsigned short first_lineno
const struct rb_block block
struct rb_captured_block captured
struct rb_method_definition_struct *const def
const vm_call_handler call_
const struct rb_callable_method_entry_struct *const cme_
union rb_callcache::@184 aux_
union rb_captured_block::@198 code
VALUE local_storage_recursive_hash_for_trace
struct rb_execution_context_struct::@200 machine
struct rb_id_table * local_storage
VALUE private_const_reference
VALUE passed_block_handler
VALUE local_storage_recursive_hash
struct iseq_catch_table * catch_table
enum rb_iseq_constant_body::iseq_type type
struct rb_id_table * outer_variables
unsigned int local_table_size
rb_iseq_location_t location
struct rb_iseq_constant_body::@188 param
parameter information
const struct rb_iseq_struct * parent_iseq
struct rb_iseq_constant_body * body
struct rb_hook_list_struct * local_hooks
struct rb_iseq_struct::@191::@193 exec
union rb_iseq_struct::@191 aux
struct rb_hook_list_struct * hooks
rb_method_bmethod_t bmethod
union rb_method_definition_struct::@123 body
struct rb_method_definition_struct *const def
const struct rb_block block
unsigned int is_from_method
struct rb_ractor_struct::@141 threads
rb_execution_context_t * ec
enum rb_thread_struct::thread_invoke_type invoke_type
VALUE pending_interrupt_mask_stack
unsigned int report_on_exception
VALUE pending_interrupt_queue
union rb_thread_struct::@201 invoke_arg
struct rb_ext_config ext_config
struct rb_mutex_struct * keeping_mutexes
VALUE load_path_check_cache
struct rb_vm_struct::@194::@197 sync
rb_nativethread_cond_t barrier_cond
struct rb_vm_struct::@195 trap_list
st_table * defined_module_hash
rb_nativethread_lock_t waitpid_lock
struct rb_vm_struct::@194 ractor
struct rb_id_table * negative_cme_table
struct st_table * loading_table
struct rb_thread_struct * main_thread
const struct rb_callcache * global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]
size_t fiber_vm_stack_size
size_t fiber_machine_stack_size
struct rb_vm_struct::@196 default_params
struct rb_ractor_struct * main_ractor
size_t thread_vm_stack_size
struct rb_objspace * objspace
rb_nativethread_lock_t lock
const VALUE special_exceptions[ruby_special_error_count]
VALUE loaded_features_snapshot
unsigned int thread_report_on_exception
rb_nativethread_cond_t terminate_cond
rb_nativethread_lock_t workqueue_lock
st_table * frozen_strings
size_t thread_machine_stack_size
rb_at_exit_list * at_exit
void ruby_thread_init_stack(rb_thread_t *th)
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
RB_THREAD_LOCAL_SPECIFIER struct rb_execution_context_struct * ruby_current_ec
#define RB_THREAD_LOCAL_SPECIFIER
ruby_value_type
C-level type of an object.
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
const rb_data_type_t ruby_threadptr_data_type
void rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
void rb_ec_clear_vm_stack(rb_execution_context_t *ec)
rb_ractor_t * ruby_single_main_ractor
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
rb_serial_t ruby_vm_class_serial
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq)
VALUE rb_mRubyVMFrozenCore
rb_cref_t * rb_vm_cref_new_toplevel(void)
void rb_threadptr_root_fiber_setup(rb_thread_t *th)
rb_vm_t * ruby_current_vm_ptr
void rb_lastline_set(VALUE val)
VALUE rb_iseq_eval(const rb_iseq_t *iseq)
int rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, struct ruby_dtrace_method_hook_args *args)
VALUE rb_vm_call_cfunc(VALUE recv, VALUE(*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename)
VALUE rb_backref_get(void)
VALUE ruby_vm_const_missing_count
VALUE rb_proc_ractor_make_shareable(VALUE self)
const char * rb_sourcefile(void)
void rb_vm_stack_to_heap(rb_execution_context_t *ec)
int rb_vm_add_root_module(VALUE module)
int ruby_vm_destruct(rb_vm_t *vm)
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
VALUE rb_proc_isolate_bang(VALUE self)
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
void rb_fiber_mark_self(rb_fiber_t *fib)
native_tls_key_t ruby_current_ec_key
void rb_vm_jump_tag_but_local_jump(int state)
const rb_cref_t * rb_vm_cref_in_context(VALUE self, VALUE cbase)
VALUE rb_proc_dup(VALUE self)
VALUE rb_vm_top_self(void)
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
const struct st_hash_type rb_fstring_hash_type
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
VALUE rb_proc_isolate(VALUE self)
int rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
int rb_vm_check_optimizable_mid(VALUE mid)
void rb_vm_each_stack_value(void *ptr, void(*cb)(VALUE, void *), void *ctx)
st_table * rb_vm_fstring_table(void)
void rb_iter_break_value(VALUE val)
VALUE rb_obj_is_thread(VALUE obj)
void rb_vm_mark(void *ptr)
void rb_vm_set_progname(VALUE filename)
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
VALUE rb_vm_env_local_variables(const rb_env_t *env)
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
void rb_vm_check_redefinition_by_prepend(VALUE klass)
rb_serial_t rb_next_class_serial(void)
VALUE rb_thread_alloc(VALUE klass)
void rb_backref_set(VALUE val)
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
VALUE * rb_ruby_verbose_ptr(void)
VALUE rb_str_concat_literals(size_t, const VALUE *)
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
rb_serial_t ruby_vm_global_constant_state
const VALUE * rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
VALUE * rb_gc_stack_start
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
VALUE rb_iseq_local_variables(const rb_iseq_t *iseq)
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
void rb_vm_update_references(void *ptr)
VALUE rb_block_param_proxy
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
rb_event_flag_t ruby_vm_event_flags
const char * rb_source_location_cstr(int *pline)
VALUE rb_lastline_get(void)
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
unsigned int ruby_vm_event_local_num
VALUE rb_source_location(int *pline)
void rb_vm_pop_cfunc_frame(void)
void rb_threadptr_root_fiber_release(rb_thread_t *th)
VALUE * rb_ruby_debug_ptr(void)
const struct rb_callcache * rb_vm_empty_cc(void)
rb_cref_t * rb_vm_cref_replace_with_duplicated_cref(void)
void Init_vm_objects(void)
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
void rb_fiber_update_self(rb_fiber_t *fib)
size_t rb_gc_stack_maxsize
rb_cref_t * rb_vm_cref(void)
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
rb_event_flag_t ruby_vm_event_enabled_global_flags
void ruby_vm_at_exit(void(*func)(rb_vm_t *))
ruby_vm_at_exit registers a function func to be invoked when the VM passes away.
VALUE rb_insn_operand_intern(const rb_iseq_t *iseq, VALUE insn, int op_no, VALUE op, int len, size_t pos, VALUE *pnop, VALUE child)
void rb_execution_context_update(const rb_execution_context_t *ec)
void rb_execution_context_mark(const rb_execution_context_t *ec)
VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
#define ruby_vm_redefined_flag
int rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
void rb_vm_inc_const_missing_count(void)
int rb_vm_get_sourceline(const rb_control_frame_t *cfp)
#define VM_CALLCACHE_UNMARKABLE
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN
const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
#define REGEXP_REDEFINED_OP_FLAG
#define STRING_REDEFINED_OP_FLAG
#define FALSE_REDEFINED_OP_FLAG
void rb_vm_pop_frame(rb_execution_context_t *ec)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
void Init_native_thread(rb_thread_t *th)
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE
struct rb_thread_struct rb_thread_t
#define RB_ALTSTACK_FREE(var)
#define HASH_REDEFINED_OP_FLAG
#define GetBindingPtr(obj, ptr)
#define VM_ENV_DATA_INDEX_FLAGS
void rb_vm_bugreport(const void *)
#define TRUE_REDEFINED_OP_FLAG
@ block_handler_type_ifunc
@ block_handler_type_proc
@ block_handler_type_symbol
@ block_handler_type_iseq
#define TIME_REDEFINED_OP_FLAG
void rb_vm_at_exit_func(struct rb_vm_struct *)
#define VM_ENV_DATA_INDEX_ENV
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define ARRAY_REDEFINED_OP_FLAG
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE
struct rb_vm_struct rb_vm_t
#define PROC_REDEFINED_OP_FLAG
#define RUBY_VM_SIZE_ALIGN
#define RUBY_VM_FIBER_VM_STACK_SIZE
#define VM_GUARDED_PREV_EP(ep)
#define RUBY_VM_THREAD_VM_STACK_SIZE
#define VM_DEBUG_BP_CHECK
#define NIL_REDEFINED_OP_FLAG
#define INTEGER_REDEFINED_OP_FLAG
#define CHECK_VM_STACK_OVERFLOW(cfp, margin)
#define SYMBOL_REDEFINED_OP_FLAG
#define VM_TAGGED_PTR_REF(v, mask)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define VM_BLOCK_HANDLER_NONE
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_)
#define VM_ENV_DATA_INDEX_SPECVAL
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN
#define GetProcPtr(obj, ptr)
#define VM_ENV_DATA_INDEX_ME_CREF
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)
#define VM_UNREACHABLE(func)
#define FLOAT_REDEFINED_OP_FLAG
@ VM_ENV_FLAG_WB_REQUIRED
#define vm_check_canary(ec, sp)
#define NEXT_CLASS_SERIAL()
int def(FILE *source, FILE *dest, int level)