Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
vm.c
Go to the documentation of this file.
1/**********************************************************************
2
3 vm.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11#define vm_exec rb_vm_exec
12
13#include "eval_intern.h"
14#include "gc.h"
15#include "internal.h"
16#include "internal/compile.h"
17#include "internal/cont.h"
18#include "internal/error.h"
19#include "internal/eval.h"
20#include "internal/inits.h"
21#include "internal/object.h"
22#include "internal/parse.h"
23#include "internal/proc.h"
24#include "internal/re.h"
25#include "internal/symbol.h"
26#include "internal/vm.h"
27#include "internal/sanitizers.h"
28#include "iseq.h"
29#include "mjit.h"
30#include "ruby/st.h"
31#include "ruby/vm.h"
32#include "vm_core.h"
33#include "vm_callinfo.h"
34#include "vm_debug.h"
35#include "vm_exec.h"
36#include "vm_insnhelper.h"
37#include "ractor_core.h"
38#include "vm_sync.h"
39
40#include "builtin.h"
41
42#ifndef MJIT_HEADER
43#include "probes.h"
44#else
45#include "probes.dmyh"
46#endif
47#include "probes_helper.h"
48
49VALUE rb_str_concat_literals(size_t, const VALUE*);
50
51/* :FIXME: This #ifdef is because we build pch in case of mswin and
52 * not in case of other situations. That distinction might change in
53 * a future. We would better make it detectable in something better
54 * than just _MSC_VER. */
55#ifdef _MSC_VER
56RUBY_FUNC_EXPORTED
57#else
59#endif
61
62PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
63static inline const VALUE *
64VM_EP_LEP(const VALUE *ep)
65{
66 while (!VM_ENV_LOCAL_P(ep)) {
67 ep = VM_ENV_PREV_EP(ep);
68 }
69 return ep;
70}
71
72static inline const rb_control_frame_t *
73rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
74{
75 if (!ep) {
76 return NULL;
77 }
78 else {
79 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
80
81 while (cfp < eocfp) {
82 if (cfp->ep == ep) {
83 return cfp;
84 }
86 }
87
88 return NULL;
89 }
90}
91
92const VALUE *
94{
95 return VM_EP_LEP(ep);
96}
97
98PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
99static inline const VALUE *
100VM_CF_LEP(const rb_control_frame_t * const cfp)
101{
102 return VM_EP_LEP(cfp->ep);
103}
104
105static inline const VALUE *
106VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
107{
108 return VM_ENV_PREV_EP(cfp->ep);
109}
110
111PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
112static inline VALUE
113VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
114{
115 const VALUE *ep = VM_CF_LEP(cfp);
116 return VM_ENV_BLOCK_HANDLER(ep);
117}
118
119int
121{
122 return VM_FRAME_CFRAME_KW_P(cfp);
123}
124
125VALUE
127{
128 return VM_CF_BLOCK_HANDLER(cfp);
129}
130
131#if VM_CHECK_MODE > 0
132static int
133VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
134{
135 const VALUE *start = ec->vm_stack;
136 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
137 VM_ASSERT(start != NULL);
138
139 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
140 return FALSE;
141 }
142 else {
143 return TRUE;
144 }
145}
146
147static int
148VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
149{
150 const VALUE *start = ec->vm_stack;
151 const VALUE *end = (VALUE *)ec->cfp;
152 VM_ASSERT(start != NULL);
153
154 if (start <= ep && ep < end) {
155 return FALSE;
156 }
157 else {
158 return TRUE;
159 }
160}
161
162static int
163vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
164{
165 if (VM_EP_IN_HEAP_P(ec, ep)) {
166 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
167
168 if (envval != Qundef) {
169 const rb_env_t *env = (const rb_env_t *)envval;
170
171 VM_ASSERT(vm_assert_env(envval));
172 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
173 VM_ASSERT(env->ep == ep);
174 }
175 return TRUE;
176 }
177 else {
178 return FALSE;
179 }
180}
181
182int
183rb_vm_ep_in_heap_p(const VALUE *ep)
184{
185 const rb_execution_context_t *ec = GET_EC();
186 if (ec->vm_stack == NULL) return TRUE;
187 return vm_ep_in_heap_p_(ec, ep);
188}
189#endif
190
191static struct rb_captured_block *
192VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
193{
194 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
195 return (struct rb_captured_block *)&cfp->self;
196}
197
198static rb_control_frame_t *
199VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
200{
201 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
202 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
203 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
204 return cfp;
205}
206
207static int
208VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
209{
210 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
211 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
212}
213
214static VALUE
215vm_passed_block_handler(rb_execution_context_t *ec)
216{
217 VALUE block_handler = ec->passed_block_handler;
219 vm_block_handler_verify(block_handler);
220 return block_handler;
221}
222
223static rb_cref_t *
224vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev)
225{
226 VALUE refinements = Qnil;
227 int omod_shared = FALSE;
228 rb_cref_t *cref;
229
230 /* scope */
231 union {
233 VALUE value;
234 } scope_visi;
235
236 scope_visi.visi.method_visi = visi;
237 scope_visi.visi.module_func = module_func;
238
239 /* refinements */
240 if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
241 refinements = CREF_REFINEMENTS(prev_cref);
242
243 if (!NIL_P(refinements)) {
244 omod_shared = TRUE;
245 CREF_OMOD_SHARED_SET(prev_cref);
246 }
247 }
248
249 cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements);
250
251 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
252 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
253
254 return cref;
255}
256
257static rb_cref_t *
258vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
259{
260 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE);
261}
262
263static rb_cref_t *
264vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
265{
266 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE);
267}
268
269static int
270ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
271{
272 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
273}
274
275static rb_cref_t *
276vm_cref_dup(const rb_cref_t *cref)
277{
278 VALUE klass = CREF_CLASS(cref);
279 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
280 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
281 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
282
283 new_cref = vm_cref_new(klass, visi->method_visi, visi->module_func, next_cref, pushed_by_eval);
284
285 if (!NIL_P(CREF_REFINEMENTS(cref))) {
286 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
287 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
288 CREF_REFINEMENTS_SET(new_cref, ref);
289 CREF_OMOD_SHARED_UNSET(new_cref);
290 }
291
292 return new_cref;
293}
294
295static rb_cref_t *
296vm_cref_new_toplevel(rb_execution_context_t *ec)
297{
298 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE);
299 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
300
301 if (top_wrapper) {
302 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE);
303 }
304
305 return cref;
306}
307
308rb_cref_t *
310{
311 return vm_cref_new_toplevel(GET_EC());
312}
313
314static void
315vm_cref_dump(const char *mesg, const rb_cref_t *cref)
316{
317 fprintf(stderr, "vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
318
319 while (cref) {
320 fprintf(stderr, "= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
321 cref = CREF_NEXT(cref);
322 }
323}
324
325void
326rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
327{
328 *((const VALUE **)&dst->as.captured.ep) = ep;
329 RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
330}
331
332static void
333vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
334{
335 const rb_env_t *env = (rb_env_t *)envval;
336 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
337 rb_vm_block_ep_update(bindval, &bind->block, env->ep);
338}
339
340#if VM_COLLECT_USAGE_DETAILS
341static void vm_collect_usage_operand(int insn, int n, VALUE op);
342static void vm_collect_usage_insn(int insn);
343static void vm_collect_usage_register(int reg, int isset);
344#endif
345
346static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
348 int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
350static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
351
352#include "vm_insnhelper.c"
353
354#ifndef MJIT_HEADER
355
356#include "vm_exec.c"
357
358#include "vm_method.c"
359#endif /* #ifndef MJIT_HEADER */
360#include "vm_eval.c"
361#ifndef MJIT_HEADER
362
363#define PROCDEBUG 0
364
367{
368 rb_serial_t class_serial = NEXT_CLASS_SERIAL();
369 mjit_add_class_serial(class_serial);
370 return class_serial;
371}
372
377
378#define ruby_vm_redefined_flag GET_VM()->redefined_flag
382
383#ifdef RB_THREAD_LOCAL_SPECIFIER
385
386#ifdef __APPLE__
388 rb_current_ec(void)
389 {
390 return ruby_current_ec;
391 }
392 void
393 rb_current_ec_set(rb_execution_context_t *ec)
394 {
395 ruby_current_ec = ec;
396 }
397#endif
398
399#else
401#endif
402
406
409
410static const struct rb_callcache vm_empty_cc = {
412 .klass = Qfalse,
413 .cme_ = NULL,
414 .call_ = vm_call_general,
415 .aux_ = {
416 .v = Qfalse,
417 }
418};
419
420static void thread_free(void *ptr);
421
422//
423
424void
426{
428}
429
432 struct ruby_dtrace_method_hook_args *args)
433{
435 if (!klass) {
436 if (!ec) ec = GET_EC();
437 if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
438 return FALSE;
439 }
440 if (RB_TYPE_P(klass, T_ICLASS)) {
441 klass = RBASIC(klass)->klass;
442 }
443 else if (FL_TEST(klass, FL_SINGLETON)) {
444 klass = rb_attr_get(klass, id__attached__);
445 if (NIL_P(klass)) return FALSE;
446 }
448 if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
450 const char *classname, *filename;
451 const char *methodname = rb_id2name(id);
452 if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
453 if (NIL_P(name) || !(classname = StringValuePtr(name)))
454 classname = "<unknown>";
455 args->classname = classname;
456 args->methodname = methodname;
457 args->filename = filename;
458 args->klass = klass;
459 args->name = name;
460 return TRUE;
461 }
462 }
463 return FALSE;
464}
465
466/*
467 * call-seq:
468 * RubyVM.stat -> Hash
469 * RubyVM.stat(hsh) -> hsh
470 * RubyVM.stat(Symbol) -> Numeric
471 *
472 * Returns a Hash containing implementation-dependent counters inside the VM.
473 *
474 * This hash includes information about method/constant cache serials:
475 *
476 * {
477 * :global_constant_state=>481,
478 * :class_serial=>9029
479 * }
480 *
481 * The contents of the hash are implementation specific and may be changed in
482 * the future.
483 *
484 * This method is only expected to work on C Ruby.
485 */
486
487static VALUE
488vm_stat(int argc, VALUE *argv, VALUE self)
489{
490 static VALUE sym_global_constant_state, sym_class_serial;
491 VALUE arg = Qnil;
492 VALUE hash = Qnil, key = Qnil;
493
494 if (rb_check_arity(argc, 0, 1) == 1) {
495 arg = argv[0];
496 if (SYMBOL_P(arg))
497 key = arg;
498 else if (RB_TYPE_P(arg, T_HASH))
499 hash = arg;
500 else
501 rb_raise(rb_eTypeError, "non-hash or symbol given");
502 }
503 else {
504 hash = rb_hash_new();
505 }
506
507 if (sym_global_constant_state == 0) {
508#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
509 S(global_constant_state);
510 S(class_serial);
511#undef S
512 }
513
514#define SET(name, attr) \
515 if (key == sym_##name) \
516 return SERIALT2NUM(attr); \
517 else if (hash != Qnil) \
518 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
519
520 SET(global_constant_state, ruby_vm_global_constant_state);
521 SET(class_serial, ruby_vm_class_serial);
522#undef SET
523
524 if (!NIL_P(key)) { /* matched key should return above */
525 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
526 }
527
528 return hash;
529}
530
531/* control stack frame */
532
533static void
534vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
535{
536 if (iseq->body->type != ISEQ_TYPE_TOP) {
537 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
538 }
539
540 /* for return */
541 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
543 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
544 iseq->body->iseq_encoded, ec->cfp->sp,
545 iseq->body->local_table_size, iseq->body->stack_max);
546}
547
548static void
549vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
550{
551 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
552 vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
553 (VALUE)cref, /* cref or me */
554 iseq->body->iseq_encoded,
555 ec->cfp->sp, iseq->body->local_table_size,
556 iseq->body->stack_max);
557}
558
559static void
560vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
561{
562 VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
563 rb_binding_t *bind;
564
565 GetBindingPtr(toplevel_binding, bind);
566 RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
567
568 vm_set_eval_stack(ec, iseq, 0, &bind->block);
569
570 /* save binding */
571 if (iseq->body->local_table_size > 0) {
572 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
573 }
574}
575
578{
579 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
580 if (cfp->iseq) {
581 return (rb_control_frame_t *)cfp;
582 }
584 }
585 return 0;
586}
587
590{
591 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
592 if (VM_FRAME_RUBYFRAME_P(cfp)) {
593 return (rb_control_frame_t *)cfp;
594 }
596 }
597 return 0;
598}
599
600#endif /* #ifndef MJIT_HEADER */
601
602static rb_control_frame_t *
603vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
604{
605 if (VM_FRAME_RUBYFRAME_P(cfp)) {
606 return (rb_control_frame_t *)cfp;
607 }
608
610
611 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
612 if (VM_FRAME_RUBYFRAME_P(cfp)) {
613 return (rb_control_frame_t *)cfp;
614 }
615
616 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
617 break;
618 }
620 }
621 return 0;
622}
623
624MJIT_STATIC void
626{
627 rb_execution_context_t *ec = GET_EC();
628 rb_control_frame_t *cfp = ec->cfp;
630
633 vm_pop_frame(ec, cfp, cfp->ep);
634}
635
636#ifndef MJIT_HEADER
637
638void
640{
641 /* check skipped frame */
642 while (ec->cfp != cfp) {
643#if VMDEBUG
644 printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
645#endif
646 if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
647 rb_vm_pop_frame(ec);
648 }
649 else { /* unlikely path */
651 }
652 }
653}
654
655/* at exit */
656
657void
658ruby_vm_at_exit(void (*func)(rb_vm_t *))
659{
660 rb_vm_t *vm = GET_VM();
662 nl->func = func;
663 nl->next = vm->at_exit;
664 vm->at_exit = nl;
665}
666
667static void
668ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
669{
670 rb_at_exit_list *l = vm->at_exit;
671
672 while (l) {
673 rb_at_exit_list* t = l->next;
674 rb_vm_at_exit_func *func = l->func;
675 ruby_xfree(l);
676 l = t;
677 (*func)(vm);
678 }
679}
680
681/* Env */
682
683static VALUE check_env_value(const rb_env_t *env);
684
685static int
686check_env(const rb_env_t *env)
687{
688 fprintf(stderr, "---\n");
689 fprintf(stderr, "envptr: %p\n", (void *)&env->ep[0]);
690 fprintf(stderr, "envval: %10p ", (void *)env->ep[1]);
691 dp(env->ep[1]);
692 fprintf(stderr, "ep: %10p\n", (void *)env->ep);
693 if (rb_vm_env_prev_env(env)) {
694 fprintf(stderr, ">>\n");
695 check_env_value(rb_vm_env_prev_env(env));
696 fprintf(stderr, "<<\n");
697 }
698 return 1;
699}
700
701static VALUE
702check_env_value(const rb_env_t *env)
703{
704 if (check_env(env)) {
705 return (VALUE)env;
706 }
707 rb_bug("invalid env");
708 return Qnil; /* unreachable */
709}
710
711static VALUE
712vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
713{
714 switch (vm_block_handler_type(block_handler)) {
717 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
718
721 return block_handler;
722 }
723 VM_UNREACHABLE(vm_block_handler_escape);
724 return Qnil;
725}
726
727static VALUE
728vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
729{
730 const VALUE * const ep = cfp->ep;
731 const rb_env_t *env;
732 const rb_iseq_t *env_iseq;
733 VALUE *env_body, *env_ep;
734 int local_size, env_size;
735
736 if (VM_ENV_ESCAPED_P(ep)) {
737 return VM_ENV_ENVVAL(ep);
738 }
739
740 if (!VM_ENV_LOCAL_P(ep)) {
741 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
742 if (!VM_ENV_ESCAPED_P(prev_ep)) {
744
745 while (prev_cfp->ep != prev_ep) {
746 prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
747 VM_ASSERT(prev_cfp->ep != NULL);
748 }
749
750 vm_make_env_each(ec, prev_cfp);
751 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
752 }
753 }
754 else {
755 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
756
757 if (block_handler != VM_BLOCK_HANDLER_NONE) {
758 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
759 VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
760 }
761 }
762
763 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
764 local_size = VM_ENV_DATA_SIZE;
765 }
766 else {
767 local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
768 }
769
770 /*
771 * # local variables on a stack frame (N == local_size)
772 * [lvar1, lvar2, ..., lvarN, SPECVAL]
773 * ^
774 * ep[0]
775 *
776 * # moved local variables
777 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
778 * ^ ^
779 * env->env[0] ep[0]
780 */
781
782 env_size = local_size +
783 1 /* envval */;
784 env_body = ALLOC_N(VALUE, env_size);
785 MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
786
787#if 0
788 for (i = 0; i < local_size; i++) {
789 if (VM_FRAME_RUBYFRAME_P(cfp)) {
790 /* clear value stack for GC */
791 ep[-local_size + i] = 0;
792 }
793 }
794#endif
795
796 env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL;
797 env_ep = &env_body[local_size - 1 /* specval */];
798
799 env = vm_env_new(env_ep, env_body, env_size, env_iseq);
800
801 cfp->ep = env_ep;
802 VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
803 VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
804 return (VALUE)env;
805}
806
807static VALUE
808vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
809{
810 VALUE envval = vm_make_env_each(ec, cfp);
811
812 if (PROCDEBUG) {
813 check_env_value((const rb_env_t *)envval);
814 }
815
816 return envval;
817}
818
819void
821{
822 rb_control_frame_t *cfp = ec->cfp;
823 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
824 vm_make_env_object(ec, cfp);
826 }
827}
828
829const rb_env_t *
831{
832 const VALUE *ep = env->ep;
833
834 if (VM_ENV_LOCAL_P(ep)) {
835 return NULL;
836 }
837 else {
838 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
839 return VM_ENV_ENVVAL_PTR(prev_ep);
840 }
841}
842
843static int
844collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
845{
846 unsigned int i;
847 if (!iseq) return 0;
848 for (i = 0; i < iseq->body->local_table_size; i++) {
849 local_var_list_add(vars, iseq->body->local_table[i]);
850 }
851 return 1;
852}
853
854static void
855collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
856{
857 do {
858 if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
859 collect_local_variables_in_iseq(env->iseq, vars);
860 } while ((env = rb_vm_env_prev_env(env)) != NULL);
861}
862
863static int
864vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
865{
866 if (VM_ENV_ESCAPED_P(ep)) {
867 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
868 return 1;
869 }
870 else {
871 return 0;
872 }
873}
874
875VALUE
877{
878 struct local_var_list vars;
879 local_var_list_init(&vars);
880 collect_local_variables_in_env(env, &vars);
881 return local_var_list_finish(&vars);
882}
883
884VALUE
886{
887 struct local_var_list vars;
888 local_var_list_init(&vars);
889 while (collect_local_variables_in_iseq(iseq, &vars)) {
890 iseq = iseq->body->parent_iseq;
891 }
892 return local_var_list_finish(&vars);
893}
894
895/* Proc */
896
897static VALUE
898vm_proc_create_from_captured(VALUE klass,
899 const struct rb_captured_block *captured,
900 enum rb_block_type block_type,
901 int8_t is_from_method, int8_t is_lambda)
902{
903 VALUE procval = rb_proc_alloc(klass);
904 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
905
906 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
907
908 /* copy block */
909 RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
910 RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
911 rb_vm_block_ep_update(procval, &proc->block, captured->ep);
912
913 vm_block_type_set(&proc->block, block_type);
914 proc->is_from_method = is_from_method;
915 proc->is_lambda = is_lambda;
916
917 return procval;
918}
919
920void
921rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
922{
923 /* copy block */
924 switch (vm_block_type(src)) {
925 case block_type_iseq:
926 case block_type_ifunc:
927 RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
928 RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
929 rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
930 break;
932 RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
933 break;
934 case block_type_proc:
935 RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
936 break;
937 }
938}
939
940static VALUE
941proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
942{
943 VALUE procval = rb_proc_alloc(klass);
944 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
945
946 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
947 rb_vm_block_copy(procval, &proc->block, block);
948 vm_block_type_set(&proc->block, block->type);
949 proc->is_from_method = is_from_method;
950 proc->is_lambda = is_lambda;
951
952 return procval;
953}
954
955VALUE
957{
958 VALUE procval;
959 rb_proc_t *src;
960
961 GetProcPtr(self, src);
962 procval = proc_create(rb_cProc, &src->block, src->is_from_method, src->is_lambda);
964 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
965 return procval;
966}
967
971 bool yield;
973};
974
976collect_outer_variable_names(ID id, VALUE val, void *ptr)
977{
979
980 if (id == rb_intern("yield")) {
981 data->yield = true;
982 }
983 else {
984 if (data->isolate ||
985 val == Qtrue /* write */) {
986 if (data->ary == Qfalse) data->ary = rb_ary_new();
987 rb_ary_push(data->ary, rb_id2str(id));
988 }
989 else {
990 if (data->read_only == Qfalse) data->read_only = rb_ary_new();
991 rb_ary_push(data->read_only, rb_id2str(id));
992 }
993 }
994 return ID_TABLE_CONTINUE;
995}
996
997static const rb_env_t *
998env_copy(const VALUE *src_ep, VALUE read_only_variables)
999{
1000 const rb_env_t *src_env = (rb_env_t *)VM_ENV_ENVVAL(src_ep);
1001 VM_ASSERT(src_env->ep == src_ep);
1002
1003 VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
1004 VALUE *ep = &env_body[src_env->env_size - 2];
1005 volatile VALUE prev_env = Qnil;
1006
1007 if (read_only_variables) {
1008 for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1009 ID id = SYM2ID(rb_str_intern(RARRAY_AREF(read_only_variables, i)));
1010
1011 for (unsigned int j=0; j<src_env->iseq->body->local_table_size; j++) {
1012 if (id == src_env->iseq->body->local_table[j]) {
1013 VALUE v = src_env->env[j];
1014 if (!rb_ractor_shareable_p(v)) {
1016 "can not make shareable Proc because it can refer unshareable object %"
1017 PRIsVALUE" from variable `%s'", rb_inspect(v), rb_id2name(id));
1018 }
1019 env_body[j] = v;
1020 rb_ary_delete_at(read_only_variables, i);
1021 break;
1022 }
1023 }
1024 }
1025 }
1026
1029
1030 if (!VM_ENV_LOCAL_P(src_ep)) {
1031 const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
1032 const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1033 prev_env = (VALUE)new_prev_env;
1034 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
1035 }
1036 else {
1038 }
1039
1040 const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
1041 RB_GC_GUARD(prev_env);
1042 return copied_env;
1043}
1044
1045static void
1046proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
1047{
1048 const struct rb_captured_block *captured = &proc->block.as.captured;
1049 const rb_env_t *env = env_copy(captured->ep, read_only_variables);
1050 *((const VALUE **)&proc->block.as.captured.ep) = env->ep;
1052}
1053
1054VALUE
1056{
1057 const rb_iseq_t *iseq = vm_proc_iseq(self);
1058
1059 if (iseq) {
1061 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1062
1063 if (iseq->body->outer_variables) {
1064 struct collect_outer_variable_name_data data = {
1065 .isolate = true,
1066 .ary = Qfalse,
1067 .yield = false,
1068 };
1069 rb_id_table_foreach(iseq->body->outer_variables, collect_outer_variable_names, (void *)&data);
1070
1071 if (data.ary != Qfalse) {
1072 VALUE str = rb_ary_join(data.ary, rb_str_new2(", "));
1073 if (data.yield) {
1074 rb_raise(rb_eArgError, "can not isolate a Proc because it accesses outer variables (%s) and uses `yield'.",
1076 }
1077 else {
1078 rb_raise(rb_eArgError, "can not isolate a Proc because it accesses outer variables (%s).",
1080 }
1081 }
1082 else {
1083 VM_ASSERT(data.yield);
1084 rb_raise(rb_eArgError, "can not isolate a Proc because it uses `yield'.");
1085 }
1086 }
1087
1088 proc_isolate_env(self, proc, Qfalse);
1089 proc->is_isolated = TRUE;
1090 }
1091
1093 return self;
1094}
1095
1096VALUE
1098{
1099 VALUE dst = rb_proc_dup(self);
1101 return dst;
1102}
1103
1104VALUE
1106{
1107 const rb_iseq_t *iseq = vm_proc_iseq(self);
1108
1109 if (iseq) {
1110 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1111 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1112
1113 VALUE read_only_variables = Qfalse;
1114
1115 if (iseq->body->outer_variables) {
1116 struct collect_outer_variable_name_data data = {
1117 .isolate = false,
1118 .ary = Qfalse,
1119 .read_only = Qfalse,
1120 .yield = false,
1121 };
1122
1123 rb_id_table_foreach(iseq->body->outer_variables, collect_outer_variable_names, (void *)&data);
1124
1125 if (data.ary != Qfalse) {
1126 VALUE str = rb_ary_join(data.ary, rb_str_new2(", "));
1127 if (data.yield) {
1128 rb_raise(rb_eArgError, "can not make a Proc shareable because it accesses outer variables (%s) and uses `yield'.",
1130 }
1131 else {
1132 rb_raise(rb_eArgError, "can not make a Proc shareable because it accesses outer variables (%s).",
1134 }
1135 }
1136 else if (data.yield) {
1137 rb_raise(rb_eArgError, "can not make a Proc shareable because it uses `yield'.");
1138 }
1139
1140 read_only_variables = data.read_only;
1141 }
1142
1143 proc_isolate_env(self, proc, read_only_variables);
1144 proc->is_isolated = TRUE;
1145 }
1146
1148 return self;
1149}
1150
1152rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
1153{
1154 VALUE procval;
1155
1156 if (!VM_ENV_ESCAPED_P(captured->ep)) {
1157 rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
1158 vm_make_env_object(ec, cfp);
1159 }
1160 VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
1161 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
1162 imemo_type_p(captured->code.val, imemo_ifunc));
1163
1164 procval = vm_proc_create_from_captured(klass, captured,
1165 imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda);
1166 return procval;
1167}
1168
1169/* Binding */
1170
1171VALUE
1173{
1175 rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
1176 VALUE bindval, envval;
1177 rb_binding_t *bind;
1178
1179 if (cfp == 0 || ruby_level_cfp == 0) {
1180 rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
1181 }
1182
1183 while (1) {
1184 envval = vm_make_env_object(ec, cfp);
1185 if (cfp == ruby_level_cfp) {
1186 break;
1187 }
1189 }
1190
1191 bindval = rb_binding_alloc(rb_cBinding);
1192 GetBindingPtr(bindval, bind);
1193 vm_bind_update_env(bindval, bind, envval);
1194 RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
1195 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
1196 RB_OBJ_WRITE(bindval, &bind->pathobj, ruby_level_cfp->iseq->body->location.pathobj);
1197 bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
1198
1199 return bindval;
1200}
1201
1202const VALUE *
1203rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
1204{
1205 VALUE envval, pathobj = bind->pathobj;
1206 VALUE path = pathobj_path(pathobj);
1207 VALUE realpath = pathobj_realpath(pathobj);
1208 const struct rb_block *base_block;
1209 const rb_env_t *env;
1210 rb_execution_context_t *ec = GET_EC();
1211 const rb_iseq_t *base_iseq, *iseq;
1212 rb_ast_body_t ast;
1213 NODE tmp_node;
1214 ID minibuf[4], *dyns = minibuf;
1215 VALUE idtmp = 0;
1216
1217 if (dyncount < 0) return 0;
1218
1219 base_block = &bind->block;
1220 base_iseq = vm_block_iseq(base_block);
1221
1222 if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);
1223
1224 dyns[0] = dyncount;
1225 MEMCPY(dyns + 1, dynvars, ID, dyncount);
1226 rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0);
1227 ast.root = &tmp_node;
1228 ast.compile_option = 0;
1229 ast.line_count = -1;
1230
1231 if (base_iseq) {
1232 iseq = rb_iseq_new(&ast, base_iseq->body->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1233 }
1234 else {
1235 VALUE tempstr = rb_fstring_lit("<temp>");
1236 iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
1237 }
1238 tmp_node.nd_tbl = 0; /* reset table */
1239 ALLOCV_END(idtmp);
1240
1241 vm_set_eval_stack(ec, iseq, 0, base_block);
1242 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1243 rb_vm_pop_frame(ec);
1244
1245 env = (const rb_env_t *)envval;
1246 return env->env;
1247}
1248
1249/* C -> Ruby: block */
1250
1251static inline VALUE
1252invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1253{
1254 int arg_size = iseq->body->param.size;
1255
1256 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1258 (VALUE)cref, /* cref or method */
1259 iseq->body->iseq_encoded + opt_pc,
1260 ec->cfp->sp + arg_size,
1261 iseq->body->local_table_size - arg_size,
1262 iseq->body->stack_max);
1263 return vm_exec(ec, true);
1264}
1265
1266static VALUE
1267invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1268{
1269 /* bmethod */
1270 int arg_size = iseq->body->param.size;
1271 VALUE ret;
1272 rb_hook_list_t *hooks;
1273
1274 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
1275
1276 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1278 (VALUE)me,
1279 iseq->body->iseq_encoded + opt_pc,
1280 ec->cfp->sp + arg_size,
1281 iseq->body->local_table_size - arg_size,
1282 iseq->body->stack_max);
1283
1286
1287 if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
1288 hooks->events & RUBY_EVENT_CALL) {
1289 rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
1290 me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
1291 }
1292 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1293 ret = vm_exec(ec, true);
1294
1295 EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
1296 if ((hooks = me->def->body.bmethod.hooks) != NULL &&
1297 hooks->events & RUBY_EVENT_RETURN) {
1298 rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
1299 me->def->original_id, me->called_id, me->owner, ret, FALSE);
1300 }
1302 return ret;
1303}
1304
1305ALWAYS_INLINE(static VALUE
1306 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1307 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1308 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1309
1310static inline VALUE
1311invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1312 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1313 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1314{
1315 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1316 int i, opt_pc;
1318 rb_control_frame_t *cfp = ec->cfp;
1319 VALUE *sp = cfp->sp;
1320
1321 stack_check(ec);
1322
1324 vm_check_canary(ec, sp);
1325 cfp->sp = sp + argc;
1326 for (i=0; i<argc; i++) {
1327 sp[i] = argv[i];
1328 }
1329
1330 opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
1331 (is_lambda ? arg_setup_method : arg_setup_block));
1332 cfp->sp = sp;
1333
1334 if (me == NULL) {
1335 return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1336 }
1337 else {
1338 return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1339 }
1340}
1341
1342static inline VALUE
1343invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1344 int argc, const VALUE *argv,
1345 int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1346 int is_lambda, int force_blockarg)
1347{
1348 again:
1349 switch (vm_block_handler_type(block_handler)) {
1351 {
1352 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1353 return invoke_iseq_block_from_c(ec, captured, captured->self,
1354 argc, argv, kw_splat, passed_block_handler,
1355 cref, is_lambda, NULL);
1356 }
1358 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1359 VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1360 argc, argv, kw_splat, passed_block_handler, NULL);
1362 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1363 argc, argv, kw_splat, passed_block_handler);
1365 if (force_blockarg == FALSE) {
1366 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1367 }
1368 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1369 goto again;
1370 }
1371 VM_UNREACHABLE(invoke_block_from_c_splattable);
1372 return Qundef;
1373}
1374
1375static inline VALUE
1376check_block_handler(rb_execution_context_t *ec)
1377{
1378 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1379 vm_block_handler_verify(block_handler);
1380 if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1381 rb_vm_localjump_error("no block given", Qnil, 0);
1382 }
1383
1384 return block_handler;
1385}
1386
1387static VALUE
1388vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1389{
1390 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1391 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1392 cref, is_lambda, FALSE);
1393}
1394
1395static VALUE
1396vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1397{
1398 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1399 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1400 NULL, FALSE, FALSE);
1401}
1402
1403static VALUE
1404vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1405{
1406 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1407 argc, argv, kw_splat, block_handler,
1408 NULL, FALSE, FALSE);
1409}
1410
1411static VALUE
1412vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1413{
1414 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1416}
1417
1418ALWAYS_INLINE(static VALUE
1419 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1420 VALUE self, int argc, const VALUE *argv,
1421 int kw_splat, VALUE passed_block_handler, int is_lambda,
1422 const rb_callable_method_entry_t *me));
1423
1424static inline VALUE
1425invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1426 VALUE self, int argc, const VALUE *argv,
1427 int kw_splat, VALUE passed_block_handler, int is_lambda,
1429{
1430 const struct rb_block *block = &proc->block;
1431
1432 again:
1433 switch (vm_block_type(block)) {
1434 case block_type_iseq:
1435 return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1436 case block_type_ifunc:
1437 if (kw_splat == 1) {
1438 VALUE keyword_hash = argv[argc-1];
1439 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
1440 keyword_hash = rb_to_hash_type(keyword_hash);
1441 }
1442 if (RHASH_EMPTY_P(keyword_hash)) {
1443 argc--;
1444 } else {
1445 ((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
1446 }
1447 }
1448 return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1449 case block_type_symbol:
1450 return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1451 case block_type_proc:
1452 is_lambda = block_proc_is_lambda(block->as.proc);
1453 block = vm_proc_block(block->as.proc);
1454 goto again;
1455 }
1456 VM_UNREACHABLE(invoke_block_from_c_proc);
1457 return Qundef;
1458}
1459
1460static VALUE
1461vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1462 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1463{
1464 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1465}
1466
1469 int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1470{
1471 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1472}
1473
1476 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1477{
1478 VALUE self = vm_block_self(&proc->block);
1479 vm_block_handler_verify(passed_block_handler);
1480
1481 if (proc->is_from_method) {
1482 return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1483 }
1484 else {
1485 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1486 }
1487}
1488
1489VALUE
1491 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1492{
1493 vm_block_handler_verify(passed_block_handler);
1494
1495 if (proc->is_from_method) {
1496 return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1497 }
1498 else {
1499 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1500 }
1501}
1502
1503/* special variable */
1504
1505static rb_control_frame_t *
1506vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1507{
1508 while (cfp->pc == 0) {
1510 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1511 return 0;
1512 }
1513 }
1514 return cfp;
1515}
1516
1517static VALUE
1518vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
1519{
1520 cfp = vm_normal_frame(ec, cfp);
1521 return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);
1522}
1523
1524static void
1525vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
1526{
1527 cfp = vm_normal_frame(ec, cfp);
1528 lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);
1529}
1530
1531static VALUE
1532vm_svar_get(const rb_execution_context_t *ec, VALUE key)
1533{
1534 return vm_cfp_svar_get(ec, ec->cfp, key);
1535}
1536
1537static void
1538vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
1539{
1540 vm_cfp_svar_set(ec, ec->cfp, key, val);
1541}
1542
1543VALUE
1545{
1546 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
1547}
1548
1549void
1551{
1552 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
1553}
1554
1555VALUE
1557{
1558 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
1559}
1560
1561void
1563{
1564 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
1565}
1566
1567/* misc */
1568
1569/* in intern.h */
1570const char *
1572{
1573 const rb_execution_context_t *ec = GET_EC();
1575
1576 if (cfp) {
1577 return RSTRING_PTR(rb_iseq_path(cfp->iseq));
1578 }
1579 else {
1580 return 0;
1581 }
1582}
1583
1584/* in intern.h */
1585int
1587{
1588 const rb_execution_context_t *ec = GET_EC();
1590
1591 if (cfp) {
1592 return rb_vm_get_sourceline(cfp);
1593 }
1594 else {
1595 return 0;
1596 }
1597}
1598
1599VALUE
1601{
1602 const rb_execution_context_t *ec = GET_EC();
1604
1605 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
1606 if (pline) *pline = rb_vm_get_sourceline(cfp);
1607 return rb_iseq_path(cfp->iseq);
1608 }
1609 else {
1610 if (pline) *pline = 0;
1611 return Qnil;
1612 }
1613}
1614
1615MJIT_FUNC_EXPORTED const char *
1617{
1618 VALUE path = rb_source_location(pline);
1619 if (NIL_P(path)) return NULL;
1620 return RSTRING_PTR(path);
1621}
1622
1623rb_cref_t *
1625{
1626 const rb_execution_context_t *ec = GET_EC();
1627 return vm_ec_cref(ec);
1628}
1629
1630rb_cref_t *
1632{
1633 const rb_execution_context_t *ec = GET_EC();
1635 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
1636 return cref;
1637}
1638
1639const rb_cref_t *
1641{
1642 const rb_execution_context_t *ec = GET_EC();
1644 const rb_cref_t *cref;
1645 if (!cfp || cfp->self != self) return NULL;
1646 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
1647 cref = vm_get_cref(cfp->ep);
1648 if (CREF_CLASS(cref) != cbase) return NULL;
1649 return cref;
1650}
1651
#if 0
/* Debug helper (compiled out): dump each cref's class and visibility
 * along the CREF_NEXT chain to stdout. */
void
debug_cref(rb_cref_t *cref)
{
    while (cref) {
        dp(CREF_CLASS(cref));
        printf("%ld\n", CREF_VISI(cref));
        cref = CREF_NEXT(cref);
    }
}
#endif
1663
1664VALUE
1666{
1667 const rb_execution_context_t *ec = GET_EC();
1669
1670 if (cfp == 0) {
1671 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
1672 }
1673 return vm_get_cbase(cfp->ep);
1674}
1675
1676/* jump */
1677
1678static VALUE
1679make_localjump_error(const char *mesg, VALUE value, int reason)
1680{
1683 ID id;
1684
1685 switch (reason) {
1686 case TAG_BREAK:
1687 CONST_ID(id, "break");
1688 break;
1689 case TAG_REDO:
1690 CONST_ID(id, "redo");
1691 break;
1692 case TAG_RETRY:
1693 CONST_ID(id, "retry");
1694 break;
1695 case TAG_NEXT:
1696 CONST_ID(id, "next");
1697 break;
1698 case TAG_RETURN:
1699 CONST_ID(id, "return");
1700 break;
1701 default:
1702 CONST_ID(id, "noreason");
1703 break;
1704 }
1705 rb_iv_set(exc, "@exit_value", value);
1706 rb_iv_set(exc, "@reason", ID2SYM(id));
1707 return exc;
1708}
1709
1711rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
1712{
1713 VALUE exc = make_localjump_error(mesg, value, reason);
1714 rb_exc_raise(exc);
1715}
1716
1717VALUE
1719{
1720 const char *mesg;
1721
1722 switch (state) {
1723 case TAG_RETURN:
1724 mesg = "unexpected return";
1725 break;
1726 case TAG_BREAK:
1727 mesg = "unexpected break";
1728 break;
1729 case TAG_NEXT:
1730 mesg = "unexpected next";
1731 break;
1732 case TAG_REDO:
1733 mesg = "unexpected redo";
1734 val = Qnil;
1735 break;
1736 case TAG_RETRY:
1737 mesg = "retry outside of rescue clause";
1738 val = Qnil;
1739 break;
1740 default:
1741 return Qnil;
1742 }
1743 if (val == Qundef) {
1744 val = GET_EC()->tag->retval;
1745 }
1746 return make_localjump_error(mesg, val, state);
1747}
1748
1749void
1751{
1753 if (!NIL_P(exc)) rb_exc_raise(exc);
1754 EC_JUMP_TAG(GET_EC(), state);
1755}
1756
1757static rb_control_frame_t *
1758next_not_local_frame(rb_control_frame_t *cfp)
1759{
1760 while (VM_ENV_LOCAL_P(cfp->ep)) {
1762 }
1763 return cfp;
1764}
1765
1766NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
1767
1768static void
1769vm_iter_break(rb_execution_context_t *ec, VALUE val)
1770{
1771 rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
1772 const VALUE *ep = VM_CF_PREV_EP(cfp);
1773 const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
1774
1775#if 0 /* raise LocalJumpError */
1776 if (!target_cfp) {
1777 rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
1778 }
1779#endif
1780
1781 ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
1783}
1784
1785void
1787{
1788 vm_iter_break(GET_EC(), Qnil);
1789}
1790
1791void
1793{
1794 vm_iter_break(GET_EC(), val);
1795}
1796
1797/* optimization: redefine management */
1798
1799static st_table *vm_opt_method_def_table = 0;
1800static st_table *vm_opt_mid_table = 0;
1801
1802static int
1803vm_redefinition_check_flag(VALUE klass)
1804{
1805 if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
1806 if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
1807 if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
1808 if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
1809 if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
1810 if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
1811#if 0
1812 if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
1813#endif
1814 if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
1815 if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
1816 if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
1817 if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
1818 if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
1819 return 0;
1820}
1821
1822int
1824{
1825 if (!vm_opt_mid_table) {
1826 return FALSE;
1827 }
1828
1829 return st_lookup(vm_opt_mid_table, mid, NULL);
1830}
1831
1832static int
1833vm_redefinition_check_method_type(const rb_method_definition_t *def)
1834{
1835 switch (def->type) {
1838 return TRUE;
1839 default:
1840 return FALSE;
1841 }
1842}
1843
1844static void
1845rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
1846{
1847 st_data_t bop;
1848 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
1849 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
1850 klass = RBASIC_CLASS(klass);
1851 }
1852 if (vm_redefinition_check_method_type(me->def)) {
1853 if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
1854 int flag = vm_redefinition_check_flag(klass);
1855 ruby_vm_redefined_flag[bop] |= flag;
1856 }
1857 }
1858}
1859
1861check_redefined_method(ID mid, VALUE value, void *data)
1862{
1863 VALUE klass = (VALUE)data;
1864 const rb_method_entry_t *me = (rb_method_entry_t *)value;
1865 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
1866
1867 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
1868
1869 return ID_TABLE_CONTINUE;
1870}
1871
1872void
1874{
1875 if (!vm_redefinition_check_flag(klass)) return;
1876 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
1877}
1878
1879static void
1880add_opt_method(VALUE klass, ID mid, VALUE bop)
1881{
1882 const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
1883
1884 if (me && vm_redefinition_check_method_type(me->def)) {
1885 st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
1886 st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
1887 }
1888 else {
1889 rb_bug("undefined optimized method: %s", rb_id2name(mid));
1890 }
1891}
1892
/* Populate the redefinition-tracking tables: for every (operator, core
 * class) pair the VM specializes, remember the original method definition
 * so later redefinitions can flip the corresponding
 * ruby_vm_redefined_flag bit. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_def_table = st_init_numtable();
    vm_opt_mid_table = st_init_numtable();

/* OP selects the method id and basic-operation constant (and clears its
 * redefined flag); C registers the method on one core class. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Integer), C(Float));
    OP(MULT, MULT), (C(Integer), C(Float));
    OP(DIV, DIV), (C(Integer), C(Float));
    OP(MOD, MOD), (C(Integer), C(Float));
    OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
    OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
                   C(NilClass), C(TrueClass), C(FalseClass));
    OP(LT, LT), (C(Integer), C(Float));
    OP(LE, LE), (C(Integer), C(Float));
    OP(GT, GT), (C(Integer), C(Float));
    OP(GE, GE), (C(Integer), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Integer), C(String));
    OP(EqTilde, MATCH), (C(Regexp), C(String));
    OP(Freeze, FREEZE), (C(String));
    OP(UMinus, UMINUS), (C(String));
    OP(Max, MAX), (C(Array));
    OP(Min, MIN), (C(Array));
    OP(Call, CALL), (C(Proc));
    OP(And, AND), (C(Integer));
    OP(Or, OR), (C(Integer));
    OP(NilP, NIL_P), (C(NilClass));
#undef C
#undef OP
}
1935
1936/* for vm development */
1937
1938#if VMDEBUG
/* Human-readable name for a frame's VM_FRAME_MAGIC_* type; debug builds
 * (VMDEBUG) only.  Unknown types are a bug, not a reportable state. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_RESCUE: return "rescue";
      default:
        rb_bug("unknown frame");
    }
}
1955#endif
1956
1957static VALUE
1958frame_return_value(const struct vm_throw_data *err)
1959{
1960 if (THROW_DATA_P(err) &&
1961 THROW_DATA_STATE(err) == TAG_BREAK &&
1962 THROW_DATA_CONSUMED_P(err) == FALSE) {
1963 return THROW_DATA_VAL(err);
1964 }
1965 else {
1966 return Qnil;
1967 }
1968}
1969
#if 0
/* for debug */
/* Debug helper (compiled out): stringify a frame's magic type, including
 * types vm_frametype_name does not cover (PROC, LAMBDA, DUMMY). */
static const char *
frame_name(const rb_control_frame_t *cfp)
{
    unsigned long type = VM_FRAME_TYPE(cfp);
#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
    C(METHOD);
    C(BLOCK);
    C(CLASS);
    C(TOP);
    C(CFUNC);
    C(PROC);
    C(IFUNC);
    C(EVAL);
    C(LAMBDA);
    C(RESCUE);
    C(DUMMY);
#undef C
    return "unknown";
}
#endif
1992
/* Fire :return / :b_return (and bmethod :return) trace events for the
 * frame that exception/throw handling is about to rewind.  Skipped for
 * SystemStackError raises to avoid running hooks on a blown stack.
 *
 * NOTE(review): the `case VM_FRAME_MAGIC_*:` labels of the switch below,
 * plus several hook-invocation argument lines, were lost in extraction —
 * as written the switch body is unreachable/ill-formed.  Restore from
 * upstream before compiling. */
static void
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
                   int will_finish_vm_exec, int state, struct vm_throw_data *err)
{
    if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
        return;
    }
    else {
        const rb_iseq_t *iseq = cfp->iseq;
        rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;

        switch (VM_FRAME_TYPE(ec->cfp)) {
            /* NOTE(review): method-frame case label missing here */
            EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));

            if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
                rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
                                        ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
            }

            THROW_DATA_CONSUMED_SET(err);
            break;
            /* NOTE(review): block/lambda-frame case labels missing here */
            if (VM_FRAME_BMETHOD_P(ec->cfp)) {
                EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
                if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
                    rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
                                            ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
                }

                if (!will_finish_vm_exec) {
                    /* NOTE(review): fetch of the frame's method entry and the
                     * leading arguments of the RETURN event hook were lost in
                     * extraction. */

                    /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
                    frame_return_value(err));

                    VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
                    local_hooks = me->def->body.bmethod.hooks;

                    if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
                        rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
                                                /* NOTE(review): id/owner argument lines missing */
                                                frame_return_value(err), TRUE);
                    }
                }
                THROW_DATA_CONSUMED_SET(err);
            }
            else {
                EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
                if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
                    rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
                                            ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
                }
                THROW_DATA_CONSUMED_SET(err);
            }
            break;
            /* NOTE(review): default/other-frame case labels missing here */
            break;
        }
    }
}
2062
2063/* evaluator body */
2064
2065/* finish
2066 VMe (h1) finish
2067 VM finish F1 F2
2068 cfunc finish F1 F2 C1
2069 rb_funcall finish F1 F2 C1
2070 VMe finish F1 F2 C1
2071 VM finish F1 F2 C1 F3
2072
2073 F1 - F3 : pushed by VM
2074 C1 : pushed by send insn (CFUNC)
2075
2076 struct CONTROL_FRAME {
2077 VALUE *pc; // cfp[0], program counter
2078 VALUE *sp; // cfp[1], stack pointer
2079 rb_iseq_t *iseq; // cfp[2], iseq
2080 VALUE self; // cfp[3], self
2081 const VALUE *ep; // cfp[4], env pointer
2082 const void *block_code; // cfp[5], block code
2083 };
2084
2085 struct rb_captured_block {
2086 VALUE self;
2087 VALUE *ep;
2088 union code;
2089 };
2090
2091 struct METHOD_ENV {
2092 VALUE param0;
2093 ...
2094 VALUE paramN;
2095 VALUE lvar1;
2096 ...
2097 VALUE lvarM;
2098 VALUE cref; // ep[-2]
2099 VALUE special; // ep[-1]
2100 VALUE flags; // ep[ 0] == lep[0]
2101 };
2102
2103 struct BLOCK_ENV {
2104 VALUE block_param0;
2105 ...
2106 VALUE block_paramN;
2107 VALUE block_lvar1;
2108 ...
2109 VALUE block_lvarM;
2110 VALUE cref; // ep[-2]
2111 VALUE special; // ep[-1]
2112 VALUE flags; // ep[ 0]
2113 };
2114
2115 struct CLASS_ENV {
2116 VALUE class_lvar0;
2117 ...
2118 VALUE class_lvarN;
2119 VALUE cref;
2120 VALUE prev_ep; // for frame jump
2121 VALUE flags;
2122 };
2123
2124 struct C_METHOD_CONTROL_FRAME {
2125 VALUE *pc; // 0
2126 VALUE *sp; // stack pointer
2127 rb_iseq_t *iseq; // cmi
2128 VALUE self; // ?
2129 VALUE *ep; // ep == lep
2130 void *code; //
2131 };
2132
2133 struct C_BLOCK_CONTROL_FRAME {
2134 VALUE *pc; // point only "finish" insn
2135 VALUE *sp; // sp
2136 rb_iseq_t *iseq; // ?
2137 VALUE self; //
2138 VALUE *ep; // ep
2139 void *code; //
2140 };
2141
2142 If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should
2143 be FALSE to avoid calling `mjit_exec` twice.
2144 */
2145
2146static inline VALUE
2147vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
2148 VALUE errinfo, VALUE *initial);
2149
2150VALUE
2151vm_exec(rb_execution_context_t *ec, bool mjit_enable_p)
2152{
2153 enum ruby_tag_type state;
2154 VALUE result = Qundef;
2155 VALUE initial = 0;
2156
2157 EC_PUSH_TAG(ec);
2158
2159 _tag.retval = Qnil;
2160 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2161 if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) {
2162 result = vm_exec_core(ec, initial);
2163 }
2164 goto vm_loop_start; /* fallback to the VM */
2165 }
2166 else {
2167 result = ec->errinfo;
2169 while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) {
2170 /* caught a jump, exec the handler */
2171 result = vm_exec_core(ec, initial);
2172 vm_loop_start:
2173 VM_ASSERT(ec->tag == &_tag);
2174 /* when caught `throw`, `tag.state` is set. */
2175 if ((state = _tag.state) == TAG_NONE) break;
2176 _tag.state = TAG_NONE;
2177 }
2178 }
2179 EC_POP_TAG();
2180 return result;
2181}
2182
/* Unwind control frames looking for a catch-table entry matching `state`
 * (raise / break / next / redo / retry / return).  Returns Qundef after a
 * catch frame has been pushed (the caller re-enters vm_exec_core), returns
 * the throw value when a TAG_RETURN finishes this vm_exec invocation, or
 * re-jumps to an outer tag when nothing here handles the state. */
static inline VALUE
vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
                         VALUE errinfo, VALUE *initial)
{
    struct vm_throw_data *err = (struct vm_throw_data *)errinfo;

    for (;;) {
        unsigned int i;
        const struct iseq_catch_table_entry *entry;
        const struct iseq_catch_table *ct;
        unsigned long epc, cont_pc, cont_sp;
        const rb_iseq_t *catch_iseq;
        rb_control_frame_t *cfp;
        VALUE type;
        const rb_control_frame_t *escape_cfp;

        cont_pc = cont_sp = 0;
        catch_iseq = NULL;

        /* Pop non-Ruby frames (no pc/iseq) until a Ruby frame is on top.
         * NOTE(review): the CFUNC branch body (emission of the C_RETURN
         * trace event / DTrace probe for the popped frame) was lost in
         * extraction — restore from upstream. */
        while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
            }
            rb_vm_pop_frame(ec);
        }

        cfp = ec->cfp;
        epc = cfp->pc - cfp->iseq->body->iseq_encoded;

        escape_cfp = NULL;
        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_cfp = THROW_DATA_CATCH_FRAME(err);

            if (cfp == escape_cfp) {
                if (state == TAG_RETURN) {
                    if (!VM_FRAME_FINISHED_P(cfp)) {
                        /* retarget the return one frame up and continue as
                         * a break */
                        THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
                        THROW_DATA_STATE_SET(err, state = TAG_BREAK);
                    }
                    else {
                        /* run any ensure clause covering epc before the
                         * finished frame returns */
                        ct = cfp->iseq->body->catch_table;
                        if (ct) for (i = 0; i < ct->size; i++) {
                            entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseq = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (catch_iseq == NULL) {
                            ec->errinfo = Qnil;
                            THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
                            hook_before_rewind(ec, ec->cfp, TRUE, state, err);
                            rb_vm_pop_frame(ec);
                            return THROW_DATA_VAL(err);
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK */
#if OPT_STACK_CACHING
                    *initial = THROW_DATA_VAL(err);
#else
                    *ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif
                    ec->errinfo = Qnil;
                    return Qundef;
                }
            }
        }

        if (state == TAG_RAISE) {
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        const rb_control_frame_t *escape_cfp;
                        escape_cfp = THROW_DATA_CATCH_FRAME(err);
                        if (cfp == escape_cfp) {
                            /* resume execution at the retry continuation */
                            cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
                            ec->errinfo = Qnil;
                            return Qundef;
                        }
                    }
                }
            }
        }
        else if ((state == TAG_BREAK && !escape_cfp) ||
                 (state == TAG_REDO) ||
                 (state == TAG_NEXT)) {
            /* break/next/redo handled by catch-table entries of the same
             * kind within the current iseq */
            type = (const enum catch_type[TAG_MASK]) {
                [TAG_BREAK]  = CATCH_TYPE_BREAK,
                [TAG_NEXT]   = CATCH_TYPE_NEXT,
                [TAG_REDO]   = CATCH_TYPE_REDO,
                /* otherwise = dontcare */
            }[state];

            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
                        cfp->sp = vm_base_ptr(cfp) + entry->sp;

                        if (state != TAG_REDO) {
                            /* break/next push the thrown value as the result */
#if OPT_STACK_CACHING
                            *initial = THROW_DATA_VAL(err);
#else
                            *ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif
                        }
                        ec->errinfo = Qnil;
                        VM_ASSERT(ec->tag->state == TAG_NONE);
                        return Qundef;
                    }
                }
            }
        }
        else {
            /* any other state: only ensure clauses run during unwinding */
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseq != NULL) { /* found catch table */
            /* enter catch scope */
            const int arg_size = 1;

            rb_iseq_check(catch_iseq);
            cfp->sp = vm_base_ptr(cfp) + cont_sp;
            cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;

            /* push block frame; the throw object is the single argument */
            cfp->sp[0] = (VALUE)err;
            vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
                          cfp->self,
                          VM_GUARDED_PREV_EP(cfp->ep),
                          0, /* cref or me */
                          catch_iseq->body->iseq_encoded,
                          cfp->sp + arg_size /* push value */,
                          catch_iseq->body->local_table_size - arg_size,
                          catch_iseq->body->stack_max);

            state = 0;
            ec->tag->state = TAG_NONE;
            ec->errinfo = Qnil;

            return Qundef;
        }
        else {
            hook_before_rewind(ec, ec->cfp, FALSE, state, err);

            if (VM_FRAME_FINISHED_P(ec->cfp)) {
                /* this vm_exec invocation cannot handle it: hand the state
                 * to the outer tag */
                rb_vm_pop_frame(ec);
                ec->errinfo = (VALUE)err;
                ec->tag = ec->tag->prev;
                EC_JUMP_TAG(ec, state);
            }
            else {
                rb_vm_pop_frame(ec);
            }
        }
    }
}
2399
2400/* misc */
2401
2402VALUE
2404{
2405 rb_execution_context_t *ec = GET_EC();
2406 VALUE val;
2407 vm_set_top_stack(ec, iseq);
2408 val = vm_exec(ec, true);
2409 return val;
2410}
2411
2412VALUE
2414{
2415 rb_execution_context_t *ec = GET_EC();
2416 VALUE val;
2417
2418 vm_set_main_stack(ec, iseq);
2419 val = vm_exec(ec, true);
2420 return val;
2421}
2422
2423int
2424rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
2425{
2427
2428 if (me) {
2429 if (idp) *idp = me->def->original_id;
2430 if (called_idp) *called_idp = me->called_id;
2431 if (klassp) *klassp = me->owner;
2432 return TRUE;
2433 }
2434 else {
2435 return FALSE;
2436 }
2437}
2438
2439int
2441{
2442 return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
2443}
2444
2445int
2447{
2448 return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
2449}
2450
2451VALUE
2453 VALUE block_handler, VALUE filename)
2454{
2455 rb_execution_context_t *ec = GET_EC();
2456 const rb_control_frame_t *reg_cfp = ec->cfp;
2457 const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
2458 VALUE val;
2459
2460 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
2461 recv, block_handler,
2462 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
2463 0, reg_cfp->sp, 0, 0);
2464
2465 val = (*func)(arg);
2466
2467 rb_vm_pop_frame(ec);
2468 return val;
2469}
2470
2471/* vm */
2472
/* GC-compaction callback: refresh VALUE references held directly by the
 * VM root structure after objects may have moved.
 * NOTE(review): most of the rb_gc_location() update statements (and the
 * bodies of the two `if`s below) were lost in extraction — only the
 * top_self update survives.  Restore from upstream before relying on
 * this text. */
void
rb_vm_update_references(void *ptr)
{
    if (ptr) {
        rb_vm_t *vm = ptr;

        if (vm->load_path_check_cache) {
        }

        vm->top_self = rb_gc_location(vm->top_self);

        if (vm->coverages) {
        }
    }
}
2499
2500void
2501rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
2502{
2503 if (ptr) {
2504 rb_vm_t *vm = ptr;
2505 rb_ractor_t *r = 0;
2506 list_for_each(&vm->ractor.set, r, vmlr_node) {
2507 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
2508 rb_ractor_status_p(r, ractor_running));
2509 if (r->threads.cnt > 0) {
2510 rb_thread_t *th = 0;
2511 list_for_each(&r->threads.set, th, lt_node) {
2512 VM_ASSERT(th != NULL);
2513 rb_execution_context_t * ec = th->ec;
2514 if (ec->vm_stack) {
2515 VALUE *p = ec->vm_stack;
2516 VALUE *sp = ec->cfp->sp;
2517 while (p <= sp) {
2518 if (!rb_special_const_p(*p)) {
2519 cb(*p, ctx);
2520 }
2521 p++;
2522 }
2523 }
2524 }
2525 }
2526 }
2527 }
2528}
2529
2531vm_mark_negative_cme(VALUE val, void *dmy)
2532{
2533 rb_gc_mark(val);
2534 return ID_TABLE_CONTINUE;
2535}
2536
2537void
2539{
2540 RUBY_MARK_ENTER("vm");
2541 RUBY_GC_INFO("-------------------------------------------------\n");
2542 if (ptr) {
2543 rb_vm_t *vm = ptr;
2544 rb_ractor_t *r = 0;
2545 long i, len;
2546 const VALUE *obj_ary;
2547
2548 list_for_each(&vm->ractor.set, r, vmlr_node) {
2549 // ractor.set only contains blocking or running ractors
2550 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
2551 rb_ractor_status_p(r, ractor_running));
2552 rb_gc_mark(rb_ractor_self(r));
2553 }
2554
2556
2558 obj_ary = RARRAY_CONST_PTR(vm->mark_object_ary);
2559 for (i=0; i < len; i++) {
2560 const VALUE *ptr;
2561 long j, jlen;
2562
2563 rb_gc_mark(*obj_ary);
2564 jlen = RARRAY_LEN(*obj_ary);
2565 ptr = RARRAY_CONST_PTR(*obj_ary);
2566 for (j=0; j < jlen; j++) {
2567 rb_gc_mark(*ptr++);
2568 }
2569 obj_ary++;
2570 }
2571
2581 /* Prevent classes from moving */
2583
2584 if (vm->loading_table) {
2586 }
2587
2589
2590 rb_id_table_foreach_values(vm->negative_cme_table, vm_mark_negative_cme, NULL);
2591 for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
2592 const struct rb_callcache *cc = vm->global_cc_cache_table[i];
2593
2594 if (cc != NULL) {
2595 if (!vm_cc_invalidated_p(cc)) {
2596 rb_gc_mark((VALUE)cc);
2597 }
2598 else {
2599 vm->global_cc_cache_table[i] = NULL;
2600 }
2601 }
2602 }
2603
2604 mjit_mark();
2605 }
2606
2607 RUBY_MARK_LEAVE("vm");
2608}
2609
2610#undef rb_vm_register_special_exception
2611void
2613{
2614 rb_vm_t *vm = GET_VM();
2615 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
2616 OBJ_FREEZE(exc);
2617 ((VALUE *)vm->special_exceptions)[sp] = exc;
2619}
2620
2621int
2623{
2624 rb_vm_t *vm = GET_VM();
2625
2626 st_insert(vm->defined_module_hash, (st_data_t)module, (st_data_t)module);
2627
2628 return TRUE;
2629}
2630
2631static int
2632free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
2633{
2634 xfree((char *)key);
2635 return ST_DELETE;
2636}
2637
2638int
2640{
2641 RUBY_FREE_ENTER("vm");
2642
2643 if (vm) {
2644 rb_thread_t *th = vm->ractor.main_thread;
2645 struct rb_objspace *objspace = vm->objspace;
2646 vm->ractor.main_thread = NULL;
2647
2648 if (th) {
2650 thread_free(th);
2651 }
2652 rb_vm_living_threads_init(vm);
2653 ruby_vm_run_at_exit_hooks(vm);
2654 if (vm->loading_table) {
2655 st_foreach(vm->loading_table, free_loading_table_entry, 0);
2657 vm->loading_table = 0;
2658 }
2659 if (vm->frozen_strings) {
2661 vm->frozen_strings = 0;
2662 }
2663 RB_ALTSTACK_FREE(vm->main_altstack);
2664 if (objspace) {
2665 rb_objspace_free(objspace);
2666 }
2669 /* after freeing objspace, you *can't* use ruby_xfree() */
2670 ruby_mimfree(vm);
2672 }
2673 RUBY_FREE_LEAVE("vm");
2674 return 0;
2675}
2676
2677static size_t
2678vm_memsize(const void *ptr)
2679{
2680 const rb_vm_t *vmobj = ptr;
2681 size_t size = sizeof(rb_vm_t);
2682
2683 // TODO
2684 // size += vmobj->ractor_num * sizeof(rb_ractor_t);
2685
2686 if (vmobj->defined_strings) {
2687 size += DEFINED_EXPR * sizeof(VALUE);
2688 }
2689 return size;
2690}
2691
2692static const rb_data_type_t vm_data_type = {
2693 "VM",
2694 {0, 0, vm_memsize,},
2696};
2697
2698
2699static VALUE
2700vm_default_params(void)
2701{
2702 rb_vm_t *vm = GET_VM();
2703 VALUE result = rb_hash_new_with_size(4);
2704#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2705 SET(thread_vm_stack_size);
2706 SET(thread_machine_stack_size);
2707 SET(fiber_vm_stack_size);
2708 SET(fiber_machine_stack_size);
2709#undef SET
2710 rb_obj_freeze(result);
2711 return result;
2712}
2713
2714static size_t
2715get_param(const char *name, size_t default_value, size_t min_value)
2716{
2717 const char *envval;
2718 size_t result = default_value;
2719 if ((envval = getenv(name)) != 0) {
2720 long val = atol(envval);
2721 if (val < (long)min_value) {
2722 val = (long)min_value;
2723 }
2724 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
2725 }
2726 if (0) fprintf(stderr, "%s: %"PRIuSIZE"\n", name, result); /* debug print */
2727
2728 return result;
2729}
2730
/* Enforce a platform floor on a requested machine stack size.  Where
 * PTHREAD_STACK_MIN is available, requests below the minimum are bumped
 * to twice the minimum; otherwise the request is left untouched. */
static void
check_machine_stack_size(size_t *sizep)
{
#ifdef PTHREAD_STACK_MIN
    if (*sizep < PTHREAD_STACK_MIN) {
        *sizep = PTHREAD_STACK_MIN * 2;
    }
#endif
}
2744
2745static void
2746vm_default_params_setup(rb_vm_t *vm)
2747{
2749 get_param("RUBY_THREAD_VM_STACK_SIZE",
2752
2754 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
2757
2759 get_param("RUBY_FIBER_VM_STACK_SIZE",
2762
2764 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
2767
2768 /* environment dependent check */
2769 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
2770 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
2771}
2772
2773static void
2774vm_init2(rb_vm_t *vm)
2775{
2776 MEMZERO(vm, rb_vm_t, 1);
2777 rb_vm_living_threads_init(vm);
2779 vm->src_encoding_index = -1;
2780
2781 vm_default_params_setup(vm);
2782}
2783
2784void
2786{
2787 /* update VM stack */
2788 if (ec->vm_stack) {
2789 long i;
2790 VM_ASSERT(ec->cfp);
2791 VALUE *p = ec->vm_stack;
2792 VALUE *sp = ec->cfp->sp;
2793 rb_control_frame_t *cfp = ec->cfp;
2794 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2795
2796 for (i = 0; i < (long)(sp - p); i++) {
2797 VALUE ref = p[i];
2798 VALUE update = rb_gc_location(ref);
2799 if (ref != update) {
2800 p[i] = update;
2801 }
2802 }
2803
2804 while (cfp != limit_cfp) {
2805 const VALUE *ep = cfp->ep;
2806 cfp->self = rb_gc_location(cfp->self);
2807 cfp->iseq = (rb_iseq_t *)rb_gc_location((VALUE)cfp->iseq);
2808 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
2809
2810 if (!VM_ENV_LOCAL_P(ep)) {
2811 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2812 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
2813 VM_FORCE_WRITE(&prev_ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(prev_ep[VM_ENV_DATA_INDEX_ENV]));
2814 }
2815
2816 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
2819 }
2820 }
2821
2823 }
2824 }
2825}
2826
2828mark_local_storage_i(VALUE local, void *data)
2829{
2831 return ID_TABLE_CONTINUE;
2832}
2833
2834void
2836{
2837 /* mark VM stack */
2838 if (ec->vm_stack) {
2839 VM_ASSERT(ec->cfp);
2840 VALUE *p = ec->vm_stack;
2841 VALUE *sp = ec->cfp->sp;
2842 rb_control_frame_t *cfp = ec->cfp;
2843 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2844
2845 VM_ASSERT(sp == ec->cfp->sp);
2846 rb_gc_mark_vm_stack_values((long)(sp - p), p);
2847
2848 while (cfp != limit_cfp) {
2849 const VALUE *ep = cfp->ep;
2850 VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
2854
2855 if (!VM_ENV_LOCAL_P(ep)) {
2856 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2857 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
2859 }
2860
2861 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
2864 }
2865 }
2866
2868 }
2869 }
2870
2871 /* mark machine stack */
2872 if (ec->machine.stack_start && ec->machine.stack_end &&
2873 ec != GET_EC() /* marked for current ec at the first stage of marking */
2874 ) {
2876 rb_gc_mark_locations((VALUE *)&ec->machine.regs,
2877 (VALUE *)(&ec->machine.regs) +
2878 sizeof(ec->machine.regs) / (sizeof(VALUE)));
2879 }
2880
2883 if (ec->local_storage) {
2884 rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
2885 }
2889}
2890
2895
2896static void
2897thread_compact(void *ptr)
2898{
2899 rb_thread_t *th = ptr;
2900
2901 th->self = rb_gc_location(th->self);
2902
2903 if (!th->root_fiber) {
2905 }
2906}
2907
2908static void
2909thread_mark(void *ptr)
2910{
2911 rb_thread_t *th = ptr;
2912 RUBY_MARK_ENTER("thread");
2914
2915 /* mark ruby objects */
2916 switch (th->invoke_type) {
2917 case thread_invoke_type_proc:
2918 case thread_invoke_type_ractor_proc:
2921 break;
2922 case thread_invoke_type_func:
2924 break;
2925 default:
2926 break;
2927 }
2928
2929 rb_gc_mark(rb_ractor_self(th->ractor));
2937
2938 /* Ensure EC stack objects are pinned */
2944
2946
2947 RUBY_MARK_LEAVE("thread");
2948}
2949
2950static void
2951thread_free(void *ptr)
2952{
2953 rb_thread_t *th = ptr;
2954 RUBY_FREE_ENTER("thread");
2955
2956 if (th->locking_mutex != Qfalse) {
2957 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
2958 }
2959 if (th->keeping_mutexes != NULL) {
2960 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
2961 }
2962
2964
2965 if (th->vm && th->vm->ractor.main_thread == th) {
2966 RUBY_GC_INFO("MRI main thread\n");
2967 }
2968 else {
2969 ruby_xfree(ptr);
2970 }
2971
2972 RUBY_FREE_LEAVE("thread");
2973}
2974
2975static size_t
2976thread_memsize(const void *ptr)
2977{
2978 const rb_thread_t *th = ptr;
2979 size_t size = sizeof(rb_thread_t);
2980
2981 if (!th->root_fiber) {
2982 size += th->ec->vm_stack_size * sizeof(VALUE);
2983 }
2984 if (th->ec->local_storage) {
2986 }
2987 return size;
2988}
2989
2990#define thread_data_type ruby_threadptr_data_type
2992 "VM/thread",
2993 {
2994 thread_mark,
2995 thread_free,
2996 thread_memsize,
2997 thread_compact,
2998 },
3000};
3001
3002VALUE
3004{
3006 return Qtrue;
3007 }
3008 else {
3009 return Qfalse;
3010 }
3011}
3012
3013static VALUE
3014thread_alloc(VALUE klass)
3015{
3016 VALUE obj;
3017 rb_thread_t *th;
3019
3020 return obj;
3021}
3022
3023inline void
3025{
3026 ec->vm_stack = stack;
3027 ec->vm_stack_size = size;
3028}
3029
3030void
3032{
3033 rb_ec_set_vm_stack(ec, stack, size);
3034
3035 ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3036
3037 vm_push_frame(ec,
3038 NULL /* dummy iseq */,
3040 Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
3041 0 /* dummy cref/me */,
3042 0 /* dummy pc */, ec->vm_stack, 0, 0
3043 );
3044}
3045
3046void
3048{
3049 rb_ec_set_vm_stack(ec, NULL, 0);
3050
3051 // Avoid dangling pointers:
3052 ec->cfp = NULL;
3053}
3054
3055static void
3056th_init(rb_thread_t *th, VALUE self)
3057{
3058 th->self = self;
3060
3061 /* All threads are blocking until a non-blocking fiber is scheduled */
3062 th->blocking = 1;
3063 th->scheduler = Qnil;
3064
3065 if (self == 0) {
3066 size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
3068 }
3069 else {
3070 VM_ASSERT(th->ec->cfp == NULL);
3071 VM_ASSERT(th->ec->vm_stack == NULL);
3072 VM_ASSERT(th->ec->vm_stack_size == 0);
3073 }
3074
3075 th->status = THREAD_RUNNABLE;
3076 th->last_status = Qnil;
3077 th->ec->errinfo = Qnil;
3078 th->ec->root_svar = Qfalse;
3081#ifdef NON_SCALAR_THREAD_ID
3082 th->thread_id_string[0] = '\0';
3083#endif
3084
3085 th->value = Qundef;
3086
3087#if OPT_CALL_THREADED_CODE
3088 th->retval = Qundef;
3089#endif
3090 th->name = Qnil;
3092 th->ext_config.ractor_safe = true;
3093}
3094
3095static VALUE
3096ruby_thread_init(VALUE self)
3097{
3098 rb_thread_t *th = GET_THREAD();
3099 rb_thread_t *target_th = rb_thread_ptr(self);
3100 rb_vm_t *vm = th->vm;
3101
3102 target_th->vm = vm;
3103 th_init(target_th, self);
3104
3105 target_th->top_wrapper = 0;
3106 target_th->top_self = rb_vm_top_self();
3107 target_th->ec->root_svar = Qfalse;
3108 target_th->ractor = th->ractor;
3109
3110 return self;
3111}
3112
3113VALUE
3115{
3116 VALUE self = thread_alloc(klass);
3117 ruby_thread_init(self);
3118 return self;
3119}
3120
/* Evaluate `expr` with the current (frozen-core method) control frame
 * temporarily popped, so that C APIs such as rb_alias() observe the
 * calling frame instead — while keeping the current frame's stack
 * pointer in effect.  Incrementing cfp moves to the caller's frame
 * (control frames grow downward); its sp is saved and restored on the
 * way back, so the VM stack is left exactly as it was found. */
#define REWIND_CFP(expr) do { \
    rb_execution_context_t *ec__ = GET_EC(); \
    VALUE *const curr_sp = (ec__->cfp++)->sp; \
    VALUE *const saved_sp = ec__->cfp->sp; \
    ec__->cfp->sp = curr_sp; \
    expr; \
    (ec__->cfp--)->sp = saved_sp; \
} while (0)
3129
3130static VALUE
3131m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
3132{
3133 REWIND_CFP({
3134 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
3135 });
3136 return Qnil;
3137}
3138
3139static VALUE
3140m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
3141{
3142 REWIND_CFP({
3143 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
3144 });
3145 return Qnil;
3146}
3147
3148static VALUE
3149m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
3150{
3151 REWIND_CFP({
3152 ID mid = SYM2ID(sym);
3153 rb_undef(cbase, mid);
3154 rb_clear_method_cache(self, mid);
3155 });
3156 return Qnil;
3157}
3158
3159static VALUE
3160m_core_set_postexe(VALUE self)
3161{
3163 return Qnil;
3164}
3165
3166static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
3167
3168static VALUE
3169core_hash_merge(VALUE hash, long argc, const VALUE *argv)
3170{
3171 Check_Type(hash, T_HASH);
3172 VM_ASSERT(argc % 2 == 0);
3174 return hash;
3175}
3176
3177static VALUE
3178m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
3179{
3180 VALUE hash = argv[0];
3181
3182 REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
3183
3184 return hash;
3185}
3186
/* rb_hash_foreach() callback used by core_hash_merge_kwd(): copies one
 * key/value pair into the destination hash passed as `hash`. */
static int
kwmerge_i(VALUE key, VALUE value, VALUE hash)
{
    rb_hash_aset(hash, key, value);
    return ST_CONTINUE;
}
3193
3194static VALUE
3195m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
3196{
3197 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
3198 return hash;
3199}
3200
3201static VALUE
3202m_core_make_shareable(VALUE recv, VALUE obj)
3203{
3204 return rb_ractor_make_shareable(obj);
3205}
3206
3207static VALUE
3208m_core_make_shareable_copy(VALUE recv, VALUE obj)
3209{
3211}
3212
3213static VALUE
3214m_core_ensure_shareable(VALUE recv, VALUE obj, VALUE name)
3215{
3216 return rb_ractor_ensure_shareable(obj, name);
3217}
3218
3219static VALUE
3220core_hash_merge_kwd(VALUE hash, VALUE kw)
3221{
3222 rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
3223 return hash;
3224}
3225
3226/* Returns true if JIT is enabled */
3227static VALUE
3228mjit_enabled_p(VALUE _)
3229{
3230 return mjit_enabled ? Qtrue : Qfalse;
3231}
3232
3233static VALUE
3234mjit_pause_m(int argc, VALUE *argv, RB_UNUSED_VAR(VALUE self))
3235{
3236 VALUE options = Qnil;
3237 VALUE wait = Qtrue;
3238 rb_scan_args(argc, argv, "0:", &options);
3239
3240 if (!NIL_P(options)) {
3241 static ID keyword_ids[1];
3242 if (!keyword_ids[0])
3243 keyword_ids[0] = rb_intern("wait");
3244 rb_get_kwargs(options, keyword_ids, 0, 1, &wait);
3245 }
3246
3247 return mjit_pause(RTEST(wait));
3248}
3249
3250static VALUE
3251mjit_resume_m(VALUE _)
3252{
3253 return mjit_resume();
3254}
3255
3256extern VALUE *rb_gc_stack_start;
3257extern size_t rb_gc_stack_maxsize;
3258
3259/* debug functions */
3260
3261/* :nodoc: */
3262static VALUE
3263sdr(VALUE self)
3264{
3266 return Qnil;
3267}
3268
3269/* :nodoc: */
3270static VALUE
3271nsdr(VALUE self)
3272{
3273 VALUE ary = rb_ary_new();
3274#if HAVE_BACKTRACE
3275#include <execinfo.h>
3276#define MAX_NATIVE_TRACE 1024
3277 static void *trace[MAX_NATIVE_TRACE];
3278 int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
3279 char **syms = backtrace_symbols(trace, n);
3280 int i;
3281
3282 if (syms == 0) {
3283 rb_memerror();
3284 }
3285
3286 for (i=0; i<n; i++) {
3287 rb_ary_push(ary, rb_str_new2(syms[i]));
3288 }
3289 free(syms); /* OK */
3290#endif
3291 return ary;
3292}
3293
3294#if VM_COLLECT_USAGE_DETAILS
3295static VALUE usage_analysis_insn_start(VALUE self);
3296static VALUE usage_analysis_operand_start(VALUE self);
3297static VALUE usage_analysis_register_start(VALUE self);
3298static VALUE usage_analysis_insn_stop(VALUE self);
3299static VALUE usage_analysis_operand_stop(VALUE self);
3300static VALUE usage_analysis_register_stop(VALUE self);
3301static VALUE usage_analysis_insn_running(VALUE self);
3302static VALUE usage_analysis_operand_running(VALUE self);
3303static VALUE usage_analysis_register_running(VALUE self);
3304static VALUE usage_analysis_insn_clear(VALUE self);
3305static VALUE usage_analysis_operand_clear(VALUE self);
3306static VALUE usage_analysis_register_clear(VALUE self);
3307#endif
3308
3309static VALUE
3310f_raise(int c, VALUE *v, VALUE _)
3311{
3312 return rb_f_raise(c, v);
3313}
3314
3315static VALUE
3316f_proc(VALUE _)
3317{
3318 return rb_block_proc();
3319}
3320
3321static VALUE
3322f_lambda(VALUE _)
3323{
3324 return rb_block_lambda();
3325}
3326
3327static VALUE
3328vm_mtbl(VALUE self, VALUE obj, VALUE sym)
3329{
3330 vm_mtbl_dump(CLASS_OF(obj), RTEST(sym) ? SYM2ID(sym) : 0);
3331 return Qnil;
3332}
3333
3334static VALUE
3335vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
3336{
3337 vm_mtbl_dump(obj, RTEST(sym) ? SYM2ID(sym) : 0);
3338 return Qnil;
3339}
3340
3341void
3343{
3344 VALUE opts;
3345 VALUE klass;
3346 VALUE fcore;
3347 VALUE mjit;
3348
3349 /*
3350 * Document-class: RubyVM
3351 *
3352 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
3353 * other Ruby implementations such as JRuby and TruffleRuby.
3354 *
3355 * The RubyVM module provides some access to MRI internals.
3356 * This module is for very limited purposes, such as debugging,
3357 * prototyping, and research. Normal users must not use it.
3358 * This module is not portable between Ruby implementations.
3359 */
3363 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
3364#if USE_DEBUG_COUNTER
3365 rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
3366 rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
3367#endif
3368
3369 /* FrozenCore (hidden) */
3371 rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
3372 RBASIC(fcore)->flags = T_ICLASS;
3373 klass = rb_singleton_class(fcore);
3374 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
3375 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
3376 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
3377 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
3378 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
3379 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
3380 rb_define_method_id(klass, id_core_raise, f_raise, -1);
3381 rb_define_method_id(klass, idProc, f_proc, 0);
3382 rb_define_method_id(klass, idLambda, f_lambda, 0);
3383 rb_define_method(klass, "make_shareable", m_core_make_shareable, 1);
3384 rb_define_method(klass, "make_shareable_copy", m_core_make_shareable_copy, 1);
3385 rb_define_method(klass, "ensure_shareable", m_core_ensure_shareable, 2);
3386 rb_obj_freeze(fcore);
3387 RBASIC_CLEAR_CLASS(klass);
3388 rb_obj_freeze(klass);
3390 rb_mRubyVMFrozenCore = fcore;
3391
3392 /* ::RubyVM::MJIT
3393 * Provides access to the Method JIT compiler of MRI.
3394 * Of course, this module is MRI specific.
3395 */
3396 mjit = rb_define_module_under(rb_cRubyVM, "MJIT");
3397 rb_define_singleton_method(mjit, "enabled?", mjit_enabled_p, 0);
3398 rb_define_singleton_method(mjit, "pause", mjit_pause_m, -1);
3399 rb_define_singleton_method(mjit, "resume", mjit_resume_m, 0);
3400
3401 /*
3402 * Document-class: Thread
3403 *
3404 * Threads are the Ruby implementation for a concurrent programming model.
3405 *
3406 * Programs that require multiple threads of execution are a perfect
3407 * candidate for Ruby's Thread class.
3408 *
3409 * For example, we can create a new thread separate from the main thread's
3410 * execution using ::new.
3411 *
3412 * thr = Thread.new { puts "What's the big deal" }
3413 *
3414 * Then we are able to pause the execution of the main thread and allow
3415 * our new thread to finish, using #join:
3416 *
3417 * thr.join #=> "What's the big deal"
3418 *
3419 * If we don't call +thr.join+ before the main thread terminates, then all
3420 * other threads including +thr+ will be killed.
3421 *
3422 * Alternatively, you can use an array for handling multiple threads at
3423 * once, like in the following example:
3424 *
3425 * threads = []
3426 * threads << Thread.new { puts "What's the big deal" }
3427 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
3428 *
3429 * After creating a few threads we wait for them all to finish
3430 * consecutively.
3431 *
3432 * threads.each { |thr| thr.join }
3433 *
3434 * To retrieve the last value of a thread, use #value
3435 *
3436 * thr = Thread.new { sleep 1; "Useful value" }
3437 * thr.value #=> "Useful value"
3438 *
3439 * === Thread initialization
3440 *
3441 * In order to create new threads, Ruby provides ::new, ::start, and
3442 * ::fork. A block must be provided with each of these methods, otherwise
3443 * a ThreadError will be raised.
3444 *
3445 * When subclassing the Thread class, the +initialize+ method of your
3446 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
3447 * call super in your +initialize+ method.
3448 *
3449 * === Thread termination
3450 *
3451 * For terminating threads, Ruby provides a variety of ways to do this.
3452 *
3453 * The class method ::kill, is meant to exit a given thread:
3454 *
3455 * thr = Thread.new { sleep }
3456 * Thread.kill(thr) # sends exit() to thr
3457 *
3458 * Alternatively, you can use the instance method #exit, or any of its
3459 * aliases #kill or #terminate.
3460 *
3461 * thr.exit
3462 *
3463 * === Thread status
3464 *
3465 * Ruby provides a few instance methods for querying the state of a given
3466 * thread. To get a string with the current thread's state use #status
3467 *
3468 * thr = Thread.new { sleep }
3469 * thr.status # => "sleep"
3470 * thr.exit
3471 * thr.status # => false
3472 *
3473 * You can also use #alive? to tell if the thread is running or sleeping,
3474 * and #stop? if the thread is dead or sleeping.
3475 *
3476 * === Thread variables and scope
3477 *
3478 * Since threads are created with blocks, the same rules apply to other
3479 * Ruby blocks for variable scope. Any local variables created within this
3480 * block are accessible to only this thread.
3481 *
3482 * ==== Fiber-local vs. Thread-local
3483 *
3484 * Each fiber has its own bucket for Thread#[] storage. When you set a
3485 * new fiber-local it is only accessible within this Fiber. To illustrate:
3486 *
3487 * Thread.new {
3488 * Thread.current[:foo] = "bar"
3489 * Fiber.new {
3490 * p Thread.current[:foo] # => nil
3491 * }.resume
3492 * }.join
3493 *
3494 * This example uses #[] for getting and #[]= for setting fiber-locals,
3495 * you can also use #keys to list the fiber-locals for a given
3496 * thread and #key? to check if a fiber-local exists.
3497 *
3498 * When it comes to thread-locals, they are accessible within the entire
3499 * scope of the thread. Given the following example:
3500 *
3501 * Thread.new{
3502 * Thread.current.thread_variable_set(:foo, 1)
3503 * p Thread.current.thread_variable_get(:foo) # => 1
3504 * Fiber.new{
3505 * Thread.current.thread_variable_set(:foo, 2)
3506 * p Thread.current.thread_variable_get(:foo) # => 2
3507 * }.resume
3508 * p Thread.current.thread_variable_get(:foo) # => 2
3509 * }.join
3510 *
3511 * You can see that the thread-local +:foo+ carried over into the fiber
3512 * and was changed to +2+ by the end of the thread.
3513 *
3514 * This example makes use of #thread_variable_set to create new
3515 * thread-locals, and #thread_variable_get to reference them.
3516 *
3517 * There is also #thread_variables to list all thread-locals, and
3518 * #thread_variable? to check if a given thread-local exists.
3519 *
3520 * === Exception handling
3521 *
3522 * When an unhandled exception is raised inside a thread, it will
3523 * terminate. By default, this exception will not propagate to other
3524 * threads. The exception is stored and when another thread calls #value
3525 * or #join, the exception will be re-raised in that thread.
3526 *
3527 * t = Thread.new{ raise 'something went wrong' }
3528 * t.value #=> RuntimeError: something went wrong
3529 *
3530 * An exception can be raised from outside the thread using the
3531 * Thread#raise instance method, which takes the same parameters as
3532 * Kernel#raise.
3533 *
3534 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
3535 * true, or $DEBUG = true will cause a subsequent unhandled exception
3536 * raised in a thread to be automatically re-raised in the main thread.
3537 *
3538 * With the addition of the class method ::handle_interrupt, you can now
3539 * handle exceptions asynchronously with threads.
3540 *
3541 * === Scheduling
3542 *
3543 * Ruby provides a few ways to support scheduling threads in your program.
3544 *
3545 * The first way is by using the class method ::stop, to put the current
3546 * running thread to sleep and schedule the execution of another thread.
3547 *
3548 * Once a thread is asleep, you can use the instance method #wakeup to
3549 * mark your thread as eligible for scheduling.
3550 *
3551 * You can also try ::pass, which attempts to pass execution to another
3552 * thread but is dependent on the OS whether a running thread will switch
3553 * or not. The same goes for #priority, which lets you hint to the thread
3554 * scheduler which threads you want to take precedence when passing
3555 * execution. This method is also dependent on the OS and may be ignored
3556 * on some platforms.
3557 *
3558 */
3561
3562#if VM_COLLECT_USAGE_DETAILS
3563 /* ::RubyVM::USAGE_ANALYSIS_* */
3564#define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
3565 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3566 define_usage_analysis_hash(INSN);
3567 define_usage_analysis_hash(REGS);
3568 define_usage_analysis_hash(INSN_BIGRAM);
3569
3570 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
3571 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
3572 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
3573 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
3574 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
3575 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
3576 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
3577 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
3578 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
3579 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
3580 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
3581 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
3582#endif
3583
3584 /* ::RubyVM::OPTS
3585 * An Array of VM build options.
3586 * This constant is MRI specific.
3587 */
3588 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
3589
3590#if OPT_DIRECT_THREADED_CODE
3591 rb_ary_push(opts, rb_str_new2("direct threaded code"));
3592#elif OPT_TOKEN_THREADED_CODE
3593 rb_ary_push(opts, rb_str_new2("token threaded code"));
3594#elif OPT_CALL_THREADED_CODE
3595 rb_ary_push(opts, rb_str_new2("call threaded code"));
3596#endif
3597
3598#if OPT_STACK_CACHING
3599 rb_ary_push(opts, rb_str_new2("stack caching"));
3600#endif
3601#if OPT_OPERANDS_UNIFICATION
3602 rb_ary_push(opts, rb_str_new2("operands unification"));
3603#endif
3604#if OPT_INSTRUCTIONS_UNIFICATION
3605 rb_ary_push(opts, rb_str_new2("instructions unification"));
3606#endif
3607#if OPT_INLINE_METHOD_CACHE
3608 rb_ary_push(opts, rb_str_new2("inline method cache"));
3609#endif
3610#if OPT_BLOCKINLINING
3611 rb_ary_push(opts, rb_str_new2("block inlining"));
3612#endif
3613
3614 /* ::RubyVM::INSTRUCTION_NAMES
3615 * A list of bytecode instruction names in MRI.
3616 * This constant is MRI specific.
3617 */
3618 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
3619
3620 /* ::RubyVM::DEFAULT_PARAMS
3621 * This constant exposes the VM's default parameters.
3622 * Note that changing these values does not affect VM execution.
3623 * Specification is not stable and you should not depend on this value.
3624 * Of course, this constant is MRI specific.
3625 */
3626 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
3627
3628 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
3629#if VMDEBUG
3630 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
3631 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
3632 rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
3633 rb_define_singleton_method(rb_cRubyVM, "mtbl2", vm_mtbl2, 2);
3634#else
3635 (void)sdr;
3636 (void)nsdr;
3637 (void)vm_mtbl;
3638 (void)vm_mtbl2;
3639#endif
3640
3641 /* VM bootstrap: phase 2 */
3642 {
3644 rb_thread_t *th = GET_THREAD();
3645 VALUE filename = rb_fstring_lit("<main>");
3646 const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3647
3648 // Ractor setup
3649 rb_ractor_main_setup(vm, th->ractor, th);
3650
3651 /* create vm object */
3652 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
3653
3654 /* create main thread */
3656 vm->ractor.main_thread = th;
3657 vm->ractor.main_ractor = th->ractor;
3658 th->vm = vm;
3659 th->top_wrapper = 0;
3660 th->top_self = rb_vm_top_self();
3661
3663 th->ec->cfp->iseq = iseq;
3664 th->ec->cfp->pc = iseq->body->iseq_encoded;
3665 th->ec->cfp->self = th->top_self;
3666
3667 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
3668 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
3669
3670 /*
3671 * The Binding of the top level scope
3672 */
3673 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
3674
3676 }
3677 vm_init_redefined_flag();
3678
3684
3685 /* vm_backtrace.c */
3687}
3688
3689void
3691{
3692 rb_thread_t *th = GET_VM()->ractor.main_thread;
3693 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
3694 --cfp;
3695
3697}
3698
3699extern const struct st_hash_type rb_fstring_hash_type;
3700
3701void
3703{
3704 /* VM bootstrap: phase 1 */
3705 rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
3706 rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
3707 if (!vm || !th) {
3708 fprintf(stderr, "[FATAL] failed to allocate memory\n");
3709 exit(EXIT_FAILURE);
3710 }
3711 MEMZERO(th, rb_thread_t, 1);
3712 vm_init2(vm);
3713
3717
3719 th->vm = vm;
3720 th_init(th, 0);
3722 rb_ractor_set_current_ec(th->ractor, th->ec);
3724
3728}
3729
3730void
3732{
3733 rb_vm_t *vm = GET_VM();
3734
3736
3737 /* initialize mark object array, hash */
3741}
3742
3743/* top self */
3744
/* to_s (and aliased inspect) of the top-level "main" object. */
static VALUE
main_to_s(VALUE obj)
{
    return rb_str_new2("main");
}
3750
3751VALUE
3753{
3754 return GET_VM()->top_self;
3755}
3756
3757void
3759{
3760 rb_vm_t *vm = GET_VM();
3761
3763 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
3764 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
3765}
3766
3767VALUE *
3769{
3770 rb_ractor_t *cr = GET_RACTOR();
3771 return &cr->verbose;
3772}
3773
3774VALUE *
3776{
3777 rb_ractor_t *cr = GET_RACTOR();
3778 return &cr->debug;
3779}
3780
3781/* iseq.c */
3783 VALUE insn, int op_no, VALUE op,
3784 int len, size_t pos, VALUE *pnop, VALUE child);
3785
3786st_table *
3788{
3789 return GET_VM()->frozen_strings;
3790}
3791
3792#if VM_COLLECT_USAGE_DETAILS
3793
3794#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3795
3796/* uh = {
3797 * insn(Fixnum) => ihash(Hash)
3798 * }
3799 * ihash = {
3800 * -1(Fixnum) => count, # insn usage
3801 * 0(Fixnum) => ophash, # operand usage
3802 * }
3803 * ophash = {
3804 * val(interned string) => count(Fixnum)
3805 * }
3806 */
3807static void
3808vm_analysis_insn(int insn)
3809{
3810 ID usage_hash;
3811 ID bigram_hash;
3812 static int prev_insn = -1;
3813
3814 VALUE uh;
3815 VALUE ihash;
3816 VALUE cv;
3817
3818 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3819 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
3820 uh = rb_const_get(rb_cRubyVM, usage_hash);
3821 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
3822 ihash = rb_hash_new();
3823 HASH_ASET(uh, INT2FIX(insn), ihash);
3824 }
3825 if ((cv = rb_hash_aref(ihash, INT2FIX(-1))) == Qnil) {
3826 cv = INT2FIX(0);
3827 }
3828 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
3829
3830 /* calc bigram */
3831 if (prev_insn != -1) {
3832 VALUE bi;
3833 VALUE ary[2];
3834 VALUE cv;
3835
3836 ary[0] = INT2FIX(prev_insn);
3837 ary[1] = INT2FIX(insn);
3838 bi = rb_ary_new4(2, &ary[0]);
3839
3840 uh = rb_const_get(rb_cRubyVM, bigram_hash);
3841 if ((cv = rb_hash_aref(uh, bi)) == Qnil) {
3842 cv = INT2FIX(0);
3843 }
3844 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
3845 }
3846 prev_insn = insn;
3847}
3848
3849static void
3850vm_analysis_operand(int insn, int n, VALUE op)
3851{
3852 ID usage_hash;
3853
3854 VALUE uh;
3855 VALUE ihash;
3856 VALUE ophash;
3857 VALUE valstr;
3858 VALUE cv;
3859
3860 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3861
3862 uh = rb_const_get(rb_cRubyVM, usage_hash);
3863 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
3864 ihash = rb_hash_new();
3865 HASH_ASET(uh, INT2FIX(insn), ihash);
3866 }
3867 if ((ophash = rb_hash_aref(ihash, INT2FIX(n))) == Qnil) {
3868 ophash = rb_hash_new();
3869 HASH_ASET(ihash, INT2FIX(n), ophash);
3870 }
3871 /* intern */
3872 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
3873
3874 /* set count */
3875 if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
3876 cv = INT2FIX(0);
3877 }
3878 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
3879}
3880
3881static void
3882vm_analysis_register(int reg, int isset)
3883{
3884 ID usage_hash;
3885 VALUE uh;
3886 VALUE valstr;
3887 static const char regstrs[][5] = {
3888 "pc", /* 0 */
3889 "sp", /* 1 */
3890 "ep", /* 2 */
3891 "cfp", /* 3 */
3892 "self", /* 4 */
3893 "iseq", /* 5 */
3894 };
3895 static const char getsetstr[][4] = {
3896 "get",
3897 "set",
3898 };
3899 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
3900
3901 VALUE cv;
3902
3903 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
3904 if (syms[0] == 0) {
3905 char buff[0x10];
3906 int i;
3907
3908 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
3909 int j;
3910 for (j = 0; j < 2; j++) {
3911 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
3912 syms[i][j] = ID2SYM(rb_intern(buff));
3913 }
3914 }
3915 }
3916 valstr = syms[reg][isset];
3917
3918 uh = rb_const_get(rb_cRubyVM, usage_hash);
3919 if ((cv = rb_hash_aref(uh, valstr)) == Qnil) {
3920 cv = INT2FIX(0);
3921 }
3922 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
3923}
3924
3925#undef HASH_ASET
3926
3927static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
3928static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
3929static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
3930
3931/* :nodoc: */
3932static VALUE
3933usage_analysis_insn_start(VALUE self)
3934{
3935 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
3936 return Qnil;
3937}
3938
3939/* :nodoc: */
3940static VALUE
3941usage_analysis_operand_start(VALUE self)
3942{
3943 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
3944 return Qnil;
3945}
3946
3947/* :nodoc: */
3948static VALUE
3949usage_analysis_register_start(VALUE self)
3950{
3951 ruby_vm_collect_usage_func_register = vm_analysis_register;
3952 return Qnil;
3953}
3954
3955/* :nodoc: */
3956static VALUE
3957usage_analysis_insn_stop(VALUE self)
3958{
3959 ruby_vm_collect_usage_func_insn = 0;
3960 return Qnil;
3961}
3962
3963/* :nodoc: */
3964static VALUE
3965usage_analysis_operand_stop(VALUE self)
3966{
3967 ruby_vm_collect_usage_func_operand = 0;
3968 return Qnil;
3969}
3970
3971/* :nodoc: */
3972static VALUE
3973usage_analysis_register_stop(VALUE self)
3974{
3975 ruby_vm_collect_usage_func_register = 0;
3976 return Qnil;
3977}
3978
3979/* :nodoc: */
3980static VALUE
3981usage_analysis_insn_running(VALUE self)
3982{
3983 if (ruby_vm_collect_usage_func_insn == 0) return Qfalse;
3984 return Qtrue;
3985}
3986
3987/* :nodoc: */
3988static VALUE
3989usage_analysis_operand_running(VALUE self)
3990{
3991 if (ruby_vm_collect_usage_func_operand == 0) return Qfalse;
3992 return Qtrue;
3993}
3994
3995/* :nodoc: */
3996static VALUE
3997usage_analysis_register_running(VALUE self)
3998{
3999 if (ruby_vm_collect_usage_func_register == 0) return Qfalse;
4000 return Qtrue;
4001}
4002
4003/* :nodoc: */
4004static VALUE
4005usage_analysis_insn_clear(VALUE self)
4006{
4007 ID usage_hash;
4008 ID bigram_hash;
4009 VALUE uh;
4010 VALUE bh;
4011
4012 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4013 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4014 uh = rb_const_get(rb_cRubyVM, usage_hash);
4015 bh = rb_const_get(rb_cRubyVM, bigram_hash);
4016 rb_hash_clear(uh);
4017 rb_hash_clear(bh);
4018
4019 return Qtrue;
4020}
4021
4022/* :nodoc: */
4023static VALUE
4024usage_analysis_operand_clear(VALUE self)
4025{
4026 ID usage_hash;
4027 VALUE uh;
4028
4029 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4030 uh = rb_const_get(rb_cRubyVM, usage_hash);
4031 rb_hash_clear(uh);
4032
4033 return Qtrue;
4034}
4035
4036/* :nodoc: */
4037static VALUE
4038usage_analysis_register_clear(VALUE self)
4039{
4040 ID usage_hash;
4041 VALUE uh;
4042
4043 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4044 uh = rb_const_get(rb_cRubyVM, usage_hash);
4045 rb_hash_clear(uh);
4046
4047 return Qtrue;
4048}
4049
4050#else
4051
4052MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = 0;
4053MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = 0;
4054MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = 0;
4055
4056#endif
4057
4058#if VM_COLLECT_USAGE_DETAILS
4059/* @param insn instruction number */
4060static void
4061vm_collect_usage_insn(int insn)
4062{
4063 if (RUBY_DTRACE_INSN_ENABLED()) {
4064 RUBY_DTRACE_INSN(rb_insns_name(insn));
4065 }
4066 if (ruby_vm_collect_usage_func_insn)
4067 (*ruby_vm_collect_usage_func_insn)(insn);
4068}
4069
4070/* @param insn instruction number
4071 * @param n n-th operand
4072 * @param op operand value
4073 */
4074static void
4075vm_collect_usage_operand(int insn, int n, VALUE op)
4076{
4077 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
4078 VALUE valstr;
4079
4080 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
4081
4082 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
4083 RB_GC_GUARD(valstr);
4084 }
4085 if (ruby_vm_collect_usage_func_operand)
4086 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
4087}
4088
4089/* @param reg register id. see code of vm_analysis_register() */
4090/* @param isset 0: read, 1: write */
4091static void
4092vm_collect_usage_register(int reg, int isset)
4093{
4094 if (ruby_vm_collect_usage_func_register)
4095 (*ruby_vm_collect_usage_func_register)(reg, isset);
4096}
4097#endif
4098
MJIT_FUNC_EXPORTED const struct rb_callcache *
{
    /* NOTE(review): the function-name line was lost in extraction; the body
     * returns the address of the shared empty call cache, presumably
     * rb_vm_empty_cc(void) -- confirm against vm.c. */
    return &vm_empty_cc;
}
4104
4105#endif /* #ifndef MJIT_HEADER */
4106
4107#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */
VALUE rb_cArray
Definition: array.c:40
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1301
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:4010
VALUE rb_ary_new(void)
Definition: array.c:749
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:846
VALUE rb_ary_join(VALUE ary, VALUE sep)
Definition: array.c:2780
#define RUBY_ASSERT_MESG(expr, mesg)
Asserts that the expression is truthy.
Definition: assert.h:159
#define ALWAYS_INLINE(x)
Definition: attributes.h:86
#define RB_UNUSED_VAR(x)
Definition: attributes.h:168
#define PUREFUNC(x)
Definition: attributes.h:54
#define NORETURN(x)
Definition: attributes.h:152
#define Max(a, b)
Definition: bigdecimal.h:348
#define Min(a, b)
Definition: bigdecimal.h:349
#define local
Definition: blast.c:36
VALUE rb_insns_name_array(void)
Definition: compile.c:9178
const char * rb_insns_name(int i)
Definition: compile.c:9172
Internal header for the compiler.
void rb_fiber_reset_root_local_storage(rb_thread_t *th)
Definition: cont.c:2438
#define AREF(s, idx)
Definition: cparse.c:97
#define OR(d, d0, d1, bl)
Definition: crypt.c:125
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
Definition: cxxanyargs.hpp:653
#define rb_define_method_id(klass, mid, func, arity)
Defines klass#mid.
Definition: cxxanyargs.hpp:656
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
Definition: cxxanyargs.hpp:668
#define DIV(n, d)
Definition: date_core.c:165
#define MOD(n, d)
Definition: date_core.c:166
struct RIMemo * ptr
Definition: debug.c:88
#define MJIT_STATIC
Definition: dllexport.h:71
#define MJIT_FUNC_EXPORTED
Definition: dllexport.h:55
#define free(x)
Definition: dln.c:52
#define sym(name)
Definition: enumerator.c:4007
#define MATCH(s)
uint8_t len
Definition: escape.c:17
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
#define numberof(array)
Definition: etc.c:649
VALUE rb_eLocalJumpError
Definition: eval.c:48
VALUE rb_f_raise(int argc, VALUE *argv)
Definition: eval.c:771
VALUE rb_eSysStackError
Definition: eval.c:49
#define EC_EXEC_TAG()
Definition: eval_intern.h:193
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:196
#define EXIT_FAILURE
Definition: eval_intern.h:32
#define EC_POP_TAG()
Definition: eval_intern.h:138
#define rb_ec_raised_reset(ec, f)
Definition: eval_intern.h:271
@ RAISED_STACKOVERFLOW
Definition: eval_intern.h:267
void rb_set_end_proc(void(*func)(VALUE), VALUE data)
Definition: eval_jump.c:59
void rb_call_end_proc(VALUE data)
Definition: eval_jump.c:11
#define RUBY_EVENT_END
Definition: event.h:32
#define RUBY_EVENT_B_RETURN
Definition: event.h:42
#define RUBY_EVENT_RETURN
Definition: event.h:34
#define RUBY_EVENT_C_RETURN
Definition: event.h:36
uint32_t rb_event_flag_t
Definition: event.h:66
#define RUBY_EVENT_CALL
Definition: event.h:33
#define RSTRING_PTR(string)
Definition: fbuffer.h:19
#define MAYBE_UNUSED
Definition: ffi_common.h:30
#define UNLIKELY(x)
Definition: ffi_common.h:126
#define FL_SINGLETON
Definition: fl_type.h:49
#define FL_USHIFT
Definition: fl_type.h:61
@ RUBY_FL_SHAREABLE
Definition: fl_type.h:169
#define PRIsVALUE
Definition: function.c:10
#define stack_check(ec, water_mark)
Definition: gc.c:5540
void ruby_xfree(void *x)
Deallocates a storage instance.
Definition: gc.c:10914
void rb_memerror(void)
Definition: gc.c:10309
VALUE rb_gc_location(VALUE value)
Definition: gc.c:9003
void ruby_mimfree(void *ptr)
Definition: gc.c:10979
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:5869
void rb_gc_mark_movable(VALUE ptr)
Definition: gc.c:6106
void rb_gc_mark_maybe(VALUE obj)
Definition: gc.c:5931
void rb_mark_tbl(st_table *tbl)
Definition: gc.c:5893
void * ruby_mimmalloc(size_t size)
Definition: gc.c:10951
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:5580
rb_objspace_t * rb_objspace_alloc(void)
Definition: gc.c:1595
void rb_gc_update_tbl_refs(st_table *ptr)
Definition: gc.c:8850
void rb_gc_mark(VALUE ptr)
Definition: gc.c:6112
void rb_gc_mark_values(long n, const VALUE *values)
Definition: gc.c:5596
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
Definition: gc.c:9895
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2412
void rb_objspace_free(rb_objspace_t *objspace)
Definition: gc.c:1610
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
Definition: gc.c:5619
void rb_gc_register_mark_object(VALUE obj)
Inform the garbage collector that object is a live Ruby object that should not be moved.
Definition: gc.c:8022
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:65
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:64
#define RUBY_MARK_MOVABLE_UNLESS_NULL(ptr)
Definition: gc.h:71
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:66
#define RUBY_GC_INFO
Definition: gc.h:68
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:67
#define RUBY_MARK_UNLESS_NULL(ptr)
Definition: gc.h:75
VALUE rb_cTime
Definition: time.c:645
VALUE rb_cInteger
Definition: numeric.c:191
#define CLASS_OF
Definition: globals.h:153
VALUE rb_cBinding
Definition: proc.c:45
VALUE rb_cRegexp
Definition: re.c:2301
VALUE rb_cSymbol
Definition: string.c:81
VALUE rb_cFloat
Definition: numeric.c:190
VALUE rb_cProc
Definition: proc.c:46
VALUE rb_cString
Definition: string.c:80
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:748
VALUE rb_class_new(VALUE super)
Creates a new class.
Definition: class.c:253
VALUE rb_singleton_class(VALUE obj)
Returns the singleton class of obj.
Definition: class.c:1924
VALUE rb_define_module_under(VALUE outer, const char *name)
Definition: class.c:895
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1777
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition: class.c:1999
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:2296
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Definition: class.c:2085
#define OBJ_FREEZE
Definition: fl_type.h:134
#define FL_TEST
Definition: fl_type.h:130
#define FL_SET_RAW
Definition: fl_type.h:129
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2917
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:712
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:1007
void rb_bug(const char *fmt,...)
Definition: error.c:768
VALUE rb_eTypeError
Definition: error.c:1057
VALUE rb_eRuntimeError
Definition: error.c:1055
VALUE rb_eArgError
Definition: error.c:1058
VALUE rb_cObject
Object class.
Definition: object.c:49
VALUE rb_obj_alloc(VALUE)
Allocates an instance of klass.
Definition: object.c:1900
VALUE rb_cNilClass
NilClass class.
Definition: object.c:53
VALUE rb_cFalseClass
FalseClass class.
Definition: object.c:55
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
Definition: object.c:585
VALUE rb_cBasicObject
BasicObject class.
Definition: object.c:47
VALUE rb_obj_freeze(VALUE)
Make the object unmodifiable.
Definition: object.c:1101
VALUE rb_cTrueClass
TrueClass class.
Definition: object.c:54
#define SIZE
Definition: gun.c:76
VALUE rb_to_hash_type(VALUE hash)
Definition: hash.c:1853
void rb_hash_bulk_insert(long argc, const VALUE *argv, VALUE hash)
Definition: hash.c:4777
void rb_hash_foreach(VALUE hash, rb_foreach_func *func, VALUE farg)
Definition: hash.c:1498
VALUE rb_cHash
Definition: hash.c:106
VALUE rb_hash_new_with_size(st_index_t size)
Definition: hash.c:1544
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:2046
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2901
VALUE rb_hash_dup(VALUE hash)
Definition: hash.c:1579
VALUE rb_hash_clear(VALUE hash)
Definition: hash.c:2819
VALUE rb_hash_new(void)
Definition: hash.c:1538
@ id_core_set_method_alias
Definition: id.h:120
@ id_core_set_postexe
Definition: id.h:125
@ id_core_hash_merge_kwd
Definition: id.h:127
@ id_core_set_variable_alias
Definition: id.h:121
@ id_core_hash_merge_ptr
Definition: id.h:126
@ id_core_undef_method
Definition: id.h:122
@ id_core_raise
Definition: id.h:128
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
Definition: id_table.c:124
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
Definition: id_table.c:311
struct rb_id_table * rb_id_table_create(size_t capa)
Definition: id_table.c:96
void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
Definition: id_table.c:292
rb_id_table_iterator_result
Definition: id_table.h:10
@ ID_TABLE_CONTINUE
Definition: id_table.h:11
#define THROW_DATA_P(err)
Definition: imemo.h:120
imemo_type
Definition: imemo.h:34
@ imemo_ifunc
iterator function
Definition: imemo.h:39
@ imemo_iseq
Definition: imemo.h:42
@ imemo_callcache
Definition: imemo.h:47
@ imemo_cref
class reference
Definition: imemo.h:36
Defines RBIMPL_HAS_BUILTIN.
#define rb_ary_new4
Definition: array.h:74
void rb_undef(VALUE, ID)
Definition: vm_method.c:1545
#define rb_exc_new2
Definition: error.h:30
#define rb_exc_new3
Definition: error.h:31
#define rb_check_arity
Definition: error.h:34
VALUE rb_block_proc(void)
Definition: proc.c:826
VALUE rb_block_lambda(void)
Definition: proc.c:845
VALUE rb_binding_new(void)
Definition: proc.c:364
#define rb_str_new2
Definition: string.h:276
VALUE rb_str_intern(VALUE)
Definition: symbol.c:840
VALUE rb_str_dup(VALUE)
Definition: string.c:1631
VALUE rb_const_get(VALUE, ID)
Definition: variable.c:2624
void rb_set_class_path(VALUE, VALUE, const char *)
Definition: variable.c:234
VALUE rb_class_path(VALUE)
Definition: variable.c:169
VALUE rb_attr_get(VALUE, ID)
Definition: variable.c:1242
void rb_alias_variable(ID, ID)
Definition: variable.c:843
void rb_alias(VALUE, ID, ID)
Definition: vm_method.c:1926
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:954
#define ID2SYM
Definition: symbol.h:44
const char * rb_id2name(ID)
Definition: symbol.c:944
#define SYM2ID
Definition: symbol.h:45
VALUE rb_sym2str(VALUE)
Definition: symbol.c:927
ID rb_intern(const char *)
Definition: symbol.c:785
#define CONST_ID
Definition: symbol.h:47
void rb_define_global_const(const char *, VALUE)
Definition: variable.c:3162
VALUE rb_iv_set(VALUE, const char *, VALUE)
Definition: variable.c:3580
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:3150
@ LENGTH
Definition: inflate.h:48
#define CALL(n)
Definition: inits.c:18
Internal header aggregating init functions.
void Init_vm_backtrace(void)
#define FIX2INT
Definition: int.h:41
#define RCLASS_ORIGIN(c)
Definition: class.h:87
#define RCLASS_M_TBL(c)
Definition: class.h:80
#define RICLASS_IS_ORIGIN
Definition: class.h:96
Internal header for Fiber.
Internal header for GC.
#define UNALIGNED_MEMBER_PTR(ptr, mem)
Definition: gc.h:59
Internal header for Object.
Internal header for the parser.
Internal header for Proc.
Internal header for Regexp.
#define rb_fstring_lit(str)
Definition: string.h:78
Internal header for RubyVM.
#define PRIuSIZE
Definition: inttypes.h:127
voidpf void uLong size
Definition: ioapi.h:138
const char * filename
Definition: ioapi.h:137
typedef long(ZCALLBACK *tell_file_func) OF((voidpf opaque
typedef int(ZCALLBACK *close_file_func) OF((voidpf opaque
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1087
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath)
Definition: iseq.c:521
rb_iseq_t * rb_iseq_new(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type type)
Definition: iseq.c:809
rb_iseq_t * rb_iseq_new_top(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent)
Definition: iseq.c:817
VALUE rb_iseq_realpath(const rb_iseq_t *iseq)
Definition: iseq.c:1093
@ DEFINED_EXPR
Definition: iseq.h:291
#define INT2FIX
Definition: long.h:48
#define MEMCPY(p1, p2, type, n)
Definition: memory.h:129
#define MEMZERO(p, type, n)
Definition: memory.h:128
#define ZALLOC_N
Definition: memory.h:135
#define ALLOC_N
Definition: memory.h:133
#define RB_GC_GUARD(v)
Definition: memory.h:91
#define ALLOCV_N
Definition: memory.h:139
#define ALLOCV_END
Definition: memory.h:140
#define MIN(a, b)
Definition: ffi.c:30
const rb_method_entry_t * rb_method_entry(VALUE klass, ID id)
Definition: vm_method.c:1023
void rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *option, rb_method_visibility_t visi)
Definition: vm_method.c:900
rb_method_visibility_t
Definition: method.h:29
@ METHOD_VISI_PRIVATE
Definition: method.h:32
@ METHOD_VISI_PUBLIC
Definition: method.h:31
@ VM_METHOD_TYPE_CFUNC
C method.
Definition: method.h:111
@ VM_METHOD_TYPE_OPTIMIZED
Kernel::send, Proc::call, etc.
Definition: method.h:119
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:114
@ OPTIMIZED_METHOD_TYPE_BLOCK_CALL
Definition: method.h:168
void rb_clear_method_cache(VALUE klass_or_module, ID mid)
Definition: vm_method.c:236
const rb_method_entry_t * rb_method_entry_at(VALUE obj, ID id)
Definition: vm_method.c:973
#define mjit_enabled
Definition: mjit.h:210
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:56
const int id
Definition: nkf.c:209
const char * name
Definition: nkf.c:208
#define TRUE
Definition: nkf.h:175
#define FALSE
Definition: nkf.h:174
void rb_node_init(NODE *n, enum node_type type, VALUE a0, VALUE a1, VALUE a2)
Definition: node.c:1139
@ NODE_SCOPE
Definition: node.h:22
#define RUBY_DTRACE_METHOD_RETURN_HOOK(ec, klass, id)
Definition: probes_helper.h:35
#define RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, klass, id)
Definition: probes_helper.h:32
#define RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, klass, id)
Definition: probes_helper.h:41
VALUE rb_binding_alloc(VALUE klass)
Definition: proc.c:331
VALUE rb_proc_alloc(VALUE klass)
Definition: proc.c:145
#define RARRAY_CONST_PTR(s)
Definition: psych_emitter.c:4
#define RARRAY_AREF(a, i)
Definition: psych_emitter.c:7
rb_ractor_t * rb_ractor_main_alloc(void)
Definition: ractor.c:1508
VALUE rb_ractor_ensure_shareable(VALUE obj, VALUE name)
Definition: ractor.c:2518
VALUE rb_eRactorIsolationError
Definition: ractor.c:23
void rb_ractor_main_setup(rb_vm_t *vm, rb_ractor_t *r, rb_thread_t *th)
Definition: ractor.c:1581
VALUE rb_ractor_make_shareable_copy(VALUE obj)
Definition: ractor.c:2508
#define RB_OBJ_SHAREABLE_P(obj)
Definition: ractor.h:50
VALUE rb_ractor_make_shareable(VALUE obj)
Definition: ractor.c:2499
#define RARRAY_LEN
Definition: rarray.h:52
#define RBASIC(obj)
Definition: rbasic.h:34
#define RBASIC_CLASS
Definition: rbasic.h:35
#define NULL
Definition: regenc.h:69
#define MAX(a, b)
Definition: regint.h:296
#define RB_OBJ_WRITE(a, slot, b)
WB for new reference from ‘a’ to ‘b’.
Definition: rgengc.h:107
#define RB_OBJ_WRITTEN(a, oldv, b)
WB for new reference from ‘a’ to ‘b’.
Definition: rgengc.h:114
#define RHASH_EMPTY_P(h)
Definition: rhash.h:51
#define StringValuePtr(v)
Definition: rstring.h:51
#define StringValueCStr(v)
Definition: rstring.h:52
#define RTYPEDDATA_DATA(v)
Definition: rtypeddata.h:47
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: rtypeddata.h:101
@ RUBY_TYPED_FREE_IMMEDIATELY
Definition: rtypeddata.h:62
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: rtypeddata.h:122
int argc
Definition: ruby.c:240
char ** argv
Definition: ruby.c:241
Internal header for ASAN / MSAN / etc.
#define RB_NO_KEYWORDS
Definition: scan_args.h:46
unsigned LONG_LONG rb_serial_t
Definition: serial.h:19
#define Qundef
#define Qtrue
#define RTEST
#define Qnil
#define Qfalse
#define NIL_P
@ ST_DELETE
Definition: st.h:99
@ ST_CONTINUE
Definition: st.h:99
unsigned long st_data_t
Definition: st.h:22
#define st_foreach
Definition: st.h:142
#define st_init_numtable
Definition: st.h:106
#define st_lookup
Definition: st.h:128
#define st_init_table_with_size
Definition: st.h:104
#define st_insert
Definition: st.h:124
#define st_free_table
Definition: st.h:156
#define st_init_strtable
Definition: st.h:110
#define _(args)
Definition: stdarg.h:31
Definition: proc.c:35
Definition: node.h:149
Definition: pyobjc-tc.c:15
Definition: iseq.h:218
rb_iseq_t * iseq
Definition: iseq.h:240
unsigned int cont
Definition: iseq.h:244
enum iseq_catch_table_entry::catch_type type
unsigned int start
Definition: iseq.h:242
unsigned int end
Definition: iseq.h:243
unsigned int sp
Definition: iseq.h:245
const NODE * root
Definition: node.h:399
VALUE compile_option
Definition: node.h:400
struct rb_at_exit_list * next
Definition: vm_core.h:545
rb_vm_at_exit_func * func
Definition: vm_core.h:544
const VALUE pathobj
Definition: vm_core.h:1114
unsigned short first_lineno
Definition: vm_core.h:1115
const struct rb_block block
Definition: vm_core.h:1113
union rb_block::@199 as
struct rb_captured_block captured
Definition: vm_core.h:762
VALUE symbol
Definition: vm_core.h:763
enum rb_block_type type
Definition: vm_core.h:766
VALUE proc
Definition: vm_core.h:764
Definition: method.h:62
ID called_id
Definition: method.h:66
const VALUE owner
Definition: method.h:67
struct rb_method_definition_struct *const def
Definition: method.h:65
const VALUE klass
Definition: vm_callinfo.h:278
const VALUE flags
Definition: vm_callinfo.h:275
const vm_call_handler call_
Definition: vm_callinfo.h:284
const struct rb_callable_method_entry_struct *const cme_
Definition: vm_callinfo.h:283
union rb_callcache::@184 aux_
const VALUE * ep
Definition: vm_core.h:738
const rb_iseq_t * iseq
Definition: vm_core.h:740
union rb_captured_block::@198 code
const VALUE * ep
Definition: vm_core.h:774
const void * block_code
Definition: vm_core.h:775
const rb_iseq_t * iseq
Definition: vm_core.h:772
const VALUE * pc
Definition: vm_core.h:770
CREF (Class REFerence)
Definition: method.h:44
const VALUE * env
Definition: vm_core.h:1103
rb_iseq_t * iseq
Definition: vm_core.h:1101
unsigned int env_size
Definition: vm_core.h:1104
const VALUE * ep
Definition: vm_core.h:1102
VALUE local_storage_recursive_hash_for_trace
Definition: vm_core.h:876
struct rb_execution_context_struct::@200 machine
struct rb_id_table * local_storage
Definition: vm_core.h:874
rb_control_frame_t * cfp
Definition: vm_core.h:858
rb_fiber_t * fiber_ptr
Definition: vm_core.h:870
struct rb_vm_tag * tag
Definition: vm_core.h:860
bool ractor_safe
Definition: vm_core.h:930
rb_event_flag_t events
Definition: vm_core.h:555
struct iseq_catch_table * catch_table
Definition: vm_core.h:408
enum rb_iseq_constant_body::iseq_type type
unsigned int size
Definition: vm_core.h:359
struct rb_id_table * outer_variables
Definition: vm_core.h:431
unsigned int local_table_size
Definition: vm_core.h:424
unsigned int stack_max
Definition: vm_core.h:427
VALUE * iseq_encoded
Definition: vm_core.h:319
rb_iseq_location_t location
Definition: vm_core.h:393
struct rb_iseq_constant_body::@188 param
parameter information
const struct rb_iseq_struct * parent_iseq
Definition: vm_core.h:411
const ID * local_table
Definition: vm_core.h:405
struct rb_iseq_constant_body * body
Definition: vm_core.h:448
struct rb_hook_list_struct * local_hooks
Definition: vm_core.h:459
struct rb_iseq_struct::@191::@193 exec
union rb_iseq_struct::@191 aux
struct rb_hook_list_struct * hooks
Definition: method.h:161
rb_method_bmethod_t bmethod
Definition: method.h:184
union rb_method_definition_struct::@123 body
Definition: method.h:54
struct rb_method_definition_struct *const def
Definition: method.h:57
VALUE owner
Definition: method.h:59
unsigned int is_isolated
Definition: vm_core.h:1090
const struct rb_block block
Definition: vm_core.h:1087
unsigned int is_from_method
Definition: vm_core.h:1088
unsigned int is_lambda
Definition: vm_core.h:1089
unsigned int cnt
Definition: ractor_core.h:95
struct list_head set
Definition: ractor_core.h:94
struct rb_ractor_struct::@141 threads
unsigned int module_func
Definition: method.h:40
VALUE last_status
Definition: vm_core.h:943
rb_execution_context_t * ec
Definition: vm_core.h:941
VALUE(* func)(void *)
Definition: vm_core.h:996
VALUE stat_insn_usage
Definition: vm_core.h:1009
rb_vm_t * vm
Definition: vm_core.h:939
rb_ractor_t * ractor
Definition: vm_core.h:938
VALUE top_wrapper
Definition: vm_core.h:950
enum rb_thread_struct::thread_invoke_type invoke_type
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:979
unsigned int report_on_exception
Definition: vm_core.h:961
VALUE pending_interrupt_queue
Definition: vm_core.h:978
VALUE locking_mutex
Definition: vm_core.h:984
VALUE top_self
Definition: vm_core.h:949
union rb_thread_struct::@201 invoke_arg
struct rb_ext_config ext_config
Definition: vm_core.h:1021
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:985
unsigned blocking
Definition: vm_core.h:1016
rb_fiber_t * root_fiber
Definition: vm_core.h:1012
VALUE load_path_check_cache
Definition: vm_core.h:618
struct list_head set
Definition: vm_core.h:568
struct rb_vm_struct::@194::@197 sync
rb_nativethread_cond_t barrier_cond
Definition: vm_core.h:584
struct rb_vm_struct::@195 trap_list
st_table * defined_module_hash
Definition: vm_core.h:647
VALUE coverages
Definition: vm_core.h:644
rb_nativethread_lock_t waitpid_lock
Definition: vm_core.h:597
int src_encoding_index
Definition: vm_core.h:637
VALUE load_path
Definition: vm_core.h:616
struct rb_vm_struct::@194 ractor
struct rb_id_table * negative_cme_table
Definition: vm_core.h:659
VALUE load_path_snapshot
Definition: vm_core.h:617
struct st_table * loading_table
Definition: vm_core.h:623
VALUE * defined_strings
Definition: vm_core.h:653
struct rb_thread_struct * main_thread
Definition: vm_core.h:573
const struct rb_callcache * global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]
Definition: vm_core.h:664
VALUE self
Definition: vm_core.h:565
size_t fiber_vm_stack_size
Definition: vm_core.h:674
VALUE expanded_load_path
Definition: vm_core.h:619
VALUE top_self
Definition: vm_core.h:615
size_t fiber_machine_stack_size
Definition: vm_core.h:675
struct rb_vm_struct::@196 default_params
struct rb_ractor_struct * main_ractor
Definition: vm_core.h:572
size_t thread_vm_stack_size
Definition: vm_core.h:672
VALUE orig_progname
Definition: vm_core.h:643
struct rb_objspace * objspace
Definition: vm_core.h:649
rb_nativethread_lock_t lock
Definition: vm_core.h:577
const VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:612
VALUE loaded_features_snapshot
Definition: vm_core.h:621
VALUE cmd[RUBY_NSIG]
Definition: vm_core.h:627
VALUE loaded_features
Definition: vm_core.h:620
unsigned int thread_report_on_exception
Definition: vm_core.h:607
rb_nativethread_cond_t terminate_cond
Definition: vm_core.h:587
rb_nativethread_lock_t workqueue_lock
Definition: vm_core.h:641
st_table * frozen_strings
Definition: vm_core.h:654
VALUE mark_object_ary
Definition: vm_core.h:611
size_t thread_machine_stack_size
Definition: vm_core.h:673
rb_at_exit_list * at_exit
Definition: vm_core.h:651
struct rb_vm_tag * prev
Definition: vm_core.h:812
enum ruby_tag_type state
Definition: vm_core.h:813
Definition: st.h:79
Definition: blast.c:41
THROW_DATA.
Definition: imemo.h:63
#define snprintf
Definition: subst.h:14
#define t
Definition: symbol.c:253
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:669
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
RB_THREAD_LOCAL_SPECIFIER struct rb_execution_context_struct * ruby_current_ec
#define RB_THREAD_LOCAL_SPECIFIER
DWORD native_tls_key_t
Definition: thread_win32.h:37
#define ALLOC(size)
Definition: unzip.c:112
unsigned long VALUE
Definition: value.h:38
unsigned long ID
Definition: value.h:39
#define T_IMEMO
Definition: value_type.h:66
#define T_MODULE
Definition: value_type.h:69
#define T_ICLASS
Definition: value_type.h:65
#define T_HASH
Definition: value_type.h:64
#define T_CLASS
Definition: value_type.h:57
#define BUILTIN_TYPE
Definition: value_type.h:84
ruby_value_type
C-level type of an object.
Definition: value_type.h:110
#define SYMBOL_P
Definition: value_type.h:87
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
Definition: vm.c:921
const rb_data_type_t ruby_threadptr_data_type
Definition: vm.c:2991
void rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
Definition: vm.c:1711
void rb_ec_clear_vm_stack(rb_execution_context_t *ec)
Definition: vm.c:3047
rb_ractor_t * ruby_single_main_ractor
Definition: vm.c:381
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3031
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:93
rb_serial_t ruby_vm_class_serial
Definition: vm.c:408
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq)
Definition: vm.c:2413
VALUE rb_mRubyVMFrozenCore
Definition: vm.c:375
rb_cref_t * rb_vm_cref_new_toplevel(void)
Definition: vm.c:309
void rb_threadptr_root_fiber_setup(rb_thread_t *th)
Definition: cont.c:2091
rb_vm_t * ruby_current_vm_ptr
Definition: vm.c:380
void rb_lastline_set(VALUE val)
Definition: vm.c:1562
VALUE rb_iseq_eval(const rb_iseq_t *iseq)
Definition: vm.c:2403
int rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, struct ruby_dtrace_method_hook_args *args)
Definition: vm.c:431
VALUE rb_vm_call_cfunc(VALUE recv, VALUE(*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename)
Definition: vm.c:2452
VALUE rb_backref_get(void)
Definition: vm.c:1544
#define thread_data_type
Definition: vm.c:2990
VALUE ruby_vm_const_missing_count
Definition: vm.c:379
VALUE rb_proc_ractor_make_shareable(VALUE self)
Definition: vm.c:1105
void rb_iter_break(void)
Definition: vm.c:1786
const char * rb_sourcefile(void)
Definition: vm.c:1571
void rb_vm_stack_to_heap(rb_execution_context_t *ec)
Definition: vm.c:820
int rb_vm_add_root_module(VALUE module)
Definition: vm.c:2622
int ruby_vm_destruct(rb_vm_t *vm)
Definition: vm.c:2639
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
Definition: vm.c:126
VALUE rb_proc_isolate_bang(VALUE self)
Definition: vm.c:1055
#define vm_exec
Definition: vm.c:11
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1490
void rb_fiber_mark_self(rb_fiber_t *fib)
native_tls_key_t ruby_current_ec_key
Definition: vm.c:400
void rb_vm_jump_tag_but_local_jump(int state)
Definition: vm.c:1750
const rb_cref_t * rb_vm_cref_in_context(VALUE self, VALUE cbase)
Definition: vm.c:1640
void Init_top_self(void)
Definition: vm.c:3758
VALUE rb_proc_dup(VALUE self)
Definition: vm.c:956
VALUE rb_vm_top_self(void)
Definition: vm.c:3752
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
Definition: vm.c:1172
const struct st_hash_type rb_fstring_hash_type
Definition: string.c:287
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3024
VALUE rb_proc_isolate(VALUE self)
Definition: vm.c:1097
VALUE rb_cRubyVM
Definition: vm.c:373
int rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
Definition: vm.c:2446
int rb_vm_check_optimizable_mid(VALUE mid)
Definition: vm.c:1823
void rb_vm_each_stack_value(void *ptr, void(*cb)(VALUE, void *), void *ctx)
Definition: vm.c:2501
st_table * rb_vm_fstring_table(void)
Definition: vm.c:3787
#define OP(mid_, bop_)
#define PROCDEBUG
Definition: vm.c:363
void rb_iter_break_value(VALUE val)
Definition: vm.c:1792
#define SET(name, attr)
VALUE rb_obj_is_thread(VALUE obj)
Definition: vm.c:3003
void rb_vm_mark(void *ptr)
Definition: vm.c:2538
void rb_vm_set_progname(VALUE filename)
Definition: vm.c:3690
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
Definition: vm.c:1152
VALUE rb_vm_env_local_variables(const rb_env_t *env)
Definition: vm.c:876
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2424
void rb_vm_check_redefinition_by_prepend(VALUE klass)
Definition: vm.c:1873
rb_serial_t rb_next_class_serial(void)
Definition: vm.c:366
#define S(s)
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:3114
void rb_backref_set(VALUE val)
Definition: vm.c:1550
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:589
VALUE * rb_ruby_verbose_ptr(void)
Definition: vm.c:3768
VALUE rb_str_concat_literals(size_t, const VALUE *)
Definition: string.c:3127
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1475
rb_serial_t ruby_vm_global_constant_state
Definition: vm.c:407
const VALUE * rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
Definition: vm.c:1203
VALUE * rb_gc_stack_start
#define REWIND_CFP(expr)
Definition: vm.c:3121
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:577
VALUE rb_iseq_local_variables(const rb_iseq_t *iseq)
Definition: vm.c:885
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1718
VALUE rb_cThread
Definition: vm.c:374
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
Definition: vm.c:639
void rb_vm_update_references(void *ptr)
Definition: vm.c:2474
VALUE rb_block_param_proxy
Definition: vm.c:376
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:830
rb_event_flag_t ruby_vm_event_flags
Definition: vm.c:403
VALUE rb_vm_cbase(void)
Definition: vm.c:1665
#define C(k)
const char * rb_source_location_cstr(int *pline)
Definition: vm.c:1616
VALUE rb_lastline_get(void)
Definition: vm.c:1556
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
Definition: vm.c:326
unsigned int ruby_vm_event_local_num
Definition: vm.c:405
VALUE rb_source_location(int *pline)
Definition: vm.c:1600
void rb_vm_pop_cfunc_frame(void)
Definition: vm.c:625
void rb_threadptr_root_fiber_release(rb_thread_t *th)
Definition: cont.c:2110
VALUE * rb_ruby_debug_ptr(void)
Definition: vm.c:3775
const struct rb_callcache * rb_vm_empty_cc(void)
Definition: vm.c:4100
rb_cref_t * rb_vm_cref_replace_with_duplicated_cref(void)
Definition: vm.c:1631
void Init_vm_objects(void)
Definition: vm.c:3731
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
Definition: vm.c:2612
void rb_fiber_update_self(rb_fiber_t *fib)
Definition: cont.c:980
size_t rb_gc_stack_maxsize
rb_cref_t * rb_vm_cref(void)
Definition: vm.c:1624
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2440
void Init_VM(void)
Definition: vm.c:3342
rb_event_flag_t ruby_vm_event_enabled_global_flags
Definition: vm.c:404
void ruby_vm_at_exit(void(*func)(rb_vm_t *))
ruby_vm_at_exit registers a function func to be invoked when a VM passed away.
Definition: vm.c:658
VALUE rb_insn_operand_intern(const rb_iseq_t *iseq, VALUE insn, int op_no, VALUE op, int len, size_t pos, VALUE *pnop, VALUE child)
void Init_BareVM(void)
Definition: vm.c:3702
void rb_execution_context_update(const rb_execution_context_t *ec)
Definition: vm.c:2785
void rb_execution_context_mark(const rb_execution_context_t *ec)
Definition: vm.c:2835
VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
Definition: vm.c:1468
#define ruby_vm_redefined_flag
Definition: vm.c:378
int rb_sourceline(void)
Definition: vm.c:1586
int rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
Definition: vm.c:120
void rb_vm_inc_const_missing_count(void)
Definition: vm.c:425
@ arg_setup_block
Definition: vm_args.c:37
@ arg_setup_method
Definition: vm_args.c:36
int rb_vm_get_sourceline(const rb_control_frame_t *cfp)
Definition: vm_backtrace.c:71
#define rb_id2str(id)
Definition: vm_backtrace.c:30
#define VM_CALLCACHE_UNMARKABLE
Definition: vm_callinfo.h:293
@ THREAD_RUNNABLE
Definition: vm_core.h:792
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN
Definition: vm_core.h:686
#define TAG_RAISE
Definition: vm_core.h:204
const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
#define REGEXP_REDEFINED_OP_FLAG
Definition: vm_core.h:720
#define STRING_REDEFINED_OP_FLAG
Definition: vm_core.h:714
#define TAG_NONE
Definition: vm_core.h:198
#define FALSE_REDEFINED_OP_FLAG
Definition: vm_core.h:723
void rb_vm_pop_frame(rb_execution_context_t *ec)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
Definition: vm_core.h:708
void Init_native_thread(rb_thread_t *th)
#define TAG_RETRY
Definition: vm_core.h:202
#define VM_ENV_DATA_SIZE
Definition: vm_core.h:1206
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE
Definition: vm_core.h:702
struct rb_thread_struct rb_thread_t
ruby_tag_type
Definition: vm_core.h:185
#define RB_ALTSTACK_FREE(var)
Definition: vm_core.h:143
#define HASH_REDEFINED_OP_FLAG
Definition: vm_core.h:716
#define GetBindingPtr(obj, ptr)
Definition: vm_core.h:1109
#define VM_ENV_DATA_INDEX_FLAGS
Definition: vm_core.h:1210
rb_block_type
Definition: vm_core.h:753
@ block_type_symbol
Definition: vm_core.h:756
@ block_type_iseq
Definition: vm_core.h:754
@ block_type_ifunc
Definition: vm_core.h:755
@ block_type_proc
Definition: vm_core.h:757
void rb_vm_bugreport(const void *)
Definition: vm_dump.c:962
#define VM_ASSERT(expr)
Definition: vm_core.h:61
#define TRUE_REDEFINED_OP_FLAG
Definition: vm_core.h:722
@ block_handler_type_ifunc
Definition: vm_core.h:748
@ block_handler_type_proc
Definition: vm_core.h:750
@ block_handler_type_symbol
Definition: vm_core.h:749
@ block_handler_type_iseq
Definition: vm_core.h:747
#define TIME_REDEFINED_OP_FLAG
Definition: vm_core.h:719
void rb_vm_at_exit_func(struct rb_vm_struct *)
Definition: vm_core.h:541
#define VM_ENV_DATA_INDEX_ENV
Definition: vm_core.h:1211
ruby_special_exceptions
Definition: vm_core.h:494
#define TAG_MASK
Definition: vm_core.h:207
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
Definition: vm_core.h:704
#define ARRAY_REDEFINED_OP_FLAG
Definition: vm_core.h:715
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2001
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE
Definition: vm_core.h:662
struct rb_vm_struct rb_vm_t
#define PROC_REDEFINED_OP_FLAG
Definition: vm_core.h:724
#define RUBY_VM_SIZE_ALIGN
Definition: vm_core.h:683
#define RUBY_VM_FIBER_VM_STACK_SIZE
Definition: vm_core.h:690
#define VM_GUARDED_PREV_EP(ep)
Definition: vm_core.h:1298
#define TAG_REDO
Definition: vm_core.h:203
#define RUBY_VM_THREAD_VM_STACK_SIZE
Definition: vm_core.h:685
#define VM_DEBUG_BP_CHECK
Definition: vm_core.h:729
#define NIL_REDEFINED_OP_FLAG
Definition: vm_core.h:721
#define INTEGER_REDEFINED_OP_FLAG
Definition: vm_core.h:712
#define CHECK_VM_STACK_OVERFLOW(cfp, margin)
Definition: vm_core.h:1740
#define SYMBOL_REDEFINED_OP_FLAG
Definition: vm_core.h:718
@ VM_SVAR_BACKREF
Definition: vm_core.h:1137
@ VM_SVAR_LASTLINE
Definition: vm_core.h:1136
#define TAG_BREAK
Definition: vm_core.h:200
#define VM_TAGGED_PTR_REF(v, mask)
Definition: vm_core.h:1161
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE
Definition: vm_core.h:706
#define TAG_RETURN
Definition: vm_core.h:199
#define TAG_NEXT
Definition: vm_core.h:201
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1299
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2004
#define VM_ENV_DATA_INDEX_SPECVAL
Definition: vm_core.h:1209
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN
Definition: vm_core.h:691
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:1083
#define VM_ENV_DATA_INDEX_ME_CREF
Definition: vm_core.h:1208
#define RUBY_NSIG
Definition: vm_core.h:114
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)
Definition: vm_core.h:1392
#define VM_UNREACHABLE(func)
Definition: vm_core.h:62
#define FLOAT_REDEFINED_OP_FLAG
Definition: vm_core.h:713
@ VM_FRAME_FLAG_LAMBDA
Definition: vm_core.h:1194
@ VM_FRAME_MAGIC_IFUNC
Definition: vm_core.h:1183
@ VM_FRAME_MAGIC_METHOD
Definition: vm_core.h:1178
@ VM_FRAME_MAGIC_TOP
Definition: vm_core.h:1181
@ VM_FRAME_FLAG_CFRAME
Definition: vm_core.h:1193
@ VM_FRAME_MAGIC_DUMMY
Definition: vm_core.h:1186
@ VM_FRAME_FLAG_PASSED
Definition: vm_core.h:1197
@ VM_FRAME_FLAG_BMETHOD
Definition: vm_core.h:1192
@ VM_FRAME_MAGIC_BLOCK
Definition: vm_core.h:1179
@ VM_ENV_FLAG_LOCAL
Definition: vm_core.h:1200
@ VM_FRAME_MAGIC_CFUNC
Definition: vm_core.h:1182
@ VM_FRAME_MAGIC_EVAL
Definition: vm_core.h:1184
@ VM_FRAME_MAGIC_CLASS
Definition: vm_core.h:1180
@ VM_ENV_FLAG_ESCAPED
Definition: vm_core.h:1201
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1202
@ VM_ENV_FLAG_ISOLATED
Definition: vm_core.h:1203
@ VM_FRAME_FLAG_FINISH
Definition: vm_core.h:1191
@ VM_FRAME_MAGIC_RESCUE
Definition: vm_core.h:1185
#define dp(v)
Definition: vm_debug.h:20
#define vm_check_canary(ec, sp)
#define NEXT_CLASS_SERIAL()
#define getenv(name)
Definition: win32.c:80
int wait(int *status)
Definition: win32.c:5245
int err
Definition: win32.c:142
#define env
#define xfree
Definition: xmalloc.h:49
int def(FILE *source, FILE *dest, int level)
Definition: zpipe.c:36