Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
vm_insnhelper.c
1/**********************************************************************
2
3 vm_insnhelper.c - instruction helper functions.
4
5 $Author$
6
7 Copyright (C) 2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "ruby/internal/config.h"
12
13#include <math.h>
14
15#include "constant.h"
16#include "debug_counter.h"
17#include "internal.h"
18#include "internal/class.h"
19#include "internal/compar.h"
20#include "internal/hash.h"
21#include "internal/numeric.h"
22#include "internal/proc.h"
23#include "internal/random.h"
24#include "internal/variable.h"
25#include "variable.h"
26
27/* finish iseq array */
28#include "insns.inc"
29#ifndef MJIT_HEADER
30#include "insns_info.inc"
31#endif
32
33extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
34extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
35extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
36extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
37 int argc, const VALUE *argv, int priv);
38
39#ifndef MJIT_HEADER
40static const struct rb_callcache vm_empty_cc;
41#endif
42
43/* control stack frame */
44
45static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
46
47static VALUE
48ruby_vm_special_exception_copy(VALUE exc)
49{
50 VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
51 rb_obj_copy_ivar(e, exc);
52 return e;
53}
54
55NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
56static void
57ec_stack_overflow(rb_execution_context_t *ec, int setup)
58{
59 VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
60 ec->raised_flag = RAISED_STACKOVERFLOW;
61 if (setup) {
62 VALUE at = rb_ec_backtrace_object(ec);
63 mesg = ruby_vm_special_exception_copy(mesg);
64 rb_ivar_set(mesg, idBt, at);
65 rb_ivar_set(mesg, idBt_locations, at);
66 }
67 ec->errinfo = mesg;
68 EC_JUMP_TAG(ec, TAG_RAISE);
69}
70
71NORETURN(static void vm_stackoverflow(void));
72#ifdef MJIT_HEADER
73NOINLINE(static COLDFUNC void vm_stackoverflow(void));
74#endif
75
76static void
77vm_stackoverflow(void)
78{
79 ec_stack_overflow(GET_EC(), TRUE);
80}
81
83MJIT_STATIC void
84rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
85{
86 if (rb_during_gc()) {
87 rb_bug("system stack overflow during GC. Faulty native extension?");
88 }
89 if (crit) {
90 ec->raised_flag = RAISED_STACKOVERFLOW;
91 ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
92 EC_JUMP_TAG(ec, TAG_RAISE);
93 }
94#ifdef USE_SIGALTSTACK
95 ec_stack_overflow(ec, TRUE);
96#else
97 ec_stack_overflow(ec, FALSE);
98#endif
99}
100
101
102#if VM_CHECK_MODE > 0
103static int
104callable_class_p(VALUE klass)
105{
106#if VM_CHECK_MODE >= 2
107 if (!klass) return FALSE;
108 switch (RB_BUILTIN_TYPE(klass)) {
109 default:
110 break;
111 case T_ICLASS:
112 if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
113 case T_MODULE:
114 return TRUE;
115 }
116 while (klass) {
117 if (klass == rb_cBasicObject) {
118 return TRUE;
119 }
120 klass = RCLASS_SUPER(klass);
121 }
122 return FALSE;
123#else
124 return klass != 0;
125#endif
126}
127
128static int
129callable_method_entry_p(const rb_callable_method_entry_t *cme)
130{
131 if (cme == NULL) {
132 return TRUE;
133 }
134 else {
136
137 if (callable_class_p(cme->defined_class)) {
138 return TRUE;
139 }
140 else {
141 return FALSE;
142 }
143 }
144}
145
146static void
147vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
148{
149 unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
150 enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
151
152 if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
153 cref_or_me_type = imemo_type(cref_or_me);
154 }
155 if (type & VM_FRAME_FLAG_BMETHOD) {
156 req_me = TRUE;
157 }
158
159 if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
160 rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
161 }
162 if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
163 rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
164 }
165
166 if (req_me) {
167 if (cref_or_me_type != imemo_ment) {
168 rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
169 }
170 }
171 else {
172 if (req_cref && cref_or_me_type != imemo_cref) {
173 rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
174 }
175 else { /* cref or Qfalse */
176 if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
177 if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
178 /* ignore */
179 }
180 else {
181 rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
182 }
183 }
184 }
185 }
186
187 if (cref_or_me_type == imemo_ment) {
188 const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
189
190 if (!callable_method_entry_p(me)) {
191 rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
192 }
193 }
194
195 if (magic == VM_FRAME_MAGIC_DUMMY) {
196 VM_ASSERT(iseq == NULL ||
197 RUBY_VM_NORMAL_ISEQ_P(iseq) /* argument error. it should be fixed */);
198 }
199 else {
200 VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
201 }
202}
203
204static void
205vm_check_frame(VALUE type,
206 VALUE specval,
207 VALUE cref_or_me,
208 const rb_iseq_t *iseq)
209{
210 VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
212
213#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
214 case magic: \
215 vm_check_frame_detail(type, req_block, req_me, req_cref, \
216 specval, cref_or_me, is_cframe, iseq); \
217 break
218 switch (given_magic) {
219 /* BLK ME CREF CFRAME */
229 default:
230 rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
231 }
232#undef CHECK
233}
234
235static VALUE vm_stack_canary; /* Initialized later */
236static bool vm_stack_canary_was_born = false;
237
238#ifndef MJIT_HEADER
239MJIT_FUNC_EXPORTED void
240rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
241{
242 const struct rb_control_frame_struct *reg_cfp = ec->cfp;
243 const struct rb_iseq_struct *iseq;
244
245 if (! LIKELY(vm_stack_canary_was_born)) {
246 return; /* :FIXME: isn't it rather fatal to enter this branch? */
247 }
248 else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
249 /* This is at the very beginning of a thread. cfp does not exist. */
250 return;
251 }
252 else if (! (iseq = GET_ISEQ())) {
253 return;
254 }
255 else if (LIKELY(sp[0] != vm_stack_canary)) {
256 return;
257 }
258 else {
259 /* we are going to call methods below; squash the canary to
260 * prevent infinite loop. */
261 sp[0] = Qundef;
262 }
263
264 const VALUE *orig = rb_iseq_original_iseq(iseq);
265 const VALUE *encoded = iseq->body->iseq_encoded;
266 const ptrdiff_t pos = GET_PC() - encoded;
267 const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
268 const char *name = insn_name(insn);
269 const VALUE iseqw = rb_iseqw_new(iseq);
270 const VALUE inspection = rb_inspect(iseqw);
271 const char *stri = rb_str_to_cstr(inspection);
272 const VALUE disasm = rb_iseq_disasm(iseq);
273 const char *strd = rb_str_to_cstr(disasm);
274
275 /* rb_bug() is not capable of outputting such large contents. It
276 is designed to run from a SIGSEGV handler, which tends to be
277 very restricted. */
278 fprintf(stderr,
279 "We are killing the stack canary set by %s, "
280 "at %s@pc=%"PRIdPTR"\n"
281 "watch out the C stack trace.\n"
282 "%s",
283 name, stri, pos, strd);
284 rb_bug("see above.");
285}
286#endif
287#define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
288
289#else
290#define vm_check_canary(ec, sp)
291#define vm_check_frame(a, b, c, d)
292#endif /* VM_CHECK_MODE > 0 */
293
294#if USE_DEBUG_COUNTER
295static void
296vm_push_frame_debug_counter_inc(
297 const struct rb_execution_context_struct *ec,
298 const struct rb_control_frame_struct *reg_cfp,
299 VALUE type)
300{
301 const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
302
303 RB_DEBUG_COUNTER_INC(frame_push);
304
305 if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
306 const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
307 const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
308 if (prev) {
309 if (curr) {
310 RB_DEBUG_COUNTER_INC(frame_R2R);
311 }
312 else {
313 RB_DEBUG_COUNTER_INC(frame_R2C);
314 }
315 }
316 else {
317 if (curr) {
318 RB_DEBUG_COUNTER_INC(frame_C2R);
319 }
320 else {
321 RB_DEBUG_COUNTER_INC(frame_C2C);
322 }
323 }
324 }
325
326 switch (type & VM_FRAME_MAGIC_MASK) {
327 case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
328 case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
329 case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
330 case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
331 case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
332 case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
333 case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
334 case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
335 case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
336 }
337
338 rb_bug("unreachable");
339}
340#else
341#define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
342#endif
343
344STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
345STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
346STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
347
348static void
349vm_push_frame(rb_execution_context_t *ec,
350 const rb_iseq_t *iseq,
351 VALUE type,
352 VALUE self,
353 VALUE specval,
354 VALUE cref_or_me,
355 const VALUE *pc,
356 VALUE *sp,
357 int local_size,
358 int stack_max)
359{
360 rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
361
362 vm_check_frame(type, specval, cref_or_me, iseq);
363 VM_ASSERT(local_size >= 0);
364
365 /* check stack overflow */
366 CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
367 vm_check_canary(ec, sp);
368
369 /* setup vm value stack */
370
371 /* initialize local variables */
372 for (int i=0; i < local_size; i++) {
373 *sp++ = Qnil;
374 }
375
376 /* setup ep with managing data */
377 *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
378 *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
379 *sp++ = type; /* ep[-0] / ENV_FLAGS */
380
381 /* setup new frame */
382 *cfp = (const struct rb_control_frame_struct) {
383 .pc = pc,
384 .sp = sp,
385 .iseq = iseq,
386 .self = self,
387 .ep = sp - 1,
388 .block_code = NULL,
389 .__bp__ = sp, /* Store initial value of ep as bp to skip calculation cost of bp on JIT cancellation. */
390#if VM_DEBUG_BP_CHECK
391 .bp_check = sp,
392#endif
393 };
394
395 ec->cfp = cfp;
396
397 if (VMDEBUG == 2) {
398 SDR();
399 }
400 vm_push_frame_debug_counter_inc(ec, cfp, type);
401}
402
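/* Illustrative layout (a sketch inferred from the pushes above, not verbatim
 * upstream documentation): with local_size == 2, vm_push_frame() leaves the
 * value stack of the new frame as
 *
 *   [ Qnil ][ Qnil ][ cref_or_me ][ specval ][ type ]
 *    local0  local1     ep[-2]      ep[-1]    ep[-0] <- cfp->ep
 *                                              cfp->sp and cfp->__bp__ point one slot above
 *
 * so cfp->ep always points at the ENV_FLAGS slot of the frame it belongs to. */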
403/* return TRUE if the frame is finished */
404static inline int
405vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
406{
407 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
408
409 if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
410 if (VMDEBUG == 2) SDR();
411
412 RUBY_VM_CHECK_INTS(ec);
413 ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
414
415 return flags & VM_FRAME_FLAG_FINISH;
416}
417
418MJIT_STATIC void
419rb_vm_pop_frame(rb_execution_context_t *ec)
420{
421 vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
422}
423
424/* method dispatch */
425static inline VALUE
426rb_arity_error_new(int argc, int min, int max)
427{
428 VALUE err_mess = 0;
429 if (min == max) {
430 err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d)", argc, min);
431 }
432 else if (max == UNLIMITED_ARGUMENTS) {
433 err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d+)", argc, min);
434 }
435 else {
436 err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d..%d)", argc, min, max);
437 }
438 return rb_exc_new3(rb_eArgError, err_mess);
439}
440
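/* For illustration, the three branches above yield messages such as:
 *   rb_arity_error_new(3, 2, 2)                   -> "wrong number of arguments (given 3, expected 2)"
 *   rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS) -> "wrong number of arguments (given 0, expected 1+)"
 *   rb_arity_error_new(5, 1, 3)                   -> "wrong number of arguments (given 5, expected 1..3)"
 */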
441MJIT_STATIC void
442rb_error_arity(int argc, int min, int max)
443{
444 rb_exc_raise(rb_arity_error_new(argc, min, max));
445}
446
447/* lvar */
448
449NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
450
451static void
452vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
453{
454 /* remember env value forcibly */
455 rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
456 VM_FORCE_WRITE(&ep[index], v);
457 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
458 RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
459}
460
461static inline void
462vm_env_write(const VALUE *ep, int index, VALUE v)
463{
464 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
465 if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
466 VM_STACK_ENV_WRITE(ep, index, v);
467 }
468 else {
469 vm_env_write_slowpath(ep, index, v);
470 }
471}
472
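/* Note on the fast/slow split above: VM_ENV_FLAG_WB_REQUIRED marks an env whose
 * backing storage is GC-managed (it has escaped to the heap), so writes must go
 * through the write barrier. vm_env_write_slowpath() re-registers the env object
 * via rb_gc_writebarrier_remember() before the forced write, while the common
 * on-stack case stays a plain VM_STACK_ENV_WRITE(). */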
473MJIT_STATIC VALUE
474rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
475{
476 if (block_handler == VM_BLOCK_HANDLER_NONE) {
477 return Qnil;
478 }
479 else {
480 switch (vm_block_handler_type(block_handler)) {
481 case block_handler_type_iseq:
482 case block_handler_type_ifunc:
483 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
484 case block_handler_type_symbol:
485 return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
486 case block_handler_type_proc:
487 return VM_BH_TO_PROC(block_handler);
488 default:
489 VM_UNREACHABLE(rb_vm_bh_to_procval);
490 }
491 }
492}
493
494/* svar */
495
496#if VM_CHECK_MODE > 0
497static int
498vm_svar_valid_p(VALUE svar)
499{
500 if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
501 switch (imemo_type(svar)) {
502 case imemo_svar:
503 case imemo_cref:
504 case imemo_ment:
505 return TRUE;
506 default:
507 break;
508 }
509 }
510 rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
511 return FALSE;
512}
513#endif
514
515static inline struct vm_svar *
516lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
517{
518 VALUE svar;
519
520 if (lep && (ec == NULL || ec->root_lep != lep)) {
521 svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
522 }
523 else {
524 svar = ec->root_svar;
525 }
526
527 VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
528
529 return (struct vm_svar *)svar;
530}
531
532static inline void
533lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
534{
535 VM_ASSERT(vm_svar_valid_p((VALUE)svar));
536
537 if (lep && (ec == NULL || ec->root_lep != lep)) {
538 vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
539 }
540 else {
541 RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
542 }
543}
544
545static VALUE
546lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
547{
548 const struct vm_svar *svar = lep_svar(ec, lep);
549
550 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
551
552 switch (key) {
553 case VM_SVAR_LASTLINE:
554 return svar->lastline;
555 case VM_SVAR_BACKREF:
556 return svar->backref;
557 default: {
558 const VALUE ary = svar->others;
559
560 if (NIL_P(ary)) {
561 return Qnil;
562 }
563 else {
564 return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
565 }
566 }
567 }
568}
569
570static struct vm_svar *
571svar_new(VALUE obj)
572{
573 return (struct vm_svar *)rb_imemo_new(imemo_svar, Qnil, Qnil, Qnil, obj);
574}
575
576static void
577lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
578{
579 struct vm_svar *svar = lep_svar(ec, lep);
580
581 if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
582 lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
583 }
584
585 switch (key) {
586 case VM_SVAR_LASTLINE:
587 RB_OBJ_WRITE(svar, &svar->lastline, val);
588 return;
589 case VM_SVAR_BACKREF:
590 RB_OBJ_WRITE(svar, &svar->backref, val);
591 return;
592 default: {
593 VALUE ary = svar->others;
594
595 if (NIL_P(ary)) {
596 RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
597 }
598 rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
599 }
600 }
601}
602
603static inline VALUE
604vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
605{
606 VALUE val;
607
608 if (type == 0) {
609 val = lep_svar_get(ec, lep, key);
610 }
611 else {
612 VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
613
614 if (type & 0x01) {
615 switch (type >> 1) {
616 case '&':
617 val = rb_reg_last_match(backref);
618 break;
619 case '`':
620 val = rb_reg_match_pre(backref);
621 break;
622 case '\'':
623 val = rb_reg_match_post(backref);
624 break;
625 case '+':
626 val = rb_reg_match_last(backref);
627 break;
628 default:
629 rb_bug("unexpected back-ref");
630 }
631 }
632 else {
633 val = rb_reg_nth_match((int)(type >> 1), backref);
634 }
635 }
636 return val;
637}
638
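/* Rough mapping implemented by vm_getspecial() (illustrative summary of the
 * switch above):
 *   type == 0           -> per-frame svar slots (e.g. $_, $~)
 *   (type >> 1) == '&'  -> $&  (rb_reg_last_match)
 *   (type >> 1) == '`'  -> $`  (rb_reg_match_pre)
 *   (type >> 1) == '\'' -> $'  (rb_reg_match_post)
 *   (type >> 1) == '+'  -> $+  (rb_reg_match_last)
 *   type even, != 0     -> $1, $2, ... via rb_reg_nth_match(type >> 1, backref)
 */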
639PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
640static rb_callable_method_entry_t *
641check_method_entry(VALUE obj, int can_be_svar)
642{
643 if (obj == Qfalse) return NULL;
644
645#if VM_CHECK_MODE > 0
646 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
647#endif
648
649 switch (imemo_type(obj)) {
650 case imemo_ment:
651 return (rb_callable_method_entry_t *)obj;
652 case imemo_cref:
653 return NULL;
654 case imemo_svar:
655 if (can_be_svar) {
656 return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
657 }
658 default:
659#if VM_CHECK_MODE > 0
660 rb_bug("check_method_entry: svar should not be there:");
661#endif
662 return NULL;
663 }
664}
665
666MJIT_STATIC const rb_callable_method_entry_t *
667rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
668{
669 const VALUE *ep = cfp->ep;
670 rb_callable_method_entry_t *me;
671
672 while (!VM_ENV_LOCAL_P(ep)) {
673 if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
674 ep = VM_ENV_PREV_EP(ep);
675 }
676
677 return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
678}
679
680static rb_iseq_t *
681method_entry_iseqptr(const rb_callable_method_entry_t *me)
682{
683 switch (me->def->type) {
684 case VM_METHOD_TYPE_ISEQ:
685 return me->def->body.iseq.iseqptr;
686 default:
687 return NULL;
688 }
689}
690
691static rb_cref_t *
692method_entry_cref(const rb_callable_method_entry_t *me)
693{
694 switch (me->def->type) {
695 case VM_METHOD_TYPE_ISEQ:
696 return me->def->body.iseq.cref;
697 default:
698 return NULL;
699 }
700}
701
702#if VM_CHECK_MODE == 0
703PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
704#endif
705static rb_cref_t *
706check_cref(VALUE obj, int can_be_svar)
707{
708 if (obj == Qfalse) return NULL;
709
710#if VM_CHECK_MODE > 0
711 if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
712#endif
713
714 switch (imemo_type(obj)) {
715 case imemo_ment:
716 return method_entry_cref((rb_callable_method_entry_t *)obj);
717 case imemo_cref:
718 return (rb_cref_t *)obj;
719 case imemo_svar:
720 if (can_be_svar) {
721 return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
722 }
723 default:
724#if VM_CHECK_MODE > 0
725 rb_bug("check_method_entry: svar should not be there:");
726#endif
727 return NULL;
728 }
729}
730
731static inline rb_cref_t *
732vm_env_cref(const VALUE *ep)
733{
734 rb_cref_t *cref;
735
736 while (!VM_ENV_LOCAL_P(ep)) {
737 if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
738 ep = VM_ENV_PREV_EP(ep);
739 }
740
741 return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
742}
743
744static int
745is_cref(const VALUE v, int can_be_svar)
746{
747 if (RB_TYPE_P(v, T_IMEMO)) {
748 switch (imemo_type(v)) {
749 case imemo_cref:
750 return TRUE;
751 case imemo_svar:
752 if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
753 default:
754 break;
755 }
756 }
757 return FALSE;
758}
759
760static int
761vm_env_cref_by_cref(const VALUE *ep)
762{
763 while (!VM_ENV_LOCAL_P(ep)) {
764 if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
765 ep = VM_ENV_PREV_EP(ep);
766 }
767 return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
768}
769
770static rb_cref_t *
771cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
772{
773 const VALUE v = *vptr;
774 rb_cref_t *cref, *new_cref;
775
776 if (RB_TYPE_P(v, T_IMEMO)) {
777 switch (imemo_type(v)) {
778 case imemo_cref:
779 cref = (rb_cref_t *)v;
780 new_cref = vm_cref_dup(cref);
781 if (parent) {
782 RB_OBJ_WRITE(parent, vptr, new_cref);
783 }
784 else {
785 VM_FORCE_WRITE(vptr, (VALUE)new_cref);
786 }
787 return (rb_cref_t *)new_cref;
788 case imemo_svar:
789 if (can_be_svar) {
790 return cref_replace_with_duplicated_cref_each_frame((const VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
791 }
792 /* fall through */
793 case imemo_ment:
794 rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
795 default:
796 break;
797 }
798 }
799 return FALSE;
800}
801
802static rb_cref_t *
803vm_cref_replace_with_duplicated_cref(const VALUE *ep)
804{
805 if (vm_env_cref_by_cref(ep)) {
806 rb_cref_t *cref;
807 VALUE envval;
808
809 while (!VM_ENV_LOCAL_P(ep)) {
810 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
811 if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
812 return cref;
813 }
814 ep = VM_ENV_PREV_EP(ep);
815 }
816 envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
817 return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
818 }
819 else {
820 rb_bug("vm_cref_dup: unreachable");
821 }
822}
823
824static rb_cref_t *
825vm_get_cref(const VALUE *ep)
826{
827 rb_cref_t *cref = vm_env_cref(ep);
828
829 if (cref != NULL) {
830 return cref;
831 }
832 else {
833 rb_bug("vm_get_cref: unreachable");
834 }
835}
836
837static rb_cref_t *
838vm_ec_cref(const rb_execution_context_t *ec)
839{
840 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
841
842 if (cfp == NULL) {
843 return NULL;
844 }
845 return vm_get_cref(cfp->ep);
846}
847
848static const rb_cref_t *
849vm_get_const_key_cref(const VALUE *ep)
850{
851 const rb_cref_t *cref = vm_get_cref(ep);
852 const rb_cref_t *key_cref = cref;
853
854 while (cref) {
855 if (FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
856 FL_TEST(CREF_CLASS(cref), RCLASS_CLONED)) {
857 return key_cref;
858 }
859 cref = CREF_NEXT(cref);
860 }
861
862 /* does not include singleton class */
863 return NULL;
864}
865
866void
867rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
868{
869 rb_cref_t *new_cref;
870
871 while (cref) {
872 if (CREF_CLASS(cref) == old_klass) {
873 new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
874 *new_cref_ptr = new_cref;
875 return;
876 }
877 new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
878 cref = CREF_NEXT(cref);
879 *new_cref_ptr = new_cref;
880 new_cref_ptr = (rb_cref_t **)&new_cref->next;
881 }
882 *new_cref_ptr = NULL;
883}
884
885static rb_cref_t *
886vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval)
887{
888 rb_cref_t *prev_cref = NULL;
889
890 if (ep) {
891 prev_cref = vm_env_cref(ep);
892 }
893 else {
894 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
895
896 if (cfp) {
897 prev_cref = vm_env_cref(cfp->ep);
898 }
899 }
900
901 return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval);
902}
903
904static inline VALUE
905vm_get_cbase(const VALUE *ep)
906{
907 const rb_cref_t *cref = vm_get_cref(ep);
908 VALUE klass = Qundef;
909
910 while (cref) {
911 if ((klass = CREF_CLASS(cref)) != 0) {
912 break;
913 }
914 cref = CREF_NEXT(cref);
915 }
916
917 return klass;
918}
919
920static inline VALUE
921vm_get_const_base(const VALUE *ep)
922{
923 const rb_cref_t *cref = vm_get_cref(ep);
924 VALUE klass = Qundef;
925
926 while (cref) {
927 if (!CREF_PUSHED_BY_EVAL(cref) &&
928 (klass = CREF_CLASS(cref)) != 0) {
929 break;
930 }
931 cref = CREF_NEXT(cref);
932 }
933
934 return klass;
935}
936
937static inline void
938vm_check_if_namespace(VALUE klass)
939{
940 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
941 rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
942 }
943}
944
945static inline void
946vm_ensure_not_refinement_module(VALUE self)
947{
948 if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
949 rb_warn("not defined at the refinement, but at the outer class/module");
950 }
951}
952
953static inline VALUE
954vm_get_iclass(rb_control_frame_t *cfp, VALUE klass)
955{
956 return klass;
957}
958
959static inline VALUE
960vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
961{
962 void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
963 VALUE val;
964
965 if (orig_klass == Qnil && allow_nil) {
966 /* in current lexical scope */
967 const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
968 const rb_cref_t *cref;
969 VALUE klass = Qnil;
970
971 while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
972 root_cref = CREF_NEXT(root_cref);
973 }
974 cref = root_cref;
975 while (cref && CREF_NEXT(cref)) {
976 if (CREF_PUSHED_BY_EVAL(cref)) {
977 klass = Qnil;
978 }
979 else {
980 klass = CREF_CLASS(cref);
981 }
982 cref = CREF_NEXT(cref);
983
984 if (!NIL_P(klass)) {
985 VALUE av, am = 0;
986 const rb_const_entry_t *ce;
987 search_continue:
988 if ((ce = rb_const_lookup(klass, id))) {
989 rb_const_warn_if_deprecated(ce, klass, id);
990 val = ce->value;
991 if (val == Qundef) {
992 if (am == klass) break;
993 am = klass;
994 if (is_defined) return 1;
995 if (rb_autoloading_value(klass, id, &av, NULL)) return av;
996 rb_autoload_load(klass, id);
997 goto search_continue;
998 }
999 else {
1000 if (is_defined) {
1001 return 1;
1002 }
1003 else {
1004 if (UNLIKELY(!rb_ractor_main_p())) {
1005 if (!rb_ractor_shareable_p(val)) {
1006 rb_raise(rb_eRactorIsolationError,
1007 "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
1008 }
1009 }
1010 return val;
1011 }
1012 }
1013 }
1014 }
1015 }
1016
1017 /* search self */
1018 if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
1019 klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
1020 }
1021 else {
1022 klass = CLASS_OF(ec->cfp->self);
1023 }
1024
1025 if (is_defined) {
1026 return rb_const_defined(klass, id);
1027 }
1028 else {
1029 return rb_const_get(klass, id);
1030 }
1031 }
1032 else {
1033 vm_check_if_namespace(orig_klass);
1034 if (is_defined) {
1035 return rb_public_const_defined_from(orig_klass, id);
1036 }
1037 else {
1038 return rb_public_const_get_from(orig_klass, id);
1039 }
1040 }
1041}
1042
1043static inline VALUE
1044vm_get_cvar_base(const rb_cref_t *cref, rb_control_frame_t *cfp, int top_level_raise)
1045{
1046 VALUE klass;
1047
1048 if (!cref) {
1049 rb_bug("vm_get_cvar_base: no cref");
1050 }
1051
1052 while (CREF_NEXT(cref) &&
1053 (NIL_P(CREF_CLASS(cref)) || FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
1054 CREF_PUSHED_BY_EVAL(cref))) {
1055 cref = CREF_NEXT(cref);
1056 }
1057 if (top_level_raise && !CREF_NEXT(cref)) {
1058 rb_raise(rb_eRuntimeError, "class variable access from toplevel");
1059 }
1060
1061 klass = vm_get_iclass(cfp, CREF_CLASS(cref));
1062
1063 if (NIL_P(klass)) {
1064 rb_raise(rb_eTypeError, "no class variables available");
1065 }
1066 return klass;
1067}
1068
1069static VALUE
1070vm_search_const_defined_class(const VALUE cbase, ID id)
1071{
1072 if (rb_const_defined_at(cbase, id)) return cbase;
1073 if (cbase == rb_cObject) {
1074 VALUE tmp = RCLASS_SUPER(cbase);
1075 while (tmp) {
1076 if (rb_const_defined_at(tmp, id)) return tmp;
1077 tmp = RCLASS_SUPER(tmp);
1078 }
1079 }
1080 return 0;
1081}
1082
1083static bool
1084iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
1085{
1086 int found;
1087
1088 if (iv_index_tbl == NULL) return false;
1089
1090 RB_VM_LOCK_ENTER();
1091 {
1092 found = st_lookup(iv_index_tbl, (st_data_t)id, (st_data_t *)ent);
1093 }
1094 RB_VM_LOCK_LEAVE();
1095
1096 return found ? true : false;
1097}
1098
1099ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
1100
1101static inline void
1102fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
1103{
1104 // fill cache
1105 if (!is_attr) {
1106 ic->entry = ent;
1107 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1108 }
1109 else {
1110 vm_cc_attr_index_set(cc, (int)ent->index + 1);
1111 }
1112}
1113
1114ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int));
1115static inline VALUE
1116vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1117{
1118#if OPT_IC_FOR_IVAR
1119 VALUE val = Qundef;
1120
1121 if (SPECIAL_CONST_P(obj)) {
1122 // frozen?
1123 }
1124 else if (LIKELY(is_attr ?
1125 RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index(cc) > 0) :
1126 RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
1127 ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
1128 uint32_t index = !is_attr ? ic->entry->index : (vm_cc_attr_index(cc) - 1);
1129
1130 RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
1131
1132 if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
1133 LIKELY(index < ROBJECT_NUMIV(obj))) {
1134 val = ROBJECT_IVPTR(obj)[index];
1135
1136 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1137 }
1138 else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1139 val = rb_ivar_generic_lookup_with_index(obj, id, index);
1140 }
1141
1142 goto ret;
1143 }
1144 else {
1145 struct rb_iv_index_tbl_entry *ent;
1146
1147 if (BUILTIN_TYPE(obj) == T_OBJECT) {
1148 struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1149
1150 if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
1151 fill_ivar_cache(iseq, ic, cc, is_attr, ent);
1152
1153 // get value
1154 if (ent->index < ROBJECT_NUMIV(obj)) {
1155 val = ROBJECT_IVPTR(obj)[ent->index];
1156
1157 VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
1158 }
1159 }
1160 }
1161 else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
1162 struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
1163
1164 if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
1165 fill_ivar_cache(iseq, ic, cc, is_attr, ent);
1166 val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
1167 }
1168 }
1169 else {
1170 // T_CLASS / T_MODULE
1171 goto general_path;
1172 }
1173
1174 ret:
1175 if (LIKELY(val != Qundef)) {
1176 return val;
1177 }
1178 else {
1179 return Qnil;
1180 }
1181 }
1182 general_path:
1183#endif /* OPT_IC_FOR_IVAR */
1184 RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
1185
1186 if (is_attr) {
1187 return rb_attr_get(obj, id);
1188 }
1189 else {
1190 return rb_ivar_get(obj, id);
1191 }
1192}
1193
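/* Summary of the inline-cache paths above (descriptive): a plain ivar read hits
 * when ic->entry->class_serial matches the receiver's class serial; an
 * attr_reader-style call (is_attr) hits when the call cache already carries an
 * attr_index. A hit reads ROBJECT_IVPTR(obj)[index] directly; otherwise the cache
 * is refilled via fill_ivar_cache() and the lookup falls back to
 * rb_ivar_get() / rb_attr_get(). */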
1194ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
1195NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
1196NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
1197
1198static VALUE
1199vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1200{
1201 rb_check_frozen_internal(obj);
1202
1203#if OPT_IC_FOR_IVAR
1204 if (RB_TYPE_P(obj, T_OBJECT)) {
1205 struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1206 struct rb_iv_index_tbl_entry *ent;
1207
1208 if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
1209 if (!is_attr) {
1210 ic->entry = ent;
1211 RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
1212 }
1213 else if (ent->index >= INT_MAX) {
1214 rb_raise(rb_eArgError, "too many instance variables");
1215 }
1216 else {
1217 vm_cc_attr_index_set(cc, (int)(ent->index + 1));
1218 }
1219
1220 uint32_t index = ent->index;
1221
1222 if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
1223 rb_init_iv_list(obj);
1224 }
1225 VALUE *ptr = ROBJECT_IVPTR(obj);
1226 RB_OBJ_WRITE(obj, &ptr[index], val);
1227 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
1228
1229 return val;
1230 }
1231 }
1232#endif
1233 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
1234 return rb_ivar_set(obj, id, val);
1235}
1236
1237static VALUE
1238vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
1239{
1240 return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
1241}
1242
1243static VALUE
1244vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
1245{
1246 return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
1247}
1248
1249static inline VALUE
1250vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
1251{
1252#if OPT_IC_FOR_IVAR
1253 if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
1254 LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
1255
1256 VM_ASSERT(!rb_ractor_shareable_p(obj));
1257
1258 if (LIKELY(
1259 (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
1260 ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index(cc) > 0)))) {
1261 uint32_t index = !is_attr ? ic->entry->index : vm_cc_attr_index(cc)-1;
1262
1263 if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
1264 rb_init_iv_list(obj);
1265 }
1266 VALUE *ptr = ROBJECT_IVPTR(obj);
1267 RB_OBJ_WRITE(obj, &ptr[index], val);
1268 RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
1269 return val; /* inline cache hit */
1270 }
1271 }
1272 else {
1273 RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
1274 }
1275#endif /* OPT_IC_FOR_IVAR */
1276 if (is_attr) {
1277 return vm_setivar_slowpath_attr(obj, id, val, cc);
1278 }
1279 else {
1280 return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
1281 }
1282}
1283
1284static inline VALUE
1285vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
1286{
1287 return vm_getivar(obj, id, iseq, ic, NULL, FALSE);
1288}
1289
1290static inline void
1291vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
1292{
1293 vm_setivar(obj, id, val, iseq, ic, 0, 0);
1294}
1295
1296static VALUE
1297vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
1298{
1299 /* continue throw */
1300
1301 if (FIXNUM_P(err)) {
1302 ec->tag->state = FIX2INT(err);
1303 }
1304 else if (SYMBOL_P(err)) {
1305 ec->tag->state = TAG_THROW;
1306 }
1307 else if (THROW_DATA_P(err)) {
1308 ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
1309 }
1310 else {
1311 ec->tag->state = TAG_RAISE;
1312 }
1313 return err;
1314}
1315
1316static VALUE
1317vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
1318 const int flag, const VALUE throwobj)
1319{
1320 const rb_control_frame_t *escape_cfp = NULL;
1321 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
1322
1323 if (flag != 0) {
1324 /* do nothing */
1325 }
1326 else if (state == TAG_BREAK) {
1327 int is_orphan = 1;
1328 const VALUE *ep = GET_EP();
1329 const rb_iseq_t *base_iseq = GET_ISEQ();
1330 escape_cfp = reg_cfp;
1331
1332 while (base_iseq->body->type != ISEQ_TYPE_BLOCK) {
1333 if (escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
1334 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1335 ep = escape_cfp->ep;
1336 base_iseq = escape_cfp->iseq;
1337 }
1338 else {
1339 ep = VM_ENV_PREV_EP(ep);
1340 base_iseq = base_iseq->body->parent_iseq;
1341 escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
1342 VM_ASSERT(escape_cfp->iseq == base_iseq);
1343 }
1344 }
1345
1346 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1347 /* lambda{... break ...} */
1348 is_orphan = 0;
1349 state = TAG_RETURN;
1350 }
1351 else {
1352 ep = VM_ENV_PREV_EP(ep);
1353
1354 while (escape_cfp < eocfp) {
1355 if (escape_cfp->ep == ep) {
1356 const rb_iseq_t *const iseq = escape_cfp->iseq;
1357 const VALUE epc = escape_cfp->pc - iseq->body->iseq_encoded;
1358 const struct iseq_catch_table *const ct = iseq->body->catch_table;
1359 unsigned int i;
1360
1361 if (!ct) break;
1362 for (i=0; i < ct->size; i++) {
1363 const struct iseq_catch_table_entry *const entry =
1364 UNALIGNED_MEMBER_PTR(ct, entries[i]);
1365
1366 if (entry->type == CATCH_TYPE_BREAK &&
1367 entry->iseq == base_iseq &&
1368 entry->start < epc && entry->end >= epc) {
1369 if (entry->cont == epc) { /* found! */
1370 is_orphan = 0;
1371 }
1372 break;
1373 }
1374 }
1375 break;
1376 }
1377
1378 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1379 }
1380 }
1381
1382 if (is_orphan) {
1383 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1384 }
1385 }
1386 else if (state == TAG_RETRY) {
1387 const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
1388
1389 escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
1390 }
1391 else if (state == TAG_RETURN) {
1392 const VALUE *current_ep = GET_EP();
1393 const VALUE *target_lep = VM_EP_LEP(current_ep);
1394 int in_class_frame = 0;
1395 int toplevel = 1;
1396 escape_cfp = reg_cfp;
1397
1398 while (escape_cfp < eocfp) {
1399 const VALUE *lep = VM_CF_LEP(escape_cfp);
1400
1401 if (!target_lep) {
1402 target_lep = lep;
1403 }
1404
1405 if (lep == target_lep &&
1406 VM_FRAME_RUBYFRAME_P(escape_cfp) &&
1407 escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
1408 in_class_frame = 1;
1409 target_lep = 0;
1410 }
1411
1412 if (lep == target_lep) {
1413 if (VM_FRAME_LAMBDA_P(escape_cfp)) {
1414 toplevel = 0;
1415 if (in_class_frame) {
1416 /* lambda {class A; ... return ...; end} */
1417 goto valid_return;
1418 }
1419 else {
1420 const VALUE *tep = current_ep;
1421
1422 while (target_lep != tep) {
1423 if (escape_cfp->ep == tep) {
1424 /* in lambda */
1425 goto valid_return;
1426 }
1427 tep = VM_ENV_PREV_EP(tep);
1428 }
1429 }
1430 }
1431 else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
1432 switch (escape_cfp->iseq->body->type) {
1433 case ISEQ_TYPE_TOP:
1434 case ISEQ_TYPE_MAIN:
1435 if (toplevel) {
1436 if (in_class_frame) goto unexpected_return;
1437 goto valid_return;
1438 }
1439 break;
1440 case ISEQ_TYPE_EVAL:
1441 case ISEQ_TYPE_CLASS:
1442 toplevel = 0;
1443 break;
1444 default:
1445 break;
1446 }
1447 }
1448 }
1449
1450 if (escape_cfp->ep == target_lep && escape_cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
1451 goto valid_return;
1452 }
1453
1454 escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
1455 }
1456 unexpected_return:;
1457 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1458
1459 valid_return:;
1460 /* do nothing */
1461 }
1462 else {
1463 rb_bug("isns(throw): unsupported throw type");
1464 }
1465
1466 ec->tag->state = state;
1467 return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
1468}
1469
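/* Ruby-level behaviour handled above (illustrative sketch):
 *
 *   def m
 *     pr = proc { return 1 }    # TAG_RETURN: must resolve to m's method frame
 *     [1].each { break 2 }      # TAG_BREAK: resolves to the enclosing block's frame
 *     pr.call
 *   end
 *
 * If the target frame no longer exists (e.g. the proc outlives m and is called
 * later), the search fails and rb_vm_localjump_error() raises LocalJumpError. */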
1470static VALUE
1471vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
1472 rb_num_t throw_state, VALUE throwobj)
1473{
1474 const int state = (int)(throw_state & VM_THROW_STATE_MASK);
1475 const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
1476
1477 if (state != 0) {
1478 return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
1479 }
1480 else {
1481 return vm_throw_continue(ec, throwobj);
1482 }
1483}
1484
1485static inline void
1486vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
1487{
1488 int is_splat = flag & 0x01;
1489 rb_num_t space_size = num + is_splat;
1490 VALUE *base = sp - 1;
1491 const VALUE *ptr;
1492 rb_num_t len;
1493 const VALUE obj = ary;
1494
1495 if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
1496 ary = obj;
1497 ptr = &ary;
1498 len = 1;
1499 }
1500 else {
1501 ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
1502 len = (rb_num_t)RARRAY_LEN(ary);
1503 }
1504
1505 if (space_size == 0) {
1506 /* no space left on stack */
1507 }
1508 else if (flag & 0x02) {
1509 /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
1510 rb_num_t i = 0, j;
1511
1512 if (len < num) {
1513 for (i=0; i<num-len; i++) {
1514 *base++ = Qnil;
1515 }
1516 }
1517 for (j=0; i<num; i++, j++) {
1518 VALUE v = ptr[len - j - 1];
1519 *base++ = v;
1520 }
1521 if (is_splat) {
1522 *base = rb_ary_new4(len - j, ptr);
1523 }
1524 }
1525 else {
1526 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1527 rb_num_t i;
1528 VALUE *bptr = &base[space_size - 1];
1529
1530 for (i=0; i<num; i++) {
1531 if (len <= i) {
1532 for (; i<num; i++) {
1533 *bptr-- = Qnil;
1534 }
1535 break;
1536 }
1537 *bptr-- = ptr[i];
1538 }
1539 if (is_splat) {
1540 if (num > len) {
1541 *bptr = rb_ary_new();
1542 }
1543 else {
1544 *bptr = rb_ary_new4(len - num, ptr + num);
1545 }
1546 }
1547 }
1548 RB_GC_GUARD(ary);
1549}
1550
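/* Example (illustrative): for the Ruby assignment
 *
 *   a, b, *rest = [1, 2, 3, 4]
 *
 * expandarray runs with num = 2 and the splat bit set; the "normal" branch above
 * pushes rest = [3, 4] deepest and leaves ary[0] = 1 on top of the stack (matching
 * the "ary[0] # top" comment). A short source array is padded with nil and the
 * splat collapses to an empty array. */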
1551static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
1552
1553static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
1554
1555static struct rb_class_cc_entries *
1556vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
1557{
1558 struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
1559#if VM_CHECK_MODE > 0
1560 ccs->debug_sig = ~(VALUE)ccs;
1561#endif
1562 ccs->capa = 0;
1563 ccs->len = 0;
1564 RB_OBJ_WRITE(klass, &ccs->cme, cme);
1565 METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
1566 ccs->entries = NULL;
1567 return ccs;
1568}
1569
1570static void
1571vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
1572{
1573 if (! vm_cc_markable(cc)) {
1574 return;
1575 }
1576 else if (! vm_ci_markable(ci)) {
1577 return;
1578 }
1579
1580 if (UNLIKELY(ccs->len == ccs->capa)) {
1581 if (ccs->capa == 0) {
1582 ccs->capa = 1;
1583 ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
1584 }
1585 else {
1586 ccs->capa *= 2;
1587 REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
1588 }
1589 }
1590 VM_ASSERT(ccs->len < ccs->capa);
1591
1592 const int pos = ccs->len++;
1593 RB_OBJ_WRITE(klass, &ccs->entries[pos].ci, ci);
1594 RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
1595
1596 if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
1597 // for tuning
1598 // vm_mtbl_dump(klass, 0);
1599 }
1600}
1601
1602#if VM_CHECK_MODE > 0
1603void
1604rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
1605{
1606 fprintf(stderr, "ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
1607 for (int i=0; i<ccs->len; i++) {
1608 vm_ci_dump(ccs->entries[i].ci);
1609 rp(ccs->entries[i].cc);
1610 }
1611}
1612
1613static int
1614vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
1615{
1616 VM_ASSERT(vm_ccs_p(ccs));
1617 VM_ASSERT(ccs->len <= ccs->capa);
1618
1619 for (int i=0; i<ccs->len; i++) {
1620 const struct rb_callinfo *ci = ccs->entries[i].ci;
1621 const struct rb_callcache *cc = ccs->entries[i].cc;
1622
1623 VM_ASSERT(vm_ci_p(ci));
1624 VM_ASSERT(vm_ci_mid(ci) == mid);
1626 VM_ASSERT(vm_cc_class_check(cc, klass));
1627 VM_ASSERT(vm_cc_cme(cc) == ccs->cme);
1628 }
1629 return TRUE;
1630}
1631#endif
1632
1633#ifndef MJIT_HEADER
1634static const struct rb_callcache *
1635vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
1636{
1637 const ID mid = vm_ci_mid(ci);
1638 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
1639 struct rb_class_cc_entries *ccs = NULL;
1640
1641 if (cc_tbl) {
1642 if (rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
1643 const int ccs_len = ccs->len;
1644 VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
1645
1646 if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
1647 rb_vm_ccs_free(ccs);
1648 rb_id_table_delete(cc_tbl, mid);
1649 ccs = NULL;
1650 }
1651 else {
1652 for (int i=0; i<ccs_len; i++) {
1653 const struct rb_callinfo *ccs_ci = ccs->entries[i].ci;
1654 const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
1655
1656 VM_ASSERT(vm_ci_p(ccs_ci));
1658
1659 if (ccs_ci == ci) { // TODO: equality
1660 RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
1661
1662 VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
1663 VM_ASSERT(ccs_cc->klass == klass);
1664 VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
1665
1666 return ccs_cc;
1667 }
1668 }
1669 }
1670 }
1671 }
1672 else {
1673 cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
1674 }
1675
1676 RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
1677
1678 const rb_callable_method_entry_t *cme;
1679
1680 if (ccs) {
1681 cme = ccs->cme;
1682 cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
1683
1685 }
1686 else {
1687 cme = rb_callable_method_entry(klass, mid);
1688 }
1689
1690 VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
1691
1692 if (cme == NULL) {
1693 // undef or not found: can't cache the information
1694 VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
1695 return &vm_empty_cc;
1696 }
1697
1698#if VM_CHECK_MODE > 0
1699 const rb_callable_method_entry_t *searched_cme = rb_callable_method_entry(klass, mid);
1700 VM_ASSERT(cme == searched_cme);
1701#endif
1702
1703 const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general);
1705
1706 if (ccs == NULL) {
1707 VM_ASSERT(cc_tbl != NULL);
1708
1709 if (LIKELY(rb_id_table_lookup(cc_tbl, mid, (VALUE*)&ccs))) {
1710 // rb_callable_method_entry() prepares ccs.
1711 }
1712 else {
1713 // TODO: required?
1714 ccs = vm_ccs_create(klass, cme);
1715 rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
1716 }
1717 }
1718
1719 vm_ccs_push(klass, ccs, ci, cc);
1720
1721 VM_ASSERT(vm_cc_cme(cc) != NULL);
1722 VM_ASSERT(cme->called_id == mid);
1723 VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
1724
1725 return cc;
1726}
1727
1728MJIT_FUNC_EXPORTED const struct rb_callcache *
1729rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
1730{
1731 const struct rb_callcache *cc;
1732
1733 VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
1734
1735 RB_VM_LOCK_ENTER();
1736 {
1737 cc = vm_search_cc(klass, ci);
1738
1739 VM_ASSERT(cc);
1741 VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
1742 VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
1743 VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
1744 VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
1745 }
1746 RB_VM_LOCK_LEAVE();
1747
1748 return cc;
1749}
1750#endif
1751
1752static const struct rb_callcache *
1753vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
1754{
1755#if USE_DEBUG_COUNTER
1756 const struct rb_callcache *old_cc = cd->cc;
1757#endif
1758
1759 const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
1760
1761#if OPT_INLINE_METHOD_CACHE
1762 cd->cc = cc;
1763
1764 const struct rb_callcache *empty_cc =
1765#ifdef MJIT_HEADER
1766 rb_vm_empty_cc();
1767#else
1768 &vm_empty_cc;
1769#endif
1770 if (cd_owner && cc != empty_cc) RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
1771
1772#if USE_DEBUG_COUNTER
1773 if (old_cc == &empty_cc) {
1774 // empty
1775 RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
1776 }
1777 else if (old_cc == cc) {
1778 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
1779 }
1780 else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
1781 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
1782 }
1783 else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
1784 vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
1785 RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
1786 }
1787 else {
1788 RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
1789 }
1790#endif
1791#endif // OPT_INLINE_METHOD_CACHE
1792
1793 VM_ASSERT(vm_cc_cme(cc) == NULL ||
1794 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
1795
1796 return cc;
1797}
1798
1799#ifndef MJIT_HEADER
1800ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
1801#endif
1802static const struct rb_callcache *
1803vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
1804{
1805 const struct rb_callcache *cc = cd->cc;
1806
1807#if OPT_INLINE_METHOD_CACHE
1808 if (LIKELY(vm_cc_class_check(cc, klass))) {
1809 const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc);
1810 if (LIKELY(cme && !METHOD_ENTRY_INVALIDATED(cme))) {
1811 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
1812 RB_DEBUG_COUNTER_INC(mc_inline_hit);
1813 VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
1814 (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
1815 vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
1816
1817 return cc;
1818 }
1819 RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
1820 }
1821 else {
1822 RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
1823 }
1824#endif
1825
1826 return vm_search_method_slowpath0(cd_owner, cd, klass);
1827}
1828
1829static const struct rb_callcache *
1830vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
1831{
1832 VALUE klass = CLASS_OF(recv);
1833 VM_ASSERT(klass != Qfalse);
1834 VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
1835
1836 return vm_search_method_fastpath(cd_owner, cd, klass);
1837}
1838
1839static inline int
1840check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)())
1841{
1842 if (! me) {
1843 return false;
1844 }
1845 else {
1847 VM_ASSERT(callable_method_entry_p(me));
1848 VM_ASSERT(me->def);
1849 if (me->def->type != VM_METHOD_TYPE_CFUNC) {
1850 return false;
1851 }
1852 else {
1853 return me->def->body.cfunc.func == func;
1854 }
1855 }
1856}
1857
1858static inline int
1859vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, VALUE (*func)())
1860{
1861 VM_ASSERT(iseq != NULL);
1862 const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
1863 return check_cfunc(vm_cc_cme(cc), func);
1864}
1865
1866#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
1867
1868static inline bool
1869FIXNUM_2_P(VALUE a, VALUE b)
1870{
1871 /* FIXNUM_P(a) && FIXNUM_P(b)
1872 * == ((a & 1) && (b & 1))
1873 * == a & b & 1 */
1874 SIGNED_VALUE x = a;
1875 SIGNED_VALUE y = b;
1876 SIGNED_VALUE z = x & y & 1;
1877 return z == 1;
1878}
1879
1880static inline bool
1881FLONUM_2_P(VALUE a, VALUE b)
1882{
1883#if USE_FLONUM
1884 /* FLONUM_P(a) && FLONUM_P(b)
1885 * == ((a & 3) == 2) && ((b & 3) == 2)
1886 * == ! ((a ^ 2) | (b ^ 2) & 3)
1887 */
1888 SIGNED_VALUE x = a;
1889 SIGNED_VALUE y = b;
1890 SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
1891 return !z;
1892#else
1893 return false;
1894#endif
1895}
1896
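/* Worked example (illustrative, assuming the usual VALUE tagging on 64-bit
 * builds): the Fixnum 2 is encoded as (2 << 1) | 1 == 0x05 and 3 as 0x07, so
 * x & y & 1 == 1 and FIXNUM_2_P() is true. Flonums carry the tag 0b10 in their
 * low two bits, so ((x ^ 2) | (y ^ 2)) & 3 is zero exactly when both operands
 * are flonums. */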
1897static VALUE
1898opt_equality_specialized(VALUE recv, VALUE obj)
1899{
1900 if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
1901 goto compare_by_identity;
1902 }
1903 else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
1904 goto compare_by_identity;
1905 }
1906 else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
1907 goto compare_by_identity;
1908 }
1909 else if (SPECIAL_CONST_P(recv)) {
1910 //
1911 }
1912 else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
1913 double a = RFLOAT_VALUE(recv);
1914 double b = RFLOAT_VALUE(obj);
1915
1916#if MSC_VERSION_BEFORE(1300)
1917 if (isnan(a)) {
1918 return Qfalse;
1919 }
1920 else if (isnan(b)) {
1921 return Qfalse;
1922 }
1923 else
1924#endif
1925 if (a == b) {
1926 return Qtrue;
1927 }
1928 else {
1929 return Qfalse;
1930 }
1931 }
1932 else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
1933 if (recv == obj) {
1934 return Qtrue;
1935 }
1936 else if (RB_TYPE_P(obj, T_STRING)) {
1937 return rb_str_eql_internal(obj, recv);
1938 }
1939 }
1940 return Qundef;
1941
1942 compare_by_identity:
1943 if (recv == obj) {
1944 return Qtrue;
1945 }
1946 else {
1947 return Qfalse;
1948 }
1949}
1950
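/* Note (descriptive): each branch above is guarded by EQ_UNREDEFINED_P, so the
 * shortcuts only apply while the corresponding BOP_EQ is unredefined. Once, say,
 * Integer#== is redefined, fixnum operands fall through to Qundef and the caller
 * performs a regular method dispatch instead. */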
1951static VALUE
1952opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
1953{
1954 VM_ASSERT(cd_owner != NULL);
1955
1956 VALUE val = opt_equality_specialized(recv, obj);
1957 if (val != Qundef) return val;
1958
1959 if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
1960 return Qundef;
1961 }
1962 else {
1963 if (recv == obj) {
1964 return Qtrue;
1965 }
1966 else {
1967 return Qfalse;
1968 }
1969 }
1970}
1971
1972#undef EQ_UNREDEFINED_P
1973
1974#ifndef MJIT_HEADER
1975
1976static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, int argc); // vm_eval.c
1977NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
1978
1979static VALUE
1980opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
1981{
1982 const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, 1);
1983
1984 if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
1985 if (recv == obj) {
1986 return Qtrue;
1987 }
1988 else {
1989 return Qfalse;
1990 }
1991 }
1992 else {
1993 return Qundef;
1994 }
1995}
1996
1997static VALUE
1998opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
1999{
2000 VALUE val = opt_equality_specialized(recv, obj);
2001 if (val != Qundef) {
2002 return val;
2003 }
2004 else {
2005 return opt_equality_by_mid_slowpath(recv, obj, mid);
2006 }
2007}
2008
2009VALUE
2010rb_equal_opt(VALUE obj1, VALUE obj2)
2011{
2012 return opt_equality_by_mid(obj1, obj2, idEq);
2013}
2014
2015VALUE
2016rb_eql_opt(VALUE obj1, VALUE obj2)
2017{
2018 return opt_equality_by_mid(obj1, obj2, idEqlP);
2019}
2020
2021#endif // MJIT_HEADER
2022
2023extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
2024
2025static VALUE
2026check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
2027{
2028 switch (type) {
2029 case VM_CHECKMATCH_TYPE_WHEN:
2030 return pattern;
2031 case VM_CHECKMATCH_TYPE_RESCUE:
2032 if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
2033 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2034 }
2035 /* fall through */
2036 case VM_CHECKMATCH_TYPE_CASE: {
2037 const rb_callable_method_entry_t *me =
2038 rb_callable_method_entry_with_refinements(CLASS_OF(pattern), idEqq, NULL);
2039 if (me) {
2040 return rb_vm_call0(ec, pattern, idEqq, 1, &target, me, RB_NO_KEYWORDS);
2041 }
2042 else {
2043 /* fallback to funcall (e.g. method_missing) */
2044 return rb_funcallv(pattern, idEqq, 1, &target);
2045 }
2046 }
2047 default:
2048 rb_bug("check_match: unreachable");
2049 }
2050}
2051
2052
2053#if MSC_VERSION_BEFORE(1300)
2054#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
2055#else
2056#define CHECK_CMP_NAN(a, b) /* do nothing */
2057#endif
2058
2059static inline VALUE
2060double_cmp_lt(double a, double b)
2061{
2062 CHECK_CMP_NAN(a, b);
2063 return a < b ? Qtrue : Qfalse;
2064}
2065
2066static inline VALUE
2067double_cmp_le(double a, double b)
2068{
2069 CHECK_CMP_NAN(a, b);
2070 return a <= b ? Qtrue : Qfalse;
2071}
2072
2073static inline VALUE
2074double_cmp_gt(double a, double b)
2075{
2076 CHECK_CMP_NAN(a, b);
2077 return a > b ? Qtrue : Qfalse;
2078}
2079
2080static inline VALUE
2081double_cmp_ge(double a, double b)
2082{
2083 CHECK_CMP_NAN(a, b);
2084 return a >= b ? Qtrue : Qfalse;
2085}
2086
2087static inline VALUE *
2088vm_base_ptr(const rb_control_frame_t *cfp)
2089{
2090#if 0 // we may optimize and use this once we confirm it does not spoil performance on JIT.
2091 const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2092
2093 if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
2094 VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
2095 if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
2096 /* adjust `self' */
2097 bp += 1;
2098 }
2099#if VM_DEBUG_BP_CHECK
2100 if (bp != cfp->bp_check) {
2101 fprintf(stderr, "bp_check: %ld, bp: %ld\n",
2102 (long)(cfp->bp_check - GET_EC()->vm_stack),
2103 (long)(bp - GET_EC()->vm_stack));
2104 rb_bug("vm_base_ptr: unreachable");
2105 }
2106#endif
2107 return bp;
2108 }
2109 else {
2110 return NULL;
2111 }
2112#else
2113 return cfp->__bp__;
2114#endif
2115}
2116
2117/* method call processes with call_info */
2118
2119#include "vm_args.c"
2120
2121static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
2122ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
2123static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
2124static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
2125static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2126static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2127static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
2128
2129static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
2130
2131static VALUE
2132vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2133{
2134 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
2135
2136 return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
2137}
2138
2139static VALUE
2140vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2141{
2142 RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
2143
2144 const struct rb_callcache *cc = calling->cc;
2145 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2146 int param = iseq->body->param.size;
2147 int local = iseq->body->local_table_size;
2148 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2149}
2150
2151MJIT_STATIC bool
2153{
2154 return iseq->body->param.flags.has_opt == FALSE &&
2155 iseq->body->param.flags.has_rest == FALSE &&
2156 iseq->body->param.flags.has_post == FALSE &&
2157 iseq->body->param.flags.has_kw == FALSE &&
2158 iseq->body->param.flags.has_kwrest == FALSE &&
2159 iseq->body->param.flags.accepts_no_kwarg == FALSE &&
2160 iseq->body->param.flags.has_block == FALSE;
2161}
2162
2163static bool
2164rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
2165{
2166 return iseq->body->param.flags.has_opt == TRUE &&
2167 iseq->body->param.flags.has_rest == FALSE &&
2168 iseq->body->param.flags.has_post == FALSE &&
2169 iseq->body->param.flags.has_kw == FALSE &&
2170 iseq->body->param.flags.has_kwrest == FALSE &&
2171 iseq->body->param.flags.accepts_no_kwarg == FALSE &&
2172 iseq->body->param.flags.has_block == FALSE;
2173}
2174
2175static bool
2176rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
2177{
2178 return iseq->body->param.flags.has_opt == FALSE &&
2179 iseq->body->param.flags.has_rest == FALSE &&
2180 iseq->body->param.flags.has_post == FALSE &&
2181 iseq->body->param.flags.has_kw == TRUE &&
2182 iseq->body->param.flags.has_kwrest == FALSE &&
2183 iseq->body->param.flags.has_block == FALSE;
2184}
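/* Annotation: roughly, the three predicates above classify an iseq's
 * parameter list so that vm_callee_setup_arg() below can pick a
 * specialized argument-setup path:
 *   rb_simple_iseq_p        - only mandatory leading parameters
 *   rb_iseq_only_optparam_p - leading + optional parameters only
 *   rb_iseq_only_kwparam_p  - leading + keyword parameters only
 * Anything else (rest, post, block, kwrest, ...) falls back to the
 * generic setup_parameters_complex() path. */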
2185
2186// If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
2187MJIT_STATIC bool
2188rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
2189{
2190 return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
2191}
2192
2193
2194static inline void
2195CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
2196 struct rb_calling_info *restrict calling,
2197 const struct rb_callinfo *restrict ci)
2198{
2199 if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
2200 VALUE final_hash;
2201 /* This expands the splat (rest) argument onto the stack.
2202 * So, vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT is now inconsistent.
2203 */
2204 vm_caller_setup_arg_splat(cfp, calling);
2205 if (!IS_ARGS_KW_OR_KW_SPLAT(ci) &&
2206 calling->argc > 0 &&
2207 RB_TYPE_P((final_hash = *(cfp->sp - 1)), T_HASH) &&
2208 (((struct RHash *)final_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
2209 *(cfp->sp - 1) = rb_hash_dup(final_hash);
2210 calling->kw_splat = 1;
2211 }
2212 }
2213 if (UNLIKELY(IS_ARGS_KW_OR_KW_SPLAT(ci))) {
2214 if (IS_ARGS_KEYWORD(ci)) {
2215 /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
2216 * by creating a keyword hash.
2217 * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
2218 */
2219 vm_caller_setup_arg_kw(cfp, calling, ci);
2220 }
2221 else {
2222 VALUE keyword_hash = cfp->sp[-1];
2223 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
2224 /* Convert a non-hash keyword splat to a new hash */
2225 cfp->sp[-1] = rb_hash_dup(rb_to_hash_type(keyword_hash));
2226 }
2227 else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
2228 /* Convert a hash keyword splat to a new hash unless
2229 * a mutable keyword splat was passed.
2230 */
2231 cfp->sp[-1] = rb_hash_dup(keyword_hash);
2232 }
2233 }
2234 }
2235}
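/* Annotation: a rough sketch of CALLER_SETUP_ARG for a hypothetical call
 * site such as obj.m(*ary, **kw):
 *   - the splatted Array is expanded element-by-element onto the VM stack
 *     (calling->argc grows accordingly), and a trailing hash flagged with
 *     RHASH_PASS_AS_KEYWORDS is dup'ed and treated as a keyword splat;
 *   - a VM_CALL_KWARG argument list or a keyword splat is normalized into
 *     a (defensively copied) Hash sitting on the stack top.
 * Afterwards the flags recorded in `ci` no longer describe the stack
 * layout, which is why later code consults `calling` instead. */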
2236
2237static inline void
2238CALLER_REMOVE_EMPTY_KW_SPLAT(struct rb_control_frame_struct *restrict cfp,
2239 struct rb_calling_info *restrict calling,
2240 const struct rb_callinfo *restrict ci)
2241{
2242 if (UNLIKELY(calling->kw_splat)) {
2243 /* This removes the last Hash object if it is empty.
2244 * So, vm_ci_flag(ci) & VM_CALL_KW_SPLAT is now inconsistent.
2245 * However, you can still use vm_ci_flag(ci) & VM_CALL_KW_SPLAT to
2246 * determine whether a hash should be added back with a
2247 * warning (for backwards compatibility in cases where
2248 * the method does not receive the required number of
2249 * arguments).
2250 */
2251 if (RHASH_EMPTY_P(cfp->sp[-1])) {
2252 cfp->sp--;
2253 calling->argc--;
2254 calling->kw_splat = 0;
2255 }
2256 }
2257}
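/* Annotation: e.g. a call spelled m(**empty_hash) arrives here with an
 * empty Hash on the stack top; dropping it lets the callee observe the
 * same argc as a plain m() call, while VM_CALL_KW_SPLAT in `ci` still
 * records that a keyword splat was written at the call site. */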
2258
2259#define USE_OPT_HIST 0
2260
2261#if USE_OPT_HIST
2262#define OPT_HIST_MAX 64
2263static int opt_hist[OPT_HIST_MAX+1];
2264
2265__attribute__((destructor))
2266static void
2267opt_hist_show_results_at_exit(void)
2268{
2269 for (int i=0; i<OPT_HIST_MAX; i++) {
2270 fprintf(stderr, "opt_hist\t%d\t%d\n", i, opt_hist[i]);
2271 }
2272}
2273#endif
2274
2275static VALUE
2276vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2277 struct rb_calling_info *calling)
2278{
2279 const struct rb_callcache *cc = calling->cc;
2280 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2281 const int lead_num = iseq->body->param.lead_num;
2282 const int opt = calling->argc - lead_num;
2283 const int opt_num = iseq->body->param.opt_num;
2284 const int opt_pc = (int)iseq->body->param.opt_table[opt];
2285 const int param = iseq->body->param.size;
2286 const int local = iseq->body->local_table_size;
2287 const int delta = opt_num - opt;
2288
2289 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2290
2291#if USE_OPT_HIST
2292 if (opt_pc < OPT_HIST_MAX) {
2293 opt_hist[opt]++;
2294 }
2295 else {
2296 opt_hist[OPT_HIST_MAX]++;
2297 }
2298#endif
2299
2300 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
2301}
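/* Annotation: `opt` is the number of optional parameters actually
 * supplied, so opt_table[opt] is the program counter at which execution
 * must start so the remaining default expressions still run. For a
 * hypothetical
 *   def m(a, b = 1, c = 2); end   called as   m(0, 9)
 * opt == 1 and opt_pc points at the code that computes c's default,
 * while b's default code is skipped. */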
2302
2303static VALUE
2304vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2305 struct rb_calling_info *calling)
2306{
2307 const struct rb_callcache *cc = calling->cc;
2308 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2309 const int lead_num = iseq->body->param.lead_num;
2310 const int opt = calling->argc - lead_num;
2311 const int opt_pc = (int)iseq->body->param.opt_table[opt];
2312
2313 RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
2314
2315#if USE_OPT_HIST
2316 if (opt_pc < OPT_HIST_MAX) {
2317 opt_hist[opt]++;
2318 }
2319 else {
2320 opt_hist[OPT_HIST_MAX]++;
2321 }
2322#endif
2323
2324 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2325}
2326
2327static void
2328args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
2329 VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
2330 VALUE *const locals);
2331
2332static VALUE
2333vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2334 struct rb_calling_info *calling)
2335{
2336 const struct rb_callinfo *ci = calling->ci;
2337 const struct rb_callcache *cc = calling->cc;
2338
2339 VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
2340 RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
2341
2342 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2343 const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
2344 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2345 const int ci_kw_len = kw_arg->keyword_len;
2346 const VALUE * const ci_keywords = kw_arg->keywords;
2347 VALUE *argv = cfp->sp - calling->argc;
2348 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2349 const int lead_num = iseq->body->param.lead_num;
2350 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2351 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2352 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2353
2354 int param = iseq->body->param.size;
2355 int local = iseq->body->local_table_size;
2356 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2357}
2358
2359static VALUE
2360vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
2361 struct rb_calling_info *calling)
2362{
2363 const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->ci;
2364 const struct rb_callcache *cc = calling->cc;
2365
2366 VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
2367 RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
2368
2369 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2370 const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
2371 VALUE * const argv = cfp->sp - calling->argc;
2372 VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
2373
2374 int i;
2375 for (i=0; i<kw_param->num; i++) {
2376 klocals[i] = kw_param->default_values[i];
2377 }
2378 klocals[i] = INT2FIX(0); // kw specify flag
2379 // NOTE:
2380 // nobody checks this value, but it should be cleared because it can
2381 // point to an invalid VALUE (T_NONE objects, raw pointers and so on).
2382
2383 int param = iseq->body->param.size;
2384 int local = iseq->body->local_table_size;
2385 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
2386}
2387
2388static inline int
2389vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
2390 const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
2391{
2392 const struct rb_callinfo *ci = calling->ci;
2393 const struct rb_callcache *cc = calling->cc;
2394 bool cacheable_ci = vm_ci_markable(ci);
2395
2396 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
2397 if (LIKELY(rb_simple_iseq_p(iseq))) {
2398 rb_control_frame_t *cfp = ec->cfp;
2399 CALLER_SETUP_ARG(cfp, calling, ci);
2400 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
2401
2402 if (calling->argc != iseq->body->param.lead_num) {
2403 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
2404 }
2405
2406 VM_ASSERT(ci == calling->ci);
2407 VM_ASSERT(cc == calling->cc);
2408 CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), cacheable_ci && vm_call_iseq_optimizable_p(ci, cc));
2409 return 0;
2410 }
2411 else if (rb_iseq_only_optparam_p(iseq)) {
2412 rb_control_frame_t *cfp = ec->cfp;
2413 CALLER_SETUP_ARG(cfp, calling, ci);
2414 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
2415
2416 const int lead_num = iseq->body->param.lead_num;
2417 const int opt_num = iseq->body->param.opt_num;
2418 const int argc = calling->argc;
2419 const int opt = argc - lead_num;
2420
2421 if (opt < 0 || opt > opt_num) {
2422 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
2423 }
2424
2425 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
2426 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
2427 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
2428 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2429 }
2430 else {
2431 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
2432 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
2433 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2434 }
2435
2436 /* initialize opt vars for self-references */
2437 VM_ASSERT((int)iseq->body->param.size == lead_num + opt_num);
2438 for (int i=argc; i<lead_num + opt_num; i++) {
2439 argv[i] = Qnil;
2440 }
2441 return (int)iseq->body->param.opt_table[opt];
2442 }
2443 else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
2444 const int lead_num = iseq->body->param.lead_num;
2445 const int argc = calling->argc;
2446 const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
2447
2448 if (vm_ci_flag(ci) & VM_CALL_KWARG) {
2449 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
2450
2451 if (argc - kw_arg->keyword_len == lead_num) {
2452 const int ci_kw_len = kw_arg->keyword_len;
2453 const VALUE * const ci_keywords = kw_arg->keywords;
2454 VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
2455 MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
2456
2457 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2458 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
2459
2460 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
2461 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2462
2463 return 0;
2464 }
2465 }
2466 else if (argc == lead_num) {
2467 /* no kwarg */
2468 VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
2469 args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
2470
2471 if (klocals[kw_param->num] == INT2FIX(0)) {
2472 /* copy from default_values */
2473 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
2474 cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
2475 }
2476
2477 return 0;
2478 }
2479 }
2480 }
2481
2482 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
2483}
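/* Annotation: the value returned above is the opt_pc to begin execution
 * at (0 for the non-optional cases). Each branch also installs a
 * specialized handler through CC_SET_FASTPATH, so subsequent calls
 * through the same call cache can skip this analysis when the call site
 * is cacheable. */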
2484
2485static VALUE
2486vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2487{
2488 RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
2489
2490 const struct rb_callcache *cc = calling->cc;
2491 const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
2492 const int param_size = iseq->body->param.size;
2493 const int local_size = iseq->body->local_table_size;
2494 const int opt_pc = vm_callee_setup_arg(ec, calling, def_iseq_ptr(vm_cc_cme(cc)->def), cfp->sp - calling->argc, param_size, local_size);
2495 return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
2496}
2497
2498static inline VALUE
2499vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
2500 int opt_pc, int param_size, int local_size)
2501{
2502 const struct rb_callinfo *ci = calling->ci;
2503 const struct rb_callcache *cc = calling->cc;
2504
2505 if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
2506 return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
2507 }
2508 else {
2509 return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
2510 }
2511}
2512
2513static inline VALUE
2514vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
2515 int opt_pc, int param_size, int local_size)
2516{
2517 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
2518 VALUE *argv = cfp->sp - calling->argc;
2519 VALUE *sp = argv + param_size;
2520 cfp->sp = argv - 1 /* recv */;
2521
2522 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
2523 calling->block_handler, (VALUE)me,
2524 iseq->body->iseq_encoded + opt_pc, sp,
2525 local_size - param_size,
2526 iseq->body->stack_max);
2527 return Qundef;
2528}
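/* Annotation: stack shape established above, with argv pointing at the
 * first argument slot (the receiver sits just below it):
 *   argv[0 .. param_size-1]          parameters already on the stack
 *   argv[param_size .. local_size-1] remaining locals, filled with Qnil
 *                                    by vm_push_frame (local_size - param_size slots)
 * and the new frame's sp starts at argv + param_size. */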
2529
2530static inline VALUE
2531vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
2532{
2533 const struct rb_callcache *cc = calling->cc;
2534 unsigned int i;
2535 VALUE *argv = cfp->sp - calling->argc;
2536 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
2537 const rb_iseq_t *iseq = def_iseq_ptr(me->def);
2538 VALUE *src_argv = argv;
2539 VALUE *sp_orig, *sp;
2540 VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
2541
2542 if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
2543 struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
2544 const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
2545 dst_captured->code.val = src_captured->code.val;
2546 if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
2547 calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
2548 }
2549 else {
2550 calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
2551 }
2552 }
2553
2554 vm_pop_frame(ec, cfp, cfp->ep);
2555 cfp = ec->cfp;
2556
2557 sp_orig = sp = cfp->sp;
2558
2559 /* push self */
2560 sp[0] = calling->recv;
2561 sp++;
2562
2563 /* copy arguments */
2564 for (i=0; i < iseq->body->param.size; i++) {
2565 *sp++ = src_argv[i];
2566 }
2567
2568 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
2569 calling->recv, calling->block_handler, (VALUE)me,
2570 iseq->body->iseq_encoded + opt_pc, sp,
2571 iseq->body->local_table_size - iseq->body->param.size,
2572 iseq->body->stack_max);
2573
2574 cfp->sp = sp_orig;
2575
2576 return Qundef;
2577}
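/* Annotation: for a tailcall the current frame is popped first, then the
 * receiver and arguments are copied down to where that frame's stack
 * region began, so the replacement METHOD frame reuses the caller's slot
 * instead of growing the control-frame stack. */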
2578
2579static void
2580ractor_unsafe_check(void)
2581{
2582 if (!rb_ractor_main_p()) {
2583 rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from not main ractor");
2584 }
2585}
2586
2587static VALUE
2588call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2589{
2590 ractor_unsafe_check();
2591 return (*func)(recv, rb_ary_new4(argc, argv));
2592}
2593
2594static VALUE
2595call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2596{
2597 ractor_unsafe_check();
2598 return (*func)(argc, argv, recv);
2599}
2600
2601static VALUE
2602call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2603{
2604 ractor_unsafe_check();
2605 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
2606 return (*f)(recv);
2607}
2608
2609static VALUE
2610call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2611{
2612 ractor_unsafe_check();
2613 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
2614 return (*f)(recv, argv[0]);
2615}
2616
2617static VALUE
2618call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2619{
2620 ractor_unsafe_check();
2621 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
2622 return (*f)(recv, argv[0], argv[1]);
2623}
2624
2625static VALUE
2626call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2627{
2628 ractor_unsafe_check();
2629 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
2630 return (*f)(recv, argv[0], argv[1], argv[2]);
2631}
2632
2633static VALUE
2634call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2635{
2636 ractor_unsafe_check();
2637 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
2638 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
2639}
2640
2641static VALUE
2642call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2643{
2644 ractor_unsafe_check();
2645 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2646 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
2647}
2648
2649static VALUE
2650call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2651{
2652 ractor_unsafe_check();
2653 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2654 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
2655}
2656
2657static VALUE
2658call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2659{
2660 ractor_unsafe_check();
2661 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2662 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
2663}
2664
2665static VALUE
2666call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2667{
2668 ractor_unsafe_check();
2669 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2670 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
2671}
2672
2673static VALUE
2674call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2675{
2676 ractor_unsafe_check();
2677 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2678 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
2679}
2680
2681static VALUE
2682call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2683{
2684 ractor_unsafe_check();
2685 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2686 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
2687}
2688
2689static VALUE
2690call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2691{
2692 ractor_unsafe_check();
2693 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2694 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
2695}
2696
2697static VALUE
2698call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2699{
2700 ractor_unsafe_check();
2701 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2702 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
2703}
2704
2705static VALUE
2706call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2707{
2708 ractor_unsafe_check();
2709 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2710 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
2711}
2712
2713static VALUE
2714call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2715{
2716 ractor_unsafe_check();
2717 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2718 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
2719}
2720
2721static VALUE
2722call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2723{
2724 ractor_unsafe_check();
2725 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2726 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
2727}
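/* Annotation: the call_cfunc_N helpers above all follow one pattern - cast
 * the stored ANYARGS pointer back to its arity-N signature and pass
 * argv[0..N-1] positionally, e.g. (sketch):
 *
 *     VALUE (*f)(VALUE, VALUE) = (VALUE (*)(VALUE, VALUE))func;
 *     return (*f)(recv, argv[0]);              // arity 1
 *
 * call_cfunc_m1 corresponds to rb_define_method(..., -1) (argc/argv style)
 * and call_cfunc_m2 to -2 (receiver plus one Array of arguments). The
 * ractor_safe_* variants below are identical except that they skip
 * ractor_unsafe_check(). */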
2728
2729static VALUE
2730ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2731{
2732 return (*func)(recv, rb_ary_new4(argc, argv));
2733}
2734
2735static VALUE
2736ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2737{
2738 return (*func)(argc, argv, recv);
2739}
2740
2741static VALUE
2742ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2743{
2744 VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
2745 return (*f)(recv);
2746}
2747
2748static VALUE
2749ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2750{
2751 VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
2752 return (*f)(recv, argv[0]);
2753}
2754
2755static VALUE
2756ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2757{
2758 VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
2759 return (*f)(recv, argv[0], argv[1]);
2760}
2761
2762static VALUE
2763ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2764{
2765 VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
2766 return (*f)(recv, argv[0], argv[1], argv[2]);
2767}
2768
2769static VALUE
2770ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2771{
2772 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
2773 return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
2774}
2775
2776static VALUE
2777ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2778 {
2779 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2780 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
2781}
2782
2783static VALUE
2784ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2785 {
2786 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2787 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
2788}
2789
2790static VALUE
2791ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2792 {
2793 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2794 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
2795}
2796
2797static VALUE
2798ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2799 {
2800 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2801 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
2802}
2803
2804static VALUE
2805ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2806 {
2807 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2808 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
2809}
2810
2811static VALUE
2812ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2813 {
2814 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2815 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
2816}
2817
2818static VALUE
2819ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2820 {
2821 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2822 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
2823}
2824
2825static VALUE
2826ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2827 {
2828 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2829 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
2830}
2831
2832static VALUE
2833ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2834 {
2835 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2836 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
2837}
2838
2839static VALUE
2840ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2841 {
2842 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2843 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
2844}
2845
2846static VALUE
2847ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
2848 {
2849 VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
2850 return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
2851}
2852
2853static inline int
2854vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
2855{
2856 const int ov_flags = RAISED_STACKOVERFLOW;
2857 if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
2858 if (rb_ec_raised_p(ec, ov_flags)) {
2859 rb_ec_raised_reset(ec, ov_flags);
2860 return TRUE;
2861 }
2862 return FALSE;
2863}
2864
2865#define CHECK_CFP_CONSISTENCY(func) \
2866 (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
2867 rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
2868
2869static inline
2870const rb_method_cfunc_t *
2871vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
2872{
2873#if VM_DEBUG_VERIFY_METHOD_CACHE
2874 switch (me->def->type) {
2875 case VM_METHOD_TYPE_CFUNC:
2876 case VM_METHOD_TYPE_NOTIMPLEMENTED:
2877 break;
2878# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
2879 METHOD_BUG(ISEQ);
2880 METHOD_BUG(ATTRSET);
2881 METHOD_BUG(IVAR);
2882 METHOD_BUG(BMETHOD);
2883 METHOD_BUG(ZSUPER);
2884 METHOD_BUG(UNDEF);
2885 METHOD_BUG(OPTIMIZED);
2886 METHOD_BUG(MISSING);
2887 METHOD_BUG(REFINED);
2888 METHOD_BUG(ALIAS);
2889# undef METHOD_BUG
2890 default:
2891 rb_bug("wrong method type: %d", me->def->type);
2892 }
2893#endif
2894 return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
2895}
2896
2897static VALUE
2898vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
2899{
2900 RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
2901 const struct rb_callinfo *ci = calling->ci;
2902 const struct rb_callcache *cc = calling->cc;
2903 VALUE val;
2904 const rb_callable_method_entry_t *me = vm_cc_cme(cc);
2905 const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
2906 int len = cfunc->argc;
2907
2908 VALUE recv = calling->recv;
2909 VALUE block_handler = calling->block_handler;
2910 VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
2911 int argc = calling->argc;
2912 int orig_argc = argc;
2913
2914 if (UNLIKELY(calling->kw_splat)) {
2915 frame_type |= VM_FRAME_FLAG_CFRAME_KW;
2916 }
2917
2918 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
2919 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
2920
2921 vm_push_frame(ec, NULL, frame_type, recv,
2922 block_handler, (VALUE)me,
2923 0, ec->cfp->sp, 0, 0);
2924
2925 if (len >= 0) rb_check_arity(argc, len, len);
2926
2927 reg_cfp->sp -= orig_argc + 1;
2928 val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
2929
2930 CHECK_CFP_CONSISTENCY("vm_call_cfunc");
2931
2932 rb_vm_pop_frame(ec);
2933
2934 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
2935 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
2936
2937 return val;
2938}
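/* Annotation: the sequence above is, roughly: fire the c-call hooks, push
 * a dummy CFUNC frame (no iseq, no locals), check arity for fixed-arity
 * methods, invoke through the arity-specific invoker, verify that the
 * frame on top is still the one that was pushed, pop it, and fire the
 * c-return hooks. */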
2939
2940static VALUE
2941vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
2942{
2943 const struct rb_callinfo *ci = calling->ci;
2944 RB_DEBUG_COUNTER_INC(ccf_cfunc);
2945
2946 CALLER_SETUP_ARG(reg_cfp, calling, ci);
2947 CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci);
2948 CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
2949 return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
2950}
2951
2952static VALUE
2953vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2954{
2955 const struct rb_callcache *cc = calling->cc;
2956 RB_DEBUG_COUNTER_INC(ccf_ivar);
2957 cfp->sp -= 1;
2958 return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
2959}
2960
2961static VALUE
2962vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2963{
2964 const struct rb_callcache *cc = calling->cc;
2965 RB_DEBUG_COUNTER_INC(ccf_attrset);
2966 VALUE val = *(cfp->sp - 1);
2967 cfp->sp -= 2;
2968 return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
2969}
2970
2971static inline VALUE
2972vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
2973{
2974 rb_proc_t *proc;
2975 VALUE val;
2976 const struct rb_callcache *cc = calling->cc;
2977 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
2978 VALUE procv = cme->def->body.bmethod.proc;
2979
2980 if (!RB_OBJ_SHAREABLE_P(procv) &&
2981 cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
2982 rb_raise(rb_eRuntimeError, "defined in a different Ractor");
2983 }
2984
2985 /* control block frame */
2986 GetProcPtr(procv, proc);
2987 val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
2988
2989 return val;
2990}
2991
2992static VALUE
2993vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
2994{
2995 RB_DEBUG_COUNTER_INC(ccf_bmethod);
2996
2997 VALUE *argv;
2998 int argc;
2999 const struct rb_callinfo *ci = calling->ci;
3000
3001 CALLER_SETUP_ARG(cfp, calling, ci);
3002 argc = calling->argc;
3003 argv = ALLOCA_N(VALUE, argc);
3004 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
3005 cfp->sp += - argc - 1;
3006
3007 return vm_call_bmethod_body(ec, calling, argv);
3008}
3009
3010 MJIT_FUNC_EXPORTED VALUE
3011 rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
3012{
3013 VALUE klass = current_class;
3014
3015 /* for a prepended Module, start the search from the origin (cover) class */
3016 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
3017 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
3018 klass = RBASIC_CLASS(klass);
3019 }
3020
3021 while (RTEST(klass)) {
3022 VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
3023 if (owner == target_owner) {
3024 return klass;
3025 }
3026 klass = RCLASS_SUPER(klass);
3027 }
3028
3029 return current_class; /* maybe module function */
3030}
3031
3032static const rb_callable_method_entry_t *
3033aliased_callable_method_entry(const rb_callable_method_entry_t *me)
3034{
3035 const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
3036 const rb_callable_method_entry_t *cme;
3037
3038 if (orig_me->defined_class == 0) {
3039 VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
3040 VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
3041 cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
3042
3043 if (me->def->alias_count + me->def->complemented_count == 0) {
3044 RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
3045 }
3046 else {
3047 rb_method_definition_t *def =
3048 rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
3049 rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
3050 }
3051 }
3052 else {
3053 cme = (const rb_callable_method_entry_t *)orig_me;
3054 }
3055
3056 VM_ASSERT(callable_method_entry_p(cme));
3057 return cme;
3058}
3059
3060static VALUE
3061vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3062{
3063 calling->cc = &VM_CC_ON_STACK(Qundef,
3064 vm_call_general,
3065 { 0 },
3066 aliased_callable_method_entry(vm_cc_cme(calling->cc)));
3067
3068 return vm_call_method_each_type(ec, cfp, calling);
3069}
3070
3071static enum method_missing_reason
3072ci_missing_reason(const struct rb_callinfo *ci)
3073{
3074 enum method_missing_reason stat = MISSING_NOENTRY;
3075 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
3076 if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
3077 if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
3078 return stat;
3079}
3080
3081static VALUE
3082vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3083 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol)
3084{
3085 ASSUME(calling->argc >= 0);
3086 /* Also assumes CALLER_SETUP_ARG is already done. */
3087
3088 enum method_missing_reason missing_reason = MISSING_NOENTRY;
3089 int argc = calling->argc;
3090 VALUE recv = calling->recv;
3091 VALUE klass = CLASS_OF(recv);
3092 ID mid = rb_check_id(&symbol);
3093 int flags = VM_CALL_FCALL |
3094 VM_CALL_OPT_SEND |
3095 (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
3096
3097 if (UNLIKELY(! mid)) {
3098 mid = idMethodMissing;
3099 missing_reason = ci_missing_reason(ci);
3100 ec->method_missing_reason = missing_reason;
3101
3102 /* E.g. when argc == 2
3103 *
3104 * | | | | TOPN
3105 * | | +------+
3106 * | | +---> | arg1 | 0
3107 * +------+ | +------+
3108 * | arg1 | -+ +-> | arg0 | 1
3109 * +------+ | +------+
3110 * | arg0 | ---+ | sym | 2
3111 * +------+ +------+
3112 * | recv | | recv | 3
3113 * --+------+--------+------+------
3114 */
3115 int i = argc;
3116 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
3117 INC_SP(1);
3118 MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
3119 argc = ++calling->argc;
3120
3121 if (rb_method_basic_definition_p(klass, idMethodMissing)) {
3122 /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
3123 TOPN(i) = symbol;
3124 int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
3125 const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
3126 VALUE exc = rb_make_no_method_exception(
3127 rb_eNoMethodError, 0, recv, argc, argv, priv);
3128
3129 rb_exc_raise(exc);
3130 }
3131 else {
3132 TOPN(i) = rb_str_intern(symbol);
3133 }
3134 }
3135
3136 calling->ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci));
3137 calling->cc = &VM_CC_ON_STACK(klass,
3138 vm_call_general,
3139 { .method_missing_reason = missing_reason },
3140 rb_callable_method_entry_with_refinements(klass, mid, NULL));
3141
3142 return vm_call_method(ec, reg_cfp, calling);
3143}
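/* Annotation: when the Symbol does not name a known method ID, the call is
 * rewritten as a method_missing call: the stack grows by one slot, the
 * arguments are shifted up as pictured in the diagram above, and the
 * symbol (or its interned string) becomes the first argument. If the
 * receiver only has the default method_missing, a NoMethodError is raised
 * right away instead of dispatching. */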
3144
3145static VALUE
3146vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3147{
3148 RB_DEBUG_COUNTER_INC(ccf_opt_send);
3149
3150 int i;
3151 VALUE sym;
3152
3153 CALLER_SETUP_ARG(reg_cfp, calling, calling->ci);
3154
3155 i = calling->argc - 1;
3156
3157 if (calling->argc == 0) {
3158 rb_raise(rb_eArgError, "no method name given");
3159 }
3160 else {
3161 sym = TOPN(i);
3162 /* E.g. when i == 2
3163 *
3164 * | | | | TOPN
3165 * +------+ | |
3166 * | arg1 | ---+ | | 0
3167 * +------+ | +------+
3168 * | arg0 | -+ +-> | arg1 | 1
3169 * +------+ | +------+
3170 * | sym | +---> | arg0 | 2
3171 * +------+ +------+
3172 * | recv | | recv | 3
3173 * --+------+--------+------+------
3174 */
3175 /* shift arguments */
3176 if (i > 0) {
3177 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
3178 }
3179 calling->argc -= 1;
3180 DEC_SP(1);
3181
3182 return vm_call_symbol(ec, reg_cfp, calling, calling->ci, sym);
3183 }
3184}
3185
3186static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
3187
3188NOINLINE(static VALUE
3189 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3190 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
3191
3192static VALUE
3193vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3194 struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
3195{
3196 int argc = calling->argc;
3197
3198 /* remove self */
3199 if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
3200 DEC_SP(1);
3201
3202 return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
3203}
3204
3205static VALUE
3206vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3207{
3208 RB_DEBUG_COUNTER_INC(ccf_opt_call);
3209
3210 const struct rb_callinfo *ci = calling->ci;
3211 VALUE procval = calling->recv;
3212 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
3213}
3214
3215static VALUE
3216vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3217{
3218 RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
3219
3220 VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
3221 const struct rb_callinfo *ci = calling->ci;
3222
3223 if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
3224 return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
3225 }
3226 else {
3227 calling->recv = rb_vm_bh_to_procval(ec, block_handler);
3228 calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
3229 return vm_call_general(ec, reg_cfp, calling);
3230 }
3231}
3232
3233static VALUE
3234vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
3235 const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
3236{
3237 RB_DEBUG_COUNTER_INC(ccf_method_missing);
3238
3239 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
3240 unsigned int argc;
3241
3242 CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
3243 argc = calling->argc + 1;
3244
3245 unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
3246 calling->argc = argc;
3247
3248 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
3249 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
3250 vm_check_canary(ec, reg_cfp->sp);
3251 if (argc > 1) {
3252 MEMMOVE(argv+1, argv, VALUE, argc-1);
3253 }
3254 argv[0] = ID2SYM(vm_ci_mid(orig_ci));
3255 INC_SP(1);
3256
3257 ec->method_missing_reason = reason;
3258 calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
3259 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
3260 rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
3261 return vm_call_method(ec, reg_cfp, calling);
3262}
3263
3264static VALUE
3265vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3266{
3267 return vm_call_method_missing_body(ec, reg_cfp, calling, calling->ci, vm_cc_cmethod_missing_reason(calling->cc));
3268}
3269
3270static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
3271static VALUE
3272vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
3273{
3274 klass = RCLASS_SUPER(klass);
3275
3276 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->ci)) : NULL;
3277 if (cme == NULL) {
3278 return vm_call_method_nome(ec, cfp, calling);
3279 }
3280 if (cme->def->type == VM_METHOD_TYPE_REFINED &&
3281 cme->def->body.refined.orig_me) {
3282 cme = refined_method_callable_without_refinement(cme);
3283 }
3284
3285 calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
3286
3287 return vm_call_method_each_type(ec, cfp, calling);
3288}
3289
3290static inline VALUE
3291find_refinement(VALUE refinements, VALUE klass)
3292{
3293 if (NIL_P(refinements)) {
3294 return Qnil;
3295 }
3296 return rb_hash_lookup(refinements, klass);
3297}
3298
3299PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
3300static rb_control_frame_t *
3301current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
3302{
3303 rb_control_frame_t *top_cfp = cfp;
3304
3305 if (cfp->iseq && cfp->iseq->body->type == ISEQ_TYPE_BLOCK) {
3306 const rb_iseq_t *local_iseq = cfp->iseq->body->local_iseq;
3307
3308 do {
3309 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3310 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
3311 /* TODO: orphan block */
3312 return top_cfp;
3313 }
3314 } while (cfp->iseq != local_iseq);
3315 }
3316 return cfp;
3317}
3318
3319static const rb_callable_method_entry_t *
3320refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
3321{
3322 const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
3323 const rb_callable_method_entry_t *cme;
3324
3325 if (orig_me->defined_class == 0) {
3326 cme = NULL;
3327 rb_notimplement();
3328 }
3329 else {
3330 cme = (const rb_callable_method_entry_t *)orig_me;
3331 }
3332
3333 VM_ASSERT(callable_method_entry_p(cme));
3334
3335 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
3336 cme = NULL;
3337 }
3338
3339 return cme;
3340}
3341
3342static const rb_callable_method_entry_t *
3343search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3344{
3345 ID mid = vm_ci_mid(calling->ci);
3346 const rb_cref_t *cref = vm_get_cref(cfp->ep);
3347 const struct rb_callcache * const cc = calling->cc;
3348 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
3349
3350 for (; cref; cref = CREF_NEXT(cref)) {
3351 const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
3352 if (NIL_P(refinement)) continue;
3353
3354 const rb_callable_method_entry_t *const ref_me =
3355 rb_callable_method_entry(refinement, mid);
3356
3357 if (ref_me) {
3358 if (vm_cc_call(cc) == vm_call_super_method) {
3359 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
3360 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
3361 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
3362 continue;
3363 }
3364 }
3365
3366 if (cme->def->type != VM_METHOD_TYPE_REFINED ||
3367 cme->def != ref_me->def) {
3368 cme = ref_me;
3369 }
3370 if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
3371 return cme;
3372 }
3373 }
3374 else {
3375 return NULL;
3376 }
3377 }
3378
3379 if (vm_cc_cme(cc)->def->body.refined.orig_me) {
3380 return refined_method_callable_without_refinement(vm_cc_cme(cc));
3381 }
3382 else {
3383 VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
3384 const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
3385 return cme;
3386 }
3387}
3388
3389static VALUE
3390vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3391{
3392 struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
3393 search_refined_method(ec, cfp, calling));
3394
3395 if (vm_cc_cme(ref_cc)) {
3396 calling->cc= ref_cc;
3397 return vm_call_method(ec, cfp, calling);
3398 }
3399 else {
3400 return vm_call_method_nome(ec, cfp, calling);
3401 }
3402}
3403
3404static VALUE
3405vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3406{
3407 const struct rb_callinfo *ci = calling->ci;
3408 const struct rb_callcache *cc = calling->cc;
3409
3410 switch (vm_cc_cme(cc)->def->type) {
3411 case VM_METHOD_TYPE_ISEQ:
3412 CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
3413 return vm_call_iseq_setup(ec, cfp, calling);
3414
3415 case VM_METHOD_TYPE_NOTIMPLEMENTED:
3416 case VM_METHOD_TYPE_CFUNC:
3417 CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
3418 return vm_call_cfunc(ec, cfp, calling);
3419
3420 case VM_METHOD_TYPE_ATTRSET:
3421 CALLER_SETUP_ARG(cfp, calling, ci);
3422 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3423
3424 rb_check_arity(calling->argc, 1, 1);
3425 vm_cc_attr_index_set(cc, 0);
3426 CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG)));
3427 return vm_call_attrset(ec, cfp, calling);
3428
3429 case VM_METHOD_TYPE_IVAR:
3430 CALLER_SETUP_ARG(cfp, calling, ci);
3431 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3432 rb_check_arity(calling->argc, 0, 0);
3433 vm_cc_attr_index_set(cc, 0);
3434 CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT)));
3435 return vm_call_ivar(ec, cfp, calling);
3436
3437 case VM_METHOD_TYPE_MISSING:
3438 vm_cc_method_missing_reason_set(cc, 0);
3439 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
3440 return vm_call_method_missing(ec, cfp, calling);
3441
3442 case VM_METHOD_TYPE_BMETHOD:
3443 CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
3444 return vm_call_bmethod(ec, cfp, calling);
3445
3446 case VM_METHOD_TYPE_ALIAS:
3447 CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
3448 return vm_call_alias(ec, cfp, calling);
3449
3450 case VM_METHOD_TYPE_OPTIMIZED:
3451 switch (vm_cc_cme(cc)->def->body.optimize_type) {
3452 case OPTIMIZED_METHOD_TYPE_SEND:
3453 CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
3454 return vm_call_opt_send(ec, cfp, calling);
3455 case OPTIMIZED_METHOD_TYPE_CALL:
3456 CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
3457 return vm_call_opt_call(ec, cfp, calling);
3458 case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
3459 CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
3460 return vm_call_opt_block_call(ec, cfp, calling);
3461 default:
3462 rb_bug("vm_call_method: unsupported optimized method type (%d)",
3463 vm_cc_cme(cc)->def->body.optimize_type);
3464 }
3465
3466 case VM_METHOD_TYPE_UNDEF:
3467 break;
3468
3469 case VM_METHOD_TYPE_ZSUPER:
3470 return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
3471
3472 case VM_METHOD_TYPE_REFINED:
3473 // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
3474 // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
3475 return vm_call_refined(ec, cfp, calling);
3476 }
3477
3478 rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
3479}
3480
3481NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
3482
3483static VALUE
3484vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3485{
3486 /* method missing */
3487 const struct rb_callinfo *ci = calling->ci;
3488 const int stat = ci_missing_reason(ci);
3489
3490 if (vm_ci_mid(ci) == idMethodMissing) {
3491 rb_control_frame_t *reg_cfp = cfp;
3492 VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
3493 vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
3494 }
3495 else {
3496 return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
3497 }
3498}
3499
3500static inline VALUE
3501vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
3502{
3503 const struct rb_callinfo *ci = calling->ci;
3504 const struct rb_callcache *cc = calling->cc;
3505
3506 VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
3507
3508 if (vm_cc_cme(cc) != NULL) {
3509 switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
3510 case METHOD_VISI_PUBLIC: /* likely */
3511 return vm_call_method_each_type(ec, cfp, calling);
3512
3513 case METHOD_VISI_PRIVATE:
3514 if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
3515 enum method_missing_reason stat = MISSING_PRIVATE;
3516 if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
3517
3518 vm_cc_method_missing_reason_set(cc, stat);
3519 CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
3520 return vm_call_method_missing(ec, cfp, calling);
3521 }
3522 return vm_call_method_each_type(ec, cfp, calling);
3523
3524 case METHOD_VISI_PROTECTED:
3525 if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) {
3526 if (!rb_obj_is_kind_of(cfp->self, vm_cc_cme(cc)->defined_class)) {
3527 vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
3528 return vm_call_method_missing(ec, cfp, calling);
3529 }
3530 else {
3531 /* caching method info to dummy cc */
3532 VM_ASSERT(vm_cc_cme(cc) != NULL);
3533 struct rb_callcache cc_on_stack = *cc;
3534 FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
3535 calling->cc = &cc_on_stack;
3536 return vm_call_method_each_type(ec, cfp, calling);
3537 }
3538 }
3539 return vm_call_method_each_type(ec, cfp, calling);
3540
3541 default:
3542 rb_bug("unreachable");
3543 }
3544 }
3545 else {
3546 return vm_call_method_nome(ec, cfp, calling);
3547 }
3548}
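/* Annotation: visibility dispatch above, roughly:
 *   public    - call directly;
 *   private   - only FCALL (receiver-less) call sites may proceed, others
 *               are routed to method_missing with MISSING_PRIVATE;
 *   protected - allowed when self is a kind of the defining class; the
 *               call cache is copied onto the stack so the protected check
 *               is not accidentally cached for other callers. */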
3549
3550static VALUE
3551vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3552{
3553 RB_DEBUG_COUNTER_INC(ccf_general);
3554 return vm_call_method(ec, reg_cfp, calling);
3555}
3556
3557static VALUE
3558vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
3559{
3560 RB_DEBUG_COUNTER_INC(ccf_super_method);
3561
3562 /* this check is required to distinguish this handler from other call handlers. */
3563 const struct rb_callcache *cc = calling->cc;
3564 if (vm_cc_call(cc) != vm_call_super_method) rb_bug("bug");
3565 return vm_call_method(ec, reg_cfp, calling);
3566}
3567
3568/* super */
3569
3570static inline VALUE
3571vm_search_normal_superclass(VALUE klass)
3572{
3573 if (BUILTIN_TYPE(klass) == T_ICLASS &&
3574 FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
3575 klass = RBASIC(klass)->klass;
3576 }
3577 klass = RCLASS_ORIGIN(klass);
3578 return RCLASS_SUPER(klass);
3579}
3580
3581NORETURN(static void vm_super_outside(void));
3582
3583static void
3584vm_super_outside(void)
3585{
3586 rb_raise(rb_eNoMethodError, "super called outside of method");
3587}
3588
3589static const struct rb_callcache *
3590vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
3591{
3592 VALUE current_defined_class;
3593 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
3594
3595 if (!me) {
3596 vm_super_outside();
3597 }
3598
3599 current_defined_class = me->defined_class;
3600
3601 if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) {
3602 current_defined_class = RCLASS_REFINED_CLASS(current_defined_class);
3603 }
3604
3605 if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
3606 !FL_TEST_RAW(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
3607 reg_cfp->iseq != method_entry_iseqptr(me) &&
3608 !rb_obj_is_kind_of(recv, current_defined_class)) {
3609 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
3610 RCLASS_INCLUDER(current_defined_class) : current_defined_class;
3611
3612 if (m) { /* not bound UnboundMethod */
3613 rb_raise(rb_eTypeError,
3614 "self has wrong type to call super in this context: "
3615 "%"PRIsVALUE" (expected %"PRIsVALUE")",
3616 rb_obj_class(recv), m);
3617 }
3618 }
3619
3620 if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
3621 rb_raise(rb_eRuntimeError,
3622 "implicit argument passing of super from method defined"
3623 " by define_method() is not supported."
3624 " Specify all arguments explicitly.");
3625 }
3626
3627 ID mid = me->def->original_id;
3628
3629 // update iseq. really? (TODO)
3630 cd->ci = vm_ci_new_runtime(mid,
3631 vm_ci_flag(cd->ci),
3632 vm_ci_argc(cd->ci),
3633 vm_ci_kwarg(cd->ci));
3634
3635 RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
3636
3637 const struct rb_callcache *cc;
3638
3639 VALUE klass = vm_search_normal_superclass(me->defined_class);
3640
3641 if (!klass) {
3642 /* bound instance method of module */
3643 cc = vm_cc_new(klass, NULL, vm_call_method_missing);
3644 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
3645 }
3646 else {
3647 cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
3648 const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
3649
3650 // define_method can cache for different method id
3651 if (cached_cme == NULL) {
3652 // temporary CC. revisit it
3653 static const struct rb_callcache *empty_cc_for_super = NULL;
3654 if (empty_cc_for_super == NULL) {
3655 empty_cc_for_super = vm_cc_new(0, NULL, vm_call_super_method);
3656 FL_SET_RAW((VALUE)empty_cc_for_super, VM_CALLCACHE_UNMARKABLE);
3657 rb_gc_register_mark_object((VALUE)empty_cc_for_super);
3658 }
3659 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc = empty_cc_for_super);
3660 }
3661 else if (cached_cme->called_id != mid) {
3662 const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
3663 cc = vm_cc_new(klass, cme, vm_call_super_method);
3664 RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
3665 }
3666 else {
3667 switch (cached_cme->def->type) {
3668 // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
3669 case VM_METHOD_TYPE_REFINED:
3670 // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
3671 case VM_METHOD_TYPE_ATTRSET:
3672 case VM_METHOD_TYPE_IVAR:
3673 vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
3674 break;
3675 default:
3676 break; // use fastpath
3677 }
3678 }
3679 }
3680
3681 return cc;
3682}
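/* Annotation: super dispatch searches from the superclass of the current
 * method's defined_class (via vm_search_normal_superclass), reusing the
 * ordinary method-search fastpath where possible, and deliberately pins
 * the cached handler back to vm_call_super_method for method types whose
 * fastpaths would be unsafe under invokesuper (see the switch above). */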
3683
3684/* yield */
3685
3686static inline int
3687block_proc_is_lambda(const VALUE procval)
3688{
3689 rb_proc_t *proc;
3690
3691 if (procval) {
3692 GetProcPtr(procval, proc);
3693 return proc->is_lambda;
3694 }
3695 else {
3696 return 0;
3697 }
3698}
3699
3700static VALUE
3701vm_yield_with_cfunc(rb_execution_context_t *ec,
3702 const struct rb_captured_block *captured,
3703 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
3704 const rb_callable_method_entry_t *me)
3705{
3706 int is_lambda = FALSE; /* TODO */
3707 VALUE val, arg, blockarg;
3708 int frame_flag;
3709 const struct vm_ifunc *ifunc = captured->code.ifunc;
3710
3711 if (is_lambda) {
3712 arg = rb_ary_new4(argc, argv);
3713 }
3714 else if (argc == 0) {
3715 arg = Qnil;
3716 }
3717 else {
3718 arg = argv[0];
3719 }
3720
3721 blockarg = rb_vm_bh_to_procval(ec, block_handler);
3722
3723 frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
3724 if (kw_splat) {
3725 frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
3726 }
3727
3728 vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
3729 frame_flag,
3730 self,
3731 VM_GUARDED_PREV_EP(captured->ep),
3732 (VALUE)me,
3733 0, ec->cfp->sp, 0, 0);
3734 val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
3735 rb_vm_pop_frame(ec);
3736
3737 return val;
3738}
3739
3740static VALUE
3741vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
3742{
3743 return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
3744}
3745
3746static inline int
3747vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
3748{
3749 int i;
3750 long len = RARRAY_LEN(ary);
3751
3752 CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
3753
3754 for (i=0; i<len && i<iseq->body->param.lead_num; i++) {
3755 argv[i] = RARRAY_AREF(ary, i);
3756 }
3757
3758 return i;
3759}
3760
3761static inline VALUE
3762vm_callee_setup_block_arg_arg0_check(VALUE *argv)
3763{
3764 VALUE ary, arg0 = argv[0];
3765 ary = rb_check_array_type(arg0);
3766#if 0
3767 argv[0] = arg0;
3768#else
3769 VM_ASSERT(argv[0] == arg0);
3770#endif
3771 return ary;
3772}
3773
3774static int
3775vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
3776{
3777 if (rb_simple_iseq_p(iseq)) {
3778 rb_control_frame_t *cfp = ec->cfp;
3779 VALUE arg0;
3780
3781 CALLER_SETUP_ARG(cfp, calling, ci);
3782 CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
3783
3784 if (arg_setup_type == arg_setup_block &&
3785 calling->argc == 1 &&
3786 iseq->body->param.flags.has_lead &&
3787 !iseq->body->param.flags.ambiguous_param0 &&
3788 !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
3789 calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
3790 }
3791
3792 if (calling->argc != iseq->body->param.lead_num) {
3793 if (arg_setup_type == arg_setup_block) {
3794 if (calling->argc < iseq->body->param.lead_num) {
3795 int i;
3796 CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
3797 for (i=calling->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
3798 calling->argc = iseq->body->param.lead_num; /* fill rest parameters */
3799 }
3800 else if (calling->argc > iseq->body->param.lead_num) {
3801 calling->argc = iseq->body->param.lead_num; /* simply truncate arguments */
3802 }
3803 }
3804 else {
3805 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
3806 }
3807 }
3808
3809 return 0;
3810 }
3811 else {
3812 return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
3813 }
3814}
3815
3816static int
3817vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int kw_splat, VALUE block_handler, enum arg_setup_type arg_setup_type)
3818{
3819 struct rb_calling_info calling_entry, *calling;
3820
3821 calling = &calling_entry;
3822 calling->argc = argc;
3823 calling->block_handler = block_handler;
3824 calling->kw_splat = kw_splat;
3825 calling->recv = Qundef;
3826 struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, (kw_splat ? VM_CALL_KW_SPLAT : 0), 0, 0);
3827
3828 return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
3829}
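/* Annotation: this is the entry point used when a block is invoked from C.
 * A dummy on-stack callinfo is built so the same vm_callee_setup_block_arg
 * logic as ordinary block invocation applies, including the auto-splat of
 * a single Array argument across a multi-parameter block. */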
3830
3831/* ruby iseq -> ruby block */
3832
3833static VALUE
3834vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3835 struct rb_calling_info *calling, const struct rb_callinfo *ci,
3836 bool is_lambda, VALUE block_handler)
3837{
3838 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
3839 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
3840 const int arg_size = iseq->body->param.size;
3841 VALUE * const rsp = GET_SP() - calling->argc;
3842 int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, rsp, is_lambda ? arg_setup_method : arg_setup_block);
3843
3844 SET_SP(rsp);
3845
3846 vm_push_frame(ec, iseq,
3847 VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
3848 captured->self,
3849 VM_GUARDED_PREV_EP(captured->ep), 0,
3850 iseq->body->iseq_encoded + opt_pc,
3851 rsp + arg_size,
3852 iseq->body->local_table_size - arg_size, iseq->body->stack_max);
3853
3854 return Qundef;
3855}
3856
3857static VALUE
3858vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3859 struct rb_calling_info *calling, const struct rb_callinfo *ci,
3860 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
3861{
3862 if (calling->argc < 1) {
3863 rb_raise(rb_eArgError, "no receiver given");
3864 }
3865 else {
3866 VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
3867 CALLER_SETUP_ARG(reg_cfp, calling, ci);
3868 calling->recv = TOPN(--calling->argc);
3869 return vm_call_symbol(ec, reg_cfp, calling, ci, symbol);
3870 }
3871}
3872
3873static VALUE
3874vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3875 struct rb_calling_info *calling, const struct rb_callinfo *ci,
3876 MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
3877{
3878 VALUE val;
3879 int argc;
3880 const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
3881 CALLER_SETUP_ARG(ec->cfp, calling, ci);
3882 CALLER_REMOVE_EMPTY_KW_SPLAT(ec->cfp, calling, ci);
3883 argc = calling->argc;
3884 val = vm_yield_with_cfunc(ec, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
3885 POPN(argc); /* TODO: should put before C/yield? */
3886 return val;
3887}
3888
3889static VALUE
3890vm_proc_to_block_handler(VALUE procval)
3891{
3892 const struct rb_block *block = vm_proc_block(procval);
3893
3894 switch (vm_block_type(block)) {
3895 case block_type_iseq:
3896 return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
3897 case block_type_ifunc:
3898 return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
3899 case block_type_symbol:
3900 return VM_BH_FROM_SYMBOL(block->as.symbol);
3901 case block_type_proc:
3902 return VM_BH_FROM_PROC(block->as.proc);
3903 }
3904 VM_UNREACHABLE(vm_yield_with_proc);
3905 return Qundef;
3906}
3907
3908static VALUE
3909vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3910 struct rb_calling_info *calling, const struct rb_callinfo *ci,
3911 bool is_lambda, VALUE block_handler)
3912{
3913 while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
3914 VALUE proc = VM_BH_TO_PROC(block_handler);
3915 is_lambda = block_proc_is_lambda(proc);
3916 block_handler = vm_proc_to_block_handler(proc);
3917 }
3918
3919 return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
3920}
3921
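/* Dispatch a block invocation according to the block handler type.
 * Each handler kind (iseq/ifunc/proc/symbol) has its own invoker above. */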
3922static inline VALUE
3923vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3924 struct rb_calling_info *calling, const struct rb_callinfo *ci,
3925 bool is_lambda, VALUE block_handler)
3926{
3927 VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
3928 struct rb_calling_info *calling, const struct rb_callinfo *ci,
3929 bool is_lambda, VALUE block_handler);
3930
3931 switch (vm_block_handler_type(block_handler)) {
3932 case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
3933 case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
3934 case block_handler_type_proc: func = vm_invoke_proc_block; break;
3935 case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
3936 default: rb_bug("vm_invoke_block: unreachable");
3937 }
3938
3939 return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
3940}
3941
3942static VALUE
3943vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
3944{
3945 const rb_execution_context_t *ec = GET_EC();
 3946 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
 3947 struct rb_captured_block *captured;
3948
3949 if (cfp == 0) {
3950 rb_bug("vm_make_proc_with_iseq: unreachable");
3951 }
3952
3953 captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
3954 captured->code.iseq = blockiseq;
3955
3956 return rb_vm_make_proc(ec, captured, rb_cProc);
3957}
3958
3959static VALUE
3960vm_once_exec(VALUE iseq)
3961{
3962 VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
3963 return rb_proc_call_with_block(proc, 0, 0, Qnil);
3964}
3965
3966static VALUE
3967vm_once_clear(VALUE data)
3968{
3969 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
3970 is->once.running_thread = NULL;
3971 return Qnil;
3972}
3973
3974/* defined insn */
3975
3976static enum defined_type
3977check_respond_to_missing(VALUE obj, VALUE v)
3978{
3979 VALUE args[2];
3980 VALUE r;
3981
3982 args[0] = obj; args[1] = Qfalse;
3983 r = rb_check_funcall(v, idRespond_to_missing, 2, args);
3984 if (r != Qundef && RTEST(r)) {
3985 return DEFINED_METHOD;
3986 }
3987 else {
3988 return DEFINED_NOT_DEFINED;
3989 }
3990}
3991
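/* Body of the `defined?` expression: op_type selects the kind of expression
 * being tested. When the expression is defined, returns the descriptive
 * string (if needstr) or Qtrue; otherwise returns Qnil. */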
3992static VALUE
3993vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE needstr, VALUE v)
3994{
3995 VALUE klass;
3996 enum defined_type expr_type = DEFINED_NOT_DEFINED;
3997 enum defined_type type = (enum defined_type)op_type;
3998
3999 switch (type) {
4000 case DEFINED_IVAR:
4001 if (rb_ivar_defined(GET_SELF(), SYM2ID(obj))) {
4002 expr_type = DEFINED_IVAR;
4003 }
4004 break;
4005 case DEFINED_IVAR2:
4006 klass = vm_get_cbase(GET_EP());
4007 break;
4008 case DEFINED_GVAR:
4009 if (rb_gvar_defined(SYM2ID(obj))) {
4010 expr_type = DEFINED_GVAR;
4011 }
4012 break;
4013 case DEFINED_CVAR: {
4014 const rb_cref_t *cref = vm_get_cref(GET_EP());
4015 klass = vm_get_cvar_base(cref, GET_CFP(), 0);
4016 if (rb_cvar_defined(klass, SYM2ID(obj))) {
4017 expr_type = DEFINED_CVAR;
4018 }
4019 break;
4020 }
4021 case DEFINED_CONST:
4022 case DEFINED_CONST_FROM: {
4023 bool allow_nil = type == DEFINED_CONST;
4024 klass = v;
4025 if (vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true)) {
4026 expr_type = DEFINED_CONST;
4027 }
4028 break;
4029 }
4030 case DEFINED_FUNC:
4031 klass = CLASS_OF(v);
4032 if (rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE)) {
4033 expr_type = DEFINED_METHOD;
4034 }
4035 break;
4036 case DEFINED_METHOD:{
4037 VALUE klass = CLASS_OF(v);
 4038 const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
 4039
 4040 if (me) {
 4041 switch (METHOD_ENTRY_VISI(me)) {
 4042 case METHOD_VISI_PRIVATE:
 4043 break;
 4044 case METHOD_VISI_PROTECTED:
 4045 if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
 4046 break;
 4047 }
4048 case METHOD_VISI_PUBLIC:
4049 expr_type = DEFINED_METHOD;
4050 break;
4051 default:
4052 rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
4053 }
4054 }
4055 else {
4056 expr_type = check_respond_to_missing(obj, v);
4057 }
4058 break;
4059 }
4060 case DEFINED_YIELD:
 4061 if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
 4062 expr_type = DEFINED_YIELD;
4063 }
4064 break;
4065 case DEFINED_ZSUPER:
4066 {
 4067 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
 4068
4069 if (me) {
4070 VALUE klass = vm_search_normal_superclass(me->defined_class);
4071 ID id = me->def->original_id;
4072
4073 if (rb_method_boundp(klass, id, 0)) {
4074 expr_type = DEFINED_ZSUPER;
4075 }
4076 }
4077 }
4078 break;
4079 case DEFINED_REF:{
4080 if (vm_getspecial(ec, GET_LEP(), Qfalse, FIX2INT(obj)) != Qnil) {
4081 expr_type = DEFINED_GVAR;
4082 }
4083 break;
4084 }
4085 default:
4086 rb_bug("unimplemented defined? type (VM)");
4087 break;
4088 }
4089
4090 if (expr_type != 0) {
4091 if (needstr != Qfalse) {
4092 return rb_iseq_defined_string(expr_type);
4093 }
4094 else {
4095 return Qtrue;
4096 }
4097 }
4098 else {
4099 return Qnil;
4100 }
4101}
4102
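/* Walk `lv` levels up the chain of environment pointers. */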
4103static const VALUE *
4104vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
4105{
4106 rb_num_t i;
4107 const VALUE *ep = reg_ep;
4108 for (i = 0; i < lv; i++) {
4109 ep = GET_PREV_EP(ep);
4110 }
4111 return ep;
4112}
4113
4114static VALUE
 4115vm_get_special_object(const VALUE *const reg_ep,
 4116 enum vm_special_object_type type)
 4117{
 4118 switch (type) {
 4119 case VM_SPECIAL_OBJECT_VMCORE:
 4120 return rb_mRubyVMFrozenCore;
 4121 case VM_SPECIAL_OBJECT_CBASE:
 4122 return vm_get_cbase(reg_ep);
 4123 case VM_SPECIAL_OBJECT_CONST_BASE:
 4124 return vm_get_const_base(reg_ep);
4125 default:
4126 rb_bug("putspecialobject insn: unknown value_type %d", type);
4127 }
4128}
4129
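/* concatarray: coerce both operands with to_a (wrapping a non-array operand
 * in a one-element array) and concatenate, duplicating ary1 when needed so
 * the original receiver is not mutated. */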
4130static VALUE
4131vm_concat_array(VALUE ary1, VALUE ary2st)
4132{
4133 const VALUE ary2 = ary2st;
4134 VALUE tmp1 = rb_check_to_array(ary1);
4135 VALUE tmp2 = rb_check_to_array(ary2);
4136
4137 if (NIL_P(tmp1)) {
4138 tmp1 = rb_ary_new3(1, ary1);
4139 }
4140
4141 if (NIL_P(tmp2)) {
4142 tmp2 = rb_ary_new3(1, ary2);
4143 }
4144
4145 if (tmp1 == ary1) {
4146 tmp1 = rb_ary_dup(ary1);
4147 }
4148 return rb_ary_concat(tmp1, tmp2);
4149}
4150
4151static VALUE
4152vm_splat_array(VALUE flag, VALUE ary)
4153{
4154 VALUE tmp = rb_check_to_array(ary);
4155 if (NIL_P(tmp)) {
4156 return rb_ary_new3(1, ary);
4157 }
4158 else if (RTEST(flag)) {
4159 return rb_ary_dup(tmp);
4160 }
4161 else {
4162 return tmp;
4163 }
4164}
4165
4166static VALUE
4167vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
4168{
 4169 enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
 4170
4171 if (flag & VM_CHECKMATCH_ARRAY) {
4172 long i;
4173 const long n = RARRAY_LEN(pattern);
4174
4175 for (i = 0; i < n; i++) {
4176 VALUE v = RARRAY_AREF(pattern, i);
4177 VALUE c = check_match(ec, v, target, type);
4178
4179 if (RTEST(c)) {
4180 return c;
4181 }
4182 }
4183 return Qfalse;
4184 }
4185 else {
4186 return check_match(ec, pattern, target, type);
4187 }
4188}
4189
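/* checkkeyword: returns Qtrue when the keyword at `idx` was NOT supplied by
 * the caller (so its default expression must be evaluated), consulting the
 * kw_bits Fixnum bitmap or the fallback Hash stored in the environment. */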
4190static VALUE
4191vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
4192{
4193 const VALUE kw_bits = *(ep - bits);
4194
4195 if (FIXNUM_P(kw_bits)) {
4196 unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
4197 if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
4198 return Qfalse;
4199 }
4200 else {
4201 VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
4202 if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
4203 }
4204 return Qtrue;
4205}
4206
4207static void
4208vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
4209{
4210 if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
4211 RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
4212 RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
4213 RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
4214
4215 switch (flag) {
 4216 case RUBY_EVENT_CALL:
 4217 RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
 4218 return;
 4219 case RUBY_EVENT_C_CALL:
 4220 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
 4221 return;
 4222 case RUBY_EVENT_RETURN:
 4223 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
 4224 return;
 4225 case RUBY_EVENT_C_RETURN:
 4226 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
 4227 return;
4228 }
4229 }
4230}
4231
4232static VALUE
4233vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
4234{
4235 VALUE ns;
4236
4237 if ((ns = vm_search_const_defined_class(cbase, id)) == 0) {
4238 return ns;
4239 }
4240 else if (VM_DEFINECLASS_SCOPED_P(flags)) {
4241 return rb_public_const_get_at(ns, id);
4242 }
4243 else {
4244 return rb_const_get_at(ns, id);
4245 }
4246}
4247
4248static VALUE
4249vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
4250{
4251 if (!RB_TYPE_P(klass, T_CLASS)) {
4252 return 0;
4253 }
4254 else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
4255 VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
4256
4257 if (tmp != super) {
 4258 rb_raise(rb_eTypeError,
 4259 "superclass mismatch for class %"PRIsVALUE"",
4260 rb_id2str(id));
4261 }
4262 else {
4263 return klass;
4264 }
4265 }
4266 else {
4267 return klass;
4268 }
4269}
4270
4271static VALUE
4272vm_check_if_module(ID id, VALUE mod)
4273{
4274 if (!RB_TYPE_P(mod, T_MODULE)) {
4275 return 0;
4276 }
4277 else {
4278 return mod;
4279 }
4280}
4281
4282static VALUE
4283declare_under(ID id, VALUE cbase, VALUE c)
4284{
4285 rb_set_class_path_string(c, cbase, rb_id2str(id));
4286 rb_const_set(cbase, id, c);
4287 return c;
4288}
4289
4290static VALUE
4291vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
4292{
4293 /* new class declaration */
 4294 VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
 4295 VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
 4296 rb_define_alloc_func(c, rb_get_alloc_func(c));
 4297 rb_class_inherited(s, c);
4298 return c;
4299}
4300
4301static VALUE
4302vm_declare_module(ID id, VALUE cbase)
4303{
4304 /* new module declaration */
4305 return declare_under(id, cbase, rb_module_new());
4306}
4307
4308NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
4309static void
4310unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
4311{
4312 VALUE name = rb_id2str(id);
4313 VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
4314 name, type);
4315 VALUE location = rb_const_source_location_at(cbase, id);
4316 if (!NIL_P(location)) {
4317 rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
4318 " previous definition of %"PRIsVALUE" was here",
4319 rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
4320 }
 4321 rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
 4322}
4323
4324static VALUE
4325vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
4326{
4327 VALUE klass;
4328
4329 if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
 4330 rb_raise(rb_eTypeError,
 4331 "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
4332 rb_obj_class(super));
4333 }
4334
4335 vm_check_if_namespace(cbase);
4336
4337 /* find klass */
4338 rb_autoload_load(cbase, id);
4339 if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
4340 if (!vm_check_if_class(id, flags, super, klass))
4341 unmatched_redefinition("class", cbase, id, klass);
4342 return klass;
4343 }
4344 else {
4345 return vm_declare_class(id, flags, cbase, super);
4346 }
4347}
4348
4349static VALUE
4350vm_define_module(ID id, rb_num_t flags, VALUE cbase)
4351{
4352 VALUE mod;
4353
4354 vm_check_if_namespace(cbase);
4355 if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
4356 if (!vm_check_if_module(id, mod))
4357 unmatched_redefinition("module", cbase, id, mod);
4358 return mod;
4359 }
4360 else {
4361 return vm_declare_module(id, cbase);
4362 }
4363}
4364
4365static VALUE
4366vm_find_or_create_class_by_id(ID id,
4367 rb_num_t flags,
4368 VALUE cbase,
4369 VALUE super)
4370{
 4371 rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
 4372
 4373 switch (type) {
 4374 case VM_DEFINECLASS_TYPE_CLASS:
 4375 /* classdef returns class scope value */
 4376 return vm_define_class(id, flags, cbase, super);
 4377
 4378 case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
 4379 /* classdef returns class scope value */
 4380 return rb_singleton_class(cbase);
 4381
 4382 case VM_DEFINECLASS_TYPE_MODULE:
 4383 /* classdef returns class scope value */
 4384 return vm_define_module(id, flags, cbase);
4385
4386 default:
4387 rb_bug("unknown defineclass type: %d", (int)type);
4388 }
4389}
4390
 4391static rb_method_visibility_t
 4392vm_scope_visibility_get(const rb_execution_context_t *ec)
 4393{
 4394 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
 4395
4396 if (!vm_env_cref_by_cref(cfp->ep)) {
4397 return METHOD_VISI_PUBLIC;
4398 }
4399 else {
4400 return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
4401 }
4402}
4403
4404static int
4405vm_scope_module_func_check(const rb_execution_context_t *ec)
4406{
 4407 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
 4408
4409 if (!vm_env_cref_by_cref(cfp->ep)) {
4410 return FALSE;
4411 }
4412 else {
4413 return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
4414 }
4415}
4416
4417static void
4418vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
4419{
4420 VALUE klass;
 4421 rb_method_visibility_t visi;
 4422 rb_cref_t *cref = vm_ec_cref(ec);
4423
4424 if (!is_singleton) {
4425 klass = CREF_CLASS(cref);
4426 visi = vm_scope_visibility_get(ec);
4427 }
4428 else { /* singleton */
4429 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
4430 visi = METHOD_VISI_PUBLIC;
4431 }
4432
4433 if (NIL_P(klass)) {
4434 rb_raise(rb_eTypeError, "no class/module to add method");
4435 }
4436
4437 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
4438
4439 if (!is_singleton && vm_scope_module_func_check(ec)) {
4440 klass = rb_singleton_class(klass);
4441 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
4442 }
4443}
4444
4445static VALUE
4446vm_invokeblock_i(struct rb_execution_context_struct *ec,
4447 struct rb_control_frame_struct *reg_cfp,
4448 struct rb_calling_info *calling)
4449{
4450 const struct rb_callinfo *ci = calling->ci;
4451 VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
4452
4453 if (block_handler == VM_BLOCK_HANDLER_NONE) {
4454 rb_vm_localjump_error("no block given (yield)", Qnil, 0);
4455 }
4456 else {
4457 return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
4458 }
4459}
4460
4461#ifdef MJIT_HEADER
4462static const struct rb_callcache *
4463vm_search_method_wrap(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
4464{
4465 return vm_search_method((VALUE)reg_cfp->iseq, cd, recv);
4466}
4467
4468static const struct rb_callcache *
4469vm_search_invokeblock(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
4470{
4471 static const struct rb_callcache cc = {
 4472 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
 4473 .klass = 0,
4474 .cme_ = 0,
4475 .call_ = vm_invokeblock_i,
4476 .aux_ = {0},
4477 };
4478 return &cc;
4479}
4480
4481# define mexp_search_method vm_search_method_wrap
4482# define mexp_search_super vm_search_super_method
4483# define mexp_search_invokeblock vm_search_invokeblock
4484#else
 4485enum method_explorer_type {
 4486 mexp_search_method,
 4487 mexp_search_invokeblock,
 4488 mexp_search_super,
 4489};
4490#endif
4491
4492static
4493#ifndef MJIT_HEADER
4494inline
4495#endif
4496VALUE
4497vm_sendish(
4498 struct rb_execution_context_struct *ec,
4499 struct rb_control_frame_struct *reg_cfp,
4500 struct rb_call_data *cd,
4501 VALUE block_handler,
4502#ifdef MJIT_HEADER
4503 const struct rb_callcache *(*method_explorer)(const struct rb_control_frame_struct *cfp, struct rb_call_data *cd, VALUE recv)
4504#else
4505 enum method_explorer_type method_explorer
4506#endif
4507) {
4508 VALUE val;
4509 const struct rb_callinfo *ci = cd->ci;
4510 const struct rb_callcache *cc;
4511 int argc = vm_ci_argc(ci);
4512 VALUE recv = TOPN(argc);
4513 struct rb_calling_info calling = {
 4514 .block_handler = block_handler,
 4515 .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
4516 .recv = recv,
4517 .argc = argc,
4518 .ci = ci,
4519 };
4520
4521// The enum-based branch and inlining are faster in VM, but function pointers without inlining are faster in JIT.
4522#ifdef MJIT_HEADER
4523 calling.cc = cc = method_explorer(GET_CFP(), cd, recv);
4524 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
4525#else
4526 switch (method_explorer) {
4527 case mexp_search_method:
4528 calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
4529 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
4530 break;
4531 case mexp_search_super:
4532 calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
 4533 calling.ci = cd->ci; // TODO: is this safe?
4534 val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
4535 break;
 4536 case mexp_search_invokeblock:
 4537 val = vm_invokeblock_i(ec, GET_CFP(), &calling);
4538 break;
4539 }
4540#endif
4541
4542 if (val != Qundef) {
4543 return val; /* CFUNC normal return */
4544 }
4545 else {
4546 RESTORE_REGS(); /* CFP pushed in cc->call() */
4547 }
4548
4549#ifdef MJIT_HEADER
 4550 /* When calling an ISeq that may catch an exception from JIT-ed
 4551 code, we should not call mjit_exec directly, to prevent the
 4552 caller frame from being canceled. That's because the caller
 4553 frame may have stack values in its local variables, and
 4554 cancelling the caller frame would purge them. But directly
 4555 calling mjit_exec is faster... */
4556 if (GET_ISEQ()->body->catch_except_p) {
4557 VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
4558 return vm_exec(ec, true);
4559 }
4560 else if ((val = mjit_exec(ec)) == Qundef) {
4561 VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
4562 return vm_exec(ec, false);
4563 }
4564 else {
4565 return val;
4566 }
4567#else
4568 /* When calling from VM, longjmp in the callee won't purge any
4569 JIT-ed caller frames. So it's safe to directly call
4570 mjit_exec. */
4571 return mjit_exec(ec);
4572#endif
4573}
4574
4575static VALUE
4576vm_opt_str_freeze(VALUE str, int bop, ID id)
4577{
4579 return str;
4580 }
4581 else {
4582 return Qundef;
4583 }
4584}
4585
4586/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
4587#define id_cmp idCmp
4588
4589static VALUE
4590vm_opt_newarray_max(rb_num_t num, const VALUE *ptr)
4591{
4593 if (num == 0) {
4594 return Qnil;
4595 }
4596 else {
4597 struct cmp_opt_data cmp_opt = { 0, 0 };
4598 VALUE result = *ptr;
4599 rb_snum_t i = num - 1;
4600 while (i-- > 0) {
4601 const VALUE v = *++ptr;
4602 if (OPTIMIZED_CMP(v, result, cmp_opt) > 0) {
4603 result = v;
4604 }
4605 }
4606 return result;
4607 }
4608 }
4609 else {
4610 VALUE ary = rb_ary_new4(num, ptr);
4611 return rb_funcall(ary, idMax, 0);
4612 }
4613}
4614
4615static VALUE
4616vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
4617{
4619 if (num == 0) {
4620 return Qnil;
4621 }
4622 else {
4623 struct cmp_opt_data cmp_opt = { 0, 0 };
4624 VALUE result = *ptr;
4625 rb_snum_t i = num - 1;
4626 while (i-- > 0) {
4627 const VALUE v = *++ptr;
4628 if (OPTIMIZED_CMP(v, result, cmp_opt) < 0) {
4629 result = v;
4630 }
4631 }
4632 return result;
4633 }
4634 }
4635 else {
4636 VALUE ary = rb_ary_new4(num, ptr);
4637 return rb_funcall(ary, idMin, 0);
4638 }
4639}
4640
4641#undef id_cmp
4642
4643#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
4644
4645// For MJIT inlining
4646static inline bool
4647vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, rb_serial_t ic_serial, const VALUE *reg_ep)
4648{
4649 if (ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
4650 ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
4651
4652 VM_ASSERT((flags & IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(value) : true);
4653
4654 return (ic_cref == NULL || // no need to check CREF
4655 ic_cref == vm_get_cref(reg_ep));
4656 }
4657 return false;
4658}
4659
4660static bool
4661vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
4662{
 4663 VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
 4664 return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, ice->ic_serial, reg_ep);
4665}
4666
4667static void
4668vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
4669{
 4670
 4671 struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
 4672 RB_OBJ_WRITE(ice, &ice->value, val);
 4673 ice->ic_cref = vm_get_const_key_cref(reg_ep);
 4674 ice->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
 4675 if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
 4676 ruby_vm_const_missing_count = 0;
 4677 RB_OBJ_WRITE(iseq, &ic->entry, ice);
4678}
4679
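/* once: run the block exactly one time per call site and cache its value in
 * the inline storage entry; concurrent callers wait until it finishes, and a
 * re-entrant call from the running thread executes the block again to avoid
 * deadlock. */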
4680static VALUE
4681vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
4682{
4683 rb_thread_t *th = rb_ec_thread_ptr(ec);
4684 rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
4685
4686 again:
4687 if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
4688 return is->once.value;
4689 }
4690 else if (is->once.running_thread == NULL) {
4691 VALUE val;
4692 is->once.running_thread = th;
4693 val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
4694 RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
4695 /* is->once.running_thread is cleared by vm_once_clear() */
4696 is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
4697 return val;
4698 }
4699 else if (is->once.running_thread == th) {
4700 /* recursive once */
4701 return vm_once_exec((VALUE)iseq);
4702 }
4703 else {
4704 /* waiting for finish */
 4705 RUBY_VM_CHECK_INTS(ec);
 4706 rb_thread_schedule();
 4707 goto again;
4708 }
4709}
4710
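/* opt_case_dispatch: try to resolve a case/when dispatch through the
 * precomputed CDHASH; returns the branch offset on a hit, else_offset on a
 * miss, and 0 to fall back to the ordinary sequence of === checks. */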
4711static OFFSET
4712vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
4713{
4714 switch (OBJ_BUILTIN_TYPE(key)) {
4715 case -1:
4716 case T_FLOAT:
4717 case T_SYMBOL:
4718 case T_BIGNUM:
4719 case T_STRING:
 4720 if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
 4721 SYMBOL_REDEFINED_OP_FLAG |
 4722 INTEGER_REDEFINED_OP_FLAG |
 4723 FLOAT_REDEFINED_OP_FLAG |
 4724 NIL_REDEFINED_OP_FLAG |
 4725 TRUE_REDEFINED_OP_FLAG |
 4726 FALSE_REDEFINED_OP_FLAG |
 4727 STRING_REDEFINED_OP_FLAG)) {
 4728 st_data_t val;
4729 if (RB_FLOAT_TYPE_P(key)) {
4730 double kval = RFLOAT_VALUE(key);
4731 if (!isinf(kval) && modf(kval, &kval) == 0.0) {
4732 key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
4733 }
4734 }
4735 if (rb_hash_stlike_lookup(hash, key, &val)) {
4736 return FIX2LONG((VALUE)val);
4737 }
4738 else {
4739 return else_offset;
4740 }
4741 }
4742 }
4743 return 0;
4744}
4745
4746NORETURN(static void
4747 vm_stack_consistency_error(const rb_execution_context_t *ec,
4748 const rb_control_frame_t *,
4749 const VALUE *));
4750static void
4751vm_stack_consistency_error(const rb_execution_context_t *ec,
4752 const rb_control_frame_t *cfp,
4753 const VALUE *bp)
4754{
4755 const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
4756 const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
4757 static const char stack_consistency_error[] =
4758 "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
4759#if defined RUBY_DEVEL
4760 VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
4761 rb_str_cat_cstr(mesg, "\n");
4762 rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
 4763 rb_bug("%s", StringValueCStr(mesg));
 4764#else
4765 rb_bug(stack_consistency_error, nsp, nbp);
4766#endif
4767}
4768
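/* The vm_opt_* helpers below are fast paths for redefinable core operators;
 * each returns Qundef to make the caller fall back to a normal method call
 * when the receiver/operand types do not match or the operator has been
 * redefined. */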
4769static VALUE
4770vm_opt_plus(VALUE recv, VALUE obj)
4771{
 4772 if (FIXNUM_2_P(recv, obj) &&
 4773 BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
 4774 return rb_fix_plus_fix(recv, obj);
 4775 }
 4776 else if (FLONUM_2_P(recv, obj) &&
 4777 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
 4778 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
 4779 }
 4780 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
 4781 return Qundef;
 4782 }
 4783 else if (RBASIC_CLASS(recv) == rb_cFloat &&
 4784 RBASIC_CLASS(obj) == rb_cFloat &&
 4785 BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
 4786 return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
 4787 }
 4788 else if (RBASIC_CLASS(recv) == rb_cString &&
 4789 RBASIC_CLASS(obj) == rb_cString &&
 4790 BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
 4791 return rb_str_opt_plus(recv, obj);
 4792 }
 4793 else if (RBASIC_CLASS(recv) == rb_cArray &&
 4794 RBASIC_CLASS(obj) == rb_cArray &&
 4795 BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
 4796 return rb_ary_plus(recv, obj);
 4797 }
4798 else {
4799 return Qundef;
4800 }
4801}
4802
4803static VALUE
4804vm_opt_minus(VALUE recv, VALUE obj)
4805{
4806 if (FIXNUM_2_P(recv, obj) &&
4808 return rb_fix_minus_fix(recv, obj);
4809 }
4810 else if (FLONUM_2_P(recv, obj) &&
4812 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
4813 }
4814 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4815 return Qundef;
4816 }
4817 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4818 RBASIC_CLASS(obj) == rb_cFloat &&
4820 return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
4821 }
4822 else {
4823 return Qundef;
4824 }
4825}
4826
4827static VALUE
4828vm_opt_mult(VALUE recv, VALUE obj)
4829{
4830 if (FIXNUM_2_P(recv, obj) &&
4832 return rb_fix_mul_fix(recv, obj);
4833 }
4834 else if (FLONUM_2_P(recv, obj) &&
4836 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
4837 }
4838 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4839 return Qundef;
4840 }
4841 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4842 RBASIC_CLASS(obj) == rb_cFloat &&
4844 return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
4845 }
4846 else {
4847 return Qundef;
4848 }
4849}
4850
4851static VALUE
4852vm_opt_div(VALUE recv, VALUE obj)
4853{
4854 if (FIXNUM_2_P(recv, obj) &&
4856 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
4857 }
4858 else if (FLONUM_2_P(recv, obj) &&
4860 return rb_flo_div_flo(recv, obj);
4861 }
4862 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4863 return Qundef;
4864 }
4865 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4866 RBASIC_CLASS(obj) == rb_cFloat &&
4868 return rb_flo_div_flo(recv, obj);
4869 }
4870 else {
4871 return Qundef;
4872 }
4873}
4874
4875static VALUE
4876vm_opt_mod(VALUE recv, VALUE obj)
4877{
4878 if (FIXNUM_2_P(recv, obj) &&
4880 return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
4881 }
4882 else if (FLONUM_2_P(recv, obj) &&
4884 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
4885 }
4886 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4887 return Qundef;
4888 }
4889 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4890 RBASIC_CLASS(obj) == rb_cFloat &&
4892 return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
4893 }
4894 else {
4895 return Qundef;
4896 }
4897}
4898
4899static VALUE
4900vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
4901{
4902 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
4903 VALUE val = opt_equality(iseq, recv, obj, cd_eq);
4904
4905 if (val != Qundef) {
4906 return RTEST(val) ? Qfalse : Qtrue;
4907 }
4908 }
4909
4910 return Qundef;
4911}
4912
4913static VALUE
4914vm_opt_lt(VALUE recv, VALUE obj)
4915{
4916 if (FIXNUM_2_P(recv, obj) &&
4918 return (SIGNED_VALUE)recv < (SIGNED_VALUE)obj ? Qtrue : Qfalse;
4919 }
4920 else if (FLONUM_2_P(recv, obj) &&
4922 return RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4923 }
4924 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4925 return Qundef;
4926 }
4927 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4928 RBASIC_CLASS(obj) == rb_cFloat &&
4931 return RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4932 }
4933 else {
4934 return Qundef;
4935 }
4936}
4937
4938static VALUE
4939vm_opt_le(VALUE recv, VALUE obj)
4940{
4941 if (FIXNUM_2_P(recv, obj) &&
4943 return (SIGNED_VALUE)recv <= (SIGNED_VALUE)obj ? Qtrue : Qfalse;
4944 }
4945 else if (FLONUM_2_P(recv, obj) &&
4947 return RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4948 }
4949 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4950 return Qundef;
4951 }
4952 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4953 RBASIC_CLASS(obj) == rb_cFloat &&
4956 return RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4957 }
4958 else {
4959 return Qundef;
4960 }
4961}
4962
4963static VALUE
4964vm_opt_gt(VALUE recv, VALUE obj)
4965{
4966 if (FIXNUM_2_P(recv, obj) &&
4968 return (SIGNED_VALUE)recv > (SIGNED_VALUE)obj ? Qtrue : Qfalse;
4969 }
4970 else if (FLONUM_2_P(recv, obj) &&
4972 return RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4973 }
4974 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
4975 return Qundef;
4976 }
4977 else if (RBASIC_CLASS(recv) == rb_cFloat &&
4978 RBASIC_CLASS(obj) == rb_cFloat &&
4981 return RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4982 }
4983 else {
4984 return Qundef;
4985 }
4986}
4987
4988static VALUE
4989vm_opt_ge(VALUE recv, VALUE obj)
4990{
4991 if (FIXNUM_2_P(recv, obj) &&
4993 return (SIGNED_VALUE)recv >= (SIGNED_VALUE)obj ? Qtrue : Qfalse;
4994 }
4995 else if (FLONUM_2_P(recv, obj) &&
4997 return RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
4998 }
4999 else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
5000 return Qundef;
5001 }
5002 else if (RBASIC_CLASS(recv) == rb_cFloat &&
5003 RBASIC_CLASS(obj) == rb_cFloat &&
5006 return RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
5007 }
5008 else {
5009 return Qundef;
5010 }
5011}
5012
5013
5014static VALUE
5015vm_opt_ltlt(VALUE recv, VALUE obj)
5016{
5017 if (SPECIAL_CONST_P(recv)) {
5018 return Qundef;
5019 }
5020 else if (RBASIC_CLASS(recv) == rb_cString &&
5022 return rb_str_concat(recv, obj);
5023 }
5024 else if (RBASIC_CLASS(recv) == rb_cArray &&
5026 return rb_ary_push(recv, obj);
5027 }
5028 else {
5029 return Qundef;
5030 }
5031}
5032
5033static VALUE
5034vm_opt_and(VALUE recv, VALUE obj)
5035{
5036 if (FIXNUM_2_P(recv, obj) &&
 5037 BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
 5038 return (recv & obj) | 1;
5039 }
5040 else {
5041 return Qundef;
5042 }
5043}
5044
5045static VALUE
5046vm_opt_or(VALUE recv, VALUE obj)
5047{
5048 if (FIXNUM_2_P(recv, obj) &&
 5049 BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
 5050 return recv | obj;
5051 }
5052 else {
5053 return Qundef;
5054 }
5055}
5056
5057static VALUE
5058vm_opt_aref(VALUE recv, VALUE obj)
5059{
5060 if (SPECIAL_CONST_P(recv)) {
5061 if (FIXNUM_2_P(recv, obj) &&
5063 return rb_fix_aref(recv, obj);
5064 }
5065 return Qundef;
5066 }
5067 else if (RBASIC_CLASS(recv) == rb_cArray &&
5069 if (FIXNUM_P(obj)) {
5070 return rb_ary_entry_internal(recv, FIX2LONG(obj));
5071 }
5072 else {
5073 return rb_ary_aref1(recv, obj);
5074 }
5075 }
5076 else if (RBASIC_CLASS(recv) == rb_cHash &&
5078 return rb_hash_aref(recv, obj);
5079 }
5080 else {
5081 return Qundef;
5082 }
5083}
5084
5085static VALUE
5086vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
5087{
5088 if (SPECIAL_CONST_P(recv)) {
5089 return Qundef;
5090 }
5091 else if (RBASIC_CLASS(recv) == rb_cArray &&
5093 FIXNUM_P(obj)) {
5094 rb_ary_store(recv, FIX2LONG(obj), set);
5095 return set;
5096 }
5097 else if (RBASIC_CLASS(recv) == rb_cHash &&
5099 rb_hash_aset(recv, obj, set);
5100 return set;
5101 }
5102 else {
5103 return Qundef;
5104 }
5105}
5106
5107static VALUE
5108vm_opt_aref_with(VALUE recv, VALUE key)
5109{
5110 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
5113 return rb_hash_aref(recv, key);
5114 }
5115 else {
5116 return Qundef;
5117 }
5118}
5119
5120static VALUE
5121vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
5122{
5123 if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
5126 return rb_hash_aset(recv, key, val);
5127 }
5128 else {
5129 return Qundef;
5130 }
5131}
5132
5133static VALUE
5134vm_opt_length(VALUE recv, int bop)
5135{
5136 if (SPECIAL_CONST_P(recv)) {
5137 return Qundef;
5138 }
5139 else if (RBASIC_CLASS(recv) == rb_cString &&
5141 if (bop == BOP_EMPTY_P) {
5142 return LONG2NUM(RSTRING_LEN(recv));
5143 }
5144 else {
5145 return rb_str_length(recv);
5146 }
5147 }
5148 else if (RBASIC_CLASS(recv) == rb_cArray &&
5150 return LONG2NUM(RARRAY_LEN(recv));
5151 }
5152 else if (RBASIC_CLASS(recv) == rb_cHash &&
5154 return INT2FIX(RHASH_SIZE(recv));
5155 }
5156 else {
5157 return Qundef;
5158 }
5159}
5160
5161static VALUE
5162vm_opt_empty_p(VALUE recv)
5163{
5164 switch (vm_opt_length(recv, BOP_EMPTY_P)) {
5165 case Qundef: return Qundef;
5166 case INT2FIX(0): return Qtrue;
5167 default: return Qfalse;
5168 }
5169}
5170
5171VALUE rb_false(VALUE obj);
5172
5173static VALUE
5174vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
5175{
5176 if (recv == Qnil &&
 5177 BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
 5178 return Qtrue;
5179 }
5180 else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
5181 return Qfalse;
5182 }
5183 else {
5184 return Qundef;
5185 }
5186}
5187
5188static VALUE
5189fix_succ(VALUE x)
5190{
5191 switch (x) {
5192 case ~0UL:
5193 /* 0xFFFF_FFFF == INT2FIX(-1)
5194 * `-1.succ` is of course 0. */
5195 return INT2FIX(0);
5196 case RSHIFT(~0UL, 1):
5197 /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
5198 * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
5199 return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
5200 default:
5201 /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
5202 * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
5203 * == lx*2 + ly*2 + 1
5204 * == (lx*2+1) + (ly*2+1) - 1
5205 * == x + y - 1
5206 *
5207 * Here, if we put y := INT2FIX(1):
5208 *
5209 * == x + INT2FIX(1) - 1
5210 * == x + 2 .
5211 */
5212 return x + 2;
5213 }
5214}
5215
5216static VALUE
5217vm_opt_succ(VALUE recv)
5218{
5219 if (FIXNUM_P(recv) &&
5221 return fix_succ(recv);
5222 }
5223 else if (SPECIAL_CONST_P(recv)) {
5224 return Qundef;
5225 }
5226 else if (RBASIC_CLASS(recv) == rb_cString &&
5228 return rb_str_succ(recv);
5229 }
5230 else {
5231 return Qundef;
5232 }
5233}
5234
5235static VALUE
5236vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
5237{
5238 if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
5239 return RTEST(recv) ? Qfalse : Qtrue;
5240 }
5241 else {
5242 return Qundef;
5243 }
5244}
5245
5246static VALUE
5247vm_opt_regexpmatch2(VALUE recv, VALUE obj)
5248{
5249 if (SPECIAL_CONST_P(recv)) {
5250 return Qundef;
5251 }
5252 else if (RBASIC_CLASS(recv) == rb_cString &&
5253 CLASS_OF(obj) == rb_cRegexp &&
5255 return rb_reg_match(obj, recv);
5256 }
5257 else if (RBASIC_CLASS(recv) == rb_cRegexp &&
5259 return rb_reg_match(recv, obj);
5260 }
5261 else {
5262 return Qundef;
5263 }
5264}
5265
5266rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
5267
5268NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
5269
5270static inline void
5271vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
5272 rb_event_flag_t pc_events, rb_event_flag_t target_event,
5273 rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
5274{
5275 rb_event_flag_t event = pc_events & target_event;
5276 VALUE self = GET_SELF();
5277
5278 VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
5279
5280 if (event & global_hooks->events) {
5281 /* increment PC because source line is calculated with PC-1 */
5282 reg_cfp->pc++;
5283 vm_dtrace(event, ec);
5284 rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
5285 reg_cfp->pc--;
5286 }
5287
5288 if (local_hooks != NULL) {
5289 if (event & local_hooks->events) {
5290 /* increment PC because source line is calculated with PC-1 */
5291 reg_cfp->pc++;
5292 rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
5293 reg_cfp->pc--;
5294 }
5295 }
5296}
5297
5298// Return true if given cc has cfunc which is NOT handled by opt_send_without_block.
5299bool
5301{
5302 switch (insn) {
5303 case BIN(opt_eq):
5304 return check_cfunc(vm_cc_cme(cc), rb_obj_equal);
5305 case BIN(opt_nil_p):
5306 return check_cfunc(vm_cc_cme(cc), rb_false);
5307 case BIN(opt_not):
5308 return check_cfunc(vm_cc_cme(cc), rb_obj_not);
5309 default:
5310 return false;
5311 }
5312}
5313
5314#define VM_TRACE_HOOK(target_event, val) do { \
5315 if ((pc_events & (target_event)) & enabled_flags) { \
5316 vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
5317 } \
5318} while (0)
5319
5320static void
5321vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
5322{
5323 const VALUE *pc = reg_cfp->pc;
 5324 rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
 5325
5326 if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
5327 return;
5328 }
5329 else {
5330 const rb_iseq_t *iseq = reg_cfp->iseq;
5331 size_t pos = pc - iseq->body->iseq_encoded;
5332 rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
5333 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
5334 rb_event_flag_t local_hook_events = local_hooks != NULL ? local_hooks->events : 0;
5335 enabled_flags |= local_hook_events;
5336
5337 VM_ASSERT((local_hook_events & ~ISEQ_TRACE_EVENTS) == 0);
5338
5339 if ((pc_events & enabled_flags) == 0) {
5340#if 0
5341 /* disable trace */
5342 /* TODO: incomplete */
5343 rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
5344#else
5345 /* do not disable trace because of performance problem
5346 * (re-enable overhead)
5347 */
5348#endif
5349 return;
5350 }
5351 else if (ec->trace_arg != NULL) {
5352 /* already tracing */
5353 return;
5354 }
5355 else {
5356 rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
5357
5358 if (0) {
5359 fprintf(stderr, "vm_trace>>%4d (%4x) - %s:%d %s\n",
5360 (int)pos,
5361 (int)pc_events,
 5362 RSTRING_PTR(rb_iseq_path(iseq)),
 5363 (int)rb_iseq_line_no(iseq, pos),
5364 RSTRING_PTR(rb_iseq_label(iseq)));
5365 }
5366 VM_ASSERT(reg_cfp->pc == pc);
5367 VM_ASSERT(pc_events != 0);
5368 VM_ASSERT(enabled_flags & pc_events);
5369
5370 /* check traces */
 5371 VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
 5372 VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
 5373 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
 5374 VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
 5375 VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
 5376 }
5377 }
5378}
5379
5380#if VM_CHECK_MODE > 0
 5381NORETURN( NOINLINE( COLDFUNC
 5382void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));
5383
5384void
 5385Init_vm_stack_canary(void)
 5386{
5387 /* This has to be called _after_ our PRNG is properly set up. */
5388 int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
5389 vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
5390
5391 vm_stack_canary_was_born = true;
5392 VM_ASSERT(n == 0);
5393}
5394
5395#ifndef MJIT_HEADER
 5396MJIT_FUNC_EXPORTED void
 5397rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
5398{
5399 /* Because a method has already been called, why not call
5400 * another one. */
5401 const char *insn = rb_insns_name(i);
5402 VALUE inspection = rb_inspect(c);
5403 const char *str = StringValueCStr(inspection);
5404
5405 rb_bug("dead canary found at %s: %s", insn, str);
5406}
5407#endif
5408
5409#else
5410void Init_vm_stack_canary(void) { /* nothing to do */ }
5411#endif
5412
5413
5414/* a part of the following code is generated by this ruby script:
5415
541616.times{|i|
5417 typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
5418 typedef_args.prepend(", ") if i != 0
5419 call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
5420 call_args.prepend(", ") if i != 0
5421 puts %Q{
5422static VALUE
5423builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5424{
5425 typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
5426 return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
5427}}
5428}
5429
5430puts
5431puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
543216.times{|i|
5433 puts " builtin_invoker#{i},"
5434}
5435puts "};"
5436*/
5437
5438static VALUE
5439builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5440{
5441 typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
5442 return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
5443}
5444
5445static VALUE
5446builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5447{
5448 typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
5449 return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
5450}
5451
5452static VALUE
5453builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5454{
5455 typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
5456 return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
5457}
5458
5459static VALUE
5460builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5461{
5462 typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
5463 return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
5464}
5465
5466static VALUE
5467builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5468{
5469 typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
5470 return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
5471}
5472
5473static VALUE
5474builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5475{
5476 typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
5477 return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
5478}
5479
5480static VALUE
5481builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5482{
5483 typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
5484 return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
5485}
5486
5487static VALUE
5488builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5489{
5490 typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
5491 return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
5492}
5493
5494static VALUE
5495builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5496{
5497 typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
5498 return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
5499}
5500
5501static VALUE
5502builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5503{
5504 typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
5505 return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
5506}
5507
5508static VALUE
5509builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5510{
5511 typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
5512 return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
5513}
5514
5515static VALUE
5516builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5517{
5518 typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
5519 return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
5520}
5521
5522static VALUE
5523builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5524{
5525 typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
5526 return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
5527}
5528
5529static VALUE
5530builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5531{
5532 typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
5533 return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
5534}
5535
5536static VALUE
5537builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5538{
5539 typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
5540 return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
5541}
5542
5543static VALUE
5544builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
5545{
5546 typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
5547 return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
5548}
5549
 5550typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
 5551
5552static builtin_invoker
5553lookup_builtin_invoker(int argc)
5554{
5555 static const builtin_invoker invokers[] = {
5556 builtin_invoker0,
5557 builtin_invoker1,
5558 builtin_invoker2,
5559 builtin_invoker3,
5560 builtin_invoker4,
5561 builtin_invoker5,
5562 builtin_invoker6,
5563 builtin_invoker7,
5564 builtin_invoker8,
5565 builtin_invoker9,
5566 builtin_invoker10,
5567 builtin_invoker11,
5568 builtin_invoker12,
5569 builtin_invoker13,
5570 builtin_invoker14,
5571 builtin_invoker15,
5572 };
5573
5574 return invokers[argc];
5575}
5576
5577static inline VALUE
5578invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
5579{
5580 const bool canary_p = reg_cfp->iseq->body->builtin_inline_p; // Verify an assumption of `Primitive.attr! 'inline'`
5581 SETUP_CANARY(canary_p);
5582 VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, (rb_insn_func_t)bf->func_ptr);
5583 CHECK_CANARY(canary_p, BIN(invokebuiltin));
5584 return ret;
5585}
5586
5587static VALUE
5588vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
5589{
5590 return invoke_bf(ec, cfp, bf, argv);
5591}
5592
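/* Pass a slice of the current frame's local variables, starting at
 * start_index, directly to the builtin function (used by the
 * opt_invokebuiltin_delegate instructions). */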
5593static VALUE
5594vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
5595{
5596 if (0) { // debug print
5597 fprintf(stderr, "vm_invoke_builtin_delegate: passing -> ");
5598 for (int i=0; i<bf->argc; i++) {
5599 fprintf(stderr, ":%s ", rb_id2name(cfp->iseq->body->local_table[i+start_index]));
5600 }
5601 fprintf(stderr, "\n");
5602 fprintf(stderr, "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc, bf->func_ptr);
5603 }
5604
5605 if (bf->argc == 0) {
5606 return invoke_bf(ec, cfp, bf, NULL);
5607 }
5608 else {
5609 const VALUE *argv = cfp->ep - cfp->iseq->body->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
5610 return invoke_bf(ec, cfp, bf, argv);
5611 }
5612}
5613
5614// for __builtin_inline!()
5615
 5616VALUE
 5617rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
 5618{
5619 const rb_control_frame_t *cfp = ec->cfp;
5620 return cfp->ep[index];
5621}
void rb_ary_store(VALUE ary, long idx, VALUE val)
Definition: array.c:1141
VALUE rb_check_to_array(VALUE ary)
Definition: array.c:994
VALUE rb_cArray
Definition: array.c:40
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:2666
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1301
VALUE rb_ary_aref1(VALUE ary, VALUE arg)
Definition: array.c:1823
VALUE rb_ary_new(void)
Definition: array.c:749
VALUE rb_ary_concat(VALUE x, VALUE y)
Definition: array.c:4859
VALUE rb_check_array_type(VALUE ary)
Definition: array.c:988
VALUE rb_ary_plus(VALUE x, VALUE y)
Definition: array.c:4800
VALUE rb_ary_entry(VALUE ary, long offset)
Definition: array.c:1672
#define ALWAYS_INLINE(x)
Definition: attributes.h:86
#define COLDFUNC
Definition: attributes.h:110
#define NOINLINE(x)
Definition: attributes.h:82
#define PUREFUNC(x)
Definition: attributes.h:54
#define NORETURN(x)
Definition: attributes.h:152
#define ASSUME
Definition: assume.h:29
VALUE rb_uint2big(uintptr_t n)
Definition: bignum.c:3164
VALUE rb_dbl2big(double d)
Definition: bignum.c:5248
#define local
Definition: blast.c:36
int bits(struct state *s, int need)
Definition: blast.c:72
#define CHECK(sub)
Definition: compile.c:429
const char * rb_insns_name(int i)
Definition: compile.c:9172
VALUE * rb_iseq_original_iseq(const rb_iseq_t *iseq)
Definition: compile.c:863
#define OBJ_BUILTIN_TYPE(obj)
Definition: compilers.h:68
int rb_autoloading_value(VALUE mod, ID id, VALUE *value, rb_const_flag_t *flag)
Definition: variable.c:2299
VALUE rb_public_const_get_at(VALUE klass, ID id)
Definition: variable.c:2642
rb_const_entry_t * rb_const_lookup(VALUE klass, ID id)
Definition: variable.c:3614
int rb_public_const_defined_from(VALUE klass, ID id)
Definition: variable.c:2940
VALUE rb_const_source_location_at(VALUE, ID)
Definition: variable.c:2700
VALUE rb_public_const_get_from(VALUE klass, ID id)
Definition: variable.c:2636
#define mod(x, y)
Definition: date_strftime.c:28
struct RIMemo * ptr
Definition: debug.c:88
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond)
#define RB_DEBUG_COUNTER_SETMAX(type, num)
#define RB_DEBUG_COUNTER_INC(type)
#define check_match(s, start, match, length)
Definition: deflate.c:1514
#define MJIT_STATIC
Definition: dllexport.h:71
#define MJIT_FUNC_EXPORTED
Definition: dllexport.h:55
#define RFLOAT_VALUE
Definition: double.h:28
#define DBL2NUM
Definition: double.h:29
#define d1
big_t * num
Definition: enough.c:232
int max
Definition: enough.c:225
#define sym(name)
Definition: enumerator.c:4007
uint8_t len
Definition: escape.c:17
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
#define rb_ec_raised_p(ec, f)
Definition: eval_intern.h:272
VALUE rb_ec_backtrace_object(const rb_execution_context_t *ec)
Definition: vm_backtrace.c:770
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:196
void rb_vm_localjump_error(const char *, VALUE, int)
Definition: vm.c:1711
#define rb_ec_raised_reset(ec, f)
Definition: eval_intern.h:271
@ RAISED_STACKOVERFLOW
Definition: eval_intern.h:267
#define RUBY_EVENT_END
Definition: event.h:32
#define RUBY_EVENT_C_CALL
Definition: event.h:35
#define RUBY_EVENT_B_RETURN
Definition: event.h:42
#define RUBY_EVENT_CLASS
Definition: event.h:31
#define RUBY_EVENT_LINE
Definition: event.h:30
#define RUBY_EVENT_RETURN
Definition: event.h:34
#define RUBY_EVENT_C_RETURN
Definition: event.h:36
#define RUBY_EVENT_B_CALL
Definition: event.h:41
uint32_t rb_event_flag_t
Definition: event.h:66
#define RUBY_EVENT_CALL
Definition: event.h:33
#define RSTRING_LEN(string)
Definition: fbuffer.h:22
#define RSTRING_PTR(string)
Definition: fbuffer.h:19
#define MAYBE_UNUSED
Definition: ffi_common.h:30
#define UNLIKELY(x)
Definition: ffi_common.h:126
#define LIKELY(x)
Definition: ffi_common.h:125
#define FL_SINGLETON
Definition: fl_type.h:49
#define FL_EXIVAR
Definition: fl_type.h:58
#define FL_USHIFT
Definition: fl_type.h:61
#define PRIsVALUE
Definition: function.c:10
int rb_during_gc(void)
Definition: gc.c:9505
void rb_gc_verify_internal_consistency(void)
Definition: gc.c:7137
const char * rb_obj_info(VALUE obj)
Definition: gc.c:12499
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
Definition: gc.c:2649
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2412
void rb_gc_writebarrier_remember(VALUE obj)
Definition: gc.c:7814
void rb_gc_register_mark_object(VALUE obj)
Inform the garbage collector that object is a live Ruby object that should not be moved.
Definition: gc.c:8022
#define CLASS_OF
Definition: globals.h:153
VALUE rb_cRegexp
Definition: re.c:2301
VALUE rb_cFloat
Definition: numeric.c:190
VALUE rb_cProc
Definition: proc.c:46
VALUE rb_cString
Definition: string.c:80
VALUE rb_singleton_class(VALUE obj)
Returns the singleton class of obj.
Definition: class.c:1924
VALUE rb_module_new(void)
Definition: class.c:856
VALUE rb_class_inherited(VALUE super, VALUE klass)
Calls Class::inherited.
Definition: class.c:722
VALUE rb_define_class_id(ID id, VALUE super)
Defines a new class.
Definition: class.c:701
#define FL_TEST_RAW
Definition: fl_type.h:131
#define FL_TEST
Definition: fl_type.h:130
#define FL_SET_RAW
Definition: fl_type.h:129
void rb_notimplement(void)
Definition: error.c:2960
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2917
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:712
void rb_bug(const char *fmt,...)
Definition: error.c:768
VALUE rb_eTypeError
Definition: error.c:1057
VALUE rb_eFatal
Definition: error.c:1053
VALUE rb_eNoMethodError
Definition: error.c:1065
void rb_exc_fatal(VALUE mesg)
Raises a fatal error in the current thread.
Definition: eval.c:728
VALUE rb_eRuntimeError
Definition: error.c:1055
void rb_warn(const char *fmt,...)
Definition: error.c:408
VALUE rb_exc_new_str(VALUE etype, VALUE str)
Definition: error.c:1107
VALUE rb_eArgError
Definition: error.c:1058
VALUE rb_iseqw_new(const rb_iseq_t *)
Definition: iseq.c:1217
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1148
VALUE rb_cClass
Class class.
Definition: object.c:51
VALUE rb_obj_not_equal(VALUE obj1, VALUE obj2)
Definition: object.c:220
VALUE rb_cObject
Object class.
Definition: object.c:49
VALUE rb_obj_alloc(VALUE)
Allocates an instance of klass.
Definition: object.c:1900
VALUE rb_obj_equal(VALUE obj1, VALUE obj2)
Definition: object.c:197
VALUE rb_obj_class(VALUE)
Definition: object.c:245
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
Definition: object.c:585
VALUE rb_cBasicObject
BasicObject class.
Definition: object.c:47
VALUE rb_cModule
Module class.
Definition: object.c:50
VALUE rb_class_real(VALUE)
Looks up the nearest ancestor of cl, skipping singleton classes or module inclusions.
Definition: object.c:235
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
Definition: object.c:724
VALUE rb_obj_not(VALUE obj)
Definition: object.c:210
VALUE rb_false(VALUE obj)
Definition: object.c:1395
void rb_obj_copy_ivar(VALUE dest, VALUE obj)
Definition: object.c:275
VALUE rb_to_hash_type(VALUE hash)
Definition: hash.c:1853
VALUE rb_hash_has_key(VALUE hash, VALUE key)
Definition: hash.c:3638
VALUE rb_cHash
Definition: hash.c:106
VALUE rb_hash_compare_by_id_p(VALUE hash)
Definition: hash.c:4432
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:2046
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2901
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Definition: hash.c:2072
VALUE rb_hash_dup(VALUE hash)
Definition: hash.c:1579
int rb_hash_stlike_lookup(VALUE hash, st_data_t key, st_data_t *pval)
Definition: hash.c:2026
void *PTR64 __attribute__((mode(DI)))
Definition: ffi.c:41
@ idEq
Definition: id.h:96
@ idRespond_to_missing
Definition: id.h:117
@ idEqq
Definition: id.h:97
@ idEqlP
Definition: id.h:115
int rb_id_table_insert(struct rb_id_table *tbl, ID id, VALUE val)
Definition: id_table.c:257
int rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
Definition: id_table.c:227
struct rb_id_table * rb_id_table_create(size_t capa)
Definition: id_table.c:96
int rb_id_table_delete(struct rb_id_table *tbl, ID id)
Definition: id_table.c:263
#define IMEMO_TYPE_P(v, t)
Definition: imemo.h:178
#define THROW_DATA_P(err)
Definition: imemo.h:120
imemo_type
Definition: imemo.h:34
@ imemo_ment
Definition: imemo.h:41
@ imemo_env
Definition: imemo.h:35
@ imemo_callcache
Definition: imemo.h:47
@ imemo_cref
class reference
Definition: imemo.h:36
@ imemo_svar
special variable
Definition: imemo.h:37
@ imemo_constcache
Definition: imemo.h:48
#define FIXABLE
Definition: fixnum.h:25
Thin wrapper to ruby/config.h.
VALUE rb_funcall(VALUE, ID, int,...)
Calls a method.
Definition: vm_eval.c:1077
#define rb_ary_new4
Definition: array.h:74
#define rb_ary_new3
Definition: array.h:73
#define UNLIMITED_ARGUMENTS
Definition: error.h:29
#define rb_check_frozen_internal(obj)
Definition: error.h:58
#define rb_exc_new3
Definition: error.h:31
#define rb_check_arity
Definition: error.h:34
VALUE rb_proc_call_with_block(VALUE, int argc, const VALUE *argv, VALUE)
Definition: proc.c:1013
VALUE rb_reg_last_match(VALUE)
Definition: re.c:1750
VALUE rb_reg_match_post(VALUE)
Definition: re.c:1794
VALUE rb_reg_match_pre(VALUE)
Definition: re.c:1768
VALUE rb_reg_match(VALUE, VALUE)
Definition: re.c:3194
VALUE rb_reg_nth_match(int, VALUE)
Definition: re.c:1725
VALUE rb_reg_match_last(VALUE)
Definition: re.c:1811
VALUE rb_str_concat(VALUE, VALUE)
Definition: string.c:3217
VALUE rb_str_append(VALUE, VALUE)
Definition: string.c:3118
VALUE rb_str_intern(VALUE)
Definition: symbol.c:840
#define rb_str_cat_cstr(buf, str)
Definition: string.h:266
VALUE rb_str_length(VALUE)
Definition: string.c:1995
VALUE rb_str_succ(VALUE)
Definition: string.c:4315
void rb_thread_schedule(void)
Definition: thread.c:1623
void rb_set_class_path_string(VALUE, VALUE, VALUE)
Definition: variable.c:214
VALUE rb_const_get(VALUE, ID)
Definition: variable.c:2624
VALUE rb_cvar_defined(VALUE, ID)
Definition: variable.c:3387
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1234
int rb_const_defined_at(VALUE, ID)
Definition: variable.c:2934
VALUE rb_class_path(VALUE)
Definition: variable.c:169
VALUE rb_const_get_at(VALUE, ID)
Definition: variable.c:2630
VALUE rb_ivar_defined(VALUE, ID)
Definition: variable.c:1510
void rb_const_set(VALUE, ID, VALUE)
Definition: variable.c:3003
VALUE rb_attr_get(VALUE, ID)
Definition: variable.c:1242
VALUE rb_ivar_set(VALUE, ID, VALUE)
Definition: variable.c:1493
VALUE rb_autoload_load(VALUE, ID)
Definition: variable.c:2452
int rb_const_defined(VALUE, ID)
Definition: variable.c:2928
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
Definition: vm_eval.c:619
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
int rb_method_boundp(VALUE, ID, int)
Definition: vm_method.c:1469
rb_alloc_func_t rb_get_alloc_func(VALUE)
Definition: vm_method.c:960
#define ID2SYM
Definition: symbol.h:44
const char * rb_id2name(ID)
Definition: symbol.c:944
#define SYM2ID
Definition: symbol.h:45
ID rb_check_id(volatile VALUE *)
Returns ID for the given name if it is interned already, or 0.
Definition: symbol.c:1069
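rb_check_id above returns an ID only when the name is already interned, which lets C code avoid creating permanent IDs from untrusted input. A sketch combining it with rb_method_boundp (listed further up); lookup_if_interned is a hypothetical helper:

#include <ruby.h>

static VALUE
lookup_if_interned(VALUE recv, VALUE name)
{
    ID mid = rb_check_id(&name);   /* 0 when the name was never interned */
    if (!mid || !rb_method_boundp(CLASS_OF(recv), mid, 0))
        return Qnil;
    return rb_funcall(recv, mid, 0);
}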
#define FIX2INT
Definition: int.h:41
Internal header for Class.
#define RCLASS_SERIAL(c)
Definition: class.h:90
#define RCLASS_CLONED
Definition: class.h:97
#define RCLASS_INCLUDER(c)
Definition: class.h:94
#define RCLASS_CC_TBL(c)
Definition: class.h:85
#define RCLASS_ORIGIN(c)
Definition: class.h:87
#define RCLASS_REFINED_CLASS(c)
Definition: class.h:88
#define RICLASS_IS_ORIGIN
Definition: class.h:96
#define RCLASS_IV_INDEX_TBL(c)
Definition: class.h:86
Internal header for Comparable.
#define OPTIMIZED_CMP(a, b, data)
Definition: compar.h:38
#define UNALIGNED_MEMBER_PTR(ptr, mem)
Definition: gc.h:59
Internal header for Hash.
@ RHASH_PASS_AS_KEYWORDS
Definition: hash.h:24
Internal header for Numeric.
VALUE rb_fix_aref(VALUE fix, VALUE idx)
Definition: numeric.c:4613
VALUE rb_flo_div_flo(VALUE x, VALUE y)
Definition: numeric.c:1140
double ruby_float_mod(double x, double y)
Definition: numeric.c:1237
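ruby_float_mod above implements Float#% semantics, which differ from C's fmod() in that the result takes the sign of the divisor. A standalone sketch of that behaviour (not the numeric.c code itself):

#include <math.h>

static double
float_mod_like_ruby(double x, double y)
{
    double mod = fmod(x, y);
    if (mod != 0.0 && (mod < 0.0) != (y < 0.0))
        mod += y;   /* e.g. 7.0 % -3.0 => -2.0, matching Ruby */
    return mod;
}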
#define ROBJECT_IV_INDEX_TBL
Definition: object.h:81
Internal header for Proc.
VALUE rb_sym_to_proc(VALUE sym)
Definition: proc.c:1443
Internal header for Random.
int ruby_fill_random_bytes(void *, size_t, int)
Definition: random.c:575
#define STATIC_ASSERT
Definition: static_assert.h:14
VALUE rb_sym_proc_call(ID mid, int argc, const VALUE *argv, int kw_splat, VALUE passed_proc)
Definition: string.c:11171
VALUE rb_str_opt_plus(VALUE x, VALUE y)
Definition: string.c:2075
char * rb_str_to_cstr(VALUE str)
Definition: string.c:2432
VALUE rb_gvar_defined(ID)
Definition: variable.c:781
void rb_const_warn_if_deprecated(const rb_const_entry_t *, VALUE, ID)
Definition: variable.c:2533
void rb_init_iv_list(VALUE obj)
Definition: variable.c:1438
int rb_ec_obj_respond_to(struct rb_execution_context_struct *ec, VALUE obj, ID id, int priv)
Definition: vm_method.c:2552
method_missing_reason
Definition: vm.h:32
@ MISSING_SUPER
Definition: vm.h:38
@ MISSING_VCALL
Definition: vm.h:37
@ MISSING_PRIVATE
Definition: vm.h:34
@ MISSING_PROTECTED
Definition: vm.h:35
@ MISSING_NOENTRY
Definition: vm.h:33
@ MISSING_FCALL
Definition: vm.h:36
#define bp()
Definition: internal.h:105
#define rp(obj)
Definition: internal.h:95
#define rb_funcallv(...)
Definition: internal.h:77
#define rb_method_basic_definition_p(...)
Definition: internal.h:78
#define PRIdPTR
Definition: inttypes.h:52
#define PRIdPTRDIFF
Definition: inttypes.h:105
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1087
unsigned int rb_iseq_line_no(const rb_iseq_t *iseq, size_t pos)
Definition: iseq.c:1821
VALUE rb_iseq_label(const rb_iseq_t *iseq)
Definition: iseq.c:1105
VALUE rb_iseq_defined_string(enum defined_type type)
Definition: iseq.c:3087
void rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
Definition: iseq.c:3333
VALUE rb_iseq_disasm(const rb_iseq_t *iseq)
Definition: iseq.c:2335
defined_type
Definition: iseq.h:276
@ DEFINED_METHOD
Definition: iseq.h:284
@ DEFINED_CONST
Definition: iseq.h:283
@ DEFINED_GVAR
Definition: iseq.h:281
@ DEFINED_IVAR
Definition: iseq.h:279
@ DEFINED_REF
Definition: iseq.h:293
@ DEFINED_IVAR2
Definition: iseq.h:292
@ DEFINED_NOT_DEFINED
Definition: iseq.h:277
@ DEFINED_FUNC
Definition: iseq.h:294
@ DEFINED_YIELD
Definition: iseq.h:285
@ DEFINED_ZSUPER
Definition: iseq.h:286
@ DEFINED_CVAR
Definition: iseq.h:282
@ DEFINED_CONST_FROM
Definition: iseq.h:295
#define ISEQ_TRACE_EVENTS
Definition: iseq.h:68
int isinf(double n)
Definition: isinf.c:56
#define CHAR_BIT
Definition: limits.h:44
#define INT2FIX
Definition: long.h:48
#define LONG2FIX
Definition: long.h:49
#define FIX2ULONG
Definition: long.h:47
#define LONG2NUM
Definition: long.h:50
#define FIX2LONG
Definition: long.h:46
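The FIX2LONG/LONG2FIX/LONG2NUM entries above are the tagged-integer conversions used throughout the fast paths in this file. A sketch of the classic Fixnum addition pattern; fixnum_add is illustrative, not the VM's optimized instruction handler:

#include <ruby.h>

static VALUE
fixnum_add(VALUE x, VALUE y)
{
    if (FIXNUM_P(x) && FIXNUM_P(y)) {
        /* Fixnums are one bit narrower than long, so the C sum cannot overflow;
         * LONG2NUM re-tags it, promoting to Bignum when it no longer fits. */
        return LONG2NUM(FIX2LONG(x) + FIX2LONG(y));
    }
    return rb_funcall(x, rb_intern("+"), 1, y); /* fall back to full dispatch */
}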
Internal header for Math.
#define MEMCPY(p1, p2, type, n)
Definition: memory.h:129
#define REALLOC_N
Definition: memory.h:137
#define ALLOCA_N(type, n)
Definition: memory.h:112
#define ALLOC_N
Definition: memory.h:133
#define RB_GC_GUARD(v)
Definition: memory.h:91
#define MEMMOVE(p1, p2, type, n)
Definition: memory.h:130
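The memory.h entries above (ALLOC_N, MEMCPY, and friends) are typed wrappers over malloc/memcpy. A sketch that copies a C buffer of VALUEs into a new Array; rb_ary_new_from_values and xfree are standard API calls not listed above:

#include <ruby.h>

static VALUE
dup_first_n(const VALUE *src, long n)
{
    VALUE *buf = ALLOC_N(VALUE, n);  /* n elements; raises NoMemoryError on failure */
    MEMCPY(buf, src, VALUE, n);      /* element-typed memcpy */
    VALUE ary = rb_ary_new_from_values(n, buf);
    xfree(buf);
    return ary;
}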
const rb_callable_method_entry_t * rb_callable_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_class)
Definition: vm_method.c:1253
const rb_callable_method_entry_t * rb_callable_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class)
Definition: vm_method.c:1235
void rb_add_method_iseq(VALUE klass, ID mid, const rb_iseq_t *iseq, rb_cref_t *cref, rb_method_visibility_t visi)
Definition: vm_method.c:910
rb_method_visibility_t
Definition: method.h:29
@ METHOD_VISI_PRIVATE
Definition: method.h:32
@ METHOD_VISI_PROTECTED
Definition: method.h:33
@ METHOD_VISI_PUBLIC
Definition: method.h:31
@ METHOD_VISI_UNDEF
Definition: method.h:30
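rb_method_visibility_t above is the internal visibility enum from method.h. A sketch mapping it to the Ruby-level keywords, assuming the internal header is available; the helper is purely illustrative:

#include "method.h"

static const char *
visibility_keyword(rb_method_visibility_t visi)
{
    switch (visi) {
      case METHOD_VISI_PUBLIC:    return "public";
      case METHOD_VISI_PRIVATE:   return "private";
      case METHOD_VISI_PROTECTED: return "protected";
      case METHOD_VISI_UNDEF:
      default:                    return "undefined";
    }
}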
const rb_callable_method_entry_t * rb_callable_method_entry(VALUE klass, ID id)
Definition: vm_method.c:1177
rb_method_type_t
Definition: method.h:109
@ VM_METHOD_TYPE_ISEQ
Ruby method.
Definition: method.h:110
@ VM_METHOD_TYPE_ATTRSET
attr_writer or attr_accessor
Definition: method.h:112
@ VM_METHOD_TYPE_CFUNC
C method.
Definition: method.h:111
@ VM_METHOD_TYPE_OPTIMIZED
Kernel::send, Proc::call, etc.
Definition: method.h:119
@ VM_METHOD_TYPE_REFINED
refinement
Definition: method.h:121
@ VM_METHOD_TYPE_NOTIMPLEMENTED
Definition: method.h:118
@ VM_METHOD_TYPE_MISSING
wrapper for method_missing(id)
Definition: method.h:120
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:114
@ VM_METHOD_TYPE_IVAR
attr_reader or attr_accessor
Definition: method.h:113
@ VM_METHOD_TYPE_ZSUPER
Definition: method.h:115
@ VM_METHOD_TYPE_ALIAS
Definition: method.h:116
@ VM_METHOD_TYPE_UNDEF
Definition: method.h:117
#define METHOD_ENTRY_INVALIDATED(me)
Definition: method.h:76
@ OPTIMIZED_METHOD_TYPE_CALL
Definition: method.h:167
@ OPTIMIZED_METHOD_TYPE_BLOCK_CALL
Definition: method.h:168
@ OPTIMIZED_METHOD_TYPE_SEND
Definition: method.h:166
#define METHOD_ENTRY_CACHEABLE(me)
Definition: method.h:78
const rb_method_entry_t * rb_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class)
Definition: vm_method.c:1208
#define UNDEFINED_METHOD_ENTRY_P(me)
Definition: method.h:198
const rb_callable_method_entry_t * rb_method_entry_complement_defined_class(const rb_method_entry_t *src_me, ID called_id, VALUE defined_class)
Definition: vm_method.c:619
#define METHOD_ENTRY_CACHED_SET(me)
Definition: method.h:75
#define METHOD_ENTRY_VISI(me)
Definition: method.h:70
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:56
const char * name
Definition: nkf.c:208
#define TRUE
Definition: nkf.h:175
#define FALSE
Definition: nkf.h:174
#define RUBY_DTRACE_METHOD_RETURN_HOOK(ec, klass, id)
Definition: probes_helper.h:35
#define RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, klass, id)
Definition: probes_helper.h:38
#define RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, klass, id)
Definition: probes_helper.h:32
#define RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, klass, id)
Definition: probes_helper.h:41
#define RARRAY_AREF(a, i)
Definition: psych_emitter.c:7
VALUE rb_eRactorUnsafeError
Definition: ractor.c:22
VALUE rb_eRactorIsolationError
Definition: ractor.c:23
#define RB_OBJ_SHAREABLE_P(obj)
Definition: ractor.h:50
#define RARRAY_LEN
Definition: rarray.h:52
#define RARRAY_CONST_PTR_TRANSIENT
Definition: rarray.h:54
#define RBASIC(obj)
Definition: rbasic.h:34
#define RBASIC_CLASS
Definition: rbasic.h:35
#define RMODULE_INCLUDED_INTO_REFINEMENT
Definition: rclass.h:29
#define RMODULE_IS_REFINEMENT
Definition: rclass.h:28
#define RCLASS_SUPER
Definition: rclass.h:33
#define NULL
Definition: regenc.h:69
#define RB_OBJ_WRITE(a, slot, b)
WB for new reference from ‘a’ to ‘b’.
Definition: rgengc.h:107
#define RB_OBJ_WRITTEN(a, oldv, b)
WB for new reference from ‘a’ to ‘b’.
Definition: rgengc.h:114
#define RHASH_SIZE(h)
Definition: rhash.h:50
#define RHASH_EMPTY_P(h)
Definition: rhash.h:51
VALUE rb_mRubyVMFrozenCore
Definition: vm.c:375
#define StringValueCStr(v)
Definition: rstring.h:52
int argc
Definition: ruby.c:240
char ** argv
Definition: ruby.c:241
#define RB_NO_KEYWORDS
Definition: scan_args.h:46
unsigned LONG_LONG rb_serial_t
Definition: serial.h:19
unsigned int uint32_t
Definition: sha2.h:101
unsigned long long uint64_t
Definition: sha2.h:102
#define Qundef
#define SPECIAL_CONST_P
#define STATIC_SYM_P
#define Qtrue
#define RTEST
#define Qnil
#define Qfalse
#define NIL_P
#define FIXNUM_P
#define f
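The special-constant macros above (Qnil, Qtrue, Qfalse, RTEST, NIL_P, FIXNUM_P) are the C-level truthiness and type tests. A small illustrative classifier:

#include <ruby.h>

static const char *
classify(VALUE v)
{
    if (NIL_P(v))    return "nil";
    if (!RTEST(v))   return "false";   /* only nil and false are falsy */
    if (FIXNUM_P(v)) return "immediate Integer (Fixnum)";
    return "some other truthy object";
}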
VALUE rb_str_catf(VALUE, const char *,...)
Definition: sprintf.c:1243
VALUE rb_sprintf(const char *,...)
Definition: sprintf.c:1203
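rb_sprintf and rb_str_catf above build Ruby Strings from printf-style formats. A short sketch; the format and helper name are arbitrary:

#include <ruby.h>

static VALUE
describe_count(long hits, const char *label)
{
    VALUE msg = rb_sprintf("%ld hit(s)", hits); /* new String */
    rb_str_catf(msg, " for %s", label);         /* append in place */
    return msg;
}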
unsigned long st_data_t
Definition: st.h:22
#define st_lookup
Definition: st.h:128
#define ANYARGS
Definition: stdarg.h:42
Definition: hash.h:44
Definition: iseq.h:218
@ CATCH_TYPE_BREAK
Definition: iseq.h:223
rb_iseq_t * iseq
Definition: iseq.h:240
unsigned int cont
Definition: iseq.h:244
enum iseq_catch_table_entry::catch_type type
unsigned int start
Definition: iseq.h:242
unsigned int end
Definition: iseq.h:243
unsigned int sp
Definition: iseq.h:245
Definition: vm_core.h:222
VALUE flags
Definition: vm_core.h:223
rb_serial_t ic_serial
Definition: vm_core.h:227
VALUE value
Definition: vm_core.h:225
const rb_cref_t * ic_cref
Definition: vm_core.h:226
struct iseq_inline_constant_cache_entry * entry
Definition: vm_core.h:232
Definition: vm_core.h:235
struct rb_iv_index_tbl_entry * entry
Definition: vm_core.h:236
union rb_block::@199 as
struct rb_captured_block captured
Definition: vm_core.h:762
VALUE symbol
Definition: vm_core.h:763
VALUE proc
Definition: vm_core.h:764
const void *const func_ptr
Definition: builtin.h:8
const int argc
Definition: builtin.h:9
const char *const name
Definition: builtin.h:13
const struct rb_callcache * cc
Definition: vm_callinfo.h:428
const struct rb_callinfo * ci
Definition: vm_callinfo.h:427
Definition: method.h:62
ID called_id
Definition: method.h:66
const VALUE defined_class
Definition: method.h:64
const VALUE owner
Definition: method.h:67
struct rb_method_definition_struct *const def
Definition: method.h:65
const VALUE klass
Definition: vm_callinfo.h:278
const VALUE flags
Definition: vm_callinfo.h:275
const vm_call_handler call_
Definition: vm_callinfo.h:284
const struct rb_callable_method_entry_struct *const cme_
Definition: vm_callinfo.h:283
union rb_callcache::@184 aux_
VALUE flag
Definition: vm_callinfo.h:65
VALUE flags
Definition: vm_callinfo.h:62
const struct rb_callcache * cc
Definition: vm_core.h:250
const struct rb_callinfo * ci
Definition: vm_core.h:249
VALUE block_handler
Definition: vm_core.h:251
const struct vm_ifunc * ifunc
Definition: vm_core.h:741
const VALUE * ep
Definition: vm_core.h:738
const rb_iseq_t * iseq
Definition: vm_core.h:740
union rb_captured_block::@198 code
const struct rb_callcache * cc
Definition: vm_callinfo.h:440
const struct rb_callinfo * ci
Definition: vm_callinfo.h:439
const struct rb_callable_method_entry_struct * cme
Definition: vm_callinfo.h:437
struct rb_class_cc_entries::rb_class_cc_entries_entry * entries
Definition: constant.h:33
VALUE value
Definition: constant.h:36
const VALUE * ep
Definition: vm_core.h:774
const rb_iseq_t * iseq
Definition: vm_core.h:772
const VALUE * pc
Definition: vm_core.h:770
CREF (Class REFerence)
Definition: method.h:44
struct rb_cref_struct * next
Definition: method.h:48
rb_control_frame_t * cfp
Definition: vm_core.h:858
struct rb_vm_tag * tag
Definition: vm_core.h:860
const VALUE * root_lep
Definition: vm_core.h:879
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:886
rb_event_flag_t events
Definition: vm_core.h:555
struct iseq_catch_table * catch_table
Definition: vm_core.h:408
unsigned int ambiguous_param0
Definition: vm_core.h:354
const VALUE * opt_table
Definition: vm_core.h:368
enum rb_iseq_constant_body::iseq_type type
unsigned int size
Definition: vm_core.h:359
const struct rb_iseq_constant_body::@188::rb_iseq_param_keyword * keyword
struct rb_iseq_struct * local_iseq
Definition: vm_core.h:412
unsigned int has_block
Definition: vm_core.h:352
unsigned int local_table_size
Definition: vm_core.h:424
unsigned int has_kwrest
Definition: vm_core.h:351
unsigned int has_post
Definition: vm_core.h:349
unsigned int stack_max
Definition: vm_core.h:427
struct rb_iseq_constant_body::@188::@190 flags
VALUE * iseq_encoded
Definition: vm_core.h:319
unsigned int accepts_no_kwarg
Definition: vm_core.h:355
unsigned int has_kw
Definition: vm_core.h:350
struct rb_iseq_constant_body::@188 param
parameter information
unsigned int has_opt
Definition: vm_core.h:347
unsigned int has_lead
Definition: vm_core.h:346
const struct rb_iseq_struct * parent_iseq
Definition: vm_core.h:411
const ID * local_table
Definition: vm_core.h:405
unsigned int has_rest
Definition: vm_core.h:348
struct rb_iseq_constant_body * body
Definition: vm_core.h:448
struct rb_hook_list_struct * local_hooks
Definition: vm_core.h:459
struct rb_iseq_struct::@191::@193 exec
union rb_iseq_struct::@191 aux
Definition: class.h:28
uint32_t index
Definition: class.h:29
VALUE class_value
Definition: class.h:31
rb_serial_t class_serial
Definition: class.h:30
struct rb_method_entry_struct * original_me
Definition: method.h:151
VALUE(* invoker)(VALUE recv, int argc, const VALUE *argv, VALUE(*func)(ANYARGS))
Definition: method.h:141
VALUE(* func)(ANYARGS)
Definition: method.h:140
rb_method_iseq_t iseq
Definition: method.h:179
rb_method_alias_t alias
Definition: method.h:182
rb_method_bmethod_t bmethod
Definition: method.h:184
union rb_method_definition_struct::@123 body
rb_method_cfunc_t cfunc
Definition: method.h:180
rb_method_refined_t refined
Definition: method.h:183
Definition: method.h:54
VALUE defined_class
Definition: method.h:56
VALUE owner
Definition: method.h:59
rb_cref_t * cref
class reference, should be marked
Definition: method.h:136
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition: method.h:135
struct rb_method_entry_struct * orig_me
Definition: method.h:155
unsigned int is_lambda
Definition: vm_core.h:1089
enum ruby_tag_type state
Definition: vm_core.h:813
Definition: st.h:79
IFUNC (Internal FUNCtion)
Definition: imemo.h:85
const void * data
Definition: imemo.h:89
rb_block_call_func_t func
Definition: imemo.h:88
SVAR (Special VARiable)
Definition: imemo.h:54
const VALUE backref
Definition: imemo.h:58
const VALUE cref_or_me
class reference or rb_method_entry_t
Definition: imemo.h:56
THROW_DATA.
Definition: imemo.h:63
#define UNDEF
Definition: vm_core.h:239
struct iseq_inline_storage_entry::@187 once
VALUE value
Definition: vm_core.h:242
struct rb_thread_struct * running_thread
Definition: vm_core.h:241
unsigned long VALUE
Definition: value.h:38
#define SIGNED_VALUE
Definition: value.h:40
unsigned long ID
Definition: value.h:39
#define T_STRING
Definition: value_type.h:77
#define T_FLOAT
Definition: value_type.h:63
#define T_IMEMO
Definition: value_type.h:66
#define T_BIGNUM
Definition: value_type.h:56
#define T_MODULE
Definition: value_type.h:69
#define T_ICLASS
Definition: value_type.h:65
#define T_HASH
Definition: value_type.h:64
#define T_ARRAY
Definition: value_type.h:55
#define T_OBJECT
Definition: value_type.h:74
#define T_SYMBOL
Definition: value_type.h:79
#define T_CLASS
Definition: value_type.h:57
#define BUILTIN_TYPE
Definition: value_type.h:84
#define SYMBOL_P
Definition: value_type.h:87
VALUE rb_ivar_generic_lookup_with_index(VALUE obj, ID id, uint32_t index)
Definition: variable.c:966
VALUE ruby_vm_const_missing_count
Definition: vm.c:379
#define vm_exec
Definition: vm.c:11
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:589
rb_event_flag_t ruby_vm_event_flags
Definition: vm.c:403
unsigned int ruby_vm_event_local_num
Definition: vm.c:405
const struct rb_callcache * rb_vm_empty_cc(void)
Definition: vm.c:4100
VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
Definition: vm.c:1468
#define KW_SPECIFIED_BITS_MAX
Definition: vm_args.c:324
arg_setup_type
Definition: vm_args.c:35
@ arg_setup_block
Definition: vm_args.c:37
@ arg_setup_method
Definition: vm_args.c:36
#define rb_id2str(id)
Definition: vm_backtrace.c:30
VALUE(* vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling)
Definition: vm_callinfo.h:267
#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_)
Definition: vm_callinfo.h:256
#define VM_CALL_TAILCALL
Definition: vm_callinfo.h:39
#define VM_CALLCACHE_UNMARKABLE
Definition: vm_callinfo.h:293
#define VM_CALL_ARGS_SPLAT
Definition: vm_callinfo.h:31
#define VM_CALL_OPT_SEND
Definition: vm_callinfo.h:42
#define vm_cc_empty()
Definition: vm_callinfo.h:385
#define vm_ci_new_runtime(mid, flag, argc, kwarg)
Definition: vm_callinfo.h:181
#define VM_CALL_FCALL
Definition: vm_callinfo.h:33
#define VM_CALL_VCALL
Definition: vm_callinfo.h:34
#define VM_CALL_KWARG
Definition: vm_callinfo.h:37
#define VM_CALL_KW_SPLAT
Definition: vm_callinfo.h:38
#define VM_CALL_ZSUPER
Definition: vm_callinfo.h:41
#define VM_CC_ON_STACK(clazz, call, aux, cme)
Definition: vm_callinfo.h:305
#define VM_CALL_SUPER
Definition: vm_callinfo.h:40
#define TAG_RAISE
Definition: vm_core.h:204
@ BOP_DIV
Definition: vm_core.h:507
@ BOP_LE
Definition: vm_core.h:512
@ BOP_LTLT
Definition: vm_core.h:513
@ BOP_GE
Definition: vm_core.h:522
@ BOP_CALL
Definition: vm_core.h:530
@ BOP_AND
Definition: vm_core.h:531
@ BOP_NIL_P
Definition: vm_core.h:519
@ BOP_SUCC
Definition: vm_core.h:520
@ BOP_EQQ
Definition: vm_core.h:510
@ BOP_ASET
Definition: vm_core.h:515
@ BOP_MAX
Definition: vm_core.h:528
@ BOP_AREF
Definition: vm_core.h:514
@ BOP_PLUS
Definition: vm_core.h:504
@ BOP_MOD
Definition: vm_core.h:508
@ BOP_MINUS
Definition: vm_core.h:505
@ BOP_LT
Definition: vm_core.h:511
@ BOP_MATCH
Definition: vm_core.h:525
@ BOP_MULT
Definition: vm_core.h:506
@ BOP_EMPTY_P
Definition: vm_core.h:518
@ BOP_OR
Definition: vm_core.h:532
@ BOP_MIN
Definition: vm_core.h:529
@ BOP_GT
Definition: vm_core.h:521
#define REGEXP_REDEFINED_OP_FLAG
Definition: vm_core.h:720
#define STRING_REDEFINED_OP_FLAG
Definition: vm_core.h:714
@ VM_THROW_STATE_MASK
Definition: vm_core.h:211
@ VM_THROW_NO_ESCAPE_FLAG
Definition: vm_core.h:210
#define FALSE_REDEFINED_OP_FLAG
Definition: vm_core.h:723
#define TAG_RETRY
Definition: vm_core.h:202
#define VM_ENV_DATA_SIZE
Definition: vm_core.h:1206
ruby_tag_type
Definition: vm_core.h:185
vm_special_object_type
Definition: vm_core.h:1129
@ VM_SPECIAL_OBJECT_CBASE
Definition: vm_core.h:1131
@ VM_SPECIAL_OBJECT_VMCORE
Definition: vm_core.h:1130
@ VM_SPECIAL_OBJECT_CONST_BASE
Definition: vm_core.h:1132
#define HASH_REDEFINED_OP_FLAG
Definition: vm_core.h:716
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp)
Definition: vm_core.h:1393
#define VM_ENV_DATA_INDEX_FLAGS
Definition: vm_core.h:1210
@ block_type_symbol
Definition: vm_core.h:756
@ block_type_iseq
Definition: vm_core.h:754
@ block_type_ifunc
Definition: vm_core.h:755
@ block_type_proc
Definition: vm_core.h:757
#define VM_ASSERT(expr)
Definition: vm_core.h:61
#define TRUE_REDEFINED_OP_FLAG
Definition: vm_core.h:722
@ block_handler_type_ifunc
Definition: vm_core.h:748
@ block_handler_type_proc
Definition: vm_core.h:750
@ block_handler_type_symbol
Definition: vm_core.h:749
@ block_handler_type_iseq
Definition: vm_core.h:747
@ ruby_error_stackfatal
Definition: vm_core.h:498
@ ruby_error_sysstack
Definition: vm_core.h:497
#define VMDEBUG
VM Debug Level.
Definition: vm_core.h:40
VALUE CDHASH
Definition: vm_core.h:1151
#define ARRAY_REDEFINED_OP_FLAG
Definition: vm_core.h:715
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2001
#define RUBY_EVENT_COVERAGE_LINE
Definition: vm_core.h:2022
#define VM_CHECKMATCH_TYPE_MASK
Definition: vm_core.h:1126
#define TAG_THROW
Definition: vm_core.h:205
signed long rb_snum_t
Definition: vm_core.h:183
#define PROC_REDEFINED_OP_FLAG
Definition: vm_core.h:724
#define VM_DEFINECLASS_SCOPED_P(x)
Definition: vm_core.h:1039
#define VM_DEFINECLASS_TYPE(x)
Definition: vm_core.h:1036
#define RUBY_VM_CHECK_INTS(ec)
Definition: vm_core.h:1921
#define VM_GUARDED_PREV_EP(ep)
Definition: vm_core.h:1298
#define NIL_REDEFINED_OP_FLAG
Definition: vm_core.h:721
#define INTEGER_REDEFINED_OP_FLAG
Definition: vm_core.h:712
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x)
Definition: vm_core.h:1040
#define CHECK_VM_STACK_OVERFLOW(cfp, margin)
Definition: vm_core.h:1740
#define SYMBOL_REDEFINED_OP_FLAG
Definition: vm_core.h:718
@ VM_SVAR_EXTRA_START
Definition: vm_core.h:1139
@ VM_SVAR_BACKREF
Definition: vm_core.h:1137
@ VM_SVAR_LASTLINE
Definition: vm_core.h:1136
#define TAG_BREAK
Definition: vm_core.h:200
#define BASIC_OP_UNREDEFINED_P(op, klass)
Definition: vm_core.h:726
rb_control_frame_t *FUNC_FASTCALL rb_insn_func_t(rb_execution_context_t *, rb_control_frame_t *)
Definition: vm_core.h:1158
#define TAG_RETURN
Definition: vm_core.h:199
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
Definition: vm_core.h:1731
#define SDR()
Definition: vm_core.h:1647
#define RUBY_EVENT_COVERAGE_BRANCH
Definition: vm_core.h:2023
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1299
#define VM_ENV_DATA_INDEX_SPECVAL
Definition: vm_core.h:1209
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:1083
#define VM_ENV_DATA_INDEX_ME_CREF
Definition: vm_core.h:1208
rb_vm_defineclass_type_t
Definition: vm_core.h:1028
@ VM_DEFINECLASS_TYPE_CLASS
Definition: vm_core.h:1029
@ VM_DEFINECLASS_TYPE_MODULE
Definition: vm_core.h:1031
@ VM_DEFINECLASS_TYPE_SINGLETON_CLASS
Definition: vm_core.h:1030
#define VM_CHECKMATCH_ARRAY
Definition: vm_core.h:1127
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)
Definition: vm_core.h:1392
#define VM_CHECK_MODE
Definition: vm_core.h:23
#define VM_UNREACHABLE(func)
Definition: vm_core.h:62
vm_check_match_type
Definition: vm_core.h:1120
@ VM_CHECKMATCH_TYPE_RESCUE
Definition: vm_core.h:1123
@ VM_CHECKMATCH_TYPE_CASE
Definition: vm_core.h:1122
@ VM_CHECKMATCH_TYPE_WHEN
Definition: vm_core.h:1121
#define FLOAT_REDEFINED_OP_FLAG
Definition: vm_core.h:713
@ VM_FRAME_FLAG_LAMBDA
Definition: vm_core.h:1194
@ VM_FRAME_FLAG_CFRAME_KW
Definition: vm_core.h:1196
@ VM_FRAME_MAGIC_IFUNC
Definition: vm_core.h:1183
@ VM_FRAME_MAGIC_METHOD
Definition: vm_core.h:1178
@ VM_FRAME_MAGIC_TOP
Definition: vm_core.h:1181
@ VM_FRAME_FLAG_CFRAME
Definition: vm_core.h:1193
@ VM_FRAME_MAGIC_DUMMY
Definition: vm_core.h:1186
@ VM_FRAME_FLAG_BMETHOD
Definition: vm_core.h:1192
@ VM_FRAME_MAGIC_BLOCK
Definition: vm_core.h:1179
@ VM_ENV_FLAG_LOCAL
Definition: vm_core.h:1200
@ VM_FRAME_MAGIC_MASK
Definition: vm_core.h:1188
@ VM_FRAME_MAGIC_CFUNC
Definition: vm_core.h:1182
@ VM_FRAME_MAGIC_EVAL
Definition: vm_core.h:1184
@ VM_FRAME_MAGIC_CLASS
Definition: vm_core.h:1180
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1202
@ VM_FRAME_FLAG_FINISH
Definition: vm_core.h:1191
@ VM_FRAME_MAGIC_RESCUE
Definition: vm_core.h:1185
unsigned long rb_num_t
Definition: vm_core.h:182
unsigned long lindex_t
Definition: vm_exec.h:15
long OFFSET
Definition: vm_exec.h:14
#define VM_SP_CNT(ec, sp)
Definition: vm_exec.h:165
#define vm_push_frame_debug_counter_inc(ec, cfp, t)
VALUE(* builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
#define EQ_UNREDEFINED_P(t)
const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2)
Definition: vm_method.c:1849
#define CHECK_CFP_CONSISTENCY(func)
void rb_vm_pop_frame(rb_execution_context_t *ec)
method_explorer_type
@ mexp_search_method
@ mexp_search_super
@ mexp_search_invokeblock
void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
Definition: vm_insnhelper.c:84
VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE *, const rb_callable_method_entry_t *, int kw_splat)
Definition: vm_eval.c:46
void rb_error_arity(int argc, int min, int max)
void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts)
Definition: vm_method.c:428
VALUE rb_eql_opt(VALUE obj1, VALUE obj2)
VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj, int argc, const VALUE *argv, int priv)
Definition: vm_eval.c:886
VALUE ruby_vm_special_exception_copy(VALUE exc)
Definition: vm_insnhelper.c:48
VALUE rb_equal_opt(VALUE obj1, VALUE obj2)
bool rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
bool rb_vm_opt_cfunc_p(CALL_CACHE cc, int insn)
#define vm_check_frame(a, b, c, d)
VALUE rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
#define CHECK_CMP_NAN(a, b)
void rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
void Init_vm_stack_canary(void)
#define VM_TRACE_HOOK(target_event, val)
const struct rb_callcache * rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
#define vm_check_canary(ec, sp)
#define IMEMO_CONST_CACHE_SHAREABLE
bool rb_simple_iseq_p(const rb_iseq_t *iseq)
rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos)
Definition: iseq.c:1834
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
rb_method_definition_t * rb_method_definition_create(rb_method_type_t type, ID mid)
Definition: vm_method.c:546
VALUE rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
#define INC_SP(x)
Definition: vm_insnhelper.h:93
#define CHECK_CANARY(cond, insn)
#define IS_ARGS_KW_SPLAT_MUT(ci)
#define SET_SP(x)
Definition: vm_insnhelper.h:92
#define IS_ARGS_KW_OR_KW_SPLAT(ci)
#define GET_ISEQ()
Definition: vm_insnhelper.h:99
#define GET_PC()
Definition: vm_insnhelper.h:77
#define POPN(n)
Definition: vm_insnhelper.h:39
#define SETUP_CANARY(cond)
#define RESTORE_REGS()
Definition: vm_insnhelper.h:52
#define GET_SP()
Definition: vm_insnhelper.h:91
#define IS_ARGS_SPLAT(ci)
#define IS_ARGS_KW_SPLAT(ci)
#define GET_EP()
Definition: vm_insnhelper.h:86
#define GET_BLOCK_HANDLER()
#define TOPN(n)
Definition: vm_insnhelper.h:38
#define GET_CFP()
Definition: vm_insnhelper.h:85
#define IS_ARGS_KEYWORD(ci)
#define GET_SELF()
#define GET_LEP()
Definition: vm_insnhelper.h:88
#define DEC_SP(x)
Definition: vm_insnhelper.h:94
#define GET_PREV_EP(ep)
#define GET_GLOBAL_CONSTANT_STATE()
#define STACK_ADDR_FROM_TOP(n)
Definition: vm_insnhelper.h:41
#define RB_VM_LOCK_ENTER()
Definition: vm_sync.h:121
#define RB_VM_LOCK_LEAVE()
Definition: vm_sync.h:122
int err
Definition: win32.c:142
#define stat
Definition: win32.h:195
#define isnan(x)
Definition: win32.h:346