Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
vm_trace.c
Go to the documentation of this file.
1/**********************************************************************
2
3 vm_trace.c -
4
5 $Author: ko1 $
6 created at: Tue Aug 14 19:37:09 2012
7
8 Copyright (C) 1993-2012 Yukihiro Matsumoto
9
10**********************************************************************/
11
12/*
13 * This file includes two parts:
14 *
15 * (1) set_trace_func internal mechanisms
16 * and C level API
17 *
18 * (2) Ruby level API
19 * (2-1) set_trace_func API
20 * (2-2) TracePoint API (not yet)
21 *
22 */
23
24#include "eval_intern.h"
25#include "internal.h"
26#include "internal/hash.h"
27#include "internal/symbol.h"
28#include "iseq.h"
29#include "mjit.h"
30#include "ruby/debug.h"
31#include "vm_core.h"
32#include "ruby/ractor.h"
33
34#include "builtin.h"
35
36/* (1) trace mechanisms */
37
38typedef struct rb_event_hook_struct {
44
45 struct {
47 unsigned int target_line;
50
51typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);
52
53#define MAX_EVENT_NUM 32
54
55void
57{
58 rb_event_hook_t *hook = hooks->hooks;
59
60 while (hook) {
61 rb_gc_mark(hook->data);
62 hook = hook->next;
63 }
64}
65
66static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
67
68void
70{
71 hooks->need_clean = TRUE;
72 clean_hooks(GET_EC(), hooks);
73}
74
75/* ruby_vm_event_flags management */
76
77static void
78update_global_event_hook(rb_event_flag_t vm_events)
79{
80 rb_event_flag_t new_iseq_events = vm_events & ISEQ_TRACE_EVENTS;
82
83 if (new_iseq_events & ~enabled_iseq_events) {
84 /* Stop calling all JIT-ed code. Compiling trace insns is not supported for now. */
85#if USE_MJIT
87#endif
88
89 /* write all ISeqs iff new events are added */
90 rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
91 }
92
93 ruby_vm_event_flags = vm_events;
96}
97
98/* add/remove hooks */
99
100static rb_event_hook_t *
101alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
102{
103 rb_event_hook_t *hook;
104
105 if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
106 rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
107 }
108
109 hook = ALLOC(rb_event_hook_t);
110 hook->hook_flags = hook_flags;
111 hook->events = events;
112 hook->func = func;
113 hook->data = data;
114
115 /* no filters */
116 hook->filter.th = NULL;
117 hook->filter.target_line = 0;
118
119 return hook;
120}
121
122static void
123hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
124{
125 hook->next = list->hooks;
126 list->hooks = hook;
127 list->events |= hook->events;
128
129 if (global_p) {
130 /* global hooks are root objects at GC mark. */
131 update_global_event_hook(list->events);
132 }
133 else {
134 RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
135 }
136}
137
138static void
139connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
140{
141 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
142 hook_list_connect(Qundef, list, hook, TRUE);
143}
144
145static void
146rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
147 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
148{
149 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
150 hook->filter.th = th;
151 connect_event_hook(ec, hook);
152}
153
154void
156{
157 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
158}
159
160void
162{
163 rb_event_hook_t *hook = alloc_event_hook(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
164 connect_event_hook(GET_EC(), hook);
165}
166
167void
169{
170 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
171}
172
173void
175{
176 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
177 connect_event_hook(GET_EC(), hook);
178}
179
180static void
181clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
182{
183 rb_event_hook_t *hook, **nextp = &list->hooks;
184 VM_ASSERT(list->need_clean == TRUE);
185
186 list->events = 0;
187 list->need_clean = FALSE;
188
189 while ((hook = *nextp) != 0) {
191 *nextp = hook->next;
192 xfree(hook);
193 }
194 else {
195 list->events |= hook->events; /* update active events */
196 nextp = &hook->next;
197 }
198 }
199
200 if (list == rb_ec_ractor_hooks(ec)) {
201 /* global events */
202 update_global_event_hook(list->events);
203 }
204 else {
205 /* local events */
206 }
207}
208
209static void
210clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
211{
212 if (UNLIKELY(list->need_clean != FALSE)) {
213 if (list->running == 0) {
214 clean_hooks(ec, list);
215 }
216 }
217}
218
219#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
220
221/* if func is 0, then clear all funcs */
222static int
223remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
224{
225 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
226 int ret = 0;
227 rb_event_hook_t *hook = list->hooks;
228
229 while (hook) {
230 if (func == 0 || hook->func == func) {
231 if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
232 if (data == Qundef || hook->data == data) {
234 ret+=1;
235 list->need_clean = TRUE;
236 }
237 }
238 }
239 hook = hook->next;
240 }
241
242 clean_hooks_check(ec, list);
243 return ret;
244}
245
246static int
247rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
248{
249 return remove_event_hook(ec, filter_th, func, data);
250}
251
252int
254{
255 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
256}
257
258int
260{
261 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
262}
263
264int
266{
267 return remove_event_hook(GET_EC(), NULL, func, Qundef);
268}
269
270int
272{
273 return remove_event_hook(GET_EC(), NULL, func, data);
274}
275
276void
278{
279 rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
280}
281
282void
284{
285 rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
286}
287
288/* invoke hooks */
289
290static void
291exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
292{
293 rb_event_hook_t *hook;
294
295 for (hook = list->hooks; hook; hook = hook->next) {
297 (trace_arg->event & hook->events) &&
298 (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
299 (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
301 (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
302 }
303 else {
304 (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
305 }
306 }
307 }
308}
309
310static int
311exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
312{
313 if (list->events & trace_arg->event) {
314 list->running++;
315 return TRUE;
316 }
317 else {
318 return FALSE;
319 }
320}
321
322static void
323exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
324{
325 list->running--;
326 clean_hooks_check(ec, list);
327}
328
329static void
330exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
331{
332 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
333 exec_hooks_body(ec, list, trace_arg);
334 exec_hooks_postcheck(ec, list);
335}
336
337static int
338exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
339{
340 enum ruby_tag_type state;
341 volatile int raised;
342
343 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;
344
345 raised = rb_ec_reset_raised(ec);
346
347 /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */
348
349 EC_PUSH_TAG(ec);
350 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
351 exec_hooks_body(ec, list, trace_arg);
352 }
353 EC_POP_TAG();
354
355 exec_hooks_postcheck(ec, list);
356
357 if (raised) {
359 }
360
361 return state;
362}
363
366{
367 rb_execution_context_t *ec = trace_arg->ec;
368
369 if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
371 /* skip hooks because this thread doing INTERNAL_EVENT */
372 }
373 else {
374 rb_trace_arg_t *prev_trace_arg = ec->trace_arg;
375
376 ec->trace_arg = trace_arg;
377 /* only global hooks */
378 exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
379 ec->trace_arg = prev_trace_arg;
380 }
381 }
382 else {
383 if (ec->trace_arg == NULL && /* check reentrant */
384 trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
385 const VALUE errinfo = ec->errinfo;
386 const VALUE old_recursive = ec->local_storage_recursive_hash;
387 int state = 0;
388
389 /* setup */
391 ec->errinfo = Qnil;
392 ec->trace_arg = trace_arg;
393
394 /* kick hooks */
395 if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
396 ec->errinfo = errinfo;
397 }
398
399 /* cleanup */
400 ec->trace_arg = NULL;
402 ec->local_storage_recursive_hash = old_recursive;
403
404 if (state) {
405 if (pop_p) {
406 if (VM_FRAME_FINISHED_P(ec->cfp)) {
407 ec->tag = ec->tag->prev;
408 }
409 rb_vm_pop_frame(ec);
410 }
411 EC_JUMP_TAG(ec, state);
412 }
413 }
414 }
415}
416
417VALUE
419{
420 volatile int raised;
421 volatile VALUE result = Qnil;
422 rb_execution_context_t *const ec = GET_EC();
423 rb_vm_t *const vm = rb_ec_vm_ptr(ec);
424 enum ruby_tag_type state;
425 rb_trace_arg_t dummy_trace_arg;
426 dummy_trace_arg.event = 0;
427
428 if (!ec->trace_arg) {
429 ec->trace_arg = &dummy_trace_arg;
430 }
431
432 raised = rb_ec_reset_raised(ec);
433
434 EC_PUSH_TAG(ec);
435 if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
436 result = (*func)(arg);
437 }
438 else {
439 (void)*&vm; /* suppress "clobbered" warning */
440 }
441 EC_POP_TAG();
442
443 if (raised) {
445 }
446
447 if (ec->trace_arg == &dummy_trace_arg) {
448 ec->trace_arg = NULL;
449 }
450
451 if (state) {
452#if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
453 RB_GC_GUARD(result);
454#endif
455 EC_JUMP_TAG(ec, state);
456 }
457
458 return result;
459}
460
461static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
462
463/* (2-1) set_trace_func (old API) */
464
465/*
466 * call-seq:
467 * set_trace_func(proc) -> proc
468 * set_trace_func(nil) -> nil
469 *
470 * Establishes _proc_ as the handler for tracing, or disables
471 * tracing if the parameter is +nil+.
472 *
473 * *Note:* this method is obsolete, please use TracePoint instead.
474 *
475 * _proc_ takes up to six parameters:
476 *
477 * * an event name
478 * * a filename
479 * * a line number
480 * * an object id
481 * * a binding
482 * * the name of a class
483 *
484 * _proc_ is invoked whenever an event occurs.
485 *
486 * Events are:
487 *
488 * +c-call+:: call a C-language routine
489 * +c-return+:: return from a C-language routine
490 * +call+:: call a Ruby method
491 * +class+:: start a class or module definition
492 * +end+:: finish a class or module definition
493 * +line+:: execute code on a new line
494 * +raise+:: raise an exception
495 * +return+:: return from a Ruby method
496 *
497 * Tracing is disabled within the context of _proc_.
498 *
499 * class Test
500 * def test
501 * a = 1
502 * b = 2
503 * end
504 * end
505 *
506 * set_trace_func proc { |event, file, line, id, binding, classname|
507 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
508 * }
509 * t = Test.new
510 * t.test
511 *
512 * line prog.rb:11 false
513 * c-call prog.rb:11 new Class
514 * c-call prog.rb:11 initialize Object
515 * c-return prog.rb:11 initialize Object
516 * c-return prog.rb:11 new Class
517 * line prog.rb:12 false
518 * call prog.rb:2 test Test
519 * line prog.rb:3 test Test
520 * line prog.rb:4 test Test
521 * return prog.rb:4 test Test
522 */
523
524static VALUE
525set_trace_func(VALUE obj, VALUE trace)
526{
527 rb_remove_event_hook(call_trace_func);
528
529 if (NIL_P(trace)) {
530 return Qnil;
531 }
532
533 if (!rb_obj_is_proc(trace)) {
534 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
535 }
536
537 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
538 return trace;
539}
540
541static void
542thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
543{
544 if (!rb_obj_is_proc(trace)) {
545 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
546 }
547
548 rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
549}
550
551/*
552 * call-seq:
553 * thr.add_trace_func(proc) -> proc
554 *
555 * Adds _proc_ as a handler for tracing.
556 *
557 * See Thread#set_trace_func and Kernel#set_trace_func.
558 */
559
560static VALUE
561thread_add_trace_func_m(VALUE obj, VALUE trace)
562{
563 thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
564 return trace;
565}
566
567/*
568 * call-seq:
569 * thr.set_trace_func(proc) -> proc
570 * thr.set_trace_func(nil) -> nil
571 *
572 * Establishes _proc_ on _thr_ as the handler for tracing, or
573 * disables tracing if the parameter is +nil+.
574 *
575 * See Kernel#set_trace_func.
576 */
577
578static VALUE
579thread_set_trace_func_m(VALUE target_thread, VALUE trace)
580{
581 rb_execution_context_t *ec = GET_EC();
582 rb_thread_t *target_th = rb_thread_ptr(target_thread);
583
584 rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);
585
586 if (NIL_P(trace)) {
587 return Qnil;
588 }
589 else {
590 thread_add_trace_func(ec, target_th, trace);
591 return trace;
592 }
593}
594
595static const char *
596get_event_name(rb_event_flag_t event)
597{
598 switch (event) {
599 case RUBY_EVENT_LINE: return "line";
600 case RUBY_EVENT_CLASS: return "class";
601 case RUBY_EVENT_END: return "end";
602 case RUBY_EVENT_CALL: return "call";
603 case RUBY_EVENT_RETURN: return "return";
604 case RUBY_EVENT_C_CALL: return "c-call";
605 case RUBY_EVENT_C_RETURN: return "c-return";
606 case RUBY_EVENT_RAISE: return "raise";
607 default:
608 return "unknown";
609 }
610}
611
612static ID
613get_event_id(rb_event_flag_t event)
614{
615 ID id;
616
617 switch (event) {
618#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
619 C(line, LINE);
620 C(class, CLASS);
621 C(end, END);
622 C(call, CALL);
623 C(return, RETURN);
624 C(c_call, C_CALL);
625 C(c_return, C_RETURN);
626 C(raise, RAISE);
627 C(b_call, B_CALL);
628 C(b_return, B_RETURN);
629 C(thread_begin, THREAD_BEGIN);
630 C(thread_end, THREAD_END);
631 C(fiber_switch, FIBER_SWITCH);
632 C(script_compiled, SCRIPT_COMPILED);
633#undef C
634 default:
635 return 0;
636 }
637}
638
639static void
640get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
641{
642 cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);
643
644 if (cfp) {
645 const rb_iseq_t *iseq = cfp->iseq;
646 *pathp = rb_iseq_path(iseq);
647
648 if (event & (RUBY_EVENT_CLASS |
651 *linep = FIX2INT(rb_iseq_first_lineno(iseq));
652 }
653 else {
654 *linep = rb_vm_get_sourceline(cfp);
655 }
656 }
657 else {
658 *pathp = Qnil;
659 *linep = 0;
660 }
661}
662
663static void
664call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
665{
666 int line;
668 VALUE eventname = rb_str_new2(get_event_name(event));
669 VALUE argv[6];
670 const rb_execution_context_t *ec = GET_EC();
671
672 get_path_and_lineno(ec, ec->cfp, event, &filename, &line);
673
674 if (!klass) {
675 rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
676 }
677
678 if (klass) {
679 if (RB_TYPE_P(klass, T_ICLASS)) {
680 klass = RBASIC(klass)->klass;
681 }
682 else if (FL_TEST(klass, FL_SINGLETON)) {
683 klass = rb_ivar_get(klass, id__attached__);
684 }
685 }
686
687 argv[0] = eventname;
688 argv[1] = filename;
689 argv[2] = INT2FIX(line);
690 argv[3] = id ? ID2SYM(id) : Qnil;
691 argv[4] = (self && (filename != Qnil)) ? rb_binding_new() : Qnil;
692 argv[5] = klass ? klass : Qnil;
693
695}
696
697/* (2-2) TracePoint API */
698
699static VALUE rb_cTracePoint;
700
701typedef struct rb_tp_struct {
703 int tracing; /* bool */
705 VALUE local_target_set; /* Hash: target ->
706 * Qtrue (if target is iseq) or
707 * Qfalse (if target is bmethod)
708 */
709 void (*func)(VALUE tpval, void *data);
710 void *data;
715
716static void
717tp_mark(void *ptr)
718{
719 rb_tp_t *tp = ptr;
720 rb_gc_mark(tp->proc);
722 if (tp->target_th) rb_gc_mark(tp->target_th->self);
723}
724
725static size_t
726tp_memsize(const void *ptr)
727{
728 return sizeof(rb_tp_t);
729}
730
731static const rb_data_type_t tp_data_type = {
732 "tracepoint",
733 {tp_mark, RUBY_TYPED_DEFAULT_FREE, tp_memsize,},
735};
736
737static VALUE
738tp_alloc(VALUE klass)
739{
740 rb_tp_t *tp;
741 return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
742}
743
744static rb_event_flag_t
745symbol2event_flag(VALUE v)
746{
747 ID id;
749 const rb_event_flag_t RUBY_EVENT_A_CALL =
751 const rb_event_flag_t RUBY_EVENT_A_RETURN =
753
754#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
755 C(line, LINE);
756 C(class, CLASS);
757 C(end, END);
758 C(call, CALL);
759 C(return, RETURN);
760 C(c_call, C_CALL);
761 C(c_return, C_RETURN);
762 C(raise, RAISE);
763 C(b_call, B_CALL);
764 C(b_return, B_RETURN);
765 C(thread_begin, THREAD_BEGIN);
766 C(thread_end, THREAD_END);
767 C(fiber_switch, FIBER_SWITCH);
768 C(script_compiled, SCRIPT_COMPILED);
769
770 /* joke */
771 C(a_call, A_CALL);
772 C(a_return, A_RETURN);
773#undef C
774 rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
775}
776
777static rb_tp_t *
778tpptr(VALUE tpval)
779{
780 rb_tp_t *tp;
781 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
782 return tp;
783}
784
785static rb_trace_arg_t *
786get_trace_arg(void)
787{
788 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
789 if (trace_arg == 0) {
790 rb_raise(rb_eRuntimeError, "access from outside");
791 }
792 return trace_arg;
793}
794
795struct rb_trace_arg_struct *
797{
798 return get_trace_arg();
799}
800
803{
804 return trace_arg->event;
805}
806
807VALUE
809{
810 return ID2SYM(get_event_id(trace_arg->event));
811}
812
813static void
814fill_path_and_lineno(rb_trace_arg_t *trace_arg)
815{
816 if (trace_arg->path == Qundef) {
817 get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
818 }
819}
820
821VALUE
823{
824 fill_path_and_lineno(trace_arg);
825 return INT2FIX(trace_arg->lineno);
826}
827VALUE
829{
830 fill_path_and_lineno(trace_arg);
831 return trace_arg->path;
832}
833
834static void
835fill_id_and_klass(rb_trace_arg_t *trace_arg)
836{
837 if (!trace_arg->klass_solved) {
838 if (!trace_arg->klass) {
839 rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
840 }
841
842 if (trace_arg->klass) {
843 if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
844 trace_arg->klass = RBASIC(trace_arg->klass)->klass;
845 }
846 }
847 else {
848 trace_arg->klass = Qnil;
849 }
850
851 trace_arg->klass_solved = 1;
852 }
853}
854
855VALUE
857{
858 switch(trace_arg->event) {
859 case RUBY_EVENT_CALL:
862 case RUBY_EVENT_B_RETURN: {
863 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
864 if (cfp) {
865 int is_proc = 0;
866 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
867 is_proc = 1;
868 }
869 return rb_iseq_parameters(cfp->iseq, is_proc);
870 }
871 break;
872 }
874 case RUBY_EVENT_C_RETURN: {
875 fill_id_and_klass(trace_arg);
876 if (trace_arg->klass && trace_arg->id) {
877 const rb_method_entry_t *me;
878 VALUE iclass = Qnil;
879 me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->id, &iclass);
881 }
882 break;
883 }
884 case RUBY_EVENT_RAISE:
885 case RUBY_EVENT_LINE:
886 case RUBY_EVENT_CLASS:
887 case RUBY_EVENT_END:
889 rb_raise(rb_eRuntimeError, "not supported by this event");
890 break;
891 }
892 return Qnil;
893}
894
895VALUE
897{
898 fill_id_and_klass(trace_arg);
899 return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
900}
901
902VALUE
904{
905 fill_id_and_klass(trace_arg);
906 return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
907}
908
909VALUE
911{
912 fill_id_and_klass(trace_arg);
913 return trace_arg->klass;
914}
915
916VALUE
918{
920 cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);
921
922 if (cfp) {
923 return rb_vm_make_binding(trace_arg->ec, cfp);
924 }
925 else {
926 return Qnil;
927 }
928}
929
930VALUE
932{
933 return trace_arg->self;
934}
935
936VALUE
938{
940 /* ok */
941 }
942 else {
943 rb_raise(rb_eRuntimeError, "not supported by this event");
944 }
945 if (trace_arg->data == Qundef) {
946 rb_bug("rb_tracearg_return_value: unreachable");
947 }
948 return trace_arg->data;
949}
950
951VALUE
953{
954 if (trace_arg->event & (RUBY_EVENT_RAISE)) {
955 /* ok */
956 }
957 else {
958 rb_raise(rb_eRuntimeError, "not supported by this event");
959 }
960 if (trace_arg->data == Qundef) {
961 rb_bug("rb_tracearg_raised_exception: unreachable");
962 }
963 return trace_arg->data;
964}
965
966VALUE
968{
969 VALUE data = trace_arg->data;
970
971 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
972 /* ok */
973 }
974 else {
975 rb_raise(rb_eRuntimeError, "not supported by this event");
976 }
977 if (data == Qundef) {
978 rb_bug("rb_tracearg_raised_exception: unreachable");
979 }
980 if (rb_obj_is_iseq(data)) {
981 return Qnil;
982 }
983 else {
984 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
985 /* [src, iseq] */
986 return RARRAY_AREF(data, 0);
987 }
988}
989
990VALUE
992{
993 VALUE data = trace_arg->data;
994
995 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
996 /* ok */
997 }
998 else {
999 rb_raise(rb_eRuntimeError, "not supported by this event");
1000 }
1001 if (data == Qundef) {
1002 rb_bug("rb_tracearg_raised_exception: unreachable");
1003 }
1004
1005 if (rb_obj_is_iseq(data)) {
1006 return rb_iseqw_new((const rb_iseq_t *)data);
1007 }
1008 else {
1009 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1010 VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));
1011
1012 /* [src, iseq] */
1013 return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
1014 }
1015}
1016
1017VALUE
1019{
1021 /* ok */
1022 }
1023 else {
1024 rb_raise(rb_eRuntimeError, "not supported by this event");
1025 }
1026 if (trace_arg->data == Qundef) {
1027 rb_bug("rb_tracearg_object: unreachable");
1028 }
1029 return trace_arg->data;
1030}
1031
1032static VALUE
1033tracepoint_attr_event(rb_execution_context_t *ec, VALUE tpval)
1034{
1035 return rb_tracearg_event(get_trace_arg());
1036}
1037
1038static VALUE
1039tracepoint_attr_lineno(rb_execution_context_t *ec, VALUE tpval)
1040{
1041 return rb_tracearg_lineno(get_trace_arg());
1042}
1043static VALUE
1044tracepoint_attr_path(rb_execution_context_t *ec, VALUE tpval)
1045{
1046 return rb_tracearg_path(get_trace_arg());
1047}
1048
1049static VALUE
1050tracepoint_attr_parameters(rb_execution_context_t *ec, VALUE tpval)
1051{
1052 return rb_tracearg_parameters(get_trace_arg());
1053}
1054
1055static VALUE
1056tracepoint_attr_method_id(rb_execution_context_t *ec, VALUE tpval)
1057{
1058 return rb_tracearg_method_id(get_trace_arg());
1059}
1060
1061static VALUE
1062tracepoint_attr_callee_id(rb_execution_context_t *ec, VALUE tpval)
1063{
1064 return rb_tracearg_callee_id(get_trace_arg());
1065}
1066
1067static VALUE
1068tracepoint_attr_defined_class(rb_execution_context_t *ec, VALUE tpval)
1069{
1070 return rb_tracearg_defined_class(get_trace_arg());
1071}
1072
1073static VALUE
1074tracepoint_attr_binding(rb_execution_context_t *ec, VALUE tpval)
1075{
1076 return rb_tracearg_binding(get_trace_arg());
1077}
1078
1079static VALUE
1080tracepoint_attr_self(rb_execution_context_t *ec, VALUE tpval)
1081{
1082 return rb_tracearg_self(get_trace_arg());
1083}
1084
1085static VALUE
1086tracepoint_attr_return_value(rb_execution_context_t *ec, VALUE tpval)
1087{
1088 return rb_tracearg_return_value(get_trace_arg());
1089}
1090
1091static VALUE
1092tracepoint_attr_raised_exception(rb_execution_context_t *ec, VALUE tpval)
1093{
1094 return rb_tracearg_raised_exception(get_trace_arg());
1095}
1096
1097static VALUE
1098tracepoint_attr_eval_script(rb_execution_context_t *ec, VALUE tpval)
1099{
1100 return rb_tracearg_eval_script(get_trace_arg());
1101}
1102
1103static VALUE
1104tracepoint_attr_instruction_sequence(rb_execution_context_t *ec, VALUE tpval)
1105{
1106 return rb_tracearg_instruction_sequence(get_trace_arg());
1107}
1108
1109static void
1110tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
1111{
1112 rb_tp_t *tp = tpptr(tpval);
1113
1114 if (tp->func) {
1115 (*tp->func)(tpval, tp->data);
1116 }
1117 else {
1118 if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
1119 rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
1120 }
1121 }
1122}
1123
1124VALUE
1126{
1127 rb_tp_t *tp;
1128 tp = tpptr(tpval);
1129
1130 if (tp->local_target_set != Qfalse) {
1131 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1132 }
1133
1134 if (tp->target_th) {
1135 rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1137 }
1138 else {
1139 rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1141 }
1142 tp->tracing = 1;
1143 return Qundef;
1144}
1145
1146static const rb_iseq_t *
1147iseq_of(VALUE target)
1148{
1149 VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
1150 if (NIL_P(iseqv)) {
1151 rb_raise(rb_eArgError, "specified target is not supported");
1152 }
1153 else {
1154 return rb_iseqw_to_iseq(iseqv);
1155 }
1156}
1157
1158const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
1159
1160static VALUE
1161rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
1162{
1163 rb_tp_t *tp = tpptr(tpval);
1164 const rb_iseq_t *iseq = iseq_of(target);
1165 int n;
1166 unsigned int line = 0;
1167
1168 if (tp->tracing > 0) {
1169 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1170 }
1171
1172 if (!NIL_P(target_line)) {
1173 if ((tp->events & RUBY_EVENT_LINE) == 0) {
1174 rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
1175 }
1176 else {
1177 line = NUM2UINT(target_line);
1178 }
1179 }
1180
1183
1184 /* iseq */
1185 n = rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line);
1187
1188 /* bmethod */
1189 if (rb_obj_is_method(target)) {
1191 if (def->type == VM_METHOD_TYPE_BMETHOD &&
1193 def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
1194 rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
1195 rb_hash_aset(tp->local_target_set, target, Qfalse);
1196
1197 n++;
1198 }
1199 }
1200
1201 if (n == 0) {
1202 rb_raise(rb_eArgError, "can not enable any hooks");
1203 }
1204
1206
1207 tp->tracing = 1;
1208
1209 return Qnil;
1210}
1211
1212static int
1213disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
1214{
1215 if (iseq_p) {
1217 }
1218 else {
1219 /* bmethod */
1221 rb_hook_list_t *hooks = def->body.bmethod.hooks;
1222 VM_ASSERT(hooks != NULL);
1223 rb_hook_list_remove_tracepoint(hooks, tpval);
1224 if (hooks->running == 0) {
1225 rb_hook_list_free(def->body.bmethod.hooks);
1226 }
1227 def->body.bmethod.hooks = NULL;
1228 }
1229 return ST_CONTINUE;
1230}
1231
1232VALUE
1234{
1235 rb_tp_t *tp;
1236
1237 tp = tpptr(tpval);
1238
1239 if (tp->local_target_set) {
1240 rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1243 }
1244 else {
1245 if (tp->target_th) {
1247 }
1248 else {
1250 }
1251 }
1252 tp->tracing = 0;
1253 tp->target_th = NULL;
1254 return Qundef;
1255}
1256
1257void
1258rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
1259{
1260 rb_tp_t *tp = tpptr(tpval);
1261 rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1263 hook->filter.target_line = target_line;
1264 hook_list_connect(target, list, hook, FALSE);
1265}
1266
1267void
1269{
1270 rb_event_hook_t *hook = list->hooks;
1271 rb_event_flag_t events = 0;
1272
1273 while (hook) {
1274 if (hook->data == tpval) {
1276 list->need_clean = TRUE;
1277 }
1278 else {
1279 events |= hook->events;
1280 }
1281 hook = hook->next;
1282 }
1283
1284 list->events = events;
1285}
1286
1287static VALUE
1288tracepoint_enable_m(rb_execution_context_t *ec, VALUE tpval, VALUE target, VALUE target_line, VALUE target_thread)
1289{
1290 rb_tp_t *tp = tpptr(tpval);
1291 int previous_tracing = tp->tracing;
1292
1293 /* check target_thread */
1294 if (RTEST(target_thread)) {
1295 if (tp->target_th) {
1296 rb_raise(rb_eArgError, "can not override target_thread filter");
1297 }
1298 tp->target_th = rb_thread_ptr(target_thread);
1299 }
1300 else {
1301 tp->target_th = NULL;
1302 }
1303
1304 if (NIL_P(target)) {
1305 if (!NIL_P(target_line)) {
1306 rb_raise(rb_eArgError, "only target_line is specified");
1307 }
1308 rb_tracepoint_enable(tpval);
1309 }
1310 else {
1311 rb_tracepoint_enable_for_target(tpval, target, target_line);
1312 }
1313
1314 if (rb_block_given_p()) {
1315 return rb_ensure(rb_yield, Qundef,
1316 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1317 tpval);
1318 }
1319 else {
1320 return previous_tracing ? Qtrue : Qfalse;
1321 }
1322}
1323
1324static VALUE
1325tracepoint_disable_m(rb_execution_context_t *ec, VALUE tpval)
1326{
1327 rb_tp_t *tp = tpptr(tpval);
1328 int previous_tracing = tp->tracing;
1329
1330 if (rb_block_given_p()) {
1331 if (tp->local_target_set != Qfalse) {
1332 rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
1333 }
1334
1335 rb_tracepoint_disable(tpval);
1336 return rb_ensure(rb_yield, Qundef,
1337 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1338 tpval);
1339 }
1340 else {
1341 rb_tracepoint_disable(tpval);
1342 return previous_tracing ? Qtrue : Qfalse;
1343 }
1344}
1345
1346VALUE
1348{
1349 rb_tp_t *tp = tpptr(tpval);
1350 return tp->tracing ? Qtrue : Qfalse;
1351}
1352
1353static VALUE
1354tracepoint_enabled_p(rb_execution_context_t *ec, VALUE tpval)
1355{
1356 return rb_tracepoint_enabled_p(tpval);
1357}
1358
1359static VALUE
1360tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
1361{
1362 VALUE tpval = tp_alloc(klass);
1363 rb_tp_t *tp;
1364 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
1365
1366 tp->proc = proc;
1367 tp->ractor = rb_ractor_shareable_p(proc) ? NULL : GET_RACTOR();
1368 tp->func = func;
1369 tp->data = data;
1370 tp->events = events;
1371 tp->self = tpval;
1372
1373 return tpval;
1374}
1375
1376/*
1377 * Creates a tracepoint by registering a callback function for one or more
1378 * tracepoint events. Once the tracepoint is created, you can use
1379 * rb_tracepoint_enable to enable the tracepoint.
1380 *
1381 * Parameters:
1382 * 1. VALUE target_thval - Meant for picking the thread in which the tracepoint
 1383 * is to be created. However, the current implementation ignores this parameter,
1384 * tracepoint is created for all threads. Simply specify Qnil.
1385 * 2. rb_event_flag_t events - Event(s) to listen to.
1386 * 3. void (*func)(VALUE, void *) - A callback function.
1387 * 4. void *data - Void pointer that will be passed to the callback function.
1388 *
1389 * When the callback function is called, it will be passed 2 parameters:
1390 * 1)VALUE tpval - the TracePoint object from which trace args can be extracted.
1391 * 2)void *data - A void pointer which helps to share scope with the callback function.
1392 *
1393 * It is important to note that you cannot register callbacks for normal events and internal events
1394 * simultaneously because they serve different purposes.
1395 * You can use any Ruby APIs (calling methods and so on) on normal event hooks.
1396 * However, in internal events, you cannot use any Ruby APIs (not even object creation).
1397 * This is why we can't specify internal events by TracePoint directly.
1398 * Limitations are MRI version specific.
1399 *
1400 * Example:
1401 * rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ, obj_event_i, data);
1402 *
1403 * In this example, a callback function obj_event_i will be registered for
1404 * internal events RUBY_INTERNAL_EVENT_NEWOBJ and RUBY_INTERNAL_EVENT_FREEOBJ.
1405 */
1406VALUE
1407rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
1408{
1409 rb_thread_t *target_th = NULL;
1410
1411 if (RTEST(target_thval)) {
1412 target_th = rb_thread_ptr(target_thval);
1413 /* TODO: Test it!
1414 * Warning: This function is not tested.
1415 */
1416 }
1417 return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
1418}
1419
/* TracePoint.new(*events) { ... } builtin implementation: OR each event
 * Symbol into an event-flag mask and build a TracePoint carrying the given
 * block as its callback.  Raises ArgumentError when no block is supplied. */
1420 static VALUE
1421 tracepoint_new_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1422 {
1423 rb_event_flag_t events = 0;
1424 long i;
1425 long argc = RARRAY_LEN(args);
1426
1427 if (argc > 0) {
1428 for (i=0; i<argc; i++) {
1429 events |= symbol2event_flag(RARRAY_AREF(args, i));
1430 }
1431 }
1432 else {
/* NOTE(review): extraction gap — original line 1433 is missing here;
 * presumably it assigned the default event mask (RUBY_EVENT_TRACEPOINT_ALL,
 * declared elsewhere in this file).  Confirm against upstream vm_trace.c. */
1434 }
1435
1436 if (!rb_block_given_p()) {
1437 rb_raise(rb_eArgError, "must be called with a block");
1438 }
1439
/* target_th == 0: not thread-local; func/data == 0: Ruby-level proc callback. */
1440 return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
1441 }
1442
1443static VALUE
1444tracepoint_trace_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1445{
1446 VALUE trace = tracepoint_new_s(ec, self, args);
1447 rb_tracepoint_enable(trace);
1448 return trace;
1449}
1450
/* TracePoint#inspect builtin: when called while a trace event is being
 * dispatched on the current EC, render event-specific details
 * (path/lineno/method); otherwise just report enabled/disabled state. */
1452 static VALUE
1453 tracepoint_inspect(rb_execution_context_t *ec, VALUE self)
1454 {
1455 rb_tp_t *tp = tpptr(self);
1456 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
1457
1458 if (trace_arg) {
1459 switch (trace_arg->event) {
1460 case RUBY_EVENT_LINE:
1461 {
1462 VALUE sym = rb_tracearg_method_id(trace_arg);
/* No enclosing method (e.g. top-level line event): fall through to the
 * generic path:line format below. */
1463 if (NIL_P(sym))
1464 break;
1465 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d in `%"PRIsVALUE"'>",
1466 rb_tracearg_event(trace_arg),
1467 rb_tracearg_path(trace_arg),
1468 FIX2INT(rb_tracearg_lineno(trace_arg)),
1469 sym);
1470 }
1471 case RUBY_EVENT_CALL:
1472 case RUBY_EVENT_C_CALL:
1473 case RUBY_EVENT_RETURN:
/* NOTE(review): extraction gap — original line 1473 is missing here
 * (presumably `case RUBY_EVENT_C_RETURN:`).  Confirm against upstream. */
1474 return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"' %"PRIsVALUE":%d>",
1475 rb_tracearg_event(trace_arg),
1476 rb_tracearg_method_id(trace_arg),
1477 rb_tracearg_path(trace_arg),
1478 FIX2INT(rb_tracearg_lineno(trace_arg)));
/* NOTE(review): extraction gap — original lines 1479-1480 are missing here
 * (presumably `case RUBY_EVENT_THREAD_BEGIN:` / `case RUBY_EVENT_THREAD_END:`
 * guarding the self-only format below).  Confirm against upstream. */
1481 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
1482 rb_tracearg_event(trace_arg),
1483 rb_tracearg_self(trace_arg));
1484 default:
1485 break;
1486 }
/* Generic format: event plus path:line. */
1487 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d>",
1488 rb_tracearg_event(trace_arg),
1489 rb_tracearg_path(trace_arg),
1490 FIX2INT(rb_tracearg_lineno(trace_arg)));
1491 }
1492 else {
1493 return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
1494 }
1495 }
1496
/* Walk a hook list, tally active vs. lazily-deleted hooks, and store the
 * pair [active, deleted] under `key` in `hash` (helper for TracePoint.stat). */
1497 static void
1498 tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
1499 {
1500 int active = 0, deleted = 0;
1501
1502 while (hook) {
/* NOTE(review): extraction gap — original line 1503 is missing here, which
 * leaves the `else` below dangling; it presumably tested
 * `hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED`.  Confirm against
 * upstream vm_trace.c before treating this span as compilable. */
1504 deleted++;
1505 }
1506 else {
1507 active++;
1508 }
1509 hook = hook->next;
1510 }
1511
1512 rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
1513 }
1514
/* TracePoint.stat builtin: return a hash of hook-list statistics, currently
 * only for the VM-global (ractor) hook list. */
1515 static VALUE
1516 tracepoint_stat_s(rb_execution_context_t *ec, VALUE self)
1517 {
1518 rb_vm_t *vm = GET_VM();
/* NOTE(review): extraction gap — original line 1519 is missing; upstream
 * creates the `stat` result hash here (e.g. rb_hash_new()).  Confirm. */
1520
1521 tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
1522 /* TODO: thread local hooks */
1523
1524 return stat;
1525 }
1526
1527#include "trace_point.rbinc"
1528
1529/* This function is called from inits.c */
1530 void
/* NOTE(review): extraction gap — original line 1531 (the signature line,
 * `Init_vm_trace(void)`) is missing from this extract. */
1532 {
1533 /* trace_func */
1534 rb_define_global_function("set_trace_func", set_trace_func, 1);
1535 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
1536 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
1537
/* TracePoint instances are created only through TracePoint.new / .trace,
 * so drop the default allocator. */
1538 rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
1539 rb_undef_alloc_func(rb_cTracePoint);
1540 }
1541
/* NOTE(review): extraction gap — the surrounding typedef/struct lines
 * (original lines 1542-1543, 1545, 1550, 1552) are missing from this
 * extract.  Per the index entries later in this listing, upstream declares
 * `rb_postponed_job_t` here (func + data) and `struct rb_workqueue_job`
 * (embedded job + list node).  Confirm against upstream vm_trace.c. */
1544 void *data;
1546
/* Capacity of vm->postponed_job_buffer. */
1547 #define MAX_POSTPONED_JOB 1000
/* Extra slots available only to rb_postponed_job_register_one(). */
1548 #define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
1549
1551 struct list_node jnode; /* <=> vm->workqueue */
1553 };
1554
/* Called from inits.c at VM boot to set up the postponed-job machinery. */
1555 void
/* NOTE(review): extraction gap — original line 1556 (the signature line,
 * `Init_vm_postponed_job(void)`) is missing from this extract. */
1557 {
1558 rb_vm_t *vm = GET_VM();
/* NOTE(review): extraction gap — original line 1559 is missing; upstream
 * presumably allocates vm->postponed_job_buffer here.  Confirm. */
1560 vm->postponed_job_index = 0;
1561 /* workqueue is initialized when VM locks are initialized */
1562 }
1563
1569
/* Claim one slot in vm->postponed_job_buffer via a single CAS on
 * vm->postponed_job_index, then record (func, data) in it.
 * Returns PJRR_FULL when expected_index >= max, PJRR_INTERRUPTED when the
 * CAS lost a race (callers retry with a fresh index), else PJRR_SUCCESS.
 * The `flags` parameter is currently unused (see comment below). */
1570 /* Async-signal-safe */
/* NOTE(review): extraction gap — original line 1571 (the return-type line,
 * presumably `static enum postponed_job_register_result`) is missing from
 * this extract. */
1572 postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
1573     unsigned int flags, rb_postponed_job_func_t func, void *data, rb_atomic_t max, rb_atomic_t expected_index)
1574 {
1575 rb_postponed_job_t *pjob;
1576
1577 if (expected_index >= max) return PJRR_FULL; /* failed */
1578
1579 if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
1580 pjob = &vm->postponed_job_buffer[expected_index];
1581 }
1582 else {
/* Another registration advanced the index between read and CAS. */
1583 return PJRR_INTERRUPTED;
1584 }
1585
1586 /* unused: pjob->flags = flags; */
1587 pjob->func = func;
1588 pjob->data = data;
1589
/* NOTE(review): extraction gap — original line 1590 is missing; upstream
 * presumably raises the postponed-job interrupt flag on `ec` here
 * (RUBY_VM_SET_POSTPONED_JOB_INTERRUPT).  Confirm. */
1591
1592 return PJRR_SUCCESS;
1593 }
1594
/* Return a usable execution context: the current one, or the main ractor's
 * EC when called from a thread that has no Ruby EC of its own. */
/* NOTE(review): extraction gap — original line 1595 (the return-type line,
 * presumably `static rb_execution_context_t *`) is missing from this
 * extract. */
1596 get_valid_ec(rb_vm_t *vm)
1597 {
1598 rb_execution_context_t *ec = rb_current_execution_context(false);
1599 if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
1600 return ec;
1601 }
1602
1603/*
1604 * return 0 if job buffer is full
1605 * Async-signal-safe
1606 */
1607int
1608rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
1609{
1610 rb_vm_t *vm = GET_VM();
1611 rb_execution_context_t *ec = get_valid_ec(vm);
1612
1613 begin:
1614 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
1615 case PJRR_SUCCESS : return 1;
1616 case PJRR_FULL : return 0;
1617 case PJRR_INTERRUPTED: goto begin;
1618 default: rb_bug("unreachable\n");
1619 }
1620}
1621
1622/*
1623 * return 0 if job buffer is full
1624 * Async-signal-safe
1625 */
/* Like rb_postponed_job_register(), but skip registration when a job with
 * the same func is already queued.  Returns 2 when already registered,
 * 1 on success, 0 if the buffer is full.  Async-signal-safe. */
1626 int
1627 rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
1628 {
1629 rb_vm_t *vm = GET_VM();
1630 rb_execution_context_t *ec = get_valid_ec(vm);
1631 rb_postponed_job_t *pjob;
1632 rb_atomic_t i, index;
1633
1634 begin:
1635 index = vm->postponed_job_index;
/* Scan currently-registered jobs for a duplicate callback. */
1636 for (i=0; i<index; i++) {
1637 pjob = &vm->postponed_job_buffer[i];
1638 if (pjob->func == func) {
/* NOTE(review): extraction gap — original line 1639 is missing; upstream
 * presumably re-raises the postponed-job interrupt on `ec` before
 * returning.  Confirm against upstream vm_trace.c. */
1640 return 2;
1641 }
1642 }
/* "one" jobs may use a small overflow margin beyond MAX_POSTPONED_JOB. */
1643 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
1644 case PJRR_SUCCESS : return 1;
1645 case PJRR_FULL : return 0;
/* Lost a CAS race; rescan the buffer from the top. */
1646 case PJRR_INTERRUPTED: goto begin;
1647 default: rb_bug("unreachable\n");
1648 }
1649 }
1650
1651/*
1652 * thread-safe and called from non-Ruby thread
1653 * returns FALSE on failure (ENOMEM), TRUE otherwise
1654 */
/* Queue (func, data) on the VM workqueue; usable from non-Ruby threads.
 * Heap-allocates the queue node with plain malloc (no GC involvement).
 * Returns FALSE on ENOMEM, TRUE otherwise. */
1655 int
1656 rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
1657 {
1658 struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
1659 rb_vm_t *vm = GET_VM();
1660
1661 if (!wq_job) return FALSE;
1662 wq_job->job.func = func;
1663 wq_job->job.data = data;
1664
/* NOTE(review): extraction gap — original lines 1665/1667 are missing;
 * upstream presumably takes and releases vm->workqueue_lock around this
 * list insertion.  Confirm against upstream vm_trace.c. */
1666 list_add_tail(&vm->workqueue, &wq_job->jnode);
1668
1669 // TODO: current implementation affects only main ractor
/* NOTE(review): extraction gap — original line 1670 is missing; upstream
 * presumably signals the postponed-job interrupt on the main ractor's EC
 * here.  Confirm. */
1671
1672 return TRUE;
1673 }
1674
/* Drain and run all pending postponed jobs and workqueue jobs on the
 * current EC, with POSTPONED_JOB/TRAP interrupts masked so a job body
 * cannot recursively re-enter the flush.  errinfo is saved and restored
 * around the job bodies. */
1675 void
/* NOTE(review): extraction gap — original line 1676 (the signature line,
 * presumably `rb_postponed_job_flush(rb_vm_t *vm)`) is missing from this
 * extract. */
1677 {
1678 rb_execution_context_t *ec = GET_EC();
1679 const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
1680 volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
1681 VALUE volatile saved_errno = ec->errinfo;
1682 struct list_head tmp;
1683
1684 list_head_init(&tmp);
1685
/* NOTE(review): extraction gap — original lines 1686/1688 are missing;
 * upstream presumably guards this workqueue splice with
 * vm->workqueue_lock.  Confirm against upstream vm_trace.c. */
1687 list_append_list(&tmp, &vm->workqueue);
1689
1690 ec->errinfo = Qnil;
1691 /* mask POSTPONED_JOB dispatch */
1692 ec->interrupt_mask |= block_mask;
1693 {
1694 EC_PUSH_TAG(ec);
1695 if (EC_EXEC_TAG() == TAG_NONE) {
1696 rb_atomic_t index;
1697 struct rb_workqueue_job *wq_job;
1698
/* Pop postponed jobs LIFO via CAS, so registration from signal context
 * remains safe while we drain. */
1699 while ((index = vm->postponed_job_index) > 0) {
1700 if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
1701 rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
1702 (*pjob->func)(pjob->data);
1703 }
1704 }
/* Run the spliced workqueue jobs; each node is freed before its job runs,
 * after copying the job out of the node. */
1705 while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
1706 rb_postponed_job_t pjob = wq_job->job;
1707
1708 free(wq_job);
1709 (pjob.func)(pjob.data);
1710 }
1711 }
1712 EC_POP_TAG();
1713 }
1714 /* restore POSTPONED_JOB mask */
1715 ec->interrupt_mask &= ~(saved_mask ^ block_mask);
1716 ec->errinfo = saved_errno;
1717
1718 /* don't leak memory if a job threw an exception */
1719 if (!list_empty(&tmp)) {
/* NOTE(review): extraction gap — original lines 1720/1722/1724 are missing;
 * upstream presumably re-locks the workqueue around the prepend below,
 * unlocks, and re-signals the postponed-job interrupt so leftovers run
 * later.  Confirm against upstream vm_trace.c. */
1721 list_prepend_list(&vm->workqueue, &tmp);
1723
1725 }
1726 }
#define END(name)
Definition: asm.h:115
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
Definition: cxxanyargs.hpp:653
#define rb_define_global_function(mid, func, arity)
Defines rb_mKernel #mid.
Definition: cxxanyargs.hpp:678
struct RIMemo * ptr
Definition: debug.c:88
rb_event_hook_flag_t
Definition: debug.h:92
@ RUBY_EVENT_HOOK_FLAG_DELETED
Definition: debug.h:94
@ RUBY_EVENT_HOOK_FLAG_SAFE
Definition: debug.h:93
@ RUBY_EVENT_HOOK_FLAG_RAW_ARG
Definition: debug.h:95
void(* rb_postponed_job_func_t)(void *arg)
Definition: debug.h:86
#define RETURN(val)
Definition: dir.c:306
#define MJIT_FUNC_EXPORTED
Definition: dllexport.h:55
#define free(x)
Definition: dln.c:52
int max
Definition: enough.c:225
#define sym(name)
Definition: enumerator.c:4007
int rb_ec_set_raised(rb_execution_context_t *ec)
Definition: thread.c:2587
#define EC_EXEC_TAG()
Definition: eval_intern.h:193
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:196
#define EC_POP_TAG()
Definition: eval_intern.h:138
int rb_ec_reset_raised(rb_execution_context_t *ec)
Definition: thread.c:2597
#define RUBY_EVENT_END
Definition: event.h:32
#define RUBY_EVENT_C_CALL
Definition: event.h:35
#define RUBY_EVENT_TRACEPOINT_ALL
Definition: event.h:47
#define RUBY_EVENT_RAISE
Definition: event.h:37
#define RUBY_EVENT_B_RETURN
Definition: event.h:42
#define RUBY_EVENT_SCRIPT_COMPILED
Definition: event.h:46
#define RUBY_INTERNAL_EVENT_MASK
Definition: event.h:64
#define RUBY_EVENT_ALL
Definition: event.h:38
#define RUBY_EVENT_THREAD_BEGIN
Definition: event.h:43
#define RUBY_EVENT_CLASS
Definition: event.h:31
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Definition: event.h:67
#define RUBY_EVENT_LINE
Definition: event.h:30
#define RUBY_EVENT_RETURN
Definition: event.h:34
#define RUBY_EVENT_C_RETURN
Definition: event.h:36
#define RUBY_EVENT_B_CALL
Definition: event.h:41
#define RUBY_INTERNAL_EVENT_FREEOBJ
Definition: event.h:57
uint32_t rb_event_flag_t
Definition: event.h:66
#define RUBY_EVENT_CALL
Definition: event.h:33
#define RUBY_INTERNAL_EVENT_NEWOBJ
Definition: event.h:56
#define RUBY_EVENT_THREAD_END
Definition: event.h:44
#define UNLIKELY(x)
Definition: ffi_common.h:126
#define LIKELY(x)
Definition: ffi_common.h:125
#define FL_SINGLETON
Definition: fl_type.h:49
#define PRIsVALUE
Definition: function.c:10
void rb_objspace_set_event_hook(const rb_event_flag_t event)
Definition: gc.c:2052
void rb_gc_mark(VALUE ptr)
Definition: gc.c:6112
VALUE rb_cThread
Definition: vm.c:374
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:748
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:935
#define FL_TEST
Definition: fl_type.h:130
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2917
void rb_bug(const char *fmt,...)
Definition: error.c:768
VALUE rb_ident_hash_new(void)
Definition: hash.c:4443
VALUE rb_eTypeError
Definition: error.c:1057
VALUE rb_eRuntimeError
Definition: error.c:1055
VALUE rb_eArgError
Definition: error.c:1058
VALUE rb_iseqw_new(const rb_iseq_t *)
Definition: iseq.c:1217
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1148
VALUE rb_cObject
Object class.
Definition: object.c:49
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:92
void rb_hash_foreach(VALUE hash, rb_foreach_func *func, VALUE farg)
Definition: hash.c:1498
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2901
VALUE rb_hash_new(void)
Definition: hash.c:1538
VALUE rb_funcall(VALUE, ID, int,...)
Calls a method.
Definition: vm_eval.c:1077
Defines RBIMPL_HAS_BUILTIN.
#define rb_ary_new3
Definition: array.h:73
VALUE rb_obj_is_method(VALUE)
Definition: proc.c:1590
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
VALUE rb_proc_call_with_block(VALUE, int argc, const VALUE *argv, VALUE)
Definition: proc.c:1013
VALUE rb_block_proc(void)
Definition: proc.c:826
VALUE rb_binding_new(void)
Definition: proc.c:364
#define rb_str_new2
Definition: string.h:276
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1234
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:954
#define ID2SYM
Definition: symbol.h:44
VALUE rb_sym2str(VALUE)
Definition: symbol.c:927
ID rb_intern(const char *)
Definition: symbol.c:785
#define CALL(n)
Definition: inits.c:18
#define FIX2INT
Definition: int.h:41
#define NUM2UINT
Definition: int.h:45
Internal header for Hash.
VALUE rb_to_symbol_type(VALUE obj)
Definition: symbol.c:1211
const char * filename
Definition: ioapi.h:137
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1087
VALUE rb_cISeq
Definition: iseq.c:46
VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc)
Definition: iseq.c:2993
const rb_iseq_t * rb_iseqw_to_iseq(VALUE iseqw)
Definition: iseq.c:1410
void rb_iseq_trace_set_all(rb_event_flag_t turnon_events)
Definition: iseq.c:3379
VALUE rb_iseq_first_lineno(const rb_iseq_t *iseq)
Definition: iseq.c:1117
int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval)
Definition: iseq.c:3322
int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
Definition: iseq.c:3266
#define ISEQ_TRACE_EVENTS
Definition: iseq.h:68
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1341
#define INT2FIX
Definition: long.h:48
#define ALLOC_N
Definition: memory.h:133
#define RB_GC_GUARD(v)
Definition: memory.h:91
int rb_method_entry_arity(const rb_method_entry_t *me)
Definition: proc.c:2684
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:114
VALUE rb_unnamed_parameters(int arity)
Definition: proc.c:1393
const rb_method_entry_t * rb_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_class)
Definition: vm_method.c:1247
bool mjit_call_p
Definition: mjit_worker.c:205
const int id
Definition: nkf.c:209
#define TRUE
Definition: nkf.h:175
#define FALSE
Definition: nkf.h:174
#define RARRAY_AREF(a, i)
Definition: psych_emitter.c:7
rb_execution_context_t * rb_vm_main_ractor_ec(rb_vm_t *vm)
Definition: ractor.c:1978
#define RARRAY_LEN
Definition: rarray.h:52
#define RBASIC(obj)
Definition: rbasic.h:34
#define NULL
Definition: regenc.h:69
#define RB_OBJ_WRITTEN(a, oldv, b)
WB for new reference from ‘a’ to ‘b’.
Definition: rgengc.h:114
VALUE rb_mRubyVMFrozenCore
Definition: vm.c:375
#define RUBY_TYPED_DEFAULT_FREE
Definition: rtypeddata.h:44
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: rtypeddata.h:130
@ RUBY_TYPED_FREE_IMMEDIATELY
Definition: rtypeddata.h:62
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: rtypeddata.h:122
int argc
Definition: ruby.c:240
char ** argv
Definition: ruby.c:241
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:5
#define Qundef
#define Qtrue
#define RTEST
#define Qnil
#define Qfalse
#define NIL_P
VALUE rb_sprintf(const char *,...)
Definition: sprintf.c:1203
#define malloc
Definition: st.c:170
@ ST_CONTINUE
Definition: st.h:99
const rb_iseq_t * iseq
Definition: vm_core.h:772
rb_event_flag_t events
Definition: vm_trace.c:40
rb_thread_t * th
Definition: vm_trace.c:46
unsigned int target_line
Definition: vm_trace.c:47
struct rb_event_hook_struct::@204 filter
rb_event_hook_func_t func
Definition: vm_trace.c:41
rb_event_hook_flag_t hook_flags
Definition: vm_trace.c:39
struct rb_event_hook_struct * next
Definition: vm_trace.c:43
VALUE local_storage_recursive_hash_for_trace
Definition: vm_core.h:876
rb_atomic_t interrupt_mask
Definition: vm_core.h:865
rb_control_frame_t * cfp
Definition: vm_core.h:858
struct rb_vm_tag * tag
Definition: vm_core.h:860
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:886
unsigned int need_clean
Definition: vm_core.h:556
unsigned int running
Definition: vm_core.h:557
rb_event_flag_t events
Definition: vm_core.h:555
struct rb_event_hook_struct * hooks
Definition: vm_core.h:554
Definition: method.h:54
rb_postponed_job_func_t func
Definition: vm_trace.c:1543
rb_thread_t * target_th
Definition: vm_trace.c:704
VALUE proc
Definition: vm_trace.c:711
rb_event_flag_t events
Definition: vm_trace.c:702
VALUE self
Definition: vm_trace.c:713
rb_ractor_t * ractor
Definition: vm_trace.c:712
void * data
Definition: vm_trace.c:710
void(* func)(VALUE tpval, void *data)
Definition: vm_trace.c:709
VALUE local_target_set
Definition: vm_trace.c:705
rb_event_flag_t event
Definition: vm_core.h:1934
rb_execution_context_t * ec
Definition: vm_core.h:1935
const rb_control_frame_t * cfp
Definition: vm_core.h:1936
VALUE self
Definition: vm_core.h:565
struct rb_postponed_job_struct * postponed_job_buffer
Definition: vm_core.h:634
struct list_head workqueue
Definition: vm_core.h:640
rb_atomic_t postponed_job_index
Definition: vm_core.h:635
rb_nativethread_lock_t workqueue_lock
Definition: vm_core.h:641
struct rb_vm_tag * prev
Definition: vm_core.h:812
struct list_node jnode
Definition: vm_trace.c:1551
rb_postponed_job_t job
Definition: vm_trace.c:1552
Definition: blast.c:41
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:442
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:448
#define ALLOC(size)
Definition: unzip.c:112
unsigned long VALUE
Definition: value.h:38
unsigned long ID
Definition: value.h:39
#define T_ICLASS
Definition: value_type.h:65
#define T_ARRAY
Definition: value_type.h:55
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
Definition: vm.c:1172
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2424
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:589
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:577
rb_event_flag_t ruby_vm_event_flags
Definition: vm.c:403
unsigned int ruby_vm_event_local_num
Definition: vm.c:405
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2440
rb_event_flag_t ruby_vm_event_enabled_global_flags
Definition: vm.c:404
int rb_vm_get_sourceline(const rb_control_frame_t *cfp)
Definition: vm_backtrace.c:71
#define TAG_NONE
Definition: vm_core.h:198
void rb_vm_pop_frame(rb_execution_context_t *ec)
ruby_tag_type
Definition: vm_core.h:185
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec)
Definition: vm_core.h:1877
#define VM_ASSERT(expr)
Definition: vm_core.h:61
@ POSTPONED_JOB_INTERRUPT_MASK
Definition: vm_core.h:1869
@ TRAP_INTERRUPT_MASK
Definition: vm_core.h:1870
@ VM_FRAME_MAGIC_BLOCK
Definition: vm_core.h:1179
VALUE rb_tracearg_binding(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:917
VALUE rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:856
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:161
struct rb_postponed_job_struct rb_postponed_job_t
void Init_vm_trace(void)
Definition: vm_trace.c:1531
VALUE rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:991
void(* rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg)
Definition: vm_trace.c:51
VALUE rb_tracepoint_enabled_p(VALUE tpval)
Definition: vm_trace.c:1347
const rb_method_definition_t * rb_method_def(VALUE method)
Definition: proc.c:2802
VALUE rb_tracearg_object(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:1018
VALUE rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:903
VALUE rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:910
#define MATCH_ANY_FILTER_TH
Definition: vm_trace.c:219
struct rb_trace_arg_struct * rb_tracearg_from_tracepoint(VALUE tpval)
Definition: vm_trace.c:796
VALUE rb_suppress_tracing(VALUE(*func)(VALUE), VALUE arg)
Definition: vm_trace.c:418
VALUE rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:952
postponed_job_register_result
Definition: vm_trace.c:1564
@ PJRR_INTERRUPTED
Definition: vm_trace.c:1567
@ PJRR_SUCCESS
Definition: vm_trace.c:1565
@ PJRR_FULL
Definition: vm_trace.c:1566
void rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:155
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION
Definition: vm_trace.c:1548
VALUE rb_tracepoint_disable(VALUE tpval)
Definition: vm_trace.c:1233
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:265
VALUE rb_tracearg_self(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:931
VALUE rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void(*func)(VALUE, void *), void *data)
Definition: vm_trace.c:1407
void rb_hook_list_free(rb_hook_list_t *hooks)
Definition: vm_trace.c:69
int rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
Definition: vm_trace.c:253
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1627
VALUE rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:937
void Init_vm_postponed_job(void)
Definition: vm_trace.c:1556
struct rb_tp_struct rb_tp_t
rb_event_flag_t rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:802
void rb_ec_clear_all_trace_func(const rb_execution_context_t *ec)
Definition: vm_trace.c:283
void rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
Definition: vm_trace.c:365
struct rb_event_hook_struct rb_event_hook_t
#define C(name, NAME)
VALUE rb_tracearg_path(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:828
VALUE rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:967
int rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
Definition: vm_trace.c:259
VALUE rb_tracepoint_enable(VALUE tpval)
Definition: vm_trace.c:1125
#define MAX_POSTPONED_JOB
Definition: vm_trace.c:1547
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:56
int rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1608
VALUE rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:896
int rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
Definition: vm_trace.c:271
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
Definition: vm_trace.c:277
void rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
Definition: vm_trace.c:168
void rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
Definition: vm_trace.c:174
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
Definition: vm_trace.c:1268
VALUE rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:822
int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1656
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
Definition: vm_trace.c:1258
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1676
VALUE rb_tracearg_event(rb_trace_arg_t *trace_arg)
Definition: vm_trace.c:808
#define stat
Definition: win32.h:195
#define xfree
Definition: xmalloc.h:49
int def(FILE *source, FILE *dest, int level)
Definition: zpipe.c:36
#define ZALLOC(strm, items, size)
Definition: zutil.h:266