Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
thread.c
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12  YARV Thread Design
13
14  model 1: Userlevel Thread
15    Same as a traditional ruby thread.
16
17  model 2: Native Thread with Global VM lock
18    Uses native threads (pthreads or Windows threads); Ruby threads run concurrently.
19
20  model 3: Native Thread with fine grain lock
21    Uses native threads; Ruby threads run concurrently or in parallel.
22
23  model 4: M:N User:Native threads with Global VM lock
24    Combination of models 1 and 2
25
26  model 5: M:N User:Native thread with fine grain lock
27    Combination of models 1 and 3
28
29------------------------------------------------------------------------
30
31  model 2:
32    Only the thread that holds the mutex (GVL: Global VM Lock, or Giant
33    VM Lock) can run. When scheduling, the running thread releases the
34    GVL. Before a blocking operation, the thread must release the GVL so
35    that another thread can continue running. After the blocking
36    operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).
37
38    Every VM can run in parallel.
39
40    Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44  model 3:
45    All threads run concurrently or in parallel, and exclusive access
46    control is needed to touch shared objects. For example, a fine-grained
47    lock must be taken every time a String or Array object is accessed.
48 */
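/* ----------------------------------------------------------------------
 * Editor's illustrative sketch (not part of thread.c): the model-2
 * discipline described above, written with the GVL_UNLOCK_BEGIN/END
 * macros defined later in this file. `th', `fd', `buf', `len' and `r'
 * are hypothetical names.
 */
#if 0
    GVL_UNLOCK_BEGIN(th);
    {
        /* Runs without the GVL: other Ruby threads may execute in the
         * meantime, and no Ruby objects may be touched in here. */
        r = read(fd, buf, len);
    }
    GVL_UNLOCK_END(th);
    RUBY_VM_CHECK_INTS(th->ec); /* mandatory interrupt check afterwards */
#endif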
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when glibc
53 * 2.15 or later is used with _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). So when HAVE_RB_FD_INIT is enabled,
57 * it doesn't work correctly and makes the program abort. Therefore we need to
58 * disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#include "eval_intern.h"
74#include "gc.h"
75#include "hrtime.h"
76#include "internal.h"
77#include "internal/class.h"
78#include "internal/cont.h"
79#include "internal/error.h"
80#include "internal/hash.h"
81#include "internal/io.h"
82#include "internal/object.h"
83#include "internal/proc.h"
84#include "internal/scheduler.h"
85#include "internal/signal.h"
86#include "internal/thread.h"
87#include "internal/time.h"
88#include "internal/warnings.h"
89#include "iseq.h"
90#include "mjit.h"
91#include "ruby/debug.h"
92#include "ruby/io.h"
93#include "ruby/thread.h"
94#include "ruby/thread_native.h"
95#include "timev.h"
96#include "vm_core.h"
97#include "ractor_core.h"
98#include "vm_debug.h"
99#include "vm_sync.h"
100
101#ifndef USE_NATIVE_THREAD_PRIORITY
102#define USE_NATIVE_THREAD_PRIORITY 0
103#define RUBY_THREAD_PRIORITY_MAX 3
104#define RUBY_THREAD_PRIORITY_MIN -3
105#endif
106
107#ifndef THREAD_DEBUG
108#define THREAD_DEBUG 0
109#endif
110
111static VALUE rb_cThreadShield;
112
113static VALUE sym_immediate;
114static VALUE sym_on_blocking;
115static VALUE sym_never;
116
117enum SLEEP_FLAGS {
118 SLEEP_DEADLOCKABLE = 0x1,
119 SLEEP_SPURIOUS_CHECK = 0x2
120};
121
122#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
123#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
124
125static inline VALUE
126rb_thread_local_storage(VALUE thread)
127{
128 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
129 rb_ivar_set(thread, idLocals, rb_hash_new());
130 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
131 }
132 return rb_ivar_get(thread, idLocals);
133}
134
135static void sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
136static void sleep_forever(rb_thread_t *th, unsigned int fl);
137static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker);
138static int rb_threadptr_dead(rb_thread_t *th);
139static void rb_check_deadlock(rb_ractor_t *r);
140static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
141static const char *thread_status_name(rb_thread_t *th, int detail);
142static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
143NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
144static int consume_communication_pipe(int fd);
145static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
146void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
147
148#define eKillSignal INT2FIX(0)
149#define eTerminateSignal INT2FIX(1)
150static volatile int system_working = 1;
151
152struct waiting_fd {
153 struct list_node wfd_node; /* <=> vm.waiting_fds */
154 rb_thread_t *th;
155 int fd;
156};
157
158/********************************************************************************/
159
160#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
161
161struct rb_blocking_region_buffer {
162 struct rb_unblock_callback oldubf;
163 enum rb_thread_status prev_status;
164};
165
166static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
167static void unblock_function_clear(rb_thread_t *th);
168
169static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
170 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
171static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
172
173#define GVL_UNLOCK_BEGIN(th) do { \
174 RB_GC_SAVE_MACHINE_CONTEXT(th); \
175 gvl_release(rb_ractor_gvl(th->ractor));
176
177#define GVL_UNLOCK_END(th) \
178 gvl_acquire(rb_ractor_gvl(th->ractor), th); \
179 rb_ractor_thread_switch(th->ractor, th); \
180} while(0)
181
182#ifdef __GNUC__
183#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
184#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
185#else
186#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
187#endif
188#else
189#define only_if_constant(expr, notconst) notconst
190#endif
191#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
192 struct rb_blocking_region_buffer __region; \
193 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
194 /* always return true unless fail_if_interrupted */ \
195 !only_if_constant(fail_if_interrupted, TRUE)) { \
196 exec; \
197 blocking_region_end(th, &__region); \
198 }; \
199} while(0)
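/* ----------------------------------------------------------------------
 * Editor's illustrative sketch (not part of thread.c): how BLOCKING_REGION
 * is typically invoked in this file (compare rb_nogvl() below). The body
 * runs with the GVL released while `ubf_select'/`th' are registered as the
 * unblocking function; `func', `data1', `val' and `saved_errno' are
 * hypothetical names.
 */
#if 0
    BLOCKING_REGION(th, {
        val = func(data1);   /* blocking call, GVL released */
        saved_errno = errno; /* capture errno before the GVL is reacquired */
    }, ubf_select, th, FALSE);
#endif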
200
201/*
202 * returns true if this thread was spuriously interrupted, false otherwise
203 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
204 */
205#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
206static inline int
207vm_check_ints_blocking(rb_execution_context_t *ec)
208{
209 rb_thread_t *th = rb_ec_thread_ptr(ec);
210
211 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
212 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
213 }
214 else {
215 th->pending_interrupt_queue_checked = 0;
216 RUBY_VM_SET_INTERRUPT(ec);
217 }
218 return rb_threadptr_execute_interrupts(th, 1);
219}
220
221int
222rb_vm_check_ints_blocking(rb_execution_context_t *ec)
223{
224 return vm_check_ints_blocking(ec);
225}
226
227/*
228 * poll() is supported by many OSes, but so far Linux is the only
229 * one we know of that supports using poll() in all places select()
230 * would work.
231 */
232#if defined(HAVE_POLL)
233# if defined(__linux__)
234# define USE_POLL
235# endif
236# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
237# define USE_POLL
238 /* FreeBSD does not set POLLOUT when POLLHUP happens */
239# define POLLERR_SET (POLLHUP | POLLERR)
240# endif
241#endif
242
243static void
244timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
245 const struct timeval *timeout)
246{
247 if (timeout) {
248 *rel = rb_timeval2hrtime(timeout);
249 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
250 *to = rel;
251 }
252 else {
253 *to = 0;
254 }
255}
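/* ----------------------------------------------------------------------
 * Editor's illustrative sketch (not part of thread.c): how a caller would
 * drive a wait loop with timeout_prepare(). After the call, *to is NULL
 * for "wait forever"; otherwise it points at the remaining relative time
 * and `end' holds the absolute deadline for hrtime_update_expire().
 * example_wait() is a hypothetical name.
 */
#if 0
static void
example_wait(const struct timeval *tv)
{
    rb_hrtime_t *to, rel, end;

    timeout_prepare(&to, &rel, &end, tv);
    do {
        /* ... sleep/poll using *to as the remaining time budget ... */
    } while (to == NULL || !hrtime_update_expire(to, end));
}
#endif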
256
257#if THREAD_DEBUG
258#ifdef HAVE_VA_ARGS_MACRO
259void rb_thread_debug(const char *file, int line, const char *fmt, ...);
260#define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
261#define POSITION_FORMAT "%s:%d:"
262#define POSITION_ARGS ,file, line
263#else
264void rb_thread_debug(const char *fmt, ...);
265#define thread_debug rb_thread_debug
266#define POSITION_FORMAT
267#define POSITION_ARGS
268#endif
269
270# ifdef NON_SCALAR_THREAD_ID
271#define fill_thread_id_string ruby_fill_thread_id_string
272const char *
273ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
274{
275 extern const char ruby_digitmap[];
276 size_t i;
277
278 buf[0] = '0';
279 buf[1] = 'x';
280 for (i = 0; i < sizeof(thid); i++) {
281# ifdef LITTLE_ENDIAN
282 size_t j = sizeof(thid) - i - 1;
283# else
284 size_t j = i;
285# endif
286 unsigned char c = (unsigned char)((char *)&thid)[j];
287 buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
288 buf[3 + i * 2] = ruby_digitmap[c & 0xf];
289 }
290 buf[sizeof(rb_thread_id_string_t)-1] = '\0';
291 return buf;
292}
293# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
294# define thread_id_str(th) ((th)->thread_id_string)
295# define PRI_THREAD_ID "s"
296# endif
297
298# if THREAD_DEBUG < 0
299static int rb_thread_debug_enabled;
300
301/*
302 * call-seq:
303 * Thread.DEBUG -> num
304 *
305 * Returns the thread debug level. Available only if compiled with
306 * THREAD_DEBUG=-1.
307 */
308
309static VALUE
310rb_thread_s_debug(VALUE _)
311{
312 return INT2NUM(rb_thread_debug_enabled);
313}
314
315/*
316 * call-seq:
317 * Thread.DEBUG = num
318 *
319 * Sets the thread debug level. Available only if compiled with
320 * THREAD_DEBUG=-1.
321 */
322
323static VALUE
324rb_thread_s_debug_set(VALUE self, VALUE val)
325{
326 rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
327 return val;
328}
329# else
330# define rb_thread_debug_enabled THREAD_DEBUG
331# endif
332#else
333#define thread_debug if(0)printf
334#endif
335
336#ifndef fill_thread_id_str
337# define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
338# define fill_thread_id_str(th) (void)0
339# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
340# define PRI_THREAD_ID "p"
341#endif
342
343NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
344void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
345
346static void
347ubf_sigwait(void *ignore)
348{
349 rb_thread_wakeup_timer_thread(0);
350}
351
352#if defined(_WIN32)
353#include "thread_win32.c"
354
355#define DEBUG_OUT() \
356 WaitForSingleObject(&debug_mutex, INFINITE); \
357 printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
358 fflush(stdout); \
359 ReleaseMutex(&debug_mutex);
360
361#elif defined(HAVE_PTHREAD_H)
362#include "thread_pthread.c"
363
364#define DEBUG_OUT() \
365 pthread_mutex_lock(&debug_mutex); \
366 printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
367 fill_thread_id_string(pthread_self(), thread_id_string), buf); \
368 fflush(stdout); \
369 pthread_mutex_unlock(&debug_mutex);
370
371#else
372#error "unsupported thread type"
373#endif
374
375/*
376 * TODO: somebody with win32 knowledge should be able to get rid of
377 * timer-thread by busy-waiting on signals. And it should be possible
378 * to make the GVL in thread_pthread.c be platform-independent.
379 */
380#ifndef BUSY_WAIT_SIGNALS
381# define BUSY_WAIT_SIGNALS (0)
382#endif
383
384#ifndef USE_EVENTFD
385# define USE_EVENTFD (0)
386#endif
387
388#if THREAD_DEBUG
389static int debug_mutex_initialized = 1;
390static rb_nativethread_lock_t debug_mutex;
391
392void
393rb_thread_debug(
394#ifdef HAVE_VA_ARGS_MACRO
395 const char *file, int line,
396#endif
397 const char *fmt, ...)
398{
399 va_list args;
400 char buf[BUFSIZ];
401#ifdef NON_SCALAR_THREAD_ID
402 rb_thread_id_string_t thread_id_string;
403#endif
404
405 if (!rb_thread_debug_enabled) return;
406
407 if (debug_mutex_initialized == 1) {
408 debug_mutex_initialized = 0;
409 rb_native_mutex_initialize(&debug_mutex);
410 }
411
412 va_start(args, fmt);
413 vsnprintf(buf, BUFSIZ, fmt, args);
414 va_end(args);
415
416 DEBUG_OUT();
417}
418#endif
419
420#include "thread_sync.c"
421
422void
423rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
424{
425 gvl_release(gvl);
426 gvl_destroy(gvl);
427}
428
429void
430rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
431{
432 rb_native_mutex_initialize(lock);
433}
434
435void
436rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
437{
438 rb_native_mutex_destroy(lock);
439}
440
441void
442rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
443{
444 rb_native_mutex_lock(lock);
445}
446
447void
448rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
449{
450 rb_native_mutex_unlock(lock);
451}
452
453static int
454unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
455{
456 do {
457 if (fail_if_interrupted) {
458 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
459 return FALSE;
460 }
461 }
462 else {
463 RUBY_VM_CHECK_INTS(th->ec);
464 }
465
466 rb_native_mutex_lock(&th->interrupt_lock);
467 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
468 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
469
470 VM_ASSERT(th->unblock.func == NULL);
471
472 th->unblock.func = func;
473 th->unblock.arg = arg;
474 rb_native_mutex_unlock(&th->interrupt_lock);
475
476 return TRUE;
477}
478
479static void
480unblock_function_clear(rb_thread_t *th)
481{
482 rb_native_mutex_lock(&th->interrupt_lock);
483 th->unblock.func = 0;
484 rb_native_mutex_unlock(&th->interrupt_lock);
485}
486
487static void
488rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
489{
490 rb_native_mutex_lock(&th->interrupt_lock);
491
492 if (trap) {
493 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
494 }
495 else {
496 RUBY_VM_SET_INTERRUPT(th->ec);
497 }
498 if (th->unblock.func != NULL) {
499 (th->unblock.func)(th->unblock.arg);
500 }
501 else {
502 /* none */
503 }
504 rb_native_mutex_unlock(&th->interrupt_lock);
505}
506
507void
508rb_threadptr_interrupt(rb_thread_t *th)
509{
510 rb_threadptr_interrupt_common(th, 0);
511}
512
513static void
514threadptr_trap_interrupt(rb_thread_t *th)
515{
516 rb_threadptr_interrupt_common(th, 1);
517}
518
519static void
520terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
521{
522 rb_thread_t *th = 0;
523
524 list_for_each(&r->threads.set, th, lt_node) {
525 if (th != main_thread) {
526 thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
527 thread_id_str(th), thread_status_name(th, TRUE));
528 rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
529 rb_threadptr_interrupt(th);
530 thread_debug("terminate_all: end (thid: %"PRI_THREAD_ID", status: %s)\n",
531 thread_id_str(th), thread_status_name(th, TRUE));
532 }
533 else {
534 thread_debug("terminate_all: main thread (%p)\n", (void *)th);
535 }
536 }
537}
538
539static void
540rb_threadptr_join_list_wakeup(rb_thread_t *thread)
541{
542 while (thread->join_list) {
543 struct rb_waiting_list *join_list = thread->join_list;
544
545 // Consume the entry from the join list:
546 thread->join_list = join_list->next;
547
548 rb_thread_t *target_thread = join_list->thread;
549
550 if (target_thread->scheduler != Qnil && rb_fiberptr_blocking(join_list->fiber) == 0) {
551 rb_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
552 } else {
553 rb_threadptr_interrupt(target_thread);
554
555 switch (target_thread->status) {
556 case THREAD_STOPPED:
557 case THREAD_STOPPED_FOREVER:
558 target_thread->status = THREAD_RUNNABLE;
559 default:
560 break;
561 }
562 }
563 }
564}
565
566void
567rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
568{
569 while (th->keeping_mutexes) {
570 rb_mutex_t *mutex = th->keeping_mutexes;
571 th->keeping_mutexes = mutex->next_mutex;
572
573 /* rb_warn("mutex #<%p> remains to be locked by terminated thread", (void *)mutexes); */
574
575 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
576 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
577 }
578}
579
580void
581rb_thread_terminate_all(rb_thread_t *th)
582{
583 rb_ractor_t *cr = th->ractor;
584 rb_execution_context_t * volatile ec = th->ec;
585 volatile int sleeping = 0;
586
587 if (cr->threads.main != th) {
588 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
589 (void *)cr->threads.main, (void *)th);
590 }
591
592 /* unlock all locking mutexes */
593 rb_threadptr_unlock_all_locking_mutexes(th);
594
595 EC_PUSH_TAG(ec);
596 if (EC_EXEC_TAG() == TAG_NONE) {
597 retry:
598 thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
599 terminate_all(cr, th);
600
601 while (rb_ractor_living_thread_num(cr) > 1) {
602 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
603 /*
604 * The thread-exit routine in thread_start_func_2 will notify
605 * me when the last sub-thread exits.
606 */
607 sleeping = 1;
608 native_sleep(th, &rel);
609 RUBY_VM_CHECK_INTS_BLOCKING(ec);
610 sleeping = 0;
611 }
612 }
613 else {
614 /*
615 * When caught an exception (e.g. Ctrl+C), let's broadcast
616 * kill request again to ensure killing all threads even
617 * if they are blocked on sleep, mutex, etc.
618 */
619 if (sleeping) {
620 sleeping = 0;
621 goto retry;
622 }
623 }
624 EC_POP_TAG();
625}
626
628
629static void
630thread_cleanup_func_before_exec(void *th_ptr)
631{
632 rb_thread_t *th = th_ptr;
633 th->status = THREAD_KILLED;
634
635 // The thread stack doesn't exist in the forked process:
636 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
637
639}
640
641static void
642thread_cleanup_func(void *th_ptr, int atfork)
643{
644 rb_thread_t *th = th_ptr;
645
646 th->locking_mutex = Qfalse;
647 thread_cleanup_func_before_exec(th_ptr);
648
649 /*
650 * Unfortunately, we can't release native threading resource at fork
651 * because libc may be left in an unstable locking state, and touching
652 * a threading resource may then cause a deadlock.
653 *
654 * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
655 * with NPTL, but native_thread_destroy calls pthread_cond_destroy
656 * which calls free(3), so there is a small memory leak at fork here.
657 */
658 if (atfork)
659 return;
660
661 rb_native_mutex_destroy(&th->interrupt_lock);
662 native_thread_destroy(th);
663}
664
665static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
666static VALUE rb_thread_to_s(VALUE thread);
667
668void
669ruby_thread_init_stack(rb_thread_t *th)
670{
671 native_thread_init_stack(th);
672}
673
674const VALUE *
675rb_vm_proc_local_ep(VALUE proc)
676{
677 const VALUE *ep = vm_proc_ep(proc);
678
679 if (ep) {
680 return rb_vm_ep_local_ep(ep);
681 }
682 else {
683 return NULL;
684 }
685}
686
687// for ractor, defined in vm.c
688VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
689 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
690
691static VALUE
692thread_do_start_proc(rb_thread_t *th)
693{
694 VALUE args = th->invoke_arg.proc.args;
695 const VALUE *args_ptr;
696 int args_len;
697 VALUE procval = th->invoke_arg.proc.proc;
698 rb_proc_t *proc;
699 GetProcPtr(procval, proc);
700
701 th->ec->errinfo = Qnil;
702 th->ec->root_lep = rb_vm_proc_local_ep(procval);
703 th->ec->root_svar = Qfalse;
704
705 vm_check_ints_blocking(th->ec);
706
707 if (th->invoke_type == thread_invoke_type_ractor_proc) {
708 VALUE self = rb_ractor_self(th->ractor);
709 VM_ASSERT(FIXNUM_P(args));
710 args_len = FIX2INT(args);
711 args_ptr = ALLOCA_N(VALUE, args_len);
712 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
713 vm_check_ints_blocking(th->ec);
714
715 return rb_vm_invoke_proc_with_self(
716 th->ec, proc, self,
717 args_len, args_ptr,
718 th->invoke_arg.proc.kw_splat,
719 VM_BLOCK_HANDLER_NONE
720 );
721 }
722 else {
723 args_len = RARRAY_LENINT(args);
724 if (args_len < 8) {
725 /* free proc.args if the length is enough small */
726 args_ptr = ALLOCA_N(VALUE, args_len);
727 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
728 th->invoke_arg.proc.args = Qnil;
729 }
730 else {
731 args_ptr = RARRAY_CONST_PTR(args);
732 }
733
734 vm_check_ints_blocking(th->ec);
735
736 return rb_vm_invoke_proc(
737 th->ec, proc,
738 args_len, args_ptr,
739 th->invoke_arg.proc.kw_splat,
740 VM_BLOCK_HANDLER_NONE
741 );
742 }
743}
744
745static void
746thread_do_start(rb_thread_t *th)
747{
748 native_set_thread_name(th);
749 VALUE result = Qundef;
750
751 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
752
753 switch (th->invoke_type) {
754 case thread_invoke_type_proc:
755 result = thread_do_start_proc(th);
756 break;
757
758 case thread_invoke_type_ractor_proc:
759 result = thread_do_start_proc(th);
760 rb_ractor_atexit(th->ec, result);
761 break;
762
763 case thread_invoke_type_func:
764 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
765 break;
766
767 case thread_invoke_type_none:
768 rb_bug("unreachable");
769 }
770
771 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
772
773 th->value = result;
774
776}
777
779
780// io.c
781VALUE rb_io_prep_stdin(void);
782VALUE rb_io_prep_stdout(void);
783VALUE rb_io_prep_stderr(void);
784
785static int
786thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
787{
788 STACK_GROW_DIR_DETECTION;
789 enum ruby_tag_type state;
790 VALUE errinfo = Qnil;
791 size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
792 rb_thread_t *ractor_main_th = th->ractor->threads.main;
793 VALUE * vm_stack = NULL;
794
795 VM_ASSERT(th != th->vm->ractor.main_thread);
796 thread_debug("thread start: %p\n", (void *)th);
797
798 // setup native thread
799 gvl_acquire(rb_ractor_gvl(th->ractor), th);
800 ruby_thread_set_native(th);
801
802 // setup ractor
803 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
804 RB_VM_LOCK();
805 {
806 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
807 rb_ractor_t *r = th->ractor;
808 r->r_stdin = rb_io_prep_stdin();
809 r->r_stdout = rb_io_prep_stdout();
810 r->r_stderr = rb_io_prep_stderr();
811 }
812 RB_VM_UNLOCK();
813 }
814
815 // This assertion does not hold on win32; check it later.
816 // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
817
818 // setup VM and machine stack
819 vm_stack = alloca(size * sizeof(VALUE));
820 VM_ASSERT(vm_stack);
821
822 rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
823 th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
824 th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
825
826 thread_debug("thread start (get lock): %p\n", (void *)th);
827
828 // Ensure that we are not joinable.
829 VM_ASSERT(th->value == Qundef);
830
831 EC_PUSH_TAG(th->ec);
832
833 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
834 SAVE_ROOT_JMPBUF(th, thread_do_start(th));
835 } else {
836 errinfo = th->ec->errinfo;
837
838 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
839 if (!NIL_P(exc)) errinfo = exc;
840
841 if (state == TAG_FATAL) {
842 if (th->invoke_type == thread_invoke_type_ractor_proc) {
843 rb_ractor_atexit_exception(th->ec);
844 }
845 /* fatal error within this thread, need to stop whole script */
846 }
847 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
848 /* exit on main_thread. */
849 }
850 else {
851 if (th->report_on_exception) {
852 VALUE mesg = rb_thread_to_s(th->self);
853 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
854 rb_write_error_str(mesg);
855 rb_ec_error_print(th->ec, errinfo);
856 }
857
858 if (th->invoke_type == thread_invoke_type_ractor_proc) {
859 rb_ractor_atexit_exception(th->ec);
860 }
861
862 if (th->vm->thread_abort_on_exception ||
863 th->abort_on_exception || RTEST(ruby_debug)) {
864 /* exit on main_thread */
865 }
866 else {
867 errinfo = Qnil;
868 }
869 }
870 th->value = Qnil;
871 }
872
873 // The thread is effectively finished and can be joined.
874 VM_ASSERT(th->value != Qundef);
875
876 rb_threadptr_join_list_wakeup(th);
877 rb_threadptr_unlock_all_locking_mutexes(th);
878
879 if (th->invoke_type == thread_invoke_type_ractor_proc) {
880 rb_thread_terminate_all(th);
881 rb_ractor_teardown(th->ec);
882 }
883
884 th->status = THREAD_KILLED;
885 thread_debug("thread end: %p\n", (void *)th);
886
887 if (th->vm->ractor.main_thread == th) {
888 ruby_stop(0);
889 }
890
891 if (RB_TYPE_P(errinfo, T_OBJECT)) {
892 /* treat with normal error object */
893 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
894 }
895
896 EC_POP_TAG();
897
899
900 /* locking_mutex must be Qfalse */
901 if (th->locking_mutex != Qfalse) {
902 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
903 (void *)th, th->locking_mutex);
904 }
905
906 if (ractor_main_th->status == THREAD_KILLED &&
907 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
908 /* I'm last thread. wake up main thread from rb_thread_terminate_all */
909 rb_threadptr_interrupt(ractor_main_th);
910 }
911
912 rb_check_deadlock(th->ractor);
913
914 rb_fiber_close(th->ec->fiber_ptr);
915
916 thread_cleanup_func(th, FALSE);
917 VM_ASSERT(th->ec->vm_stack == NULL);
918
919 if (th->invoke_type == thread_invoke_type_ractor_proc) {
920 // after rb_ractor_living_threads_remove()
921 // GC will happen anytime and this ractor can be collected (and destroy GVL).
922 // So gvl_release() should be before it.
923 gvl_release(rb_ractor_gvl(th->ractor));
924 rb_ractor_living_threads_remove(th->ractor, th);
925 }
926 else {
927 rb_ractor_living_threads_remove(th->ractor, th);
928 gvl_release(rb_ractor_gvl(th->ractor));
929 }
930
931 return 0;
932}
933
934struct thread_create_params {
935 enum thread_invoke_type type;
936
937 // for normal proc thread
938 VALUE args;
939 VALUE proc;
940
941 // for ractor
942 rb_ractor_t *g;
943
944 // for func
945 VALUE (*fn)(void *);
946};
947
948static VALUE
949thread_create_core(VALUE thval, struct thread_create_params *params)
950{
951 rb_execution_context_t *ec = GET_EC();
952 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
953 int err;
954
955 if (OBJ_FROZEN(current_th->thgroup)) {
956 rb_raise(rb_eThreadError,
957 "can't start a new thread (frozen ThreadGroup)");
958 }
959
960 switch (params->type) {
961 case thread_invoke_type_proc:
962 th->invoke_type = thread_invoke_type_proc;
963 th->invoke_arg.proc.args = params->args;
964 th->invoke_arg.proc.proc = params->proc;
965 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
966 break;
967
968 case thread_invoke_type_ractor_proc:
969#if RACTOR_CHECK_MODE > 0
970 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
971#endif
972 th->invoke_type = thread_invoke_type_ractor_proc;
973 th->ractor = params->g;
974 th->ractor->threads.main = th;
975 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
976 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
977 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
978 rb_ractor_send_parameters(ec, params->g, params->args);
979 break;
980
981 case thread_invoke_type_func:
982 th->invoke_type = thread_invoke_type_func;
983 th->invoke_arg.func.func = params->fn;
984 th->invoke_arg.func.arg = (void *)params->args;
985 break;
986
987 default:
988 rb_bug("unreachable");
989 }
990
991 th->priority = current_th->priority;
992 th->thgroup = current_th->thgroup;
993
994 th->pending_interrupt_queue = rb_ary_tmp_new(0);
995 th->pending_interrupt_queue_checked = 0;
996 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
997 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
998
999 rb_native_mutex_initialize(&th->interrupt_lock);
1000
1001 RUBY_DEBUG_LOG("r:%u th:%p", rb_ractor_id(th->ractor), th);
1002
1003 rb_ractor_living_threads_insert(th->ractor, th);
1004
1005 /* kick thread */
1006 err = native_thread_create(th);
1007 if (err) {
1008 th->status = THREAD_KILLED;
1009 rb_ractor_living_threads_remove(th->ractor, th);
1010 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
1011 }
1012 return thval;
1013}
1014
1015#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
1016
1017/*
1018 * call-seq:
1019 * Thread.new { ... } -> thread
1020 * Thread.new(*args, &proc) -> thread
1021 * Thread.new(*args) { |args| ... } -> thread
1022 *
1023 * Creates a new thread executing the given block.
1024 *
1025 * Any +args+ given to ::new will be passed to the block:
1026 *
1027 * arr = []
1028 * a, b, c = 1, 2, 3
1029 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
1030 * arr #=> [1, 2, 3]
1031 *
1032 * A ThreadError exception is raised if ::new is called without a block.
1033 *
1034 * If you're going to subclass Thread, be sure to call super in your
1035 * +initialize+ method, otherwise a ThreadError will be raised.
1036 */
1037static VALUE
1038thread_s_new(int argc, VALUE *argv, VALUE klass)
1039{
1040 rb_thread_t *th;
1041 VALUE thread = rb_thread_alloc(klass);
1042
1043 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
1044 rb_raise(rb_eThreadError, "can't alloc thread");
1045 }
1046
1047 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
1048 th = rb_thread_ptr(thread);
1049 if (!threadptr_initialized(th)) {
1050 rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
1051 klass);
1052 }
1053 return thread;
1054}
1055
1056/*
1057 * call-seq:
1058 * Thread.start([args]*) {|args| block } -> thread
1059 * Thread.fork([args]*) {|args| block } -> thread
1060 *
1061 * Basically the same as ::new. However, if class Thread is subclassed, then
1062 * calling +start+ in that subclass will not invoke the subclass's
1063 * +initialize+ method.
1064 */
1065
1066static VALUE
1067thread_start(VALUE klass, VALUE args)
1068{
1069 struct thread_create_params params = {
1070 .type = thread_invoke_type_proc,
1071 .args = args,
1072 .proc = rb_block_proc(),
1073 };
1074 return thread_create_core(rb_thread_alloc(klass), &params);
1075}
1076
1077static VALUE
1078threadptr_invoke_proc_location(rb_thread_t *th)
1079{
1080 if (th->invoke_type == thread_invoke_type_proc) {
1081 return rb_proc_location(th->invoke_arg.proc.proc);
1082 }
1083 else {
1084 return Qnil;
1085 }
1086}
1087
1088/* :nodoc: */
1089static VALUE
1090thread_initialize(VALUE thread, VALUE args)
1091{
1092 rb_thread_t *th = rb_thread_ptr(thread);
1093
1094 if (!rb_block_given_p()) {
1095 rb_raise(rb_eThreadError, "must be called with a block");
1096 }
1097 else if (th->invoke_type != thread_invoke_type_none) {
1098 VALUE loc = threadptr_invoke_proc_location(th);
1099 if (!NIL_P(loc)) {
1100 rb_raise(rb_eThreadError,
1101 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
1102 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
1103 }
1104 else {
1105 rb_raise(rb_eThreadError, "already initialized thread");
1106 }
1107 }
1108 else {
1109 struct thread_create_params params = {
1110 .type = thread_invoke_type_proc,
1111 .args = args,
1112 .proc = rb_block_proc(),
1113 };
1114 return thread_create_core(thread, &params);
1115 }
1116}
1117
1118VALUE
1119rb_thread_create(VALUE (*fn)(void *), void *arg)
1120{
1121 struct thread_create_params params = {
1122 .type = thread_invoke_type_func,
1123 .fn = fn,
1124 .args = (VALUE)arg,
1125 };
1126 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1127}
1128
1129VALUE
1130rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
1131{
1132 struct thread_create_params params = {
1133 .type = thread_invoke_type_ractor_proc,
1134 .g = g,
1135 .args = args,
1136 .proc = proc,
1137 };
1138 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1139}
1140
1141
1142struct join_arg {
1143 struct rb_waiting_list *waiter;
1144 rb_thread_t *target;
1145 VALUE timeout;
1146};
1147
1148static VALUE
1149remove_from_join_list(VALUE arg)
1150{
1151 struct join_arg *p = (struct join_arg *)arg;
1152 rb_thread_t *target_thread = p->target;
1153
1154 if (target_thread->status != THREAD_KILLED) {
1155 struct rb_waiting_list **join_list = &target_thread->join_list;
1156
1157 while (*join_list) {
1158 if (*join_list == p->waiter) {
1159 *join_list = (*join_list)->next;
1160 break;
1161 }
1162
1163 join_list = &(*join_list)->next;
1164 }
1165 }
1166
1167 return Qnil;
1168}
1169
1170static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);
1171
1172static int
1173thread_finished(rb_thread_t *th)
1174{
1175 return th->status == THREAD_KILLED || th->value != Qundef;
1176}
1177
1178static VALUE
1179thread_join_sleep(VALUE arg)
1180{
1181 struct join_arg *p = (struct join_arg *)arg;
1182 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1183 rb_hrtime_t end = 0, rel = 0, *limit = 0;
1184
1185 /*
1186 * This supports INFINITY and negative values, so we can't use
1187 * rb_time_interval right now...
1188 */
1189 if (p->timeout == Qnil) {
1190 /* unlimited */
1191 }
1192 else if (FIXNUM_P(p->timeout)) {
1193 rel = rb_sec2hrtime(NUM2TIMET(p->timeout));
1194 limit = &rel;
1195 }
1196 else {
1197 limit = double2hrtime(&rel, rb_num2dbl(p->timeout));
1198 }
1199
1200 if (limit) {
1201 end = rb_hrtime_add(*limit, rb_hrtime_now());
1202 }
1203
1204 while (!thread_finished(target_th)) {
1205 VALUE scheduler = rb_scheduler_current();
1206
1207 if (scheduler != Qnil) {
1208 rb_scheduler_block(scheduler, target_th->self, p->timeout);
1209 } else if (!limit) {
1210 th->status = THREAD_STOPPED_FOREVER;
1211 rb_ractor_sleeper_threads_inc(th->ractor);
1212 rb_check_deadlock(th->ractor);
1213 native_sleep(th, 0);
1214 rb_ractor_sleeper_threads_dec(th->ractor);
1215 }
1216 else {
1217 if (hrtime_update_expire(limit, end)) {
1218 thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
1219 thread_id_str(target_th));
1220 return Qfalse;
1221 }
1222 th->status = THREAD_STOPPED;
1223 native_sleep(th, limit);
1224 }
1225 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1226 th->status = THREAD_RUNNABLE;
1227 thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID", status: %s)\n",
1228 thread_id_str(target_th), thread_status_name(target_th, TRUE));
1229 }
1230 return Qtrue;
1231}
1232
1233static VALUE
1234thread_join(rb_thread_t *target_th, VALUE timeout)
1235{
1236 rb_execution_context_t *ec = GET_EC();
1237 rb_thread_t *th = ec->thread_ptr;
1238 rb_fiber_t *fiber = ec->fiber_ptr;
1239
1240 if (th == target_th) {
1241 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1242 }
1243
1244 if (th->ractor->threads.main == target_th) {
1245 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1246 }
1247
1248 thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
1249 thread_id_str(target_th), thread_status_name(target_th, TRUE));
1250
1251 if (target_th->status != THREAD_KILLED) {
1252 struct rb_waiting_list waiter;
1253 waiter.next = target_th->join_list;
1254 waiter.thread = th;
1255 waiter.fiber = fiber;
1256 target_th->join_list = &waiter;
1257
1258 struct join_arg arg;
1259 arg.waiter = &waiter;
1260 arg.target = target_th;
1261 arg.timeout = timeout;
1262
1263 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1264 return Qnil;
1265 }
1266 }
1267
1268 thread_debug("thread_join: success (thid: %"PRI_THREAD_ID", status: %s)\n",
1269 thread_id_str(target_th), thread_status_name(target_th, TRUE));
1270
1271 if (target_th->ec->errinfo != Qnil) {
1272 VALUE err = target_th->ec->errinfo;
1273
1274 if (FIXNUM_P(err)) {
1275 switch (err) {
1276 case INT2FIX(TAG_FATAL):
1277 thread_debug("thread_join: terminated (thid: %"PRI_THREAD_ID", status: %s)\n",
1278 thread_id_str(target_th), thread_status_name(target_th, TRUE));
1279
1280 /* OK. killed. */
1281 break;
1282 default:
1283 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1284 }
1285 }
1286 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1287 rb_bug("thread_join: THROW_DATA should not reach here.");
1288 }
1289 else {
1290 /* normal exception */
1291 rb_exc_raise(err);
1292 }
1293 }
1294 return target_th->self;
1295}
1296
1297/*
1298 * call-seq:
1299 * thr.join -> thr
1300 * thr.join(limit) -> thr
1301 *
1302 * The calling thread will suspend execution and run this +thr+.
1303 *
1304 * Does not return until +thr+ exits or until the given +limit+ seconds have
1305 * passed.
1306 *
1307 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1308 * returned.
1309 *
1310 * Any threads not joined will be killed when the main program exits.
1311 *
1312 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1313 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1314 * will be processed at this time.
1315 *
1316 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1317 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1318 * x.join # Let thread x finish, thread a will be killed on exit.
1319 * #=> "axyz"
1320 *
1321 * The following example illustrates the +limit+ parameter.
1322 *
1323 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1324 * puts "Waiting" until y.join(0.15)
1325 *
1326 * This will produce:
1327 *
1328 * tick...
1329 * Waiting
1330 * tick...
1331 * Waiting
1332 * tick...
1333 * tick...
1334 */
1335
1336static VALUE
1337thread_join_m(int argc, VALUE *argv, VALUE self)
1338{
1339 VALUE timeout = Qnil;
1340
1341 if (rb_check_arity(argc, 0, 1)) {
1342 timeout = argv[0];
1343 }
1344
1345 // Convert the timeout eagerly, so it's always converted and deterministic
1346 if (timeout == Qnil) {
1347 /* unlimited */
1348 }
1349 else if (FIXNUM_P(timeout)) {
1350 /* handled directly in thread_join_sleep() */
1351 }
1352 else {
1353 timeout = rb_to_float(timeout);
1354 }
1355
1356 return thread_join(rb_thread_ptr(self), timeout);
1357}
1358
1359/*
1360 * call-seq:
1361 * thr.value -> obj
1362 *
1363 * Waits for +thr+ to complete, using #join, and returns its value or raises
1364 * the exception which terminated the thread.
1365 *
1366 * a = Thread.new { 2 + 2 }
1367 * a.value #=> 4
1368 *
1369 * b = Thread.new { raise 'something went wrong' }
1370 * b.value #=> RuntimeError: something went wrong
1371 */
1372
1373static VALUE
1374thread_value(VALUE self)
1375{
1376 rb_thread_t *th = rb_thread_ptr(self);
1377 thread_join(th, Qnil);
1378 return th->value;
1379}
1380
1381/*
1382 * Thread Scheduling
1383 */
1384
1385/*
1386 * Back when we used "struct timeval", not all platforms implemented
1387 * tv_sec as time_t. Nowadays we use "struct timespec" and tv_sec
1388 * seems to be implemented more consistently across platforms.
1389 * At least other parts of our code haven't had to deal with non-time_t
1390 * tv_sec in timespec...
1391 */
1392#define TIMESPEC_SEC_MAX TIMET_MAX
1393#define TIMESPEC_SEC_MIN TIMET_MIN
1394
1395COMPILER_WARNING_PUSH
1396#if __has_warning("-Wimplicit-int-float-conversion")
1397COMPILER_WARNING_IGNORED(-Wimplicit-int-float-conversion)
1398#elif defined(_MSC_VER)
1399/* C4305: 'initializing': truncation from '__int64' to 'const double' */
1400COMPILER_WARNING_IGNORED(4305)
1401#endif
1402static const double TIMESPEC_SEC_MAX_as_double = TIMESPEC_SEC_MAX;
1403COMPILER_WARNING_POP
1404
1405static rb_hrtime_t *
1406double2hrtime(rb_hrtime_t *hrt, double d)
1407{
1408 /* assume timespec.tv_sec has same signedness as time_t */
1409 const double TIMESPEC_SEC_MAX_PLUS_ONE = 2.0 * (TIMESPEC_SEC_MAX_as_double / 2.0 + 1.0);
1410
1411 if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
1412 return NULL;
1413 }
1414 else if (d <= 0) {
1415 *hrt = 0;
1416 }
1417 else {
1418 *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
1419 }
1420 return hrt;
1421}
1422
1423static void
1424getclockofday(struct timespec *ts)
1425{
1426#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1427 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1428 return;
1429#endif
1430 rb_timespec_now(ts);
1431}
1432
1433/*
1434 * Don't inline this, since the library call is already time-consuming
1435 * and we don't want "struct timespec" on the stack too long for GC
1436 */
1437NO_SANITIZE("memory", NOINLINE(rb_hrtime_t rb_hrtime_now(void)));
1438rb_hrtime_t
1439rb_hrtime_now(void)
1440{
1441 struct timespec ts;
1442
1443 getclockofday(&ts);
1444 return rb_timespec2hrtime(&ts);
1445}
1446
1447static void
1448sleep_forever(rb_thread_t *th, unsigned int fl)
1449{
1450 enum rb_thread_status prev_status = th->status;
1451 enum rb_thread_status status;
1452 int woke;
1453
1453
1454 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1455 th->status = status;
1456 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1457 while (th->status == status) {
1458 if (fl & SLEEP_DEADLOCKABLE) {
1459 rb_ractor_sleeper_threads_inc(th->ractor);
1460 rb_check_deadlock(th->ractor);
1461 }
1462 native_sleep(th, 0);
1463 if (fl & SLEEP_DEADLOCKABLE) {
1464 rb_ractor_sleeper_threads_dec(th->ractor);
1465 }
1466 woke = vm_check_ints_blocking(th->ec);
1467 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1468 break;
1469 }
1470 th->status = prev_status;
1471}
1472
1473/*
1474 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1475 * being uninitialized, maybe other versions, too.
1476 */
1477COMPILER_WARNING_PUSH
1478#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1479COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1480#endif
1481#ifndef PRIu64
1482#define PRIu64 PRI_64_PREFIX "u"
1483#endif
1484/*
1485 * @end is the absolute time when @timeout is set to expire.
1486 * Returns true if @end has passed;
1487 * updates @timeout and returns false otherwise.
1488 */
1489static int
1490hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1491{
1492 rb_hrtime_t now = rb_hrtime_now();
1493
1494 if (now > end) return 1;
1495 thread_debug("hrtime_update_expire: "
1496 "%"PRIu64" > %"PRIu64"\n",
1497 (uint64_t)end, (uint64_t)now);
1498 *timeout = end - now;
1499 return 0;
1500}
1501COMPILER_WARNING_POP
1502
1503static void
1504sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1505{
1506 enum rb_thread_status prev_status = th->status;
1507 int woke;
1508 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1509
1510 th->status = THREAD_STOPPED;
1511 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1512 while (th->status == THREAD_STOPPED) {
1513 native_sleep(th, &rel);
1514 woke = vm_check_ints_blocking(th->ec);
1515 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1516 break;
1517 if (hrtime_update_expire(&rel, end))
1518 break;
1519 }
1520 th->status = prev_status;
1521}
1522
1523void
1524rb_thread_sleep_forever(void)
1525{
1526 thread_debug("rb_thread_sleep_forever\n");
1527 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1528}
1529
1530void
1531rb_thread_sleep_deadly(void)
1532{
1533 thread_debug("rb_thread_sleep_deadly\n");
1534 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1535}
1536
1537void
1538rb_thread_sleep_interruptible(void)
1539{
1540 rb_thread_t *th = GET_THREAD();
1541 enum rb_thread_status prev_status = th->status;
1542
1543 th->status = THREAD_STOPPED;
1544 native_sleep(th, 0);
1545 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1546 th->status = prev_status;
1547}
1548
1549static void
1550rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
1551{
1552 VALUE scheduler = rb_scheduler_current();
1553 if (scheduler != Qnil) {
1554 rb_scheduler_block(scheduler, blocker, Qnil);
1555 } else {
1556 thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
1557 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1558 }
1559}
1560
1561void
1562rb_thread_wait_for(struct timeval time)
1563{
1564 rb_thread_t *th = GET_THREAD();
1565
1566 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1567}
1568
1569/*
1570 * CAUTION: This function causes thread switching.
1571 * rb_thread_check_ints() checks Ruby's interrupts;
1572 * some interrupts need thread switching and/or invoking handlers,
1573 * and so on.
1574 */
1575
1576void
1577rb_thread_check_ints(void)
1578{
1579 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1580}
1581
1582/*
1583 * Hidden API for tcl/tk wrapper.
1584 * There is no guarantee that it will be kept.
1585 */
1586int
1587rb_thread_check_trap_pending(void)
1588{
1589 return rb_signal_buff_size() != 0;
1590}
1591
1592/* This function can be called in blocking region. */
1593int
1594rb_thread_interrupted(VALUE thval)
1595{
1596 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1597}
1598
1599void
1600rb_thread_sleep(int sec)
1601{
1602 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1603}
1604
1605static void
1606rb_thread_schedule_limits(uint32_t limits_us)
1607{
1608 thread_debug("rb_thread_schedule\n");
1609 if (!rb_thread_alone()) {
1610 rb_thread_t *th = GET_THREAD();
1611
1612 if (th->running_time_us >= limits_us) {
1613 thread_debug("rb_thread_schedule/switch start\n");
1614 RB_GC_SAVE_MACHINE_CONTEXT(th);
1615 gvl_yield(rb_ractor_gvl(th->ractor), th);
1616 rb_ractor_thread_switch(th->ractor, th);
1617 thread_debug("rb_thread_schedule/switch done\n");
1618 }
1619 }
1620}
1621
1622void
1623rb_thread_schedule(void)
1624{
1625 rb_thread_schedule_limits(0);
1626 RUBY_VM_CHECK_INTS(GET_EC());
1627}
1628
1629/* blocking region */
1630
1631static inline int
1632blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1633 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1634{
1635 region->prev_status = th->status;
1636 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1637 th->blocking_region_buffer = region;
1638 th->status = THREAD_STOPPED;
1639 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1640 thread_debug("enter blocking region (%p)\n", (void *)th);
1641 RB_GC_SAVE_MACHINE_CONTEXT(th);
1642 gvl_release(rb_ractor_gvl(th->ractor));
1643 return TRUE;
1644 }
1645 else {
1646 return FALSE;
1647 }
1648}
1649
1650static inline void
1651blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1652{
1653 /* entry to ubf_list still permitted at this point, make it impossible: */
1654 unblock_function_clear(th);
1655 /* entry to ubf_list impossible at this point, so unregister is safe: */
1656 unregister_ubf_list(th);
1657
1658 gvl_acquire(rb_ractor_gvl(th->ractor), th);
1659 rb_ractor_thread_switch(th->ractor, th);
1660
1661 thread_debug("leave blocking region (%p)\n", (void *)th);
1662 th->blocking_region_buffer = 0;
1663 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1664 if (th->status == THREAD_STOPPED) {
1665 th->status = region->prev_status;
1666 }
1667}
1668
1669void *
1670rb_nogvl(void *(*func)(void *), void *data1,
1671 rb_unblock_function_t *ubf, void *data2,
1672 int flags)
1673{
1674 void *val = 0;
1675 rb_execution_context_t *ec = GET_EC();
1676 rb_thread_t *th = rb_ec_thread_ptr(ec);
1677 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1678 bool is_main_thread = vm->ractor.main_thread == th;
1679 int saved_errno = 0;
1680 VALUE ubf_th = Qfalse;
1681
1682 if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1683 ubf = ubf_select;
1684 data2 = th;
1685 }
1686 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1687 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1688 vm->ubf_async_safe = 1;
1689 }
1690 else {
1691 ubf_th = rb_thread_start_unblock_thread();
1692 }
1693 }
1694
1695 BLOCKING_REGION(th, {
1696 val = func(data1);
1697 saved_errno = errno;
1698 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1699
1700 if (is_main_thread) vm->ubf_async_safe = 0;
1701
1702 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1703 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1704 }
1705
1706 if (ubf_th != Qfalse) {
1707 thread_value(rb_thread_kill(ubf_th));
1708 }
1709
1710 errno = saved_errno;
1711
1712 return val;
1713}
1714
1715/*
1716 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1717 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1718 * without interrupt process.
1719 *
1720 * rb_thread_call_without_gvl() does:
1721 * (1) Check interrupts.
1722 * (2) release GVL.
1723 * Other Ruby threads may run in parallel.
1724 * (3) call func with data1
1725 * (4) acquire GVL.
1726 * Other Ruby threads can not run in parallel any more.
1727 * (5) Check interrupts.
1728 *
1729 * rb_thread_call_without_gvl2() does:
1730 * (1) Check interrupt and return if interrupted.
1731 * (2) release GVL.
1732 * (3) call func with data1 and a pointer to the flags.
1733 * (4) acquire GVL.
1734 *
1735 * If another thread interrupts this thread (Thread#kill, signal delivery,
1736 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1737 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1738 * toggling a cancellation flag, canceling the invocation of a call inside
1739 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1740 *
1741 * There are built-in ubfs and you can specify these ubfs:
1742 *
1743 * * RUBY_UBF_IO: ubf for IO operation
1744 * * RUBY_UBF_PROCESS: ubf for process operation
1745 *
1746 * However, we cannot guarantee that our built-in ubfs interrupt your `func()'
1747 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1748 * provide proper ubf(), your program will not stop for Control+C or other
1749 * shutdown events.
1750 *
1751 * "Check interrupts" in the above list means checking asynchronous
1752 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1753 * request, and so on) and calling corresponding procedures
1754 * (such as `trap' for signals, raising an exception for Thread#raise).
1755 * If `func()' has already finished when an interrupt arrives, you may want
1756 * to skip the interrupt check. For example, assume the following func(), which reads data from a file.
1757 *
1758 * read_func(...) {
1759 * // (a) before read
1760 * read(buffer); // (b) reading
1761 * // (c) after read
1762 * }
1763 *
1764 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1765 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1766 * at (c), after the *read* operation is completed, checking interrupts is
1767 * harmful because it causes an irrevocable side effect: the read data will
1768 * vanish. To avoid this problem, `read_func()' should be used with
1769 * `rb_thread_call_without_gvl2()'.
1770 *
1771 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1772 * immediately. This function does not show when the execution was interrupted.
1773 * For example, there are 4 possible timings: (a), (b), (c), and before calling
1774 * read_func(). You need to record the progress of read_func() and check
1775 * it after `rb_thread_call_without_gvl2()'. You may also need to call
1776 * `rb_thread_check_ints()' correctly, or your program will not be able to
1777 * process events such as `trap' handlers.
1778 *
1779 * NOTE: You cannot execute most of the Ruby C API or touch Ruby
1780 * objects in `func()' and `ubf()', including raising an
1781 * exception, because current thread doesn't acquire GVL
1782 * (it causes synchronization problems). If you need to
1783 * call ruby functions either use rb_thread_call_with_gvl()
1784 * or read source code of C APIs and confirm safety by
1785 * yourself.
1786 *
1787 * NOTE: In short, this API is difficult to use safely. I recommend other
1788 * approaches if any are available. We lack experience with this API;
1789 * please report any problems related to it.
1790 *
1791 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1792 * for a short-running `func()'. Be sure to benchmark and use this
1793 * mechanism when `func()' consumes enough time.
1794 *
1795 * Safe C API:
1796 * * rb_thread_interrupted() - check interrupt flag
1797 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1798 * they will work without GVL, and may acquire GVL when GC is needed.
1799 */
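/* ----------------------------------------------------------------------
 * Editor's illustrative sketch (not part of thread.c): a typical
 * extension-side use of rb_thread_call_without_gvl() with an unblocking
 * function. All names (struct blocking_read, do_blocking_read,
 * cancel_blocking_read) are hypothetical.
 */
#if 0
struct blocking_read {
    int fd;
    char *buf;
    size_t len;
    ssize_t result;
};

static void *
do_blocking_read(void *p)
{
    struct blocking_read *args = p;
    /* runs without the GVL: must not touch Ruby objects in here */
    args->result = read(args->fd, args->buf, args->len);
    return NULL;
}

static void
cancel_blocking_read(void *p)
{
    struct blocking_read *args = p;
    /* the ubf may run on another native thread; make the blocked
     * read(2) return early, e.g. by shutting a socket descriptor down */
    shutdown(args->fd, SHUT_RDWR);
}

/* caller (holding the GVL):
 *     rb_thread_call_without_gvl(do_blocking_read, &args,
 *                                cancel_blocking_read, &args);
 */
#endif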
1800void *
1801rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1802 rb_unblock_function_t *ubf, void *data2)
1803{
1804 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1805}
1806
1807void *
1808rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1809 rb_unblock_function_t *ubf, void *data2)
1810{
1811 return rb_nogvl(func, data1, ubf, data2, 0);
1812}
1813
1814VALUE
1815rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1816{
1817 volatile VALUE val = Qundef; /* shouldn't be used */
1818 rb_execution_context_t * volatile ec = GET_EC();
1819 volatile int saved_errno = 0;
1820 enum ruby_tag_type state;
1821 COROUTINE_STACK_LOCAL(struct waiting_fd, wfd);
1822
1823 wfd->fd = fd;
1824 wfd->th = rb_ec_thread_ptr(ec);
1825
1826 RB_VM_LOCK_ENTER();
1827 {
1828 list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd->wfd_node);
1829 }
1830 RB_VM_LOCK_LEAVE();
1831
1832 EC_PUSH_TAG(ec);
1833 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1834 BLOCKING_REGION(wfd->th, {
1835 val = func(data1);
1836 saved_errno = errno;
1837 }, ubf_select, wfd->th, FALSE);
1838 }
1839 EC_POP_TAG();
1840
1841 /*
1842 * must be deleted before jump
1843 * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
1844 */
1845 RB_VM_LOCK_ENTER();
1846 {
1847 list_del(&wfd->wfd_node);
1848 COROUTINE_STACK_FREE(wfd);
1849 }
1850 RB_VM_LOCK_LEAVE();
1851
1852 if (state) {
1853 EC_JUMP_TAG(ec, state);
1854 }
1855 /* TODO: check func() */
1856 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1857
1858 errno = saved_errno;
1859
1860 return val;
1861}
1862
1863/*
1864 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1865 *
1866 * After releasing the GVL using
1867 * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
1868 * methods. If you need to access Ruby, you must use this function
1869 * rb_thread_call_with_gvl().
1870 *
1871 * This function rb_thread_call_with_gvl() does:
1872 * (1) acquire GVL.
1873 * (2) call passed function `func'.
1874 * (3) release GVL.
1875 * (4) return a value which is returned at (2).
1876 *
1877 * NOTE: You should not return a Ruby object at (2) because such an object
1878 * will not be marked.
1879 *
1880 * NOTE: If an exception is raised in `func', this function DOES NOT
1881 * protect (catch) the exception. If you have any resources
1882 * that should be freed before throwing the exception, you need to use
1883 * rb_protect() in `func' and return a value indicating that an
1884 * exception was raised.
1885 *
1886 * NOTE: This function should not be called by a thread which was not
1887 * created as Ruby thread (created by Thread.new or so). In other
1888 * words, this function *DOES NOT* associate or convert a NON-Ruby
1889 * thread to a Ruby thread.
1890 */
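/* ----------------------------------------------------------------------
 * Editor's illustrative sketch (not part of thread.c): re-entering Ruby
 * from code running without the GVL. log_from_worker and worker_body are
 * hypothetical names.
 */
#if 0
static void *
log_from_worker(void *msg)
{
    /* runs WITH the GVL: safe to use the Ruby C API here */
    rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr(msg));
    return NULL;
}

static void *
worker_body(void *data)
{
    /* runs WITHOUT the GVL: no direct Ruby calls allowed, so
     * re-acquire the GVL temporarily for the callback */
    rb_thread_call_with_gvl(log_from_worker, (void *)"checkpoint reached");
    return NULL;
}
#endif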
1891void *
1892rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1893{
1894 rb_thread_t *th = ruby_thread_from_native();
1895 struct rb_blocking_region_buffer *brb;
1896 struct rb_unblock_callback prev_unblock;
1897 void *r;
1898
1899 if (th == 0) {
1900 /* Error has occurred, but we can't use rb_bug()
1901 * because this thread is not Ruby's thread.
1902 * What should we do?
1903 */
1904 bp();
1905 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1906 exit(EXIT_FAILURE);
1907 }
1908
1909 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1910 prev_unblock = th->unblock;
1911
1912 if (brb == 0) {
1913 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1914 }
1915
1916 blocking_region_end(th, brb);
1917 /* enter to Ruby world: You can access Ruby values, methods and so on. */
1918 r = (*func)(data1);
1919 /* leave from Ruby world: You can not access Ruby values, etc. */
1920 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1921 RUBY_ASSERT_ALWAYS(released);
1922 return r;
1923}
1924
1925/*
1926 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1927 *
1928 ***
1929 *** This API is EXPERIMENTAL!
1930 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1931 ***
1932 */
1933
1934int
1935ruby_thread_has_gvl_p(void)
1936{
1937 rb_thread_t *th = ruby_thread_from_native();
1938
1939 if (th && th->blocking_region_buffer == 0) {
1940 return 1;
1941 }
1942 else {
1943 return 0;
1944 }
1945}
1946
1947/*
1948 * call-seq:
1949 * Thread.pass -> nil
1950 *
1951 * Give the thread scheduler a hint to pass execution to another thread.
1952 * A running thread may or may not switch; it depends on the OS and processor.
1953 */
1954
1955static VALUE
1956thread_s_pass(VALUE klass)
1957{
1958 rb_thread_schedule();
1959 return Qnil;
1960}
1961
1962/*****************************************************/
1963
1964/*
1965 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1966 *
1967 * Async events such as an exception thrown by Thread#raise,
1968 * Thread#kill and thread termination (after main thread termination)
1969 * will be queued to th->pending_interrupt_queue.
1970 * - clear: clear the queue.
1971 * - enque: enqueue err object into queue.
1972 * - deque: dequeue err object from queue.
1973 * - active_p: return 1 if the queue should be checked.
1974 *
1975 * All rb_threadptr_pending_interrupt_* functions are called by
1976 * a thread that holds the GVL, of course.
1977 * Note that all "rb_"-prefixed APIs need the GVL to be called.
1978 */
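/* ----------------------------------------------------------------------
 * Editor's illustrative sketch (not part of thread.c): the life cycle of
 * this queue. Thread#raise on another thread does, roughly:
 */
#if 0
    threadptr_check_pending_interrupt_queue(target_th);    /* queue must exist */
    rb_threadptr_pending_interrupt_enque(target_th, exc);  /* queue the error  */
    rb_threadptr_interrupt(target_th);                     /* wake the target  */
    /* The target thread later dequeues `exc' (subject to its
     * Thread.handle_interrupt mask) and raises it in its own context. */
#endif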
1979
1980void
1981rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1982{
1983 rb_ary_clear(th->pending_interrupt_queue);
1984}
1985
1986void
1987rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1988{
1989 rb_ary_push(th->pending_interrupt_queue, v);
1990 th->pending_interrupt_queue_checked = 0;
1991}
1992
1993static void
1994threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1995{
1996 if (!th->pending_interrupt_queue) {
1997 rb_raise(rb_eThreadError, "uninitialized thread");
1998 }
1999}
2000
2001enum handle_interrupt_timing {
2002 INTERRUPT_NONE,
2003 INTERRUPT_IMMEDIATE,
2004 INTERRUPT_ON_BLOCKING,
2005 INTERRUPT_NEVER
2006};
2007
2008static enum handle_interrupt_timing
2009rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2010{
2011 VALUE mask;
2012 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2013 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2014 VALUE mod;
2015 long i;
2016
2017 for (i=0; i<mask_stack_len; i++) {
2018 mask = mask_stack[mask_stack_len-(i+1)];
2019
2020 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2021 VALUE klass = mod;
2022 VALUE sym;
2023
2024 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2025 klass = RBASIC(mod)->klass;
2026 }
2027 else if (mod != RCLASS_ORIGIN(mod)) {
2028 continue;
2029 }
2030
2031 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2032 if (sym == sym_immediate) {
2033 return INTERRUPT_IMMEDIATE;
2034 }
2035 else if (sym == sym_on_blocking) {
2036 return INTERRUPT_ON_BLOCKING;
2037 }
2038 else if (sym == sym_never) {
2039 return INTERRUPT_NEVER;
2040 }
2041 else {
2042 rb_raise(rb_eThreadError, "unknown mask signature");
2043 }
2044 }
2045 }
2046 /* try to next mask */
2047 }
2048 return INTERRUPT_NONE;
2049}
2050
2051static int
2052rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2053{
2054 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2055}
2056
2057static int
2058rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2059{
2060 int i;
2061 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2062 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2063 if (rb_class_inherited_p(e, err)) {
2064 return TRUE;
2065 }
2066 }
2067 return FALSE;
2068}
2069
2070static VALUE
2071rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2072{
2073#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2074 int i;
2075
2076 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2077 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2078
2079 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2080
2081 switch (mask_timing) {
2082 case INTERRUPT_ON_BLOCKING:
2083 if (timing != INTERRUPT_ON_BLOCKING) {
2084 break;
2085 }
2086 /* fall through */
2087 case INTERRUPT_NONE: /* default: IMMEDIATE */
2088 case INTERRUPT_IMMEDIATE:
2089 rb_ary_delete_at(th->pending_interrupt_queue, i);
2090 return err;
2091 case INTERRUPT_NEVER:
2092 break;
2093 }
2094 }
2095
2096 th->pending_interrupt_queue_checked = 1;
2097 return Qundef;
2098#else
2099 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2100 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2101 th->pending_interrupt_queue_checked = 1;
2102 }
2103 return err;
2104#endif
2105}
2106
2107static int
2108threadptr_pending_interrupt_active_p(rb_thread_t *th)
2109{
2110 /*
2111 * For optimization, we don't check async errinfo queue
2112 * if the queue and the thread interrupt mask were not changed
2113 * since last check.
2114 */
2115 if (th->pending_interrupt_queue_checked) {
2116 return 0;
2117 }
2118
2119 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2120 return 0;
2121 }
2122
2123 return 1;
2124}
2125
2126static int
2127handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2128{
2129 VALUE *maskp = (VALUE *)args;
2130
2131 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2132 rb_raise(rb_eArgError, "unknown mask signature");
2133 }
2134
2135 if (!*maskp) {
2136 *maskp = rb_ident_hash_new();
2137 }
2138 rb_hash_aset(*maskp, key, val);
2139
2140 return ST_CONTINUE;
2141}
2142
2143/*
2144 * call-seq:
2145 * Thread.handle_interrupt(hash) { ... } -> result of the block
2146 *
2147 * Changes asynchronous interrupt timing.
2148 *
2149 * _interrupt_ means an asynchronous event and its corresponding procedure,
2150 * invoked by Thread#raise, Thread#kill, signal trap (not supported yet)
2151 * and main thread termination (if main thread terminates, then all
2152 * other threads will be killed).
2153 *
2154 * The given +hash+ has pairs like <code>ExceptionClass =>
2155 * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
2156 * the given block. The TimingSymbol can be one of the following symbols:
2157 *
2158 * [+:immediate+] Invoke interrupts immediately.
2159 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2160 * [+:never+] Never invoke interrupts.
2161 *
2162 * _BlockingOperation_ means an operation that will block the calling thread,
2163 * such as read and write. In the CRuby implementation, a _BlockingOperation_ is any
2164 * operation executed without the GVL.
2165 *
2166 * Masked asynchronous interrupts are delayed until they are enabled.
2167 * This method is similar to sigprocmask(3).
2168 *
2169 * === NOTE
2170 *
2171 * Asynchronous interrupts are difficult to use.
2172 *
2173 * If you need to communicate between threads, please consider another way, such as Queue.
2174 *
2175 * Or use asynchronous interrupts only with a deep understanding of this method.
2176 *
2177 * === Usage
2178 *
2179 * In this example, we can guard against Thread#raise exceptions.
2180 *
2181 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2182 * ignored in the first block of the main thread. In the second
2183 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2184 *
2185 * th = Thread.new do
2186 * Thread.handle_interrupt(RuntimeError => :never) {
2187 * begin
2188 * # You can write resource allocation code safely.
2189 * Thread.handle_interrupt(RuntimeError => :immediate) {
2190 * # ...
2191 * }
2192 * ensure
2193 * # You can write resource deallocation code safely.
2194 * end
2195 * }
2196 * end
2197 * Thread.pass
2198 * # ...
2199 * th.raise "stop"
2200 *
2201 * While we are ignoring the RuntimeError exception, it's safe to write our
2202 * resource allocation code. Then, the ensure block is where we can safely
2203 * deallocate our resources.
2204 *
2205 * ==== Guarding from Timeout::Error
2206 *
2207 * In the next example, we will guard against the Timeout::Error exception. This
2208 * helps prevent resource leaks when Timeout::Error exceptions occur
2209 * during a normal ensure clause. For this example we use the help of the
2210 * standard library Timeout, from lib/timeout.rb.
2211 *
2212 * require 'timeout'
2213 * Thread.handle_interrupt(Timeout::Error => :never) {
2214 * Timeout.timeout(10){
2215 * # Timeout::Error doesn't occur here
2216 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2217 * # possible to be killed by Timeout::Error
2218 * # while blocking operation
2219 * }
2220 * # Timeout::Error doesn't occur here
2221 * }
2222 * }
2223 *
2224 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2225 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2226 * operation that will block the calling thread is susceptible to a
2227 * Timeout::Error exception being raised.
2228 *
2229 * ==== Stack control settings
2230 *
2231 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2232 * to control more than one ExceptionClass and TimingSymbol at a time.
2233 *
2234 * Thread.handle_interrupt(FooError => :never) {
2235 * Thread.handle_interrupt(BarError => :never) {
2236 * # FooError and BarError are prohibited.
2237 * }
2238 * }
2239 *
2240 * ==== Inheritance with ExceptionClass
2241 *
2242 * All exceptions inherited from the ExceptionClass parameter will be considered.
2243 *
2244 * Thread.handle_interrupt(Exception => :never) {
2245 * # all exceptions inherited from Exception are prohibited.
2246 * }
2247 *
2248 */
2249static VALUE
2250rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2251{
2252 VALUE mask;
2253 rb_execution_context_t * volatile ec = GET_EC();
2254 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2255 volatile VALUE r = Qnil;
2256 enum ruby_tag_type state;
2257
2258 if (!rb_block_given_p()) {
2259 rb_raise(rb_eArgError, "block is needed.");
2260 }
2261
2262 mask = 0;
2263 mask_arg = rb_to_hash_type(mask_arg);
2264 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2265 if (!mask) {
2266 return rb_yield(Qnil);
2267 }
2268 OBJ_FREEZE_RAW(mask);
2269 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2270 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2271 th->pending_interrupt_queue_checked = 0;
2272 RUBY_VM_SET_INTERRUPT(th->ec);
2273 }
2274
2275 EC_PUSH_TAG(th->ec);
2276 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2277 r = rb_yield(Qnil);
2278 }
2279 EC_POP_TAG();
2280
2281 rb_ary_pop(th->pending_interrupt_mask_stack);
2282 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2283 th->pending_interrupt_queue_checked = 0;
2284 RUBY_VM_SET_INTERRUPT(th->ec);
2285 }
2286
2287 RUBY_VM_CHECK_INTS(th->ec);
2288 
2289 if (state) {
2290 EC_JUMP_TAG(th->ec, state);
2291 }
2292
2293 return r;
2294}
2295
2296/*
2297 * call-seq:
2298 * target_thread.pending_interrupt?(error = nil) -> true/false
2299 *
2300 * Returns +true+ if the asynchronous event queue of the target thread is not empty.
2301 *
2302 * If +error+ is given, then check only for +error+ type deferred events.
2303 *
2304 * See ::pending_interrupt? for more information.
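 *
 * An illustrative, timing-dependent sketch (not part of the original docs):
 * the target thread defers a RuntimeError via ::handle_interrupt, so the
 * exception raised from outside stays queued and can be observed here.
 *
 *    th = Thread.new do
 *      Thread.handle_interrupt(RuntimeError => :never) { sleep }
 *    end
 *    Thread.pass until th.stop?
 *    th.raise "deferred"
 *    th.pending_interrupt?                #=> true
 *    th.pending_interrupt?(RuntimeError)  #=> true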
2305 */
2306static VALUE
2307rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2308{
2309 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2310
2311 if (!target_th->pending_interrupt_queue) {
2312 return Qfalse;
2313 }
2314 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2315 return Qfalse;
2316 }
2317 if (rb_check_arity(argc, 0, 1)) {
2318 VALUE err = argv[0];
2319 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2320 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2321 }
2322 if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
2323 return Qtrue;
2324 }
2325 else {
2326 return Qfalse;
2327 }
2328 }
2329 else {
2330 return Qtrue;
2331 }
2332}
2333
2334/*
2335 * call-seq:
2336 * Thread.pending_interrupt?(error = nil) -> true/false
2337 *
2338 * Returns +true+ if the asynchronous event queue of the current thread is not empty.
2339 *
2340 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2341 * this method can be used to determine if there are any deferred events.
2342 *
2343 * If this method returns +true+, you may finish the +:never+ block.
2344 *
2345 * For example, the following method processes deferred asynchronous events
2346 * immediately.
2347 *
2348 * def Thread.kick_interrupt_immediately
2349 * Thread.handle_interrupt(Object => :immediate) {
2350 * Thread.pass
2351 * }
2352 * end
2353 *
2354 * If +error+ is given, then check only for +error+ type deferred events.
2355 *
2356 * === Usage
2357 *
2358 * th = Thread.new{
2359 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2360 * while true
2361 * ...
2362 * # reach safe point to invoke interrupt
2363 * if Thread.pending_interrupt?
2364 * Thread.handle_interrupt(Object => :immediate){}
2365 * end
2366 * ...
2367 * end
2368 * }
2369 * }
2370 * ...
2371 * th.raise # stop thread
2372 *
2373 * This example can also be written as follows, which you should use to
2374 * avoid asynchronous interrupts.
2375 *
2376 * flag = true
2377 * th = Thread.new{
2378 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2379 * while true
2380 * ...
2381 * # reach safe point to invoke interrupt
2382 * break if flag == false
2383 * ...
2384 * end
2385 * }
2386 * }
2387 * ...
2388 * flag = false # stop thread
2389 */
2390
2391static VALUE
2392rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2393{
2394 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2395}
2396
2397NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2398
2399static void
2400rb_threadptr_to_kill(rb_thread_t *th)
2401{
2402 rb_threadptr_pending_interrupt_clear(th);
2403 th->status = THREAD_RUNNABLE;
2404 th->to_kill = 1;
2405 th->ec->errinfo = INT2FIX(TAG_FATAL);
2406 EC_JUMP_TAG(th->ec, TAG_FATAL);
2407}
2408
2409static inline rb_atomic_t
2410threadptr_get_interrupts(rb_thread_t *th)
2411{
2412 rb_execution_context_t *ec = th->ec;
2413 rb_atomic_t interrupt;
2414 rb_atomic_t old;
2415
2416 do {
2417 interrupt = ec->interrupt_flag;
2418 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2419 } while (old != interrupt);
2420 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2421}
2422
2425{
2426 rb_atomic_t interrupt;
2427 int postponed_job_interrupt = 0;
2428 int ret = FALSE;
2429
2430 if (th->ec->raised_flag) return ret;
2431
2432 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2433 int sig;
2434 int timer_interrupt;
2435 int pending_interrupt;
2436 int trap_interrupt;
2437 int terminate_interrupt;
2438
2439 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2440 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2441 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2442 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2443 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2444
2445 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2446 RB_VM_LOCK_ENTER();
2447 RB_VM_LOCK_LEAVE();
2448 }
2449
2450 if (postponed_job_interrupt) {
2451 rb_postponed_job_flush(th->vm);
2452 }
2453
2454 /* signal handling */
2455 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2456 enum rb_thread_status prev_status = th->status;
2457 int sigwait_fd = rb_sigwait_fd_get(th);
2458
2459 if (sigwait_fd >= 0) {
2460 (void)consume_communication_pipe(sigwait_fd);
2461 ruby_sigchld_handler(th->vm);
2462 rb_sigwait_fd_put(th, sigwait_fd);
2463 rb_sigwait_fd_migrate(th->vm);
2464 }
2465 th->status = THREAD_RUNNABLE;
2466 while ((sig = rb_get_next_signal()) != 0) {
2467 ret |= rb_signal_exec(th, sig);
2468 }
2469 th->status = prev_status;
2470 }
2471
2472 /* exception from another thread */
2473 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2474 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2475 thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
2476 ret = TRUE;
2477
2478 if (err == Qundef) {
2479 /* no error */
2480 }
2481 else if (err == eKillSignal /* Thread#kill received */ ||
2482 err == eTerminateSignal /* Terminate thread */ ||
2483 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2484 terminate_interrupt = 1;
2485 }
2486 else {
2487 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2488 /* the only special exception to be queued across threads */
2489 err = ruby_vm_special_exception_copy(err);
2490 }
2491 /* set runnable if th was sleeping. */
2492 if (th->status == THREAD_STOPPED ||
2493 th->status == THREAD_STOPPED_FOREVER)
2494 th->status = THREAD_RUNNABLE;
2495 rb_exc_raise(err);
2496 }
2497 }
2498
2499 if (terminate_interrupt) {
2500 rb_threadptr_to_kill(th);
2501 }
2502
2503 if (timer_interrupt) {
2504 uint32_t limits_us = TIME_QUANTUM_USEC;
2505
2506 if (th->priority > 0)
2507 limits_us <<= th->priority;
2508 else
2509 limits_us >>= -th->priority;
2510
2511 if (th->status == THREAD_RUNNABLE)
2512 th->running_time_us += TIME_QUANTUM_USEC;
2513
2514 VM_ASSERT(th->ec->cfp);
2515 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2516 0, 0, 0, Qundef);
2517
2518 rb_thread_schedule_limits(limits_us);
2519 }
2520 }
2521 return ret;
2522}
2523
2524void
2526{
2527 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2528}
2529
2530static void
2531rb_threadptr_ready(rb_thread_t *th)
2532{
2534}
2535
2536static VALUE
2537rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2538{
2539 VALUE exc;
2540
2541 if (rb_threadptr_dead(target_th)) {
2542 return Qnil;
2543 }
2544
2545 if (argc == 0) {
2546 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2547 }
2548 else {
2549 exc = rb_make_exception(argc, argv);
2550 }
2551
2552 /* making an exception object can switch threads,
2553 so we need to check thread deadness again */
2554 if (rb_threadptr_dead(target_th)) {
2555 return Qnil;
2556 }
2557
2558 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2559 rb_threadptr_pending_interrupt_enque(target_th, exc);
2560 rb_threadptr_interrupt(target_th);
2561 return Qnil;
2562}
2563
2564void
2566{
2567 VALUE argv[2];
2568
2569 argv[0] = rb_eSignal;
2570 argv[1] = INT2FIX(sig);
2571 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2572}
2573
2574void
2576{
2577 VALUE argv[2];
2578
2579 argv[0] = rb_eSystemExit;
2580 argv[1] = rb_str_new2("exit");
2581
2582 // TODO: check signal raise delivery
2583 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2584}
2585
2586int
2588{
2589 if (ec->raised_flag & RAISED_EXCEPTION) {
2590 return 1;
2591 }
2592 ec->raised_flag |= RAISED_EXCEPTION;
2593 return 0;
2594}
2595
2596int
2598{
2599 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2600 return 0;
2601 }
2602 ec->raised_flag &= ~RAISED_EXCEPTION;
2603 return 1;
2604}
2605
2606int
2607rb_notify_fd_close(int fd, struct list_head *busy)
2608{
2609 rb_vm_t *vm = GET_THREAD()->vm;
2610 struct waiting_fd *wfd = 0, *next;
2611
2612 RB_VM_LOCK_ENTER();
2613 {
2614 list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2615 if (wfd->fd == fd) {
2616 rb_thread_t *th = wfd->th;
2617 VALUE err;
2618
2619 list_del(&wfd->wfd_node);
2620 list_add(busy, &wfd->wfd_node);
2621
2622 err = th->vm->special_exceptions[ruby_error_stream_closed];
2623 rb_threadptr_pending_interrupt_enque(th, err);
2624 rb_threadptr_interrupt(th);
2625 }
2626 }
2627 }
2628 RB_VM_LOCK_LEAVE();
2629 
2630 return !list_empty(busy);
2631}
2632
2633void
2635{
2636 struct list_head busy;
2637
2638 list_head_init(&busy);
2639 if (rb_notify_fd_close(fd, &busy)) {
2640 do rb_thread_schedule(); while (!list_empty(&busy));
2641 }
2642}
2643
2644/*
2645 * call-seq:
2646 * thr.raise
2647 * thr.raise(string)
2648 * thr.raise(exception [, string [, array]])
2649 *
2650 * Raises an exception from the given thread. The caller does not have to be
2651 * +thr+. See Kernel#raise for more information.
2652 *
2653 * Thread.abort_on_exception = true
2654 * a = Thread.new { sleep(200) }
2655 * a.raise("Gotcha")
2656 *
2657 * This will produce:
2658 *
2659 * prog.rb:3: Gotcha (RuntimeError)
2660 * from prog.rb:2:in `initialize'
2661 * from prog.rb:2:in `new'
2662 * from prog.rb:2
2663 */
2664
2665static VALUE
2666thread_raise_m(int argc, VALUE *argv, VALUE self)
2667{
2668 rb_thread_t *target_th = rb_thread_ptr(self);
2669 const rb_thread_t *current_th = GET_THREAD();
2670
2671 threadptr_check_pending_interrupt_queue(target_th);
2672 rb_threadptr_raise(target_th, argc, argv);
2673
2674 /* To perform Thread.current.raise as Kernel.raise */
2675 if (current_th == target_th) {
2676 RUBY_VM_CHECK_INTS(target_th->ec);
2677 }
2678 return Qnil;
2679}
2680
2681
2682/*
2683 * call-seq:
2684 * thr.exit -> thr
2685 * thr.kill -> thr
2686 * thr.terminate -> thr
2687 *
2688 * Terminates +thr+ and schedules another thread to be run, returning
2689 * the terminated Thread. If this is the main thread, or the last
2690 * thread, exits the process.
2691 */
2692
2693VALUE
2695{
2696 rb_thread_t *th = rb_thread_ptr(thread);
2697
2698 if (th->to_kill || th->status == THREAD_KILLED) {
2699 return thread;
2700 }
2701 if (th == th->vm->ractor.main_thread) {
2702 rb_exit(EXIT_SUCCESS);
2703 }
2704
2705 thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));
2706
2707 if (th == GET_THREAD()) {
2708 /* kill myself immediately */
2709 rb_threadptr_to_kill(th);
2710 }
2711 else {
2712 threadptr_check_pending_interrupt_queue(th);
2713 rb_threadptr_pending_interrupt_enque(th, eKillSignal);
2714 rb_threadptr_interrupt(th);
2715 }
2716 return thread;
2717}
2718
2719int
2721{
2722 rb_thread_t *th = rb_thread_ptr(thread);
2723
2724 if (th->to_kill || th->status == THREAD_KILLED) {
2725 return TRUE;
2726 }
2727 return FALSE;
2728}
2729
2730/*
2731 * call-seq:
2732 * Thread.kill(thread) -> thread
2733 *
2734 * Causes the given +thread+ to exit, see also Thread::exit.
2735 *
2736 * count = 0
2737 * a = Thread.new { loop { count += 1 } }
2738 * sleep(0.1) #=> 0
2739 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2740 * count #=> 93947
2741 * a.alive? #=> false
2742 */
2743
2744static VALUE
2745rb_thread_s_kill(VALUE obj, VALUE th)
2746{
2747 return rb_thread_kill(th);
2748}
2749
2750
2751/*
2752 * call-seq:
2753 * Thread.exit -> thread
2754 *
2755 * Terminates the currently running thread and schedules another thread to be
2756 * run.
2757 *
2758 * If this thread is already marked to be killed, ::exit returns the Thread.
2759 *
2760 * If this is the main thread, or the last thread, exits the process.
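 *
 * A minimal sketch (not part of the original docs):
 *
 *    th = Thread.new do
 *      Thread.exit
 *      puts "never reached"
 *    end
 *    th.join    #=> #<Thread:0x... dead>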
2761 */
2762
2763static VALUE
2764rb_thread_exit(VALUE _)
2765{
2766 rb_thread_t *th = GET_THREAD();
2767 return rb_thread_kill(th->self);
2768}
2769
2770
2771/*
2772 * call-seq:
2773 * thr.wakeup -> thr
2774 *
2775 * Marks a given thread as eligible for scheduling; however, it may still
2776 * remain blocked on I/O.
2777 *
2778 * *Note:* This does not invoke the scheduler, see #run for more information.
2779 *
2780 * c = Thread.new { Thread.stop; puts "hey!" }
2781 * sleep 0.1 while c.status!='sleep'
2782 * c.wakeup
2783 * c.join
2784 * #=> "hey!"
2785 */
2786
2787VALUE
2789{
2790 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2791 rb_raise(rb_eThreadError, "killed thread");
2792 }
2793 return thread;
2794}
2795
2796VALUE
2798{
2799 rb_thread_t *target_th = rb_thread_ptr(thread);
2800 if (target_th->status == THREAD_KILLED) return Qnil;
2801
2802 rb_threadptr_ready(target_th);
2803
2804 if (target_th->status == THREAD_STOPPED ||
2805 target_th->status == THREAD_STOPPED_FOREVER) {
2806 target_th->status = THREAD_RUNNABLE;
2807 }
2808
2809 return thread;
2810}
2811
2812
2813/*
2814 * call-seq:
2815 * thr.run -> thr
2816 *
2817 * Wakes up +thr+, making it eligible for scheduling.
2818 *
2819 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2820 * sleep 0.1 while a.status!='sleep'
2821 * puts "Got here"
2822 * a.run
2823 * a.join
2824 *
2825 * This will produce:
2826 *
2827 * a
2828 * Got here
2829 * c
2830 *
2831 * See also the instance method #wakeup.
2832 */
2833
2834VALUE
2836{
2837 rb_thread_wakeup(thread);
2838 rb_thread_schedule();
2839 return thread;
2840}
2841
2842
2843VALUE
2845{
2846 if (rb_thread_alone()) {
2848 "stopping only thread\n\tnote: use sleep to stop forever");
2849 }
2850 rb_thread_sleep_forever();
2851 return Qnil;
2852}
2853
2854/*
2855 * call-seq:
2856 * Thread.stop -> nil
2857 *
2858 * Stops execution of the current thread, putting it into a ``sleep'' state,
2859 * and schedules execution of another thread.
2860 *
2861 * a = Thread.new { print "a"; Thread.stop; print "c" }
2862 * sleep 0.1 while a.status!='sleep'
2863 * print "b"
2864 * a.run
2865 * a.join
2866 * #=> "abc"
2867 */
2868
2869static VALUE
2870thread_stop(VALUE _)
2871{
2872 return rb_thread_stop();
2873}
2874
2875/********************************************************************/
2876
2877VALUE
2879{
2880 // TODO
2881 return rb_ractor_thread_list(GET_RACTOR());
2882}
2883
2884/*
2885 * call-seq:
2886 * Thread.list -> array
2887 *
2888 * Returns an array of Thread objects for all threads that are either runnable
2889 * or stopped.
2890 *
2891 * Thread.new { sleep(200) }
2892 * Thread.new { 1000000.times {|i| i*i } }
2893 * Thread.new { Thread.stop }
2894 * Thread.list.each {|t| p t}
2895 *
2896 * This will produce:
2897 *
2898 * #<Thread:0x401b3e84 sleep>
2899 * #<Thread:0x401b3f38 run>
2900 * #<Thread:0x401b3fb0 sleep>
2901 * #<Thread:0x401bdf4c run>
2902 */
2903
2904static VALUE
2905thread_list(VALUE _)
2906{
2907 return rb_thread_list();
2908}
2909
2910VALUE
2912{
2913 return GET_THREAD()->self;
2914}
2915
2916/*
2917 * call-seq:
2918 * Thread.current -> thread
2919 *
2920 * Returns the currently executing thread.
2921 *
2922 * Thread.current #=> #<Thread:0x401bdf4c run>
2923 */
2924
2925static VALUE
2926thread_s_current(VALUE klass)
2927{
2928 return rb_thread_current();
2929}
2930
2931VALUE
2933{
2934 return GET_RACTOR()->threads.main->self;
2935}
2936
2937/*
2938 * call-seq:
2939 * Thread.main -> thread
2940 *
2941 * Returns the main thread.
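 *
 * For example (illustrative):
 *
 *    Thread.main == Thread.current                       #=> true, when called from the main thread
 *    Thread.new { Thread.main == Thread.current }.value  #=> false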
2942 */
2943
2944static VALUE
2945rb_thread_s_main(VALUE klass)
2946{
2947 return rb_thread_main();
2948}
2949
2950
2951/*
2952 * call-seq:
2953 * Thread.abort_on_exception -> true or false
2954 *
2955 * Returns the status of the global ``abort on exception'' condition.
2956 *
2957 * The default is +false+.
2958 *
2959 * When set to +true+, if any thread is aborted by an exception, the
2960 * raised exception will be re-raised in the main thread.
2961 *
2962 * Can also be specified by the global $DEBUG flag or command line option
2963 * +-d+.
2964 *
2965 * See also ::abort_on_exception=.
2966 *
2967 * There is also an instance level method to set this for a specific thread,
2968 * see #abort_on_exception.
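 *
 * For example (illustrative):
 *
 *    Thread.abort_on_exception        #=> false
 *    Thread.abort_on_exception = true
 *    Thread.abort_on_exception        #=> true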
2969 */
2970
2971static VALUE
2972rb_thread_s_abort_exc(VALUE _)
2973{
2974 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
2975}
2976
2977
2978/*
2979 * call-seq:
2980 * Thread.abort_on_exception= boolean -> true or false
2981 *
2982 * When set to +true+, if any thread is aborted by an exception, the
2983 * raised exception will be re-raised in the main thread.
2984 * Returns the new state.
2985 *
2986 * Thread.abort_on_exception = true
2987 * t1 = Thread.new do
2988 * puts "In new thread"
2989 * raise "Exception from thread"
2990 * end
2991 * sleep(1)
2992 * puts "not reached"
2993 *
2994 * This will produce:
2995 *
2996 * In new thread
2997 * prog.rb:4: Exception from thread (RuntimeError)
2998 * from prog.rb:2:in `initialize'
2999 * from prog.rb:2:in `new'
3000 * from prog.rb:2
3001 *
3002 * See also ::abort_on_exception.
3003 *
3004 * There is also an instance level method to set this for a specific thread,
3005 * see #abort_on_exception=.
3006 */
3007
3008static VALUE
3009rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3010{
3011 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3012 return val;
3013}
3014
3015
3016/*
3017 * call-seq:
3018 * thr.abort_on_exception -> true or false
3019 *
3020 * Returns the status of the thread-local ``abort on exception'' condition for
3021 * this +thr+.
3022 *
3023 * The default is +false+.
3024 *
3025 * See also #abort_on_exception=.
3026 *
3027 * There is also a class level method to set this for all threads, see
3028 * ::abort_on_exception.
3029 */
3030
3031static VALUE
3032rb_thread_abort_exc(VALUE thread)
3033{
3034 return rb_thread_ptr(thread)->abort_on_exception ? Qtrue : Qfalse;
3035}
3036
3037
3038/*
3039 * call-seq:
3040 * thr.abort_on_exception= boolean -> true or false
3041 *
3042 * When set to +true+, if this +thr+ is aborted by an exception, the
3043 * raised exception will be re-raised in the main thread.
3044 *
3045 * See also #abort_on_exception.
3046 *
3047 * There is also a class level method to set this for all threads, see
3048 * ::abort_on_exception=.
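 *
 * An illustrative, timing-dependent sketch (not part of the original docs):
 * the worker's failure aborts the main thread's sleep.
 *
 *    t = Thread.new { Thread.stop; raise "boom" }
 *    t.abort_on_exception = true
 *    Thread.pass until t.stop?
 *    t.run
 *    sleep    # RuntimeError "boom" is re-raised here in the main thread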
3049 */
3050
3051static VALUE
3052rb_thread_abort_exc_set(VALUE thread, VALUE val)
3053{
3054 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3055 return val;
3056}
3057
3058
3059/*
3060 * call-seq:
3061 * Thread.report_on_exception -> true or false
3062 *
3063 * Returns the status of the global ``report on exception'' condition.
3064 *
3065 * The default is +true+ since Ruby 2.5.
3066 *
3067 * All threads created when this flag is true will report
3068 * a message on $stderr if an exception kills the thread.
3069 *
3070 * Thread.new { 1.times { raise } }
3071 *
3072 * will produce this output on $stderr:
3073 *
3074 * #<Thread:...> terminated with exception (report_on_exception is true):
3075 * Traceback (most recent call last):
3076 * 2: from -e:1:in `block in <main>'
3077 * 1: from -e:1:in `times'
3078 *
3079 * This is done to catch errors in threads early.
3080 * In some cases, you might not want this output.
3081 * There are multiple ways to avoid the extra output:
3082 *
3083 * * If the exception is not intended, the best is to fix the cause of
3084 * the exception so it does not happen anymore.
3085 * * If the exception is intended, it might be better to rescue it closer to
3086 * where it is raised rather then let it kill the Thread.
3087 * * If it is guaranteed the Thread will be joined with Thread#join or
3088 * Thread#value, then it is safe to disable this report with
3089 * <code>Thread.current.report_on_exception = false</code>
3090 * when starting the Thread.
3091 * However, this might handle the exception much later, or not at all
3092 * if the Thread is never joined due to the parent thread being blocked, etc.
3093 *
3094 * See also ::report_on_exception=.
3095 *
3096 * There is also an instance level method to set this for a specific thread,
3097 * see #report_on_exception=.
3098 *
3099 */
3100
3101static VALUE
3102rb_thread_s_report_exc(VALUE _)
3103{
3104 return GET_THREAD()->vm->thread_report_on_exception ? Qtrue : Qfalse;
3105}
3106
3107
3108/*
3109 * call-seq:
3110 * Thread.report_on_exception= boolean -> true or false
3111 *
3112 * Returns the new state.
3113 * When set to +true+, all threads created afterwards will inherit the
3114 * condition and report a message on $stderr if an exception kills a thread:
3115 *
3116 * Thread.report_on_exception = true
3117 * t1 = Thread.new do
3118 * puts "In new thread"
3119 * raise "Exception from thread"
3120 * end
3121 * sleep(1)
3122 * puts "In the main thread"
3123 *
3124 * This will produce:
3125 *
3126 * In new thread
3127 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3128 * Traceback (most recent call last):
3129 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3130 * In the main thread
3131 *
3132 * See also ::report_on_exception.
3133 *
3134 * There is also an instance level method to set this for a specific thread,
3135 * see #report_on_exception=.
3136 */
3137
3138static VALUE
3139rb_thread_s_report_exc_set(VALUE self, VALUE val)
3140{
3141 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3142 return val;
3143}
3144
3145
3146/*
3147 * call-seq:
3148 * Thread.ignore_deadlock -> true or false
3149 *
3150 * Returns the status of the global ``ignore deadlock'' condition.
3151 * The default is +false+, so that deadlock conditions are not ignored.
3152 *
3153 * See also ::ignore_deadlock=.
3154 *
3155 */
3156
3157static VALUE
3158rb_thread_s_ignore_deadlock(VALUE _)
3159{
3160 return GET_THREAD()->vm->thread_ignore_deadlock ? Qtrue : Qfalse;
3161}
3162
3163
3164/*
3165 * call-seq:
3166 * Thread.ignore_deadlock = boolean -> true or false
3167 *
3168 * Returns the new state.
3169 * When set to +true+, the VM will not check for deadlock conditions.
3170 * It is only useful to set this if your application can break a
3171 * deadlock condition via some other means, such as a signal.
3172 *
3173 * Thread.ignore_deadlock = true
3174 * queue = Queue.new
3175 *
3176 * trap(:SIGUSR1){queue.push "Received signal"}
3177 *
3178 * # raises fatal error unless ignoring deadlock
3179 * puts queue.pop
3180 *
3181 * See also ::ignore_deadlock.
3182 */
3183
3184static VALUE
3185rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3186{
3187 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3188 return val;
3189}
3190
3191
3192/*
3193 * call-seq:
3194 * thr.report_on_exception -> true or false
3195 *
3196 * Returns the status of the thread-local ``report on exception'' condition for
3197 * this +thr+.
3198 *
3199 * The default value when creating a Thread is the value of
3200 * the global flag Thread.report_on_exception.
3201 *
3202 * See also #report_on_exception=.
3203 *
3204 * There is also a class level method to set this for all new threads, see
3205 * ::report_on_exception=.
3206 */
3207
3208static VALUE
3209rb_thread_report_exc(VALUE thread)
3210{
3211 return rb_thread_ptr(thread)->report_on_exception ? Qtrue : Qfalse;
3212}
3213
3214
3215/*
3216 * call-seq:
3217 * thr.report_on_exception= boolean -> true or false
3218 *
3219 * When set to +true+, a message is printed on $stderr if an exception
3220 * kills this +thr+. See ::report_on_exception for details.
3221 *
3222 * See also #report_on_exception.
3223 *
3224 * There is also a class level method to set this for all new threads, see
3225 * ::report_on_exception=.
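 *
 * An illustrative, timing-dependent sketch (not part of the original docs):
 * silencing the report for one thread that is expected to fail.
 *
 *    t = Thread.new { Thread.stop; raise "expected" }
 *    Thread.pass until t.stop?
 *    t.report_on_exception = false
 *    t.run
 *    t.join rescue nil    # no message is printed on $stderr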
3226 */
3227
3228static VALUE
3229rb_thread_report_exc_set(VALUE thread, VALUE val)
3230{
3231 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3232 return val;
3233}
3234
3235
3236/*
3237 * call-seq:
3238 * thr.group -> thgrp or nil
3239 *
3240 * Returns the ThreadGroup which contains the given thread, or returns +nil+
3241 * if +thr+ is not a member of any group.
3242 *
3243 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3244 */
3245
3246VALUE
3248{
3249 VALUE group = rb_thread_ptr(thread)->thgroup;
3250 return group == 0 ? Qnil : group;
3251}
3252
3253static const char *
3254thread_status_name(rb_thread_t *th, int detail)
3255{
3256 switch (th->status) {
3257 case THREAD_RUNNABLE:
3258 return th->to_kill ? "aborting" : "run";
3260 if (detail) return "sleep_forever";
3261 case THREAD_STOPPED:
3262 return "sleep";
3263 case THREAD_KILLED:
3264 return "dead";
3265 default:
3266 return "unknown";
3267 }
3268}
3269
3270static int
3271rb_threadptr_dead(rb_thread_t *th)
3272{
3273 return th->status == THREAD_KILLED;
3274}
3275
3276
3277/*
3278 * call-seq:
3279 * thr.status -> string, false or nil
3280 *
3281 * Returns the status of +thr+.
3282 *
3283 * [<tt>"sleep"</tt>]
3284 * Returned if this thread is sleeping or waiting on I/O
3285 * [<tt>"run"</tt>]
3286 * When this thread is executing
3287 * [<tt>"aborting"</tt>]
3288 * If this thread is aborting
3289 * [+false+]
3290 * When this thread is terminated normally
3291 * [+nil+]
3292 * If terminated with an exception.
3293 *
3294 * a = Thread.new { raise("die now") }
3295 * b = Thread.new { Thread.stop }
3296 * c = Thread.new { Thread.exit }
3297 * d = Thread.new { sleep }
3298 * d.kill #=> #<Thread:0x401b3678 aborting>
3299 * a.status #=> nil
3300 * b.status #=> "sleep"
3301 * c.status #=> false
3302 * d.status #=> "aborting"
3303 * Thread.current.status #=> "run"
3304 *
3305 * See also the instance methods #alive? and #stop?
3306 */
3307
3308static VALUE
3309rb_thread_status(VALUE thread)
3310{
3311 rb_thread_t *target_th = rb_thread_ptr(thread);
3312
3313 if (rb_threadptr_dead(target_th)) {
3314 if (!NIL_P(target_th->ec->errinfo) &&
3315 !FIXNUM_P(target_th->ec->errinfo)) {
3316 return Qnil;
3317 }
3318 else {
3319 return Qfalse;
3320 }
3321 }
3322 else {
3323 return rb_str_new2(thread_status_name(target_th, FALSE));
3324 }
3325}
3326
3327
3328/*
3329 * call-seq:
3330 * thr.alive? -> true or false
3331 *
3332 * Returns +true+ if +thr+ is running or sleeping.
3333 *
3334 * thr = Thread.new { }
3335 * thr.join #=> #<Thread:0x401b3fb0 dead>
3336 * Thread.current.alive? #=> true
3337 * thr.alive? #=> false
3338 *
3339 * See also #stop? and #status.
3340 */
3341
3342static VALUE
3343rb_thread_alive_p(VALUE thread)
3344{
3345 if (thread_finished(rb_thread_ptr(thread))) {
3346 return Qfalse;
3347 }
3348 else {
3349 return Qtrue;
3350 }
3351}
3352
3353/*
3354 * call-seq:
3355 * thr.stop? -> true or false
3356 *
3357 * Returns +true+ if +thr+ is dead or sleeping.
3358 *
3359 * a = Thread.new { Thread.stop }
3360 * b = Thread.current
3361 * a.stop? #=> true
3362 * b.stop? #=> false
3363 *
3364 * See also #alive? and #status.
3365 */
3366
3367static VALUE
3368rb_thread_stop_p(VALUE thread)
3369{
3370 rb_thread_t *th = rb_thread_ptr(thread);
3371
3372 if (rb_threadptr_dead(th)) {
3373 return Qtrue;
3374 }
3375 else if (th->status == THREAD_STOPPED ||
3376 th->status == THREAD_STOPPED_FOREVER) {
3377 return Qtrue;
3378 }
3379 else {
3380 return Qfalse;
3381 }
3382}
3383
3384/*
3385 * call-seq:
3386 * thr.name -> string
3387 *
3388 * Returns the name of the thread.
3389 */
3390
3391static VALUE
3392rb_thread_getname(VALUE thread)
3393{
3394 return rb_thread_ptr(thread)->name;
3395}
3396
3397/*
3398 * call-seq:
3399 * thr.name=(name) -> string
3400 *
3401 * Sets the name of the thread to the given string.
3402 * On some platforms, the name may also be set for the underlying pthread and/or be visible to the kernel.
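 *
 * For example (illustrative; the exact inspect output may vary):
 *
 *    a = Thread.new { sleep }
 *    a.name = "worker-1"
 *    a.name    #=> "worker-1"
 *    a.to_s    #=> "#<Thread:0x00007f...@worker-1 run>"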
3403 */
3404
3405static VALUE
3406rb_thread_setname(VALUE thread, VALUE name)
3407{
3408 rb_thread_t *target_th = rb_thread_ptr(thread);
3409
3410 if (!NIL_P(name)) {
3411 rb_encoding *enc;
3412 StringValueCStr(name);
3413 enc = rb_enc_get(name);
3414 if (!rb_enc_asciicompat(enc)) {
3415 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3416 rb_enc_name(enc));
3417 }
3418 name = rb_str_new_frozen(name);
3419 }
3420 target_th->name = name;
3421 if (threadptr_initialized(target_th)) {
3422 native_set_another_thread_name(target_th->thread_id, name);
3423 }
3424 return name;
3425}
3426
3427/*
3428 * call-seq:
3429 * thr.to_s -> string
3430 *
3431 * Dump the name, id, and status of _thr_ to a string.
3432 */
3433
3434static VALUE
3435rb_thread_to_s(VALUE thread)
3436{
3437 VALUE cname = rb_class_path(rb_obj_class(thread));
3438 rb_thread_t *target_th = rb_thread_ptr(thread);
3439 const char *status;
3440 VALUE str, loc;
3441
3442 status = thread_status_name(target_th, TRUE);
3443 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3444 if (!NIL_P(target_th->name)) {
3445 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3446 }
3447 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3448 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3449 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3450 rb_gc_force_recycle(loc);
3451 }
3452 rb_str_catf(str, " %s>", status);
3453
3454 return str;
3455}
3456
3457/* variables for recursive traversals */
3458static ID recursive_key;
3459
3460static VALUE
3461threadptr_local_aref(rb_thread_t *th, ID id)
3462{
3463 if (id == recursive_key) {
3464 return th->ec->local_storage_recursive_hash;
3465 }
3466 else {
3467 VALUE val;
3468 struct rb_id_table *local_storage = th->ec->local_storage;
3469
3470 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3471 return val;
3472 }
3473 else {
3474 return Qnil;
3475 }
3476 }
3477}
3478
3479VALUE
3481{
3482 return threadptr_local_aref(rb_thread_ptr(thread), id);
3483}
3484
3485/*
3486 * call-seq:
3487 * thr[sym] -> obj or nil
3488 *
3489 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3490 * if not explicitly inside a Fiber), using either a symbol or a string name.
3491 * If the specified variable does not exist, returns +nil+.
3492 *
3493 * [
3494 * Thread.new { Thread.current["name"] = "A" },
3495 * Thread.new { Thread.current[:name] = "B" },
3496 * Thread.new { Thread.current["name"] = "C" }
3497 * ].each do |th|
3498 * th.join
3499 * puts "#{th.inspect}: #{th[:name]}"
3500 * end
3501 *
3502 * This will produce:
3503 *
3504 * #<Thread:0x00000002a54220 dead>: A
3505 * #<Thread:0x00000002a541a8 dead>: B
3506 * #<Thread:0x00000002a54130 dead>: C
3507 *
3508 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3509 * This confusion did not exist in Ruby 1.8 because
3510 * fibers were only introduced in Ruby 1.9.
3511 * Ruby 1.9 made these methods fiber-local in order to preserve
3512 * the following idiom for dynamic scope.
3513 *
3514 * def meth(newvalue)
3515 * begin
3516 * oldvalue = Thread.current[:name]
3517 * Thread.current[:name] = newvalue
3518 * yield
3519 * ensure
3520 * Thread.current[:name] = oldvalue
3521 * end
3522 * end
3523 *
3524 * The idiom may not work as dynamic scope if the methods are thread-local
3525 * and a given block switches fibers.
3526 *
3527 * f = Fiber.new {
3528 * meth(1) {
3529 * Fiber.yield
3530 * }
3531 * }
3532 * meth(2) {
3533 * f.resume
3534 * }
3535 * f.resume
3536 * p Thread.current[:name]
3537 * #=> nil if fiber-local
3538 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3539 *
3540 * For thread-local variables, please see #thread_variable_get and
3541 * #thread_variable_set.
3542 *
3543 */
3544
3545static VALUE
3546rb_thread_aref(VALUE thread, VALUE key)
3547{
3548 ID id = rb_check_id(&key);
3549 if (!id) return Qnil;
3550 return rb_thread_local_aref(thread, id);
3551}
3552
3553/*
3554 * call-seq:
3555 * thr.fetch(sym) -> obj
3556 * thr.fetch(sym) { } -> obj
3557 * thr.fetch(sym, default) -> obj
3558 *
3559 * Returns a fiber-local for the given key. If the key can't be
3560 * found, there are several options: With no other arguments, it will
3561 * raise a KeyError exception; if <i>default</i> is given, then that
3562 * will be returned; if the optional code block is specified, then
3563 * that will be run and its result returned. See Thread#[] and
3564 * Hash#fetch.
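 *
 * For example (illustrative):
 *
 *    Thread.current[:name] = "A"
 *    Thread.current.fetch(:name)                 #=> "A"
 *    Thread.current.fetch(:other, "default")     #=> "default"
 *    Thread.current.fetch(:other) { |k| k.to_s } #=> "other"
 *    Thread.current.fetch(:other)                # raises KeyError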
3565 */
3566static VALUE
3567rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3568{
3569 VALUE key, val;
3570 ID id;
3571 rb_thread_t *target_th = rb_thread_ptr(self);
3572 int block_given;
3573
3574 rb_check_arity(argc, 1, 2);
3575 key = argv[0];
3576
3577 block_given = rb_block_given_p();
3578 if (block_given && argc == 2) {
3579 rb_warn("block supersedes default value argument");
3580 }
3581
3582 id = rb_check_id(&key);
3583
3584 if (id == recursive_key) {
3585 return target_th->ec->local_storage_recursive_hash;
3586 }
3587 else if (id && target_th->ec->local_storage &&
3588 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3589 return val;
3590 }
3591 else if (block_given) {
3592 return rb_yield(key);
3593 }
3594 else if (argc == 1) {
3595 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3596 }
3597 else {
3598 return argv[1];
3599 }
3600}
3601
3602static VALUE
3603threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3604{
3605 if (id == recursive_key) {
3606 th->ec->local_storage_recursive_hash = val;
3607 return val;
3608 }
3609 else {
3610 struct rb_id_table *local_storage = th->ec->local_storage;
3611
3612 if (NIL_P(val)) {
3613 if (!local_storage) return Qnil;
3614 rb_id_table_delete(local_storage, id);
3615 return Qnil;
3616 }
3617 else {
3618 if (local_storage == NULL) {
3619 th->ec->local_storage = local_storage = rb_id_table_create(0);
3620 }
3621 rb_id_table_insert(local_storage, id, val);
3622 return val;
3623 }
3624 }
3625}
3626
3627VALUE
3629{
3630 if (OBJ_FROZEN(thread)) {
3631 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3632 }
3633
3634 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3635}
3636
3637/*
3638 * call-seq:
3639 * thr[sym] = obj -> obj
3640 *
3641 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3642 * using either a symbol or a string.
3643 *
3644 * See also Thread#[].
3645 *
3646 * For thread-local variables, please see #thread_variable_set and
3647 * #thread_variable_get.
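 *
 * For example (illustrative):
 *
 *    Thread.current[:answer] = 42
 *    Thread.current[:answer]          #=> 42
 *    Thread.current[:answer] = nil    # assigning nil removes the entry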
3648 */
3649
3650static VALUE
3651rb_thread_aset(VALUE self, VALUE id, VALUE val)
3652{
3653 return rb_thread_local_aset(self, rb_to_id(id), val);
3654}
3655
3656/*
3657 * call-seq:
3658 * thr.thread_variable_get(key) -> obj or nil
3659 *
3660 * Returns the value of a thread local variable that has been set. Note that
3661 * these are different from fiber-local values. For fiber-local values,
3662 * please see Thread#[] and Thread#[]=.
3663 *
3664 * Thread local values are carried along with threads, and do not respect
3665 * fibers. For example:
3666 *
3667 * Thread.new {
3668 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3669 * Thread.current["foo"] = "bar" # set a fiber local
3670 *
3671 * Fiber.new {
3672 * Fiber.yield [
3673 * Thread.current.thread_variable_get("foo"), # get the thread local
3674 * Thread.current["foo"], # get the fiber local
3675 * ]
3676 * }.resume
3677 * }.join.value # => ['bar', nil]
3678 *
3679 * The value "bar" is returned for the thread local, where nil is returned
3680 * for the fiber local. The fiber is executed in the same thread, so the
3681 * thread local values are available.
3682 */
3683
3684static VALUE
3685rb_thread_variable_get(VALUE thread, VALUE key)
3686{
3687 VALUE locals;
3688
3689 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3690 return Qnil;
3691 }
3692 locals = rb_thread_local_storage(thread);
3693 return rb_hash_aref(locals, rb_to_symbol(key));
3694}
3695
3696/*
3697 * call-seq:
3698 * thr.thread_variable_set(key, value)
3699 *
3700 * Sets a thread local with +key+ to +value+. Note that these are local to
3701 * threads, and not to fibers. Please see Thread#thread_variable_get and
3702 * Thread#[] for more information.
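 *
 * For example (illustrative; the order of #thread_variables is not guaranteed):
 *
 *    Thread.current.thread_variable_set(:cat, 'meow')
 *    Thread.current.thread_variable_get(:cat)    #=> "meow"
 *    Thread.current.thread_variable_set("dog", 'woof')
 *    Thread.current.thread_variables             #=> [:dog, :cat]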
3703 */
3704
3705static VALUE
3706rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3707{
3708 VALUE locals;
3709
3710 if (OBJ_FROZEN(thread)) {
3711 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3712 }
3713
3714 locals = rb_thread_local_storage(thread);
3715 return rb_hash_aset(locals, rb_to_symbol(key), val);
3716}
3717
3718/*
3719 * call-seq:
3720 * thr.key?(sym) -> true or false
3721 *
3722 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3723 * variable.
3724 *
3725 * me = Thread.current
3726 * me[:oliver] = "a"
3727 * me.key?(:oliver) #=> true
3728 * me.key?(:stanley) #=> false
3729 */
3730
3731static VALUE
3732rb_thread_key_p(VALUE self, VALUE key)
3733{
3734 VALUE val;
3735 ID id = rb_check_id(&key);
3736 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3737
3738 if (!id || local_storage == NULL) {
3739 return Qfalse;
3740 }
3741 else if (rb_id_table_lookup(local_storage, id, &val)) {
3742 return Qtrue;
3743 }
3744 else {
3745 return Qfalse;
3746 }
3747}
3748
3749 static enum rb_id_table_iterator_result
3750 thread_keys_i(ID key, VALUE value, void *ary)
3751{
3752 rb_ary_push((VALUE)ary, ID2SYM(key));
3753 return ID_TABLE_CONTINUE;
3754}
3755
3756int
3758{
3759 // TODO
3760 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3761}
3762
3763/*
3764 * call-seq:
3765 * thr.keys -> array
3766 *
3767 * Returns an array of the names of the fiber-local variables (as Symbols).
3768 *
3769 * thr = Thread.new do
3770 * Thread.current[:cat] = 'meow'
3771 * Thread.current["dog"] = 'woof'
3772 * end
3773 * thr.join #=> #<Thread:0x401b3f10 dead>
3774 * thr.keys #=> [:dog, :cat]
3775 */
3776
3777static VALUE
3778rb_thread_keys(VALUE self)
3779{
3780 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3781 VALUE ary = rb_ary_new();
3782
3783 if (local_storage) {
3784 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3785 }
3786 return ary;
3787}
3788
3789static int
3790keys_i(VALUE key, VALUE value, VALUE ary)
3791{
3792 rb_ary_push(ary, key);
3793 return ST_CONTINUE;
3794}
3795
3796/*
3797 * call-seq:
3798 * thr.thread_variables -> array
3799 *
3800 * Returns an array of the names of the thread-local variables (as Symbols).
3801 *
3802 * thr = Thread.new do
3803 * Thread.current.thread_variable_set(:cat, 'meow')
3804 * Thread.current.thread_variable_set("dog", 'woof')
3805 * end
3806 * thr.join #=> #<Thread:0x401b3f10 dead>
3807 * thr.thread_variables #=> [:dog, :cat]
3808 *
3809 * Note that these are not fiber local variables. Please see Thread#[] and
3810 * Thread#thread_variable_get for more details.
3811 */
3812
3813static VALUE
3814rb_thread_variables(VALUE thread)
3815{
3816 VALUE locals;
3817 VALUE ary;
3818
3819 ary = rb_ary_new();
3820 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3821 return ary;
3822 }
3823 locals = rb_thread_local_storage(thread);
3824 rb_hash_foreach(locals, keys_i, ary);
3825
3826 return ary;
3827}
3828
3829/*
3830 * call-seq:
3831 * thr.thread_variable?(key) -> true or false
3832 *
3833 * Returns +true+ if the given string (or symbol) exists as a thread-local
3834 * variable.
3835 *
3836 * me = Thread.current
3837 * me.thread_variable_set(:oliver, "a")
3838 * me.thread_variable?(:oliver) #=> true
3839 * me.thread_variable?(:stanley) #=> false
3840 *
3841 * Note that these are not fiber local variables. Please see Thread#[] and
3842 * Thread#thread_variable_get for more details.
3843 */
3844
3845static VALUE
3846rb_thread_variable_p(VALUE thread, VALUE key)
3847{
3848 VALUE locals;
3849
3850 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3851 return Qfalse;
3852 }
3853 locals = rb_thread_local_storage(thread);
3854
3855 if (rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil) {
3856 return Qtrue;
3857 }
3858 else {
3859 return Qfalse;
3860 }
3861
3862 return Qfalse;
3863}
3864
3865/*
3866 * call-seq:
3867 * thr.priority -> integer
3868 *
3869 * Returns the priority of <i>thr</i>. The default is inherited from the
3870 * thread that created the new thread, or zero for the
3871 * initial main thread; higher-priority threads will run more frequently
3872 * than lower-priority threads (but lower-priority threads can also run).
3873 *
3874 * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3875 * platforms.
3876 *
3877 * Thread.current.priority #=> 0
3878 */
3879
3880static VALUE
3881rb_thread_priority(VALUE thread)
3882{
3883 return INT2NUM(rb_thread_ptr(thread)->priority);
3884}
3885
3886
3887/*
3888 * call-seq:
3889 * thr.priority= integer -> thr
3890 *
3891 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3892 * will run more frequently than lower-priority threads (but lower-priority
3893 * threads can also run).
3894 *
3895 * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3896 * platforms.
3897 *
3898 * count1 = count2 = 0
3899 * a = Thread.new do
3900 * loop { count1 += 1 }
3901 * end
3902 * a.priority = -1
3903 *
3904 * b = Thread.new do
3905 * loop { count2 += 1 }
3906 * end
3907 * b.priority = -2
3908 * sleep 1 #=> 1
3909 * count1 #=> 622504
3910 * count2 #=> 5832
3911 */
3912
3913static VALUE
3914rb_thread_priority_set(VALUE thread, VALUE prio)
3915{
3916 rb_thread_t *target_th = rb_thread_ptr(thread);
3917 int priority;
3918
3919#if USE_NATIVE_THREAD_PRIORITY
3920 target_th->priority = NUM2INT(prio);
3921 native_thread_apply_priority(th);
3922#else
3923 priority = NUM2INT(prio);
3924 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3925 priority = RUBY_THREAD_PRIORITY_MAX;
3926 }
3927 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3928 priority = RUBY_THREAD_PRIORITY_MIN;
3929 }
3930 target_th->priority = (int8_t)priority;
3931#endif
3932 return INT2NUM(target_th->priority);
3933}
3934
3935/* for IO */
3936
3937#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3938
3939/*
3940 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3941 * in the select(2) system call.
3942 *
3943 * - Linux 2.2.12 (?)
3944 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3945 * select(2) documents how to allocate fd_set dynamically.
3946 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3947 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3948 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3949 * select(2) documents how to allocate fd_set dynamically.
3950 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3951 * - HP-UX documents how to allocate fd_set dynamically.
3952 * http://docs.hp.com/en/B2355-60105/select.2.html
3953 * - Solaris 8 has select_large_fdset
3954 * - Mac OS X 10.7 (Lion)
3955 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3956 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3957 * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3958 *
3959 * When fd_set is not big enough to hold big file descriptors,
3960 * it should be allocated dynamically.
3961 * Note that this assumes fd_set is structured as a bitmap.
3962 *
3963 * rb_fd_init allocates the memory.
3964 * rb_fd_term frees the memory.
3965 * rb_fd_set may re-allocate the bitmap.
3966 *
3967 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3968 */
3969
3970void
3972{
3973 fds->maxfd = 0;
3974 fds->fdset = ALLOC(fd_set);
3975 FD_ZERO(fds->fdset);
3976}
3977
3978void
3980{
3981 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3982
3983 if (size < sizeof(fd_set))
3984 size = sizeof(fd_set);
3985 dst->maxfd = src->maxfd;
3986 dst->fdset = xmalloc(size);
3987 memcpy(dst->fdset, src->fdset, size);
3988}
3989
3990void
3992{
3993 if (fds->fdset) xfree(fds->fdset);
3994 fds->maxfd = 0;
3995 fds->fdset = 0;
3996}
3997
3998void
4000{
4001 if (fds->fdset)
4002 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4003}
4004
4005static void
4006rb_fd_resize(int n, rb_fdset_t *fds)
4007{
4008 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4009 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4010
4011 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4012 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4013
4014 if (m > o) {
4015 fds->fdset = xrealloc(fds->fdset, m);
4016 memset((char *)fds->fdset + o, 0, m - o);
4017 }
4018 if (n >= fds->maxfd) fds->maxfd = n + 1;
4019}
4020
4021void
4022rb_fd_set(int n, rb_fdset_t *fds)
4023{
4024 rb_fd_resize(n, fds);
4025 FD_SET(n, fds->fdset);
4026}
4027
4028void
4029rb_fd_clr(int n, rb_fdset_t *fds)
4030{
4031 if (n >= fds->maxfd) return;
4032 FD_CLR(n, fds->fdset);
4033}
4034
4035int
4036rb_fd_isset(int n, const rb_fdset_t *fds)
4037{
4038 if (n >= fds->maxfd) return 0;
4039 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4040}
4041
4042void
4043rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4044{
4045 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4046
4047 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4048 dst->maxfd = max;
4049 dst->fdset = xrealloc(dst->fdset, size);
4050 memcpy(dst->fdset, src, size);
4051}
4052
4053void
4054rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4055{
4056 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4057
4058 if (size < sizeof(fd_set))
4059 size = sizeof(fd_set);
4060 dst->maxfd = src->maxfd;
4061 dst->fdset = xrealloc(dst->fdset, size);
4062 memcpy(dst->fdset, src->fdset, size);
4063}
4064
4065int
4066rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4067{
4068 fd_set *r = NULL, *w = NULL, *e = NULL;
4069 if (readfds) {
4070 rb_fd_resize(n - 1, readfds);
4071 r = rb_fd_ptr(readfds);
4072 }
4073 if (writefds) {
4074 rb_fd_resize(n - 1, writefds);
4075 w = rb_fd_ptr(writefds);
4076 }
4077 if (exceptfds) {
4078 rb_fd_resize(n - 1, exceptfds);
4079 e = rb_fd_ptr(exceptfds);
4080 }
4081 return select(n, r, w, e, timeout);
4082}
4083
4084#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4085
4086#undef FD_ZERO
4087#undef FD_SET
4088#undef FD_CLR
4089#undef FD_ISSET
4090
4091#define FD_ZERO(f) rb_fd_zero(f)
4092#define FD_SET(i, f) rb_fd_set((i), (f))
4093#define FD_CLR(i, f) rb_fd_clr((i), (f))
4094#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4095
4096#elif defined(_WIN32)
4097
4098void
4100{
4101 set->capa = FD_SETSIZE;
4102 set->fdset = ALLOC(fd_set);
4103 FD_ZERO(set->fdset);
4104}
4105
4106void
4108{
4109 rb_fd_init(dst);
4110 rb_fd_dup(dst, src);
4111}
4112
4113void
4115{
4116 xfree(set->fdset);
4117 set->fdset = NULL;
4118 set->capa = 0;
4119}
4120
4121void
4122rb_fd_set(int fd, rb_fdset_t *set)
4123{
4124 unsigned int i;
4125 SOCKET s = rb_w32_get_osfhandle(fd);
4126
4127 for (i = 0; i < set->fdset->fd_count; i++) {
4128 if (set->fdset->fd_array[i] == s) {
4129 return;
4130 }
4131 }
4132 if (set->fdset->fd_count >= (unsigned)set->capa) {
4133 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4134 set->fdset =
4135 rb_xrealloc_mul_add(
4136 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4137 }
4138 set->fdset->fd_array[set->fdset->fd_count++] = s;
4139}
4140
4141#undef FD_ZERO
4142#undef FD_SET
4143#undef FD_CLR
4144#undef FD_ISSET
4145
4146#define FD_ZERO(f) rb_fd_zero(f)
4147#define FD_SET(i, f) rb_fd_set((i), (f))
4148#define FD_CLR(i, f) rb_fd_clr((i), (f))
4149#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4150
4151#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4152
4153#endif
4154
4155#ifndef rb_fd_no_init
4156#define rb_fd_no_init(fds) (void)(fds)
4157#endif
4158
4159static int
4160wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4161{
4162 if (*result < 0) {
4163 switch (errnum) {
4164 case EINTR:
4165#ifdef ERESTART
4166 case ERESTART:
4167#endif
4168 *result = 0;
4169 if (rel && hrtime_update_expire(rel, end)) {
4170 *rel = 0;
4171 }
4172 return TRUE;
4173 }
4174 return FALSE;
4175 }
4176 else if (*result == 0) {
4177 /* check for spurious wakeup */
4178 if (rel) {
4179 return !hrtime_update_expire(rel, end);
4180 }
4181 return TRUE;
4182 }
4183 return FALSE;
4184}
4185
4186 struct select_set {
4187 int max;
4188 int sigwait_fd;
4189 rb_thread_t *th;
4190 rb_fdset_t *rset;
4191 rb_fdset_t *wset;
4192 rb_fdset_t *eset;
4193 rb_fdset_t orig_rset;
4194 rb_fdset_t orig_wset;
4195 rb_fdset_t orig_eset;
4196 struct timeval *timeout;
4197 };
4198
4199static VALUE
4200select_set_free(VALUE p)
4201{
4202 struct select_set *set = (struct select_set *)p;
4203
4204 if (set->sigwait_fd >= 0) {
4205 rb_sigwait_fd_put(set->th, set->sigwait_fd);
4206 rb_sigwait_fd_migrate(set->th->vm);
4207 }
4208
4209 rb_fd_term(&set->orig_rset);
4210 rb_fd_term(&set->orig_wset);
4211 rb_fd_term(&set->orig_eset);
4212
4213 return Qfalse;
4214}
4215
4216static const rb_hrtime_t *
4217sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
4218 int *drained_p)
4219{
4220 static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
4221
4222 if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
4223 *drained_p = check_signals_nogvl(th, sigwait_fd);
4224 if (!orig || *orig > quantum)
4225 return &quantum;
4226 }
4227
4228 return orig;
4229}
4230
4231static VALUE
4232do_select(VALUE p)
4233{
4234 struct select_set *set = (struct select_set *)p;
4235 int result = 0;
4236 int lerrno;
4237 rb_hrtime_t *to, rel, end = 0;
4238
4239 timeout_prepare(&to, &rel, &end, set->timeout);
4240#define restore_fdset(dst, src) \
4241 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4242#define do_select_update() \
4243 (restore_fdset(set->rset, &set->orig_rset), \
4244 restore_fdset(set->wset, &set->orig_wset), \
4245 restore_fdset(set->eset, &set->orig_eset), \
4246 TRUE)
4247
4248 do {
4249 int drained;
4250 lerrno = 0;
4251
4252 BLOCKING_REGION(set->th, {
4253 const rb_hrtime_t *sto;
4254 struct timeval tv;
4255
4256 sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
4257 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4258 result = native_fd_select(set->max, set->rset, set->wset,
4259 set->eset,
4260 rb_hrtime2timeval(&tv, sto), set->th);
4261 if (result < 0) lerrno = errno;
4262 }
4263 }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4264
4265 if (set->sigwait_fd >= 0) {
4266 if (result > 0 && rb_fd_isset(set->sigwait_fd, set->rset)) {
4267 result--;
4268 (void)check_signals_nogvl(set->th, set->sigwait_fd);
4269 } else {
4270 (void)check_signals_nogvl(set->th, -1);
4271 }
4272 }
4273
4274 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4275 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4276
4277 if (result < 0) {
4278 errno = lerrno;
4279 }
4280
4281 return (VALUE)result;
4282}
4283
4284static void
4285rb_thread_wait_fd_rw(int fd, int read)
4286{
4287 int result = 0;
4288 int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
4289
4290 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
4291
4292 if (fd < 0) {
4293 rb_raise(rb_eIOError, "closed stream");
4294 }
4295
4296 result = rb_wait_for_single_fd(fd, events, NULL);
4297 if (result < 0) {
4298 rb_sys_fail(0);
4299 }
4300
4301 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
4302}
4303
4304void
4306{
4307 rb_thread_wait_fd_rw(fd, 1);
4308}
4309
4310int
4312{
4313 rb_thread_wait_fd_rw(fd, 0);
4314 return TRUE;
4315}
4316
4317static rb_fdset_t *
4318init_set_fd(int fd, rb_fdset_t *fds)
4319{
4320 if (fd < 0) {
4321 return 0;
4322 }
4323 rb_fd_init(fds);
4324 rb_fd_set(fd, fds);
4325
4326 return fds;
4327}
4328
4329int
4330 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4331 struct timeval *timeout)
4332{
4333 struct select_set set;
4334
4335 set.th = GET_THREAD();
4336 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4337 set.max = max;
4338 set.rset = read;
4339 set.wset = write;
4340 set.eset = except;
4341 set.timeout = timeout;
4342
4343 if (!set.rset && !set.wset && !set.eset) {
4344 if (!timeout) {
4345 rb_thread_sleep_forever();
4346 return 0;
4347 }
4348 rb_thread_wait_for(*timeout);
4349 return 0;
4350 }
4351
4352 set.sigwait_fd = rb_sigwait_fd_get(set.th);
4353 if (set.sigwait_fd >= 0) {
4354 if (set.rset)
4355 rb_fd_set(set.sigwait_fd, set.rset);
4356 else
4357 set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4358 if (set.sigwait_fd >= set.max) {
4359 set.max = set.sigwait_fd + 1;
4360 }
4361 }
4362#define fd_init_copy(f) do { \
4363 if (set.f) { \
4364 rb_fd_resize(set.max - 1, set.f); \
4365 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4366 rb_fd_init_copy(&set.orig_##f, set.f); \
4367 } \
4368 } \
4369 else { \
4370 rb_fd_no_init(&set.orig_##f); \
4371 } \
4372 } while (0)
4373 fd_init_copy(rset);
4374 fd_init_copy(wset);
4375 fd_init_copy(eset);
4376 #undef fd_init_copy
4377
4378 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4379}
4380
4381#ifdef USE_POLL
4382
4383 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
4384#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4385#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4386#define POLLEX_SET (POLLPRI)
4387
4388#ifndef POLLERR_SET /* defined for FreeBSD for now */
4389# define POLLERR_SET (0)
4390#endif
4391
4392/*
4393 * returns a mask of events
4394 */
4395int
4396rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4397{
4398 struct pollfd fds[2];
4399 int result = 0, lerrno;
4400 rb_hrtime_t *to, rel, end = 0;
4401 int drained;
4402 nfds_t nfds;
4403 rb_unblock_function_t *ubf;
4404 struct waiting_fd wfd;
4405 int state;
4406
4407 wfd.th = GET_THREAD();
4408 wfd.fd = fd;
4409
4410 RB_VM_LOCK_ENTER();
4411 {
4412 list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4413 }
4414 RB_VM_LOCK_LEAVE();
4415 
4416 EC_PUSH_TAG(wfd.th->ec);
4417 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4418 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4419 timeout_prepare(&to, &rel, &end, timeout);
4420 fds[0].fd = fd;
4421 fds[0].events = (short)events;
4422 fds[0].revents = 0;
4423 do {
4424 fds[1].fd = rb_sigwait_fd_get(wfd.th);
4425
4426 if (fds[1].fd >= 0) {
4427 fds[1].events = POLLIN;
4428 fds[1].revents = 0;
4429 nfds = 2;
4430 ubf = ubf_sigwait;
4431 }
4432 else {
4433 nfds = 1;
4434 ubf = ubf_select;
4435 }
4436
4437 lerrno = 0;
4438 BLOCKING_REGION(wfd.th, {
4439 const rb_hrtime_t *sto;
4440 struct timespec ts;
4441
4442 sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4443 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4444 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4445 if (result < 0) lerrno = errno;
4446 }
4447 }, ubf, wfd.th, TRUE);
4448
4449 if (fds[1].fd >= 0) {
4450 if (result > 0 && fds[1].revents) {
4451 result--;
4452 (void)check_signals_nogvl(wfd.th, fds[1].fd);
4453 } else {
4454 (void)check_signals_nogvl(wfd.th, -1);
4455 }
4456 rb_sigwait_fd_put(wfd.th, fds[1].fd);
4457 rb_sigwait_fd_migrate(wfd.th->vm);
4458 }
4459 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4460 } while (wait_retryable(&result, lerrno, to, end));
4461 }
4462 EC_POP_TAG();
4463
4464 RB_VM_LOCK_ENTER();
4465 {
4466 list_del(&wfd.wfd_node);
4467 }
4468 RB_VM_LOCK_LEAVE();
4469
4470 if (state) {
4471 EC_JUMP_TAG(wfd.th->ec, state);
4472 }
4473
4474 if (result < 0) {
4475 errno = lerrno;
4476 return -1;
4477 }
4478
4479 if (fds[0].revents & POLLNVAL) {
4480 errno = EBADF;
4481 return -1;
4482 }
4483
4484 /*
4485 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4486 * Therefore we need to fix them up.
4487 */
4488 result = 0;
4489 if (fds[0].revents & POLLIN_SET)
4490 result |= RB_WAITFD_IN;
4491 if (fds[0].revents & POLLOUT_SET)
4492 result |= RB_WAITFD_OUT;
4493 if (fds[0].revents & POLLEX_SET)
4494 result |= RB_WAITFD_PRI;
4495
4496 /* all requested events are ready if there is an error */
4497 if (fds[0].revents & POLLERR_SET)
4498 result |= events;
4499
4500 return result;
4501}
4502#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4503struct select_args {
4504 union {
4505 int fd;
4506 int error;
4507 } as;
4508 rb_fdset_t *read;
4509 rb_fdset_t *write;
4510 rb_fdset_t *except;
4511 struct waiting_fd wfd;
4512 struct timeval *tv;
4513};
4514
4515static VALUE
4516select_single(VALUE ptr)
4517{
4518 struct select_args *args = (struct select_args *)ptr;
4519 int r;
4520
4521 r = rb_thread_fd_select(args->as.fd + 1,
4522 args->read, args->write, args->except, args->tv);
4523 if (r == -1)
4524 args->as.error = errno;
4525 if (r > 0) {
4526 r = 0;
4527 if (args->read && rb_fd_isset(args->as.fd, args->read))
4528 r |= RB_WAITFD_IN;
4529 if (args->write && rb_fd_isset(args->as.fd, args->write))
4530 r |= RB_WAITFD_OUT;
4531 if (args->except && rb_fd_isset(args->as.fd, args->except))
4532 r |= RB_WAITFD_PRI;
4533 }
4534 return (VALUE)r;
4535}
4536
4537static VALUE
4538select_single_cleanup(VALUE ptr)
4539{
4540 struct select_args *args = (struct select_args *)ptr;
4541
4542 list_del(&args->wfd.wfd_node);
4543 if (args->read) rb_fd_term(args->read);
4544 if (args->write) rb_fd_term(args->write);
4545 if (args->except) rb_fd_term(args->except);
4546
4547 return (VALUE)-1;
4548}
4549
4550int
4551rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4552{
4553 rb_fdset_t rfds, wfds, efds;
4554 struct select_args args;
4555 int r;
4556 VALUE ptr = (VALUE)&args;
4557
4558 args.as.fd = fd;
4559 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4560 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4561 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4562 args.tv = timeout;
4563 args.wfd.fd = fd;
4564 args.wfd.th = GET_THREAD();
4565
4566 RB_VM_LOCK_ENTER();
4567 {
4568 list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4569 }
4570 RB_VM_LOCK_LEAVE();
4571
4572 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4573 if (r == -1)
4574 errno = args.as.error;
4575
4576 return r;
4577}
4578#endif /* ! USE_POLL */
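/*
 * Editor's note: a minimal sketch, not part of the original source, of how
 * the event mask returned by rb_wait_for_single_fd() is typically consumed;
 * `sock` is a hypothetical descriptor:
 *
 *   int revents = rb_wait_for_single_fd(sock, RB_WAITFD_IN | RB_WAITFD_OUT, NULL);
 *   if (revents < 0) rb_sys_fail("rb_wait_for_single_fd");
 *   if (revents & RB_WAITFD_IN)  { }  // readable (errors also set all requested bits)
 *   if (revents & RB_WAITFD_OUT) { }  // writable
 */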
4579
4580/*
4581 * for GC
4582 */
4583
4584#ifdef USE_CONSERVATIVE_STACK_END
4585void
4586rb_gc_set_stack_end(VALUE **stack_end_p)
4587{
4588 VALUE stack_end;
4589 *stack_end_p = &stack_end;
4590}
4591#endif
4592
4593/*
4594 *
4595 */
4596
4597void
4598rb_threadptr_check_signal(rb_thread_t *mth)
4599{
4600 /* mth must be main_thread */
4601 if (rb_signal_buff_size() > 0) {
4602 /* wakeup main thread */
4603 threadptr_trap_interrupt(mth);
4604 }
4605}
4606
4607static void
4608async_bug_fd(const char *mesg, int errno_arg, int fd)
4609{
4610 char buff[64];
4611 size_t n = strlcpy(buff, mesg, sizeof(buff));
4612 if (n < sizeof(buff)-3) {
4613 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4614 }
4615 rb_async_bug_errno(buff, errno_arg);
4616}
4617
4618/* VM-dependent API is not available for this function */
4619static int
4620consume_communication_pipe(int fd)
4621{
4622#if USE_EVENTFD
4623 uint64_t buff[1];
4624#else
4625 /* the buffer can be shared because no one else refers to it. */
4626 static char buff[1024];
4627#endif
4628 ssize_t result;
4629 int ret = FALSE; /* for rb_sigwait_sleep */
4630
4631 /*
4632 * disarm UBF_TIMER before we read, because it can become
4633 * re-armed at any time via the signal handler and the pipe will refill.
4634 * We can disarm it because this thread is now processing signals
4635 * and we do not want an unnecessary SIGVTALRM.
4636 */
4637 ubf_timer_disarm();
4638
4639 while (1) {
4640 result = read(fd, buff, sizeof(buff));
4641 if (result > 0) {
4642 ret = TRUE;
4643 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4644 return ret;
4645 }
4646 }
4647 else if (result == 0) {
4648 return ret;
4649 }
4650 else if (result < 0) {
4651 int e = errno;
4652 switch (e) {
4653 case EINTR:
4654 continue; /* retry */
4655 case EAGAIN:
4656#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4657 case EWOULDBLOCK:
4658#endif
4659 return ret;
4660 default:
4661 async_bug_fd("consume_communication_pipe: read", e, fd);
4662 }
4663 }
4664 }
4665}
4666
4667static int
4668check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4669{
4670 rb_vm_t *vm = GET_VM(); /* th may be 0 */
4671 int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4672 ubf_wakeup_all_threads();
4673 ruby_sigchld_handler(vm);
4674 if (rb_signal_buff_size()) {
4675 if (th == vm->ractor.main_thread) {
4676 /* no need to lock + wakeup if already in main thread */
4677 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4678 }
4679 else {
4680 threadptr_trap_interrupt(vm->ractor.main_thread);
4681 }
4682 ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4683 }
4684 return ret;
4685}
4686
4687void
4688rb_thread_stop_timer_thread(void)
4689{
4690 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4691 native_reset_timer_thread();
4692 }
4693}
4694
4695void
4696rb_thread_reset_timer_thread(void)
4697{
4698 native_reset_timer_thread();
4699}
4700
4701void
4702rb_thread_start_timer_thread(void)
4703{
4704 system_working = 1;
4705 rb_thread_create_timer_thread();
4706}
4707
4708static int
4709clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4710{
4711 int i;
4712 VALUE coverage = (VALUE)val;
4713 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4714 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4715
4716 if (lines) {
4717 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4718 rb_ary_clear(lines);
4719 }
4720 else {
4721 int i;
4722 for (i = 0; i < RARRAY_LEN(lines); i++) {
4723 if (RARRAY_AREF(lines, i) != Qnil)
4724 RARRAY_ASET(lines, i, INT2FIX(0));
4725 }
4726 }
4727 }
4728 if (branches) {
4729 VALUE counters = RARRAY_AREF(branches, 1);
4730 for (i = 0; i < RARRAY_LEN(counters); i++) {
4731 RARRAY_ASET(counters, i, INT2FIX(0));
4732 }
4733 }
4734
4735 return ST_CONTINUE;
4736}
4737
4738void
4739rb_clear_coverages(void)
4740{
4741 VALUE coverages = rb_get_coverages();
4742 if (RTEST(coverages)) {
4743 rb_hash_foreach(coverages, clear_coverage_i, 0);
4744 }
4745}
4746
4747#if defined(HAVE_WORKING_FORK)
4748
4749static void
4750rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4751{
4752 rb_thread_t *i = 0;
4753 rb_vm_t *vm = th->vm;
4754 rb_ractor_t *r = th->ractor;
4755 vm->ractor.main_ractor = r;
4756 vm->ractor.main_thread = th;
4757 r->threads.main = th;
4758 r->status_ = ractor_created;
4759
4760 gvl_atfork(rb_ractor_gvl(th->ractor));
4761 ubf_list_atfork();
4762
4763 // OK. Only this thread accesses:
4764 list_for_each(&vm->ractor.set, r, vmlr_node) {
4765 list_for_each(&r->threads.set, i, lt_node) {
4766 atfork(i, th);
4767 }
4768 }
4769 rb_vm_living_threads_init(vm);
4770
4771 rb_ractor_atfork(vm, th);
4772
4773 /* may be held by MJIT threads in parent */
4774 rb_native_mutex_initialize(&vm->waitpid_lock);
4775 rb_native_mutex_initialize(&vm->workqueue_lock);
4776
4777 /* may be held by any thread in parent */
4778 rb_native_mutex_initialize(&th->interrupt_lock);
4779
4780 vm->fork_gen++;
4781 rb_ractor_sleeper_threads_clear(th->ractor);
4782 rb_clear_coverages();
4783
4784 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4785 VM_ASSERT(vm->ractor.cnt == 1);
4786}
4787
4788static void
4789terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4790{
4791 if (th != current_th) {
4792 rb_mutex_abandon_keeping_mutexes(th);
4793 rb_mutex_abandon_locking_mutex(th);
4794 thread_cleanup_func(th, TRUE);
4795 }
4796}
4797
4798void rb_fiber_atfork(rb_thread_t *);
4799void
4800rb_thread_atfork(void)
4801{
4802 rb_thread_t *th = GET_THREAD();
4803 rb_thread_atfork_internal(th, terminate_atfork_i);
4804 th->join_list = NULL;
4805 rb_fiber_atfork(th);
4806
4807 /* We don't want to reproduce CVE-2003-0900. */
4808 rb_reset_random_seed();
4809
4810 /* In the child, start the MJIT worker thread here: it is safer than doing so immediately after `after_fork_ruby`. */
4811 mjit_child_after_fork();
4812}
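/*
 * Editor's note: an illustrative Ruby-level consequence, not part of the
 * original source, of the atfork handling above: only the forking thread
 * survives in the child; every other thread is torn down by
 * terminate_atfork_i():
 *
 *   Thread.new { sleep }
 *   fork { p Thread.list.length }   #=> prints 1 in the child process
 */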
4813
4814static void
4815terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4816{
4817 if (th != current_th) {
4818 thread_cleanup_func_before_exec(th);
4819 }
4820}
4821
4822void
4823rb_thread_atfork_before_exec(void)
4824{
4825 rb_thread_t *th = GET_THREAD();
4826 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4827}
4828#else
4829void
4830rb_thread_atfork(void)
4831{
4832}
4833
4834void
4835rb_thread_atfork_before_exec(void)
4836{
4837}
4838#endif
4839
4840struct thgroup {
4841 int enclosed;
4842 VALUE group;
4843};
4844
4845static size_t
4846thgroup_memsize(const void *ptr)
4847{
4848 return sizeof(struct thgroup);
4849}
4850
4851static const rb_data_type_t thgroup_data_type = {
4852 "thgroup",
4853 {0, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4854 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4855};
4856
4857/*
4858 * Document-class: ThreadGroup
4859 *
4860 * ThreadGroup provides a means of keeping track of a number of threads as a
4861 * group.
4862 *
4863 * A given Thread object can only belong to one ThreadGroup at a time; adding
4864 * a thread to a new group will remove it from any previous group.
4865 *
4866 * Newly created threads belong to the same group as the thread from which they
4867 * were created.
4868 */
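/*
 * Editor's note: an illustrative example, not part of the original source,
 * of the group-inheritance rule described above:
 *
 *    tg = ThreadGroup.new
 *    tg.add(Thread.current)
 *    Thread.new { sleep }.group == tg   #=> true
 */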
4869
4870/*
4871 * Document-const: Default
4872 *
4873 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4874 * by default.
4875 */
4876static VALUE
4877thgroup_s_alloc(VALUE klass)
4878{
4879 VALUE group;
4880 struct thgroup *data;
4881
4882 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4883 data->enclosed = 0;
4884 data->group = group;
4885
4886 return group;
4887}
4888
4889/*
4890 * call-seq:
4891 * thgrp.list -> array
4892 *
4893 * Returns an array of all existing Thread objects that belong to this group.
4894 *
4895 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4896 */
4897
4898static VALUE
4899thgroup_list(VALUE group)
4900{
4901 VALUE ary = rb_ary_new();
4902 rb_thread_t *th = 0;
4903 rb_ractor_t *r = GET_RACTOR();
4904
4905 list_for_each(&r->threads.set, th, lt_node) {
4906 if (th->thgroup == group) {
4907 rb_ary_push(ary, th->self);
4908 }
4909 }
4910 return ary;
4911}
4912
4913
4914/*
4915 * call-seq:
4916 * thgrp.enclose -> thgrp
4917 *
4918 * Prevents threads from being added to or removed from the receiving
4919 * ThreadGroup.
4920 *
4921 * New threads can still be started in an enclosed ThreadGroup.
4922 *
4923 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4924 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4925 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4926 * tg.add thr
4927 * #=> ThreadError: can't move from the enclosed thread group
4928 */
4929
4930static VALUE
4931thgroup_enclose(VALUE group)
4932{
4933 struct thgroup *data;
4934
4935 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4936 data->enclosed = 1;
4937
4938 return group;
4939}
4940
4941
4942/*
4943 * call-seq:
4944 * thgrp.enclosed? -> true or false
4945 *
4946 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4947 */
4948
4949static VALUE
4950thgroup_enclosed_p(VALUE group)
4951{
4952 struct thgroup *data;
4953
4954 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4955 if (data->enclosed)
4956 return Qtrue;
4957 return Qfalse;
4958}
4959
4960
4961/*
4962 * call-seq:
4963 * thgrp.add(thread) -> thgrp
4964 *
4965 * Adds the given +thread+ to this group, removing it from any other
4966 * group to which it may have previously been a member.
4967 *
4968 * puts "Initial group is #{ThreadGroup::Default.list}"
4969 * tg = ThreadGroup.new
4970 * t1 = Thread.new { sleep }
4971 * t2 = Thread.new { sleep }
4972 * puts "t1 is #{t1}"
4973 * puts "t2 is #{t2}"
4974 * tg.add(t1)
4975 * puts "Initial group now #{ThreadGroup::Default.list}"
4976 * puts "tg group now #{tg.list}"
4977 *
4978 * This will produce:
4979 *
4980 * Initial group is #<Thread:0x401bdf4c>
4981 * t1 is #<Thread:0x401b3c90>
4982 * t2 is #<Thread:0x401b3c18>
4983 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4984 * tg group now #<Thread:0x401b3c90>
4985 */
4986
4987static VALUE
4988thgroup_add(VALUE group, VALUE thread)
4989{
4990 rb_thread_t *target_th = rb_thread_ptr(thread);
4991 struct thgroup *data;
4992
4993 if (OBJ_FROZEN(group)) {
4994 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4995 }
4996 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4997 if (data->enclosed) {
4998 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4999 }
5000
5001 if (!target_th->thgroup) {
5002 return Qnil;
5003 }
5004
5005 if (OBJ_FROZEN(target_th->thgroup)) {
5006 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5007 }
5008 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5009 if (data->enclosed) {
5011 "can't move from the enclosed thread group");
5012 }
5013
5014 target_th->thgroup = group;
5015 return group;
5016}
5017
5018/*
5019 * Document-class: ThreadShield
5020 */
5021static void
5022thread_shield_mark(void *ptr)
5023{
5024 rb_gc_mark((VALUE)ptr);
5025}
5026
5027static const rb_data_type_t thread_shield_data_type = {
5028 "thread_shield",
5029 {thread_shield_mark, 0, 0,},
5030 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5031};
5032
5033static VALUE
5034thread_shield_alloc(VALUE klass)
5035{
5036 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5037}
5038
5039#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5040#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5041#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5042#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5043STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
5044static inline unsigned int
5045rb_thread_shield_waiting(VALUE b)
5046{
5047 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5048}
5049
5050static inline void
5051rb_thread_shield_waiting_inc(VALUE b)
5052{
5053 unsigned int w = rb_thread_shield_waiting(b);
5054 w++;
5055 if (w > THREAD_SHIELD_WAITING_MAX)
5056 rb_raise(rb_eRuntimeError, "waiting count overflow");
5057 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5058 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5059}
5060
5061static inline void
5062rb_thread_shield_waiting_dec(VALUE b)
5063{
5064 unsigned int w = rb_thread_shield_waiting(b);
5065 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5066 w--;
5067 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5068 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5069}
5070
5071VALUE
5072rb_thread_shield_new(void)
5073{
5074 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5075 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5076 return thread_shield;
5077}
5078
5079/*
5080 * Wait on a thread shield.
5081 *
5082 * Returns
5083 * true: acquired the thread shield
5084 * false: the thread shield was destroyed and no other threads are waiting
5085 * nil: the thread shield was destroyed but still in use
5086 */
5087VALUE
5088rb_thread_shield_wait(VALUE self)
5089{
5090 VALUE mutex = GetThreadShieldPtr(self);
5091 rb_mutex_t *m;
5092
5093 if (!mutex) return Qfalse;
5094 m = mutex_ptr(mutex);
5095 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5096 rb_thread_shield_waiting_inc(self);
5097 rb_mutex_lock(mutex);
5098 rb_thread_shield_waiting_dec(self);
5099 if (DATA_PTR(self)) return Qtrue;
5100 rb_mutex_unlock(mutex);
5101 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5102}
5103
5104static VALUE
5105thread_shield_get_mutex(VALUE self)
5106{
5107 VALUE mutex = GetThreadShieldPtr(self);
5108 if (!mutex)
5109 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5110 return mutex;
5111}
5112
5113/*
5114 * Release a thread shield, and return true if it has waiting threads.
5115 */
5116VALUE
5117rb_thread_shield_release(VALUE self)
5118{
5119 VALUE mutex = thread_shield_get_mutex(self);
5120 rb_mutex_unlock(mutex);
5121 return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
5122}
5123
5124/*
5125 * Release and destroy a thread shield, and return true if it has waiting threads.
5126 */
5127VALUE
5128rb_thread_shield_destroy(VALUE self)
5129{
5130 VALUE mutex = thread_shield_get_mutex(self);
5131 DATA_PTR(self) = 0;
5132 rb_mutex_unlock(mutex);
5133 return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
5134}
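/*
 * Editor's note: a minimal sketch, not part of the original source, of the
 * thread-shield life cycle implemented above. The creating thread holds the
 * underlying mutex; other threads block in rb_thread_shield_wait() until it
 * is released or destroyed:
 *
 *   VALUE shield = rb_thread_shield_new();   // creator now owns the mutex
 *   // ... other threads call rb_thread_shield_wait(shield) and sleep ...
 *   rb_thread_shield_destroy(shield);        // waiters wake with false/nil
 */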
5135
5136static VALUE
5137threadptr_recursive_hash(rb_thread_t *th)
5138{
5139 return th->ec->local_storage_recursive_hash;
5140}
5141
5142static void
5143threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5144{
5145 th->ec->local_storage_recursive_hash = hash;
5146}
5147
5149ID rb_frame_last_func(void);
5150/*
5151 * Returns the current "recursive list" used to detect recursion.
5152 * This list is a hash table, unique for the current thread and for
5153 * the current __callee__.
5154 */
5155
5156static VALUE
5157recursive_list_access(VALUE sym)
5158{
5159 rb_thread_t *th = GET_THREAD();
5160 VALUE hash = threadptr_recursive_hash(th);
5161 VALUE list;
5162 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5163 hash = rb_ident_hash_new();
5164 threadptr_recursive_hash_set(th, hash);
5165 list = Qnil;
5166 }
5167 else {
5168 list = rb_hash_aref(hash, sym);
5169 }
5170 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5171 list = rb_ident_hash_new();
5172 rb_hash_aset(hash, sym, list);
5173 }
5174 return list;
5175}
5176
5177/*
5178 * Returns Qtrue iff obj (or the pair <obj, paired_obj>) is already
5179 * in the recursion list.
5180 * Assumes the recursion list is valid.
5181 */
5182
5183static VALUE
5184recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5185{
5186#if SIZEOF_LONG == SIZEOF_VOIDP
5187 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5188#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5189 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
5190 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5191#endif
5192
5193 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5194 if (pair_list == Qundef)
5195 return Qfalse;
5196 if (paired_obj_id) {
5197 if (!RB_TYPE_P(pair_list, T_HASH)) {
5198 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5199 return Qfalse;
5200 }
5201 else {
5202 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5203 return Qfalse;
5204 }
5205 }
5206 return Qtrue;
5207}
5208
5209/*
5210 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5211 * For a single obj, it sets list[obj] to Qtrue.
5212 * For a pair, it sets list[obj] to paired_obj_id if possible,
5213 * otherwise list[obj] becomes a hash like:
5214 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5215 * Assumes the recursion list is valid.
5216 */
5217
5218static void
5219recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5220{
5221 VALUE pair_list;
5222
5223 if (!paired_obj) {
5224 rb_hash_aset(list, obj, Qtrue);
5225 }
5226 else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
5227 rb_hash_aset(list, obj, paired_obj);
5228 }
5229 else {
5230 if (!RB_TYPE_P(pair_list, T_HASH)){
5231 VALUE other_paired_obj = pair_list;
5232 pair_list = rb_hash_new();
5233 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5234 rb_hash_aset(list, obj, pair_list);
5235 }
5236 rb_hash_aset(pair_list, paired_obj, Qtrue);
5237 }
5238}
5239
5240/*
5241 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5242 * For a pair, if list[obj] is a hash, then paired_obj_id is
5243 * removed from the hash and no attempt is made to simplify
5244 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5245 * Assumes the recursion list is valid.
5246 */
5247
5248static int
5249recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5250{
5251 if (paired_obj) {
5252 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5253 if (pair_list == Qundef) {
5254 return 0;
5255 }
5256 if (RB_TYPE_P(pair_list, T_HASH)) {
5257 rb_hash_delete_entry(pair_list, paired_obj);
5258 if (!RHASH_EMPTY_P(pair_list)) {
5259 return 1; /* keep the hash until it is empty */
5260 }
5261 }
5262 }
5263 rb_hash_delete_entry(list, obj);
5264 return 1;
5265}
5266
5267struct exec_recursive_params {
5268 VALUE (*func) (VALUE, VALUE, int);
5269 VALUE list;
5270 VALUE obj;
5271 VALUE pairid;
5272 VALUE arg;
5273};
5274
5275static VALUE
5276exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5277{
5278 struct exec_recursive_params *p = (void *)data;
5279 return (*p->func)(p->obj, p->arg, FALSE);
5280}
5281
5282/*
5283 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5284 * current method is called recursively on obj, or on the pair <obj, pairid>.
5285 * If outer is 0, then the innermost func will be called with recursive set
5286 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5287 * all inner calls to func are short-circuited by throw.
5288 * Implementation details: the value thrown is the recursive list which is
5289 * proper to the current method and unlikely to be caught anywhere else.
5290 * list[recursive_key] is used as a flag for the outermost call.
5291 */
5292
5293static VALUE
5294exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
5295{
5296 VALUE result = Qundef;
5297 const ID mid = rb_frame_last_func();
5298 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5299 struct exec_recursive_params p;
5300 int outermost;
5301 p.list = recursive_list_access(sym);
5302 p.obj = obj;
5303 p.pairid = pairid;
5304 p.arg = arg;
5305 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5306
5307 if (recursive_check(p.list, p.obj, pairid)) {
5308 if (outer && !outermost) {
5309 rb_throw_obj(p.list, p.list);
5310 }
5311 return (*func)(obj, arg, TRUE);
5312 }
5313 else {
5314 enum ruby_tag_type state;
5315
5316 p.func = func;
5317
5318 if (outermost) {
5319 recursive_push(p.list, ID2SYM(recursive_key), 0);
5320 recursive_push(p.list, p.obj, p.pairid);
5321 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5322 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5323 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5324 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5325 if (result == p.list) {
5326 result = (*func)(obj, arg, TRUE);
5327 }
5328 }
5329 else {
5330 volatile VALUE ret = Qundef;
5331 recursive_push(p.list, p.obj, p.pairid);
5332 EC_PUSH_TAG(GET_EC());
5333 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5334 ret = (*func)(obj, arg, FALSE);
5335 }
5336 EC_POP_TAG();
5337 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5338 goto invalid;
5339 }
5340 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5341 result = ret;
5342 }
5343 }
5344 *(volatile struct exec_recursive_params *)&p;
5345 return result;
5346
5347 invalid:
5348 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5349 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5350 sym, rb_thread_current());
5351 UNREACHABLE_RETURN(Qundef);
5352}
5353
5354/*
5355 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5356 * current method is called recursively on obj
5357 */
5358
5359VALUE
5360rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5361{
5362 return exec_recursive(func, obj, 0, arg, 0);
5363}
5364
5365/*
5366 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5367 * current method is called recursively on the ordered pair <obj, paired_obj>
5368 */
5369
5370VALUE
5371rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5372{
5373 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0);
5374}
5375
5376/*
5377 * If recursion is detected on the current method and obj, the outermost
5378 * func will be called with (obj, arg, Qtrue). All inner calls to func will be
5379 * short-circuited using throw.
5380 */
5381
5382VALUE
5383rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5384{
5385 return exec_recursive(func, obj, 0, arg, 1);
5386}
5387
5388/*
5389 * If recursion is detected on the current method, obj and paired_obj,
5390 * the outermost func will be called with (obj, arg, Qtrue). All inner
5391 * calls to func will be short-circuited using throw.
5392 */
5393
5394VALUE
5395rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5396{
5397 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1);
5398}
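/*
 * Editor's note: a minimal sketch, not part of the original source, of the
 * usual rb_exec_recursive() pattern for recursion-safe inspection; the names
 * my_inspect_i and real_inspect are hypothetical:
 *
 *   static VALUE
 *   my_inspect_i(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");  // cycle detected
 *       return real_inspect(obj);
 *   }
 *
 *   // detects obj already being inspected on the current thread:
 *   VALUE s = rb_exec_recursive(my_inspect_i, obj, 0);
 */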
5399
5400/*
5401 * call-seq:
5402 * thread.backtrace -> array or nil
5403 *
5404 * Returns the current backtrace of the target thread.
5405 *
5406 */
5407
5408static VALUE
5409rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5410{
5411 return rb_vm_thread_backtrace(argc, argv, thval);
5412}
5413
5414/* call-seq:
5415 * thread.backtrace_locations(*args) -> array or nil
5416 *
5417 * Returns the execution stack for the target thread---an array containing
5418 * backtrace location objects.
5419 *
5420 * See Thread::Backtrace::Location for more information.
5421 *
5422 * This method behaves similarly to Kernel#caller_locations except it applies
5423 * to a specific thread.
5424 */
5425static VALUE
5426rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5427{
5428 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5429}
5430
5431void
5432Init_Thread_Mutex(void)
5433{
5434 rb_thread_t *th = GET_THREAD();
5435
5436 rb_native_mutex_initialize(&th->vm->waitpid_lock);
5437 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5438 rb_native_mutex_initialize(&th->interrupt_lock);
5439}
5440
5441/*
5442 * Document-class: ThreadError
5443 *
5444 * Raised when an invalid operation is attempted on a thread.
5445 *
5446 * For example, when no other thread has been started:
5447 *
5448 * Thread.stop
5449 *
5450 * This will raise the following exception:
5451 *
5452 * ThreadError: stopping only thread
5453 * note: use sleep to stop forever
5454 */
5455
5456void
5457Init_Thread(void)
5458{
5459 VALUE cThGroup;
5460 rb_thread_t *th = GET_THREAD();
5461
5462 sym_never = ID2SYM(rb_intern_const("never"));
5463 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5464 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5465
5466 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5467 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5468 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5469 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5470 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5471 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5472 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5473 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5474 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5475 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5476 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5477 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5478 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5479 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5480 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5481 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5482#if THREAD_DEBUG < 0
5483 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
5484 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
5485#endif
5486 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5487 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5488 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5489
5490 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5491 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5492 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5493 rb_define_method(rb_cThread, "value", thread_value, 0);
5495 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5499 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5500 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5501 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5502 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5503 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5504 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5505 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5507 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5508 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5509 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5510 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5511 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5512 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5513 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5514 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5515 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5516 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5518 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5519 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5520
5521 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5522 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5523 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5524 rb_define_alias(rb_cThread, "inspect", "to_s");
5525
5527 "stream closed in another thread");
5528
5529 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5530 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5531 rb_define_method(cThGroup, "list", thgroup_list, 0);
5532 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5533 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5534 rb_define_method(cThGroup, "add", thgroup_add, 1);
5535
5536 {
5537 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5538 rb_define_const(cThGroup, "Default", th->thgroup);
5539 }
5540
5541 recursive_key = rb_intern_const("__recursive_key__");
5542 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
5543
5544 /* init thread core */
5545 {
5546 /* main thread setting */
5547 {
5548 /* acquire global vm lock */
5549 rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
5550 gvl_acquire(gvl, th);
5551
5552 th->pending_interrupt_queue = rb_ary_tmp_new(0);
5553 th->pending_interrupt_queue_checked = 0;
5554 th->pending_interrupt_mask_stack = rb_ary_tmp_new(0);
5555 }
5556 }
5557
5558 rb_thread_create_timer_thread();
5559
5560 Init_thread_sync();
5561}
5562
5563int
5564ruby_native_thread_p(void)
5565{
5566 rb_thread_t *th = ruby_thread_from_native();
5567
5568 return th != 0;
5569}
5570
5571static void
5572debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5573{
5574 rb_thread_t *th = 0;
5575 VALUE sep = rb_str_new_cstr("\n ");
5576
5577 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5578 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5579 (void *)GET_THREAD(), (void *)r->threads.main);
5580
5581 list_for_each(&r->threads.set, th, lt_node) {
5582 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5583 "native:%"PRI_THREAD_ID" int:%u",
5584 th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5585
5586 if (th->locking_mutex) {
5587 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5588 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5589 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5590 }
5591
5592 {
5593 struct rb_waiting_list *list = th->join_list;
5594 while (list) {
5595 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
5596 list = list->next;
5597 }
5598 }
5599 rb_str_catf(msg, "\n ");
5600 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5601 rb_str_catf(msg, "\n");
5602 }
5603}
5604
5605static void
5606rb_check_deadlock(rb_ractor_t *r)
5607{
5608 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5609
5610 int found = 0;
5611 rb_thread_t *th = NULL;
5612 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5613 int ltnum = rb_ractor_living_thread_num(r);
5614
5615 if (ltnum > sleeper_num) return;
5616 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5617 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5618
5619 list_for_each(&r->threads.set, th, lt_node) {
5620 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5621 found = 1;
5622 }
5623 else if (th->locking_mutex) {
5624 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5625 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !list_empty(&mutex->waitq))) {
5626 found = 1;
5627 }
5628 }
5629 if (found)
5630 break;
5631 }
5632
5633 if (!found) {
5634 VALUE argv[2];
5635 argv[0] = rb_eFatal;
5636 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5637 debug_deadlock_check(r, argv[1]);
5638 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5639 rb_threadptr_raise(r->threads.main, 2, argv);
5640 }
5641}
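/*
 * Editor's note: an illustrative Ruby snippet, not part of the original
 * source, that trips the check above. Every living thread ends up sleeping
 * forever, so rb_check_deadlock() raises the fatal exception in the main
 * thread:
 *
 *   Thread.new { Thread.stop }.join
 *   # => No live threads left. Deadlock? (fatal), plus the dump produced
 *   #    by debug_deadlock_check()
 */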
5642
5643static void
5644update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5645{
5646 const rb_control_frame_t *cfp = GET_EC()->cfp;
5647 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5648 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5649 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5650 if (lines) {
5651 long line = rb_sourceline() - 1;
5652 long count;
5653 VALUE num;
5654 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5655 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5656 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - cfp->iseq->body->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5657 rb_ary_push(lines, LONG2FIX(line + 1));
5658 return;
5659 }
5660 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5661 return;
5662 }
5663 num = RARRAY_AREF(lines, line);
5664 if (!FIXNUM_P(num)) return;
5665 count = FIX2LONG(num) + 1;
5666 if (POSFIXABLE(count)) {
5667 RARRAY_ASET(lines, line, LONG2FIX(count));
5668 }
5669 }
5670 }
5671}
5672
5673static void
5674update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5675{
5676 const rb_control_frame_t *cfp = GET_EC()->cfp;
5677 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5678 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5679 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5680 if (branches) {
5681 long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
5682 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5683 VALUE counters = RARRAY_AREF(branches, 1);
5684 VALUE num = RARRAY_AREF(counters, idx);
5685 count = FIX2LONG(num) + 1;
5686 if (POSFIXABLE(count)) {
5687 RARRAY_ASET(counters, idx, LONG2FIX(count));
5688 }
5689 }
5690 }
5691}
5692
5693const rb_method_entry_t *
5694rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5695{
5696 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5697
5698 if (!me->def) return NULL; // negative cme
5699
5700 retry:
5701 switch (me->def->type) {
5702 case VM_METHOD_TYPE_ISEQ: {
5703 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5704 rb_iseq_location_t *loc = &iseq->body->location;
5705 path = rb_iseq_path(iseq);
5706 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5707 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5708 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5709 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5710 break;
5711 }
5712 case VM_METHOD_TYPE_BMETHOD: {
5713 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5714 if (iseq) {
5715 rb_iseq_location_t *loc;
5716 rb_iseq_check(iseq);
5717 path = rb_iseq_path(iseq);
5718 loc = &iseq->body->location;
5719 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5720 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5721 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5722 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5723 break;
5724 }
5725 return NULL;
5726 }
5727 case VM_METHOD_TYPE_ALIAS:
5728 me = me->def->body.alias.original_me;
5729 goto retry;
5730 case VM_METHOD_TYPE_REFINED:
5731 me = me->def->body.refined.orig_me;
5732 if (!me) return NULL;
5733 goto retry;
5734 default:
5735 return NULL;
5736 }
5737
5738 /* found */
5739 if (RB_TYPE_P(path, T_ARRAY)) {
5740 path = rb_ary_entry(path, 1);
5741 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5742 }
5743 if (resolved_location) {
5744 resolved_location[0] = path;
5745 resolved_location[1] = beg_pos_lineno;
5746 resolved_location[2] = beg_pos_column;
5747 resolved_location[3] = end_pos_lineno;
5748 resolved_location[4] = end_pos_column;
5749 }
5750 return me;
5751}
5752
5753static void
5754update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5755{
5756 const rb_control_frame_t *cfp = GET_EC()->cfp;
5757 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5758 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5759 VALUE rcount;
5760 long count;
5761
5762 me = rb_resolve_me_location(me, 0);
5763 if (!me) return;
5764
5765 rcount = rb_hash_aref(me2counter, (VALUE) me);
5766 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5767 if (POSFIXABLE(count)) {
5768 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5769 }
5770}
5771
5772VALUE
5773rb_get_coverages(void)
5774{
5775 return GET_VM()->coverages;
5776}
5777
5778int
5779rb_get_coverage_mode(void)
5780{
5781 return GET_VM()->coverage_mode;
5782}
5783
5784void
5785rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5786{
5787 GET_VM()->coverages = coverages;
5788 GET_VM()->coverage_mode = mode;
5789 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5790 if (mode & COVERAGE_TARGET_BRANCHES) {
5791 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5792 }
5793 if (mode & COVERAGE_TARGET_METHODS) {
5794 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5795 }
5796}
5797
5798/* Make coverage arrays empty so old covered files are no longer tracked. */
5799void
5800rb_reset_coverages(void)
5801{
5802 rb_clear_coverages();
5803 rb_iseq_remove_coverage_all();
5804 GET_VM()->coverages = Qfalse;
5805 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5806 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5807 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5808 }
5809 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5810 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5811 }
5812}
5813
5814VALUE
5815rb_default_coverage(int n)
5816{
5817 VALUE coverage = rb_ary_tmp_new_fill(3);
5818 VALUE lines = Qfalse, branches = Qfalse;
5819 int mode = GET_VM()->coverage_mode;
5820
5821 if (mode & COVERAGE_TARGET_LINES) {
5822 lines = n > 0 ? rb_ary_tmp_new_fill(n) : rb_ary_tmp_new(0);
5823 }
5824 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5825
5826 if (mode & COVERAGE_TARGET_BRANCHES) {
5827 branches = rb_ary_tmp_new_fill(2);
5828 /* internal data structures for branch coverage:
5829 *
5830 * { branch base node =>
5831 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5832 * branch target id =>
5833 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5834 * ...
5835 * }],
5836 * ...
5837 * }
5838 *
5839 * Example:
5840 * { NODE_CASE =>
5841 * [1, 0, 4, 3, {
5842 * NODE_WHEN => [2, 8, 2, 9, 0],
5843 * NODE_WHEN => [3, 8, 3, 9, 1],
5844 * ...
5845 * }],
5846 * ...
5847 * }
5848 */
5849 VALUE structure = rb_hash_new();
5850 rb_obj_hide(structure);
5851 RARRAY_ASET(branches, 0, structure);
5852 /* branch execution counters */
5853 RARRAY_ASET(branches, 1, rb_ary_tmp_new(0));
5854 }
5855 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5856
5857 return coverage;
5858}
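/*
 * Editor's note: the arrays built above back the Coverage API. An
 * illustrative Ruby session, with a hypothetical file name:
 *
 *   require "coverage"
 *   Coverage.start(lines: true, branches: true)
 *   load "target.rb"
 *   Coverage.result  #=> {"target.rb"=>{:lines=>[1, 2, nil, ...], :branches=>{...}}}
 */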
5859
5860static VALUE
5861uninterruptible_exit(VALUE v)
5862{
5863 rb_thread_t *cur_th = GET_THREAD();
5864 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5865
5866 cur_th->pending_interrupt_queue_checked = 0;
5867 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5868 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5869 }
5870 return Qnil;
5871}
5872
5873VALUE
5874rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5875{
5876 VALUE interrupt_mask = rb_ident_hash_new();
5877 rb_thread_t *cur_th = GET_THREAD();
5878
5879 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5880 OBJ_FREEZE_RAW(interrupt_mask);
5881 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5882
5883 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5884
5885 RUBY_VM_CHECK_INTS(cur_th->ec);
5886 return ret;
5887}
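/*
 * Editor's note: a minimal sketch, not part of the original source, of how
 * rb_uninterruptible() defers asynchronous interrupts (Thread#raise,
 * Thread#kill) while a critical callback runs; critical_body and
 * do_critical_work are hypothetical:
 *
 *   static VALUE
 *   critical_body(VALUE arg)
 *   {
 *       return do_critical_work(arg);   // cannot be interrupted midway
 *   }
 *
 *   VALUE result = rb_uninterruptible(critical_body, data);
 *   // pending interrupts are re-checked once the mask is popped
 */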
unsigned int cnt
Definition: vm_core.h:569
unsigned int blocking_cnt
Definition: vm_core.h:570
rb_nativethread_lock_t workqueue_lock
Definition: vm_core.h:641
unsigned int thread_abort_on_exception
Definition: vm_core.h:606
rb_serial_t fork_gen
Definition: vm_core.h:596
struct rb_waiting_list * next
Definition: vm_core.h:849
struct rb_fiber_struct * fiber
Definition: vm_core.h:851
struct rb_thread_struct * thread
Definition: vm_core.h:850
rb_fdset_t * except
Definition: thread.c:4510
union select_args::@169 as
rb_fdset_t * write
Definition: thread.c:4509
struct timeval * timeout
Definition: io.c:9411
VALUE write
Definition: io.c:9410
int error
Definition: thread.c:4506
VALUE read
Definition: io.c:9410
struct waiting_fd wfd
Definition: thread.c:4511
rb_fdset_t * read
Definition: thread.c:4508
VALUE except
Definition: io.c:9410
struct timeval * tv
Definition: thread.c:4512
int sigwait_fd
Definition: thread.c:4188
rb_fdset_t orig_rset
Definition: thread.c:4193
rb_fdset_t orig_wset
Definition: thread.c:4194
rb_fdset_t * eset
Definition: thread.c:4192
rb_thread_t * th
Definition: thread.c:4189
rb_fdset_t orig_eset
Definition: thread.c:4195
rb_fdset_t * rset
Definition: thread.c:4190
int max
Definition: thread.c:4187
rb_fdset_t * wset
Definition: thread.c:4191
struct timeval * timeout
Definition: thread.c:4196
int enclosed
Definition: thread.c:4841
VALUE group
Definition: thread.c:4842
enum thread_invoke_type type
Definition: thread.c:935
VALUE(* fn)(void *)
Definition: thread.c:945
rb_ractor_t * g
Definition: thread.c:942
int fd
Definition: thread.c:155
struct list_node wfd_node
Definition: thread.c:153
rb_thread_t * th
Definition: thread.c:154
#define vsnprintf
Definition: subst.h:15
int rb_ec_set_raised(rb_execution_context_t *ec)
Definition: thread.c:2587
int rb_thread_check_trap_pending(void)
Definition: thread.c:1587
VALUE rb_get_coverages(void)
Definition: thread.c:5773
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1594
#define threadptr_initialized(th)
Definition: thread.c:1015
void rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
Definition: thread.c:423
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4598
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:669
#define PRI_THREAD_ID
Definition: thread.c:340
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1935
const rb_method_entry_t * rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
Definition: thread.c:5694
int rb_thread_fd_writable(int fd)
Definition: thread.c:4311
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:3247
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:442
#define THREAD_SHIELD_WAITING_MAX
Definition: thread.c:5042
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:3480
VALUE rb_default_coverage(int n)
Definition: thread.c:5815
VALUE rb_thread_create(VALUE(*fn)(void *), void *arg)
Definition: thread.c:1119
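rb_thread_create spawns a Ruby thread whose body is a C function; the VALUE the body returns becomes the thread's value. A sketch, with thread_body and the wrapper as hypothetical names:

static VALUE
thread_body(void *arg)   /* runs holding the GVL, like any Ruby thread */
{
    return INT2FIX(42);  /* becomes the thread's value */
}

static VALUE
spawn_and_collect(void)
{
    VALUE th = rb_thread_create(thread_body, NULL);
    /* Thread#value joins the thread and returns the body's result */
    return rb_funcall(th, rb_intern("value"), 0);
}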
#define THREAD_LOCAL_STORAGE_INITIALISED
Definition: thread.c:122
void rb_clear_coverages(void)
Definition: thread.c:4739
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2694
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1490
#define GetThreadShieldPtr(obj)
Definition: thread.c:5039
int rb_thread_to_be_killed(VALUE thread)
Definition: thread.c:2720
VALUE rb_thread_main(void)
Definition: thread.c:2932
void rb_thread_sleep_forever(void)
Definition: thread.c:1524
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:4330
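rb_thread_fd_select is the GVL-aware counterpart of select(2), taking rb_fdset_t sets, which unlike raw fd_set can grow past FD_SETSIZE. A readable-wait sketch using the rb_fd_* helpers from Ruby's fdset API; wait_readable is a hypothetical helper:

static int
wait_readable(int fd)
{
    rb_fdset_t rset;
    struct timeval tv = {5, 0};   /* 5-second timeout */
    int n;

    rb_fd_init(&rset);
    rb_fd_set(fd, &rset);
    n = rb_thread_fd_select(fd + 1, &rset, NULL, NULL, &tv);
    rb_fd_term(&rset);
    return n > 0;                 /* >0 ready, 0 timeout, -1 error */
}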
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:103
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Definition: thread.c:1670
VALUE rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
Definition: thread.c:1130
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:5040
VALUE rb_io_prep_stderr(void)
Definition: io.c:8223
void rb_thread_fd_close(int fd)
Definition: thread.c:2634
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:191
void rb_sigwait_fd_migrate(rb_vm_t *)
Definition: process.c:1125
VALUE rb_thread_shield_new(void)
Definition: thread.c:5072
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1892
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1987
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1562
VALUE rb_io_prep_stdout(void)
Definition: io.c:8217
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1981
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:5128
SLEEP_FLAGS
Definition: thread.c:117
@ SLEEP_DEADLOCKABLE
Definition: thread.c:118
@ SLEEP_SPURIOUS_CHECK
Definition: thread.c:119
#define TIMESPEC_SEC_MAX
Definition: thread.c:1392
VALUE rb_thread_stop(void)
Definition: thread.c:2844
VALUE rb_io_prep_stdin(void)
Definition: io.c:8211
void rb_thread_wait_fd(int fd)
Definition: thread.c:4305
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1808
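rb_thread_call_without_gvl releases the GVL around a blocking C call so other Ruby threads can run; ubf is invoked on interrupt to knock the call loose, and rb_thread_call_with_gvl (above) reacquires the lock if the native code must call back into Ruby. A sketch around a plain blocking read; the argument struct and wrapper are hypothetical:

#include <unistd.h>   /* read(2), for the sketch */

struct nread_args { int fd; char *buf; size_t len; ssize_t n; };

static void *
blocking_read(void *p)
{
    struct nread_args *a = p;
    a->n = read(a->fd, a->buf, a->len);  /* may block; GVL is released */
    return NULL;
}

static ssize_t
read_without_gvl(int fd, char *buf, size_t len)
{
    struct nread_args a = {fd, buf, len, -1};
    /* RUBY_UBF_IO tells the VM how to knock a thread out of read(2);
     * to call back into Ruby from here, use rb_thread_call_with_gvl */
    rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
    return a.n;
}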
VALUE rb_uninterruptible(VALUE(*b_proc)(VALUE), VALUE data)
Definition: thread.c:5874
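rb_uninterruptible runs b_proc with asynchronous interrupts (Thread#raise, Thread#kill, trap-delivered exceptions) deferred until the block returns, roughly the C-level analogue of Thread.handle_interrupt(Object => :never). A sketch with hypothetical names:

static VALUE
critical_body(VALUE io)
{
    /* async interrupts are queued, not raised, while this runs */
    return rb_funcall(io, rb_intern("flush"), 0);
}

static VALUE
flush_uninterruptibly(VALUE io)
{
    return rb_uninterruptible(critical_body, io);
}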
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:5383
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2565
void rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
Definition: thread.c:5785
#define USE_EVENTFD
Definition: thread.c:385
#define thread_id_str(th)
Definition: thread.c:339
#define do_select_update()
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:5395
#define eKillSignal
Definition: thread.c:148
void rb_thread_atfork_before_exec(void)
Definition: thread.c:4835
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:5088
void rb_thread_sleep_interruptible(void)
Definition: thread.c:1538
void rb_thread_check_ints(void)
Definition: thread.c:1577
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1801
void rb_thread_reset_timer_thread(void)
Definition: thread.c:4696
#define OBJ_ID_EQL(obj_id, other)
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2835
#define thread_debug
Definition: thread.c:333
int rb_notify_fd_close(int fd, struct list_head *busy)
Definition: thread.c:2607
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2788
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:567
#define eTerminateSignal
Definition: thread.c:149
int rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:2424
rb_hrtime_t rb_hrtime_now(void)
Definition: thread.c:1439
void ruby_sigchld_handler(rb_vm_t *)
Definition: signal.c:1090
void rb_thread_sleep_deadly(void)
Definition: thread.c:1531
void rb_thread_terminate_all(rb_thread_t *th)
Definition: thread.c:581
void rb_thread_stop_timer_thread(void)
Definition: thread.c:4688
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:5117
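The ThreadShield entries (rb_thread_shield_new, _wait, _release, _destroy) implement the one-owner gate used by require: the owner creates a shield, other threads calling rb_thread_shield_wait block until the owner releases or destroys it. A lifecycle sketch; the global slot and helper names are hypothetical, and in real code the shield VALUE must stay visible to the GC (e.g. via rb_gc_register_address):

static VALUE shield;   /* hypothetical shared slot for the sketch */

static void
owner_enter(void)
{
    shield = rb_thread_shield_new();   /* calling thread becomes owner */
}

static VALUE
other_thread_wait(void)
{
    /* blocks until release/destroy; Qfalse signals the shield was destroyed */
    return rb_thread_shield_wait(shield);
}

static void
owner_leave(int ok)
{
    if (ok) rb_thread_shield_release(shield);  /* wake waiters, keep shield */
    else    rb_thread_shield_destroy(shield);  /* wake waiters, mark dead */
}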
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2575
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:4586
void rb_thread_atfork(void)
Definition: thread.c:4830
#define fd_init_copy(f)
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:5360
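rb_exec_recursive calls func(obj, arg, recursive) while tracking obj in a per-thread recursion table; on re-entry for the same obj it passes recursive != 0 so the callback can cut the cycle, which is how Array#inspect prints "[...]". The *_paired variants key on (obj, paired_obj); the *_outer variants unwind to the outermost call. A sketch with hypothetical names:

static VALUE
inspect_body(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return rb_str_new_cstr("[...]");  /* cycle: cut it off */
    /* ... walk elements, re-entering via rb_exec_recursive as needed ... */
    return rb_funcall(obj, rb_intern("to_s"), 0);
}

static VALUE
safe_inspect(VALUE obj)
{
    return rb_exec_recursive(inspect_body, obj, 0);
}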
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:448
VALUE rb_thread_current(void)
Definition: thread.c:2911
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:508
#define PRIu64
Definition: thread.c:1482
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:5371
int rb_thread_alone(void)
Definition: thread.c:3757
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
Definition: vm_trace.c:277
int rb_ec_reset_raised(rb_execution_context_t *ec)
Definition: thread.c:2597
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:675
void Init_Thread(void)
Definition: thread.c:5457
void rb_thread_schedule(void)
Definition: thread.c:1623
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:5041
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:104
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Definition: thread.c:430
int rb_get_coverage_mode(void)
Definition: thread.c:5779
int rb_vm_check_ints_blocking(rb_execution_context_t *ec)
Definition: thread.c:222
#define BUSY_WAIT_SIGNALS
Definition: thread.c:381
handle_interrupt_timing
Definition: thread.c:2001
@ INTERRUPT_NONE
Definition: thread.c:2002
@ INTERRUPT_ON_BLOCKING
Definition: thread.c:2004
@ INTERRUPT_NEVER
Definition: thread.c:2005
@ INTERRUPT_IMMEDIATE
Definition: thread.c:2003
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:3628
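rb_thread_local_aref/aset back Thread#[] and Thread#[]=; despite the name they address fiber-local storage (the execution context's local_storage table), keyed by ID. A sketch; the key name is illustrative:

static VALUE
stash_and_fetch(void)
{
    ID key = rb_intern("request_id");   /* hypothetical key */
    rb_thread_local_aset(rb_thread_current(), key, INT2FIX(7));
    /* the same storage Ruby sees as Thread.current[:request_id] */
    return rb_thread_local_aref(rb_thread_current(), key);
}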
void rb_threadptr_root_fiber_terminate(rb_thread_t *th)
Definition: cont.c:2130
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th)
Definition: thread.c:123
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:2525
int ruby_native_thread_p(void)
Definition: thread.c:5564
#define RUBY_VM_CHECK_INTS_BLOCKING(ec)
Definition: thread.c:205
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1815
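rb_thread_io_blocking_region is the IO-flavored blocking region: besides releasing the GVL around func, it registers fd in vm->waiting_fds so rb_thread_fd_close (above) can wake the thread when another thread closes the descriptor. Usage mirrors rb_thread_call_without_gvl, but the callback returns a VALUE; the struct and wrapper below are hypothetical, and read(2) again needs <unistd.h>:

struct io_read_args { int fd; char *buf; size_t len; ssize_t n; };

static VALUE
io_read_func(void *p)
{
    struct io_read_args *a = p;
    a->n = read(a->fd, a->buf, a->len);
    return Qundef;   /* the VALUE result is unused in this sketch */
}

static ssize_t
io_read_blocking(int fd, char *buf, size_t len)
{
    struct io_read_args a = {fd, buf, len, -1};
    /* GVL released around io_read_func; fd registered so that
     * rb_thread_fd_close() can interrupt the wait */
    rb_thread_io_blocking_region(io_read_func, &a, fd);
    return a.n;
}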
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2797
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Definition: thread.c:436
void Init_Thread_Mutex(void)
Definition: thread.c:5432
void rb_reset_coverages(void)
Definition: thread.c:5800
void rb_thread_sleep(int sec)
Definition: thread.c:1600
void rb_thread_start_timer_thread(void)
Definition: thread.c:4702
int rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
Definition: thread.c:4551
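rb_thread_wait_for_single_fd waits for events (a mask of RB_WAITFD_IN/OUT/PRI from ruby/io.h) on one descriptor, returning the ready-event mask, 0 on timeout, or -1 with errno set. A sketch; the helper name is hypothetical:

static int
writable_within(int fd, long msec)
{
    struct timeval tv = {msec / 1000, (msec % 1000) * 1000};
    int ret = rb_thread_wait_for_single_fd(fd, RB_WAITFD_OUT, &tv);
    return ret > 0 && (ret & RB_WAITFD_OUT);
}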
VALUE rb_thread_list(void)
Definition: thread.c:2878
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
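The rb_native_mutex_* functions (declared in ruby/thread_native.h) wrap the platform mutex directly and know nothing about the GVL; the rb_nativethread_lock_* functions above are thin wrappers over them. A sketch with hypothetical names; note that blocking on a native mutex while holding the GVL stalls every other Ruby thread, so hold such locks only briefly:

static rb_nativethread_lock_t lock;   /* hypothetical shared lock */

static void
setup(void)    { rb_native_mutex_initialize(&lock); }

static void
teardown(void) { rb_native_mutex_destroy(&lock); }

static void
guarded(void (*fn)(void))
{
    rb_native_mutex_lock(&lock);   /* raw pthread/Win32 lock; GVL-unaware */
    fn();
    rb_native_mutex_unlock(&lock);
}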
#define ALLOC(size)
Definition: unzip.c:112
unsigned long VALUE
Definition: value.h:38
unsigned long ID
Definition: value.h:39
#define T_STRING
Definition: value_type.h:77
#define T_ICLASS
Definition: value_type.h:65
#define T_HASH
Definition: value_type.h:64
#define T_ARRAY
Definition: value_type.h:55
#define T_OBJECT
Definition: value_type.h:74
#define BUILTIN_TYPE
Definition: value_type.h:84
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3031
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:93
VALUE rb_proc_isolate_bang(VALUE self)
Definition: vm.c:1055
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:3114
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1475
rb_thread_status
Definition: vm_core.h:791
@ THREAD_KILLED
Definition: vm_core.h:795
@ THREAD_STOPPED
Definition: vm_core.h:793
@ THREAD_RUNNABLE
Definition: vm_core.h:792
@ THREAD_STOPPED_FOREVER
Definition: vm_core.h:794
#define RUBY_VM_SET_INTERRUPT(ec)
Definition: vm_core.h:1876
const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
#define TAG_NONE
Definition: vm_core.h:198
#define RUBY_VM_INTERRUPTED(ec)
Definition: vm_core.h:1881
ruby_tag_type
Definition: vm_core.h:185
#define rb_vm_register_special_exception(sp, e, m)
Definition: vm_core.h:1720
#define VM_ASSERT(expr)
Definition: vm_core.h:61
@ ruby_error_stream_closed
Definition: vm_core.h:499
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2001
#define RUBY_EVENT_COVERAGE_LINE
Definition: vm_core.h:2022
typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3]
Definition: vm_core.h:844
#define TAG_FATAL
Definition: vm_core.h:206
#define RUBY_VM_CHECK_INTS(ec)
Definition: vm_core.h:1921
@ TERMINATE_INTERRUPT_MASK
Definition: vm_core.h:1871
@ POSTPONED_JOB_INTERRUPT_MASK
Definition: vm_core.h:1869
@ VM_BARRIER_INTERRUPT_MASK
Definition: vm_core.h:1872
@ TRAP_INTERRUPT_MASK
Definition: vm_core.h:1870
@ TIMER_INTERRUPT_MASK
Definition: vm_core.h:1867
@ PENDING_INTERRUPT_MASK
Definition: vm_core.h:1868
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr)
Definition: vm_eval.c:2421
void rb_thread_wakeup_timer_thread(int)
#define RUBY_EVENT_COVERAGE_BRANCH
Definition: vm_core.h:2023
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1299
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:1083
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)
Definition: vm_core.h:1878
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1676
#define RUBY_DEBUG_LOG(fmt,...)
Definition: vm_debug.h:112
#define RB_VM_LOCK()
Definition: vm_sync.h:113
#define RB_VM_UNLOCK()
Definition: vm_sync.h:114
#define RB_VM_LOCK_ENTER()
Definition: vm_sync.h:121
#define RB_VM_LOCK_LEAVE()
Definition: vm_sync.h:122
Internal header to suppress / mandate warnings.
#define COMPILER_WARNING_PUSH
Definition: warnings.h:13
#define COMPILER_WARNING_POP
Definition: warnings.h:14
#define COMPILER_WARNING_IGNORED(flag)
Definition: warnings.h:16
int err
Definition: win32.c:142
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:1115
#define FD_CLR(f, s)
Definition: win32.h:605
#define CLOCK_MONOTONIC
Definition: win32.h:134
#define FD_ISSET(f, s)
Definition: win32.h:608
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4668
#define FD_SET(fd, set)
Definition: win32.h:587
void rb_write_error_str(VALUE mesg)
Definition: io.c:8083
#define xfree
Definition: xmalloc.h:49
#define xrealloc
Definition: xmalloc.h:47
#define xmalloc
Definition: xmalloc.h:44
int write(ozstream &zs, const T *x, Items items)
Definition: zstream.h:264
int read(izstream &zs, T *x, Items items)
Definition: zstream.h:115