#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0
#include "internal/error.h"
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#define THREAD_DEBUG 0
static VALUE rb_cThreadShield;
static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;
#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
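/* Illustrative sketch (not part of thread.c): the FL_USER13 flag above
 * marks Thread objects whose local-storage hash has already been created,
 * so rb_thread_local_storage() (excerpted just below) can take the fast
 * path on every later access.  A minimal version of that lazy-init
 * pattern, using a hypothetical "locals" instance-variable name, would
 * look like: */
static VALUE
thread_local_storage_sketch(VALUE thread)
{
    if (!THREAD_LOCAL_STORAGE_INITIALISED_P(thread)) {
        /* slow path, taken once: create the hash, then publish the flag */
        rb_ivar_set(thread, rb_intern("locals"), rb_ident_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, rb_intern("locals"));
}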
rb_thread_local_storage(VALUE thread)
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);
static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
static volatile int system_working = 1;
#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
static void unblock_function_clear(rb_thread_t *th);
#define GVL_UNLOCK_BEGIN(th) do { \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    gvl_release(rb_ractor_gvl(th->ractor));
#define GVL_UNLOCK_END(th) \
    gvl_acquire(rb_ractor_gvl(th->ractor), th); \
    rb_ractor_thread_switch(th->ractor, th); \
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#define only_if_constant(expr, notconst) notconst
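/* Illustrative note (not part of thread.c): only_if_constant(expr, notconst)
 * evaluates to expr when the compiler can prove expr is a compile-time
 * constant, and to notconst otherwise.  For example, under the
 * __builtin_choose_expr variant:
 *
 *     only_if_constant(1, 0)       ->  1   (__builtin_constant_p(1) holds)
 *     only_if_constant(flag, 0)    ->  0   (a runtime variable is not constant)
 *
 * BLOCKING_REGION below relies on this so that a literal
 * fail_if_interrupted argument folds the interrupt test away entirely. */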
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        blocking_region_end(th, &__region); \
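/* Illustrative sketch (not part of thread.c): the shape of a
 * BLOCKING_REGION call site, modeled on rb_thread_io_blocking_region()
 * further below; `th`, `fd`, `buf`, `len`, `ret` and `saved_errno` are
 * hypothetical locals.  Passing a literal FALSE for fail_if_interrupted
 * lets only_if_constant() fold the interrupt test away, so the region is
 * entered unconditionally. */
BLOCKING_REGION(th, {
    /* GVL is released here; do only GVL-free, interruptible work */
    ret = read(fd, buf, len);
    if (ret < 0) saved_errno = errno;
}, ubf_select, th, FALSE);
/* GVL has been re-acquired; callers then run RUBY_VM_CHECK_INTS_BLOCKING(). */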
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    return vm_check_ints_blocking(ec);
#if defined(HAVE_POLL)
# if defined(__linux__)
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
# define POLLERR_SET (POLLHUP | POLLERR)
    *rel = rb_timeval2hrtime(timeout);
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
# ifdef NON_SCALAR_THREAD_ID
#define fill_thread_id_string ruby_fill_thread_id_string
    for (i = 0; i < sizeof(thid); i++) {
        size_t j = sizeof(thid) - i - 1;
        unsigned char c = (unsigned char)((char *)&thid)[j];
# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
# define thread_id_str(th) ((th)->thread_id_string)
# define PRI_THREAD_ID "s"
static int rb_thread_debug_enabled;
    return INT2NUM(rb_thread_debug_enabled);
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
# define rb_thread_debug_enabled THREAD_DEBUG
#define thread_debug if(0)printf
#ifndef fill_thread_id_str
# define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
# define fill_thread_id_str(th) (void)0
# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
# define PRI_THREAD_ID "p"
ubf_sigwait(void *ignore)
    WaitForSingleObject(&debug_mutex, INFINITE); \
    printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
    ReleaseMutex(&debug_mutex);
#elif defined(HAVE_PTHREAD_H)
    pthread_mutex_lock(&debug_mutex); \
    printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
           fill_thread_id_string(pthread_self(), thread_id_string), buf); \
    pthread_mutex_unlock(&debug_mutex);
#error "unsupported thread type"
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
# define USE_EVENTFD (0)
static int debug_mutex_initialized = 1;
static rb_nativethread_lock_t debug_mutex;
    const char *file, int line,
    const char *fmt, ...)
#ifdef NON_SCALAR_THREAD_ID
    if (!rb_thread_debug_enabled) return;
    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
    if (fail_if_interrupted) {
        if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
    rb_threadptr_interrupt_common(th, 0);
    rb_threadptr_interrupt_common(th, 1);
        if (th != main_thread) {
    thread_debug("terminate_all: main thread (%p)\n", (void *)th);
    switch (target_thread->status) {
        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    volatile int sleeping = 0;
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
        terminate_all(cr, th);
            native_sleep(th, &rel);
thread_cleanup_func_before_exec(void *th_ptr)
thread_cleanup_func(void *th_ptr, int atfork)
    thread_cleanup_func_before_exec(th_ptr);
    native_thread_destroy(th);
    native_thread_init_stack(th);
    const VALUE *ep = vm_proc_ep(proc);
    const VALUE *args_ptr;
    vm_check_ints_blocking(th->ec);
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        vm_check_ints_blocking(th->ec);
        args_len = RARRAY_LENINT(args);
    vm_check_ints_blocking(th->ec);
    native_set_thread_name(th);
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
      case thread_invoke_type_func:
      case thread_invoke_type_none:
    ruby_thread_set_native(th);
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
    thread_debug("thread start (get lock): %p\n", (void *)th);
        if (!NIL_P(exc)) errinfo = exc;
        if (th->invoke_type == thread_invoke_type_ractor_proc) {
            rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
    rb_threadptr_join_list_wakeup(th);
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    rb_check_deadlock(th->ractor);
    thread_cleanup_func(th, FALSE);
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
                 "can't start a new thread (frozen ThreadGroup)");
    switch (params->type) {
      case thread_invoke_type_proc:
      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
      case thread_invoke_type_func:
    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;
    err = native_thread_create(th);
#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
    th = rb_thread_ptr(thread);
        .type = thread_invoke_type_proc,
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        .type = thread_invoke_type_proc,
    return thread_create_core(thread, &params);
        .type = thread_invoke_type_func,
        .type = thread_invoke_type_ractor_proc,
remove_from_join_list(VALUE arg)
    while (*join_list) {
        if (*join_list == p->waiter) {
            *join_list = (*join_list)->next;
        join_list = &(*join_list)->next;
thread_join_sleep(VALUE arg)
        rel = rb_sec2hrtime(NUM2TIMET(p->timeout));
    while (!thread_finished(target_th)) {
        if (scheduler != Qnil) {
        }
        else if (!limit) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
            native_sleep(th, 0);
            rb_ractor_sleeper_threads_dec(th->ractor);
            if (hrtime_update_expire(limit, end)) {
            native_sleep(th, limit);
    if (th == target_th) {
    waiter.fiber = fiber;
    arg.target = target_th;
        rb_bug("thread_join: THROW_DATA should not reach here.");
    return target_th->self;
    return thread_join(rb_thread_ptr(self), timeout);
thread_value(VALUE self)
    thread_join(th, Qnil);
#define TIMESPEC_SEC_MAX TIMET_MAX
#define TIMESPEC_SEC_MIN TIMET_MIN
#if __has_warning("-Wimplicit-int-float-conversion")
#elif defined(_MSC_VER)
    const double TIMESPEC_SEC_MAX_PLUS_ONE = 2.0 * (TIMESPEC_SEC_MAX_as_double / 2.0 + 1.0);
    if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    return rb_timespec2hrtime(&ts);
    th->status = status;
    while (th->status == status) {
        rb_ractor_sleeper_threads_inc(th->ractor);
        rb_check_deadlock(th->ractor);
        native_sleep(th, 0);
        rb_ractor_sleeper_threads_dec(th->ractor);
        woke = vm_check_ints_blocking(th->ec);
    th->status = prev_status;
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
#define PRIu64 PRI_64_PREFIX "u"
    if (now > end) return 1;
    *timeout = end - now;
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (hrtime_update_expire(&rel, end))
    th->status = prev_status;
    native_sleep(th, 0);
    th->status = prev_status;
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
    if (scheduler != Qnil) {
    thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
rb_thread_schedule_limits(uint32_t limits_us)
        rb_ractor_thread_switch(th->ractor, th);
    rb_thread_schedule_limits(0);
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
    thread_debug("enter blocking region (%p)\n", (void *)th);
    unblock_function_clear(th);
    unregister_ubf_list(th);
    rb_ractor_thread_switch(th->ractor, th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    int saved_errno = 0;
        ubf_th = rb_thread_start_unblock_thread();
            saved_errno = errno;
    errno = saved_errno;
    return rb_nogvl(func, data1, ubf, data2, 0);
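/* Illustrative sketch (not part of thread.c): how a C extension hands a
 * slow, GVL-free computation to rb_thread_call_without_gvl(), which is
 * the public wrapper over rb_nogvl() above.  In an extension this needs
 * #include <ruby/thread.h>.  The struct and function names here are
 * hypothetical; RUBY_UBF_IO is the stock unblocking function. */
struct work { int input; int result; };

static void *
do_work(void *p)                  /* runs with the GVL released */
{
    struct work *w = p;
    w->result = w->input * 2;     /* stand-in for a slow C computation */
    return NULL;
}

static VALUE
ext_compute(VALUE self, VALUE n)
{
    struct work w = { NUM2INT(n), 0 };
    rb_thread_call_without_gvl(do_work, &w, RUBY_UBF_IO, 0);
    return INT2NUM(w.result);
}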
    volatile int saved_errno = 0;
    wfd->th = rb_ec_thread_ptr(ec);
    list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd->wfd_node);
        saved_errno = errno;
    }, ubf_select, wfd->th, FALSE);
    list_del(&wfd->wfd_node);
    errno = saved_errno;
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    blocking_region_end(th, brb);
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
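/* Illustrative sketch (not part of thread.c): re-entering the VM from a
 * GVL-free section with rb_thread_call_with_gvl(), the inverse of the
 * call above, e.g. to report progress from a worker.  Names are
 * hypothetical. */
static void *
log_with_gvl(void *msg)           /* runs with the GVL re-acquired */
{
    rb_funcall(rb_stderr, rb_intern("puts"), 1,
               rb_str_new_cstr((const char *)msg));
    return NULL;
}

static void *
gvl_free_worker(void *arg)        /* runs inside rb_thread_call_without_gvl() */
{
    /* ... GVL-free work ... */
    rb_thread_call_with_gvl(log_with_gvl, (void *)"checkpoint reached");
    return NULL;
}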
thread_s_pass(VALUE klass)
threadptr_check_pending_interrupt_queue(rb_thread_t *th)
    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];
        if (sym == sym_immediate) {
        else if (sym == sym_on_blocking) {
        else if (sym == sym_never) {
rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
    switch (mask_timing) {
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
threadptr_pending_interrupt_active_p(rb_thread_t *th)
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
    rb_thread_t *target_th = rb_thread_ptr(target_thread);
    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
    if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
static inline rb_atomic_t
    rb_atomic_t interrupt;
    } while (old != interrupt);
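/* Illustrative sketch (not part of thread.c): the do/while above is the
 * classic compare-and-swap retry loop used to fetch-and-mask the
 * interrupt flag word atomically.  Written out for a plain counter, with
 * a hypothetical helper name; ATOMIC_CAS(var, old, new) stores `new`
 * only if var still holds `old`, and returns the value it observed. */
static rb_atomic_t
fetch_and_clear_sketch(volatile rb_atomic_t *flags, rb_atomic_t keep_mask)
{
    rb_atomic_t cur, seen;
    do {
        cur = *flags;                                     /* snapshot */
        seen = ATOMIC_CAS(*flags, cur, cur & keep_mask);  /* try to publish */
    } while (seen != cur);                                /* lost a race: retry */
    return cur & ~keep_mask;                              /* bits we consumed */
}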
    rb_atomic_t interrupt;
    int postponed_job_interrupt = 0;
    while ((interrupt = threadptr_get_interrupts(th)) != 0) {
        int timer_interrupt;
        int pending_interrupt;
        int terminate_interrupt;
        if (postponed_job_interrupt) {
            if (sigwait_fd >= 0) {
                (void)consume_communication_pipe(sigwait_fd);
        if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
                terminate_interrupt = 1;
        if (terminate_interrupt) {
            rb_threadptr_to_kill(th);
        if (timer_interrupt) {
            uint32_t limits_us = TIME_QUANTUM_USEC;
            rb_thread_schedule_limits(limits_us);
    if (rb_threadptr_dead(target_th)) {
    if (rb_threadptr_dead(target_th)) {
    rb_ec_setup_exception(GET_EC(), exc, Qundef);
    rb_vm_t *vm = GET_THREAD()->vm;
        if (wfd->fd == fd) {
    return !list_empty(busy);
    struct list_head busy;
    list_head_init(&busy);
    threadptr_check_pending_interrupt_queue(target_th);
    rb_threadptr_raise(target_th, argc, argv);
    if (current_th == target_th) {
    if (th == GET_THREAD()) {
        rb_threadptr_to_kill(th);
        threadptr_check_pending_interrupt_queue(th);
    rb_threadptr_ready(target_th);
                 "stopping only thread\n\tnote: use sleep to stop forever");
    return GET_THREAD()->self;
thread_s_current(VALUE klass)
    return GET_RACTOR()->threads.main->self;
rb_thread_s_main(VALUE klass)
rb_thread_s_abort_exc(VALUE _)
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
rb_thread_abort_exc(VALUE thread)
    return rb_thread_ptr(thread)->abort_on_exception ? Qtrue : Qfalse;
rb_thread_abort_exc_set(VALUE thread, VALUE val)
    rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
rb_thread_s_report_exc(VALUE _)
    return GET_THREAD()->vm->thread_report_on_exception ? Qtrue : Qfalse;
rb_thread_s_report_exc_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
rb_thread_s_ignore_deadlock(VALUE _)
    return GET_THREAD()->vm->thread_ignore_deadlock ? Qtrue : Qfalse;
rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
rb_thread_report_exc(VALUE thread)
    return rb_thread_ptr(thread)->report_on_exception ? Qtrue : Qfalse;
rb_thread_report_exc_set(VALUE thread, VALUE val)
    rb_thread_ptr(thread)->report_on_exception = RTEST(val);
    VALUE group = rb_thread_ptr(thread)->thgroup;
    return group == 0 ? Qnil : group;
    switch (th->status) {
        return th->to_kill ? "aborting" : "run";
        if (detail) return "sleep_forever";
    if (rb_threadptr_dead(target_th)) {
rb_thread_alive_p(VALUE thread)
    if (thread_finished(rb_thread_ptr(thread))) {
rb_thread_stop_p(VALUE thread)
    if (rb_threadptr_dead(th)) {
rb_thread_getname(VALUE thread)
    return rb_thread_ptr(thread)->name;
rb_thread_to_s(VALUE thread)
    status = thread_status_name(target_th, TRUE);
    if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
static ID recursive_key;
    if (id == recursive_key) {
    return threadptr_local_aref(rb_thread_ptr(thread), id);
    if (!id) return Qnil;
    if (block_given && argc == 2) {
        rb_warn("block supersedes default value argument");
    if (id == recursive_key) {
    else if (block_given) {
    else if (argc == 1) {
    if (id == recursive_key) {
    if (!local_storage) return Qnil;
    if (local_storage == NULL) {
    return threadptr_local_aset(rb_thread_ptr(thread), id, val);
    locals = rb_thread_local_storage(thread);
    locals = rb_thread_local_storage(thread);
    struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
    if (!id || local_storage == NULL) {
rb_thread_keys(VALUE self)
    struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
    if (local_storage) {
rb_thread_variables(VALUE thread)
    locals = rb_thread_local_storage(thread);
    locals = rb_thread_local_storage(thread);
rb_thread_priority(VALUE thread)
    return INT2NUM(rb_thread_ptr(thread)->priority);
rb_thread_priority_set(VALUE thread, VALUE prio)
#if USE_NATIVE_THREAD_PRIORITY
    native_thread_apply_priority(th);
    target_th->priority = (int8_t)priority;
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
    FD_ZERO(fds->fdset);
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);
    memset((char *)fds->fdset + o, 0, m - o);
    if (n >= fds->maxfd) return;
    if (n >= fds->maxfd) return 0;
    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    r = rb_fd_ptr(readfds);
    w = rb_fd_ptr(writefds);
    e = rb_fd_ptr(exceptfds);
    return select(n, r, w, e, timeout);
#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
#define FD_ZERO(f) rb_fd_zero(f)
#define FD_SET(i, f) rb_fd_set((i), (f))
#define FD_CLR(i, f) rb_fd_clr((i), (f))
#define FD_ISSET(i, f) rb_fd_isset((i), (f))
#elif defined(_WIN32)
    set->capa = FD_SETSIZE;
    FD_ZERO(set->fdset);
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
            set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
    set->fdset->fd_array[set->fdset->fd_count++] = s;
#define FD_ZERO(f) rb_fd_zero(f)
#define FD_SET(i, f) rb_fd_set((i), (f))
#define FD_CLR(i, f) rb_fd_clr((i), (f))
#define FD_ISSET(i, f) rb_fd_isset((i), (f))
#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
#ifndef rb_fd_no_init
#define rb_fd_no_init(fds) (void)(fds)
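/* Illustrative sketch (not part of thread.c): the rb_fd_* wrappers above
 * provide a heap-resizable fd_set where the platform allows it.  Typical
 * use together with rb_thread_fd_select(); the function and `sock_fd`
 * parameter are hypothetical. */
static int
wait_readable_sketch(int sock_fd)
{
    rb_fdset_t rfds;
    struct timeval tv = { 1, 0 };          /* 1-second timeout */
    int n;

    rb_fd_init(&rfds);
    rb_fd_set(sock_fd, &rfds);             /* watch one descriptor */
    n = rb_thread_fd_select(sock_fd + 1, &rfds, NULL, NULL, &tv);
    if (n > 0 && rb_fd_isset(sock_fd, &rfds)) {
        /* sock_fd is readable */
    }
    rb_fd_term(&rfds);                     /* release any heap storage */
    return n;
}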
    if (rel && hrtime_update_expire(rel, end)) {
    else if (*result == 0) {
        return !hrtime_update_expire(rel, end);
select_set_free(VALUE p)
    static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
    if (!orig || *orig > quantum)
    timeout_prepare(&to, &rel, &end, set->timeout);
#define restore_fdset(dst, src) \
    ((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
    (restore_fdset(set->rset, &set->orig_rset), \
     restore_fdset(set->wset, &set->orig_wset), \
     restore_fdset(set->eset, &set->orig_eset), \
        const rb_hrtime_t *sto;
        sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
        if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
            result = native_fd_select(set->max, set->rset, set->wset,
                                      rb_hrtime2timeval(&tv, sto), set->th);
            if (result < 0) lerrno = errno;
        (void)check_signals_nogvl(set->th, -1);
    return (VALUE)result;
rb_thread_wait_fd_rw(int fd, int read)
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
    rb_thread_wait_fd_rw(fd, 1);
    rb_thread_wait_fd_rw(fd, 0);
    set.th = GET_THREAD();
#define fd_init_copy(f) do { \
        rb_fd_resize(set.max - 1, set.f); \
        if (&set.orig_##f != set.f) { \
            rb_fd_init_copy(&set.orig_##f, set.f); \
            rb_fd_no_init(&set.orig_##f); \
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
# define POLLERR_SET (0)
    struct pollfd fds[2];
    int result = 0, lerrno;
    wfd.th = GET_THREAD();
    list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
    timeout_prepare(&to, &rel, &end, timeout);
    fds[0].events = (short)events;
    if (fds[1].fd >= 0) {
        fds[1].events = POLLIN;
        const rb_hrtime_t *sto;
        sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
        if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
            result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
            if (result < 0) lerrno = errno;
    }, ubf, wfd.th, TRUE);
    if (fds[1].fd >= 0) {
        if (result > 0 && fds[1].revents) {
            (void)check_signals_nogvl(wfd.th, fds[1].fd);
            (void)check_signals_nogvl(wfd.th, -1);
    } while (wait_retryable(&result, lerrno, to, end));
    list_del(&wfd.wfd_node);
    if (fds[0].revents & POLLNVAL) {
    if (fds[0].revents & POLLIN_SET)
    if (fds[0].revents & POLLOUT_SET)
    if (fds[0].revents & POLLEX_SET)
    if (fds[0].revents & POLLERR_SET)
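/* Illustrative sketch (not part of thread.c): the POLL*_SET masks above
 * translate poll(2) revents into the RB_WAITFD_* bits that callers of the
 * public rb_wait_for_single_fd() receive.  The function name and `fd`
 * parameter here are hypothetical. */
static int
poll_one_fd_sketch(int fd)
{
    struct timeval tv = { 0, 500000 };     /* 500 ms */
    int ev = rb_wait_for_single_fd(fd, RB_WAITFD_IN | RB_WAITFD_PRI, &tv);

    if (ev < 0)  return -1;                /* error; errno is set */
    if (ev == 0) return 0;                 /* timed out */
    return ev & RB_WAITFD_IN;              /* nonzero when readable */
}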
    args.wfd.th = GET_THREAD();
#ifdef USE_CONSERVATIVE_STACK_END
    *stack_end_p = &stack_end;
    threadptr_trap_interrupt(mth);
async_bug_fd(const char *mesg, int errno_arg, int fd)
    size_t n = strlcpy(buff, mesg, sizeof(buff));
    if (n < sizeof(buff)-3) {
consume_communication_pipe(int fd)
    static char buff[1024];
        result = read(fd, buff, sizeof(buff));
            if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
        else if (result == 0) {
        else if (result < 0) {
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
            async_bug_fd("consume_communication_pipe: read", e, fd);
check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
    int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
    ubf_wakeup_all_threads();
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();
    native_reset_timer_thread();
    rb_thread_create_timer_thread();
        RARRAY_ASET(lines, i, INT2FIX(0));
        RARRAY_ASET(counters, i, INT2FIX(0));
    if (RTEST(coverages)) {
#if defined(HAVE_WORKING_FORK)
    list_for_each(&vm->ractor.set, r, vmlr_node) {
    rb_vm_living_threads_init(vm);
    rb_ractor_sleeper_threads_clear(th->ractor);
    if (th != current_th) {
        rb_mutex_abandon_keeping_mutexes(th);
        rb_mutex_abandon_locking_mutex(th);
        thread_cleanup_func(th, TRUE);
    rb_thread_atfork_internal(th, terminate_atfork_i);
    rb_fiber_atfork(th);
    mjit_child_after_fork();
    if (th != current_th) {
        thread_cleanup_func_before_exec(th);
    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
thgroup_memsize(const void *ptr)
    return sizeof(struct thgroup);
thgroup_s_alloc(VALUE klass)
    list_for_each(&r->threads.set, th, lt_node) {
             "can't move from the enclosed thread group");
thread_shield_mark(void *ptr)
    {thread_shield_mark, 0, 0,},
thread_shield_alloc(VALUE klass)
#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
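/* Illustrative note (not part of thread.c): a ThreadShield's
 * waiting-thread count is packed into the FL_USER0..FL_USER19 bits of the
 * object header rather than kept in a separate field.  Under the macros
 * above, reading and bumping the counter amounts to:
 *
 *     w = (RBASIC(b)->flags & THREAD_SHIELD_WAITING_MASK)
 *             >> THREAD_SHIELD_WAITING_SHIFT;            // current count
 *     RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;   // clear the field
 *     RBASIC(b)->flags |= (VALUE)(w + 1) << THREAD_SHIELD_WAITING_SHIFT;
 *
 * with THREAD_SHIELD_WAITING_MAX bounding the count. */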
static inline unsigned int
rb_thread_shield_waiting(VALUE b)
rb_thread_shield_waiting_inc(VALUE b)
    unsigned int w = rb_thread_shield_waiting(b);
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
rb_thread_shield_waiting_dec(VALUE b)
    unsigned int w = rb_thread_shield_waiting(b);
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    return thread_shield;
    if (!mutex) return Qfalse;
    m = mutex_ptr(mutex);
    if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
    rb_thread_shield_waiting_inc(self);
    rb_thread_shield_waiting_dec(self);
    return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
thread_shield_get_mutex(VALUE self)
    VALUE mutex = thread_shield_get_mutex(self);
    return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
    VALUE mutex = thread_shield_get_mutex(self);
    return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
    VALUE hash = threadptr_recursive_hash(th);
        threadptr_recursive_hash_set(th, hash);
#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
      rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
    if (paired_obj_id) {
        if (!RB_TYPE_P(pair_list, T_HASH)) {
    if (!RB_TYPE_P(pair_list, T_HASH)) {
        VALUE other_paired_obj = pair_list;
    if (pair_list == Qundef) {
    if (RB_TYPE_P(pair_list, T_HASH)) {
    p.list = recursive_list_access(sym);
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
    if (recursive_check(p.list, p.obj, pairid)) {
        if (outer && !outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            recursive_push(p.list, p.obj, p.pairid);
            if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
        if (result == p.list) {
            recursive_push(p.list, p.obj, p.pairid);
            if (!recursive_pop(p.list, p.obj, p.pairid)) {
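/* Illustrative sketch (not part of thread.c): rb_exec_recursive() drives
 * the recursive_push/recursive_pop machinery above.  A typical client is
 * an inspect-like function that must not loop on self-referential data;
 * the function names here are hypothetical. */
static VALUE
inspect_guard(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return rb_str_new_cstr("[...]");   /* cycle detected */
    return rb_inspect(obj);                           /* normal path */
}

static VALUE
safe_inspect(VALUE obj)
{
    return rb_exec_recursive(inspect_guard, obj, 0);
}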
    sym_never = ID2SYM(rb_intern_const("never"));
    sym_immediate = ID2SYM(rb_intern_const("immediate"));
    sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
         "stream closed in another thread");
    recursive_key = rb_intern_const("__recursive_key__");
    gvl_acquire(gvl, th);
    rb_thread_create_timer_thread();
    rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
    list_for_each(&r->threads.set, th, lt_node) {
        if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
                        (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
    int sleeper_num = rb_ractor_sleeper_thread_num(r);
    if (ltnum > sleeper_num) return;
    if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
    if (patrol_thread && patrol_thread != GET_THREAD()) return;
    list_for_each(&r->threads.set, th, lt_node) {
        debug_deadlock_check(r, argv[1]);
        rb_ractor_sleeper_threads_dec(GET_RACTOR());
    VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
    switch (me->def->type) {
        rb_iseq_check(iseq);
    if (!me) return NULL;
    if (RB_TYPE_P(path, T_ARRAY)) {
    if (resolved_location) {
        resolved_location[0] = path;
        resolved_location[1] = beg_pos_lineno;
        resolved_location[2] = beg_pos_column;
        resolved_location[3] = end_pos_lineno;
        resolved_location[4] = end_pos_column;
    return GET_VM()->coverages;
    return GET_VM()->coverage_mode;
    GET_VM()->coverages = coverages;
    GET_VM()->coverage_mode = mode;
    GET_VM()->coverages = Qfalse;
    int mode = GET_VM()->coverage_mode;
    RARRAY_ASET(branches, 0, structure);
uninterruptible_exit(VALUE v)
    if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {