static const int DEBUG = 0;

#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
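/* RB_PAGE_MASK clears the low-order bits of an address, rounding it down to
 * a page boundary; `pagesize` is queried from the OS at initialization (see
 * GetSystemInfo/sysconf near the bottom of this file). */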
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;
#ifdef RB_EXPERIMENTAL_FIBER_POOL
static VALUE rb_cFiberPool;
#endif

#define CAPTURE_JUST_VALID_VM_STACK 1
#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
#define FIBER_POOL_ALLOCATION_FREE
#define FIBER_POOL_INITIAL_SIZE 8
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
#else
#define FIBER_POOL_INITIAL_SIZE 32
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
#endif
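/* On address-space-limited platforms (e.g. 32-bit), the pool starts small
 * and FIBER_POOL_ALLOCATION_FREE enables returning whole allocations to the
 * system; elsewhere, larger allocations are retained for reuse. */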
#ifdef CAPTURE_JUST_VALID_VM_STACK

#ifdef FIBER_POOL_ALLOCATION_FREE

#ifdef FIBER_POOL_ALLOCATION_FREE

#ifdef FIBER_POOL_ALLOCATION_FREE
#define FIBER_CREATED_P(fiber)    ((fiber)->status == FIBER_CREATED)
#define FIBER_RESUMED_P(fiber)    ((fiber)->status == FIBER_RESUMED)
#define FIBER_SUSPENDED_P(fiber)  ((fiber)->status == FIBER_SUSPENDED)
#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
#define FIBER_RUNNABLE_P(fiber)   (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
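/* Only fibers that have not yet started, or that are suspended, can be
 * switched to; resumed and terminated fibers are not runnable. */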
static ID fiber_initialize_keywords[2] = {0};
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif

#define ERRNOMSG strerror(errno)
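/* MAP_STACK is deliberately skipped on FreeBSD, reportedly because it
 * imposes extra constraints on these mmap(2) calls there. */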
fiber_pool_vacancy_pointer(void * base, size_t size)
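/* The vacancy record is stored inside the unused stack memory itself, so
 * tracking free stacks costs no additional allocation. */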
    fiber_pool_stack_reset(&vacancy->stack);

    vacancy->next = head;

#ifdef FIBER_POOL_ALLOCATION_FREE
    head->previous = vacancy;
    vacancy->previous = NULL;
#endif

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (vacancy->next) {
        vacancy->next->previous = vacancy->previous;
    }

    if (vacancy->previous) {
        vacancy->previous->next = vacancy->next;
    }
fiber_pool_vacancy_pop(struct fiber_pool * pool)

    fiber_pool_vacancy_remove(vacancy);

fiber_pool_vacancy_pop(struct fiber_pool * pool)

    fiber_pool_vacancy_reset(vacancy);

    return fiber_pool_vacancy_push(vacancy, vacancies);
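/* fiber_pool_vacancy_pop has two variants: with FIBER_POOL_ALLOCATION_FREE
 * the doubly-linked vacancy list is maintained via fiber_pool_vacancy_remove;
 * otherwise a simple singly-linked LIFO free list suffices. */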
fiber_pool_allocate_memory(size_t * count, size_t stride)

    void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);

        *count = (*count) >> 1;

    if (base == MAP_FAILED) {

        *count = (*count) >> 1;
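/* Both the VirtualAlloc and mmap paths halve the requested stack count on
 * failure, so the caller can retry with progressively smaller batches. */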
#ifdef FIBER_POOL_ALLOCATION_FREE
    allocation->used = 0;
#endif

    for (size_t i = 0; i < count; i += 1) {

        if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
            VirtualFree(allocation->base, 0, MEM_RELEASE);
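/* Each stack in a fresh allocation is capped with a guard page (PAGE_GUARD
 * on Windows, mprotect(PROT_NONE) on POSIX); if protection fails, the whole
 * block is released before raising. */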
        vacancies = fiber_pool_vacancy_initialize(
#ifdef FIBER_POOL_ALLOCATION_FREE

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (allocation->next) {
        allocation->next->previous = allocation;
    }

    allocation->previous = NULL;
#endif
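/* New allocations are pushed onto the head of the pool's allocation list;
 * the previous/next links exist so fiber_pool_allocation_free can unlink an
 * allocation later. */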
#ifdef FIBER_POOL_ALLOCATION_FREE

    if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", allocation, allocation->base, allocation->count);

    for (i = 0; i < allocation->count; i += 1) {

        fiber_pool_vacancy_remove(vacancy);

    VirtualFree(allocation->base, 0, MEM_RELEASE);

    if (allocation->previous) {
        allocation->previous->next = allocation->next;
    }

    if (allocation->next) {
        allocation->next->previous = allocation->previous;
    }
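/* Freeing an allocation: every vacancy pointing into it is unlinked from the
 * free list, the memory is returned to the OS, and the allocation itself is
 * spliced out of the pool's doubly-linked allocation list. */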
#ifdef FIBER_POOL_ALLOCATION_FREE

    fiber_pool_stack_reset(&vacancy->stack);

    return vacancy->stack;

    void * base = fiber_pool_stack_base(stack);
#if VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
    madvise(base, size, MADV_DONTNEED);
#elif defined(MADV_FREE_REUSABLE)
    madvise(base, size, MADV_FREE_REUSABLE);
#elif defined(MADV_FREE)
    madvise(base, size, MADV_FREE);
#elif defined(MADV_DONTNEED)
    madvise(base, size, MADV_DONTNEED);
#elif defined(_WIN32)
    VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
#endif
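/* Preference order for handing stack pages back to the kernel: under
 * VM_CHECK_MODE, MADV_DONTNEED zeroes the pages immediately so stale data is
 * caught early; MADV_FREE_REUSABLE (Darwin) and MADV_FREE are cheaper where
 * available; MEM_RESET is the Windows analogue. */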
    fiber_pool_vacancy_reset(vacancy);

#ifdef FIBER_POOL_ALLOCATION_FREE
    allocation->used -= 1;

    if (allocation->used == 0) {
        fiber_pool_allocation_free(allocation);
    }
    else if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#else
    if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#endif
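/* When an allocation's used count drops to zero it can be unmapped wholesale
 * (FIBER_POOL_ALLOCATION_FREE); otherwise the stack is merely madvised away,
 * subject to the pool's free_stacks flag, and kept for reuse. */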
fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)

    void * vm_stack = NULL;

#ifdef COROUTINE_PRIVATE_STACK

    if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);

    fiber_pool_stack_release(&fiber->stack);
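/* The fiber's VM stack is carved out of the same pool stack region as its
 * machine stack (via fiber_pool_stack_alloca), so releasing the pool stack
 * releases both at once. */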
    switch (fiber->status) {

    rb_ractor_set_current_ec(th->ractor, th->ec = ec);

    if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");
#define THREAD_MUST_BE_RUNNING(th) do { \
        if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
    } while (0)
cont_compact(void * ptr)

#ifdef CAPTURE_JUST_VALID_VM_STACK

    coroutine_destroy(&fiber->context);
    fiber_stack_release(fiber);

cont_memsize(const void * ptr)

    size = sizeof(*cont);

#ifdef CAPTURE_JUST_VALID_VM_STACK

fiber_compact(void * ptr)

    cont_compact(&fiber->cont);
    fiber_verify(fiber);

fiber_mark(void * ptr)

    fiber_verify(fiber);

    cont_mark(&fiber->cont);

fiber_free(void * ptr)

    cont_free(&fiber->cont);

fiber_memsize(const void * ptr)

    size_t size = sizeof(*fiber);

    const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);

    size += cont_memsize(&fiber->cont);

    {cont_mark, cont_free, cont_memsize, cont_compact},
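/* The mark/free/memsize/compact table above wires the Continuation callbacks
 * into its TypedData type (cont_data_type); the fiber equivalent appears
 * further down. */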
    cont_save_thread(cont, th);

    cont_init_mjit_cont(cont);

cont_new(VALUE klass)

    volatile VALUE contval;

    cont->self = contval;
    cont_init(cont, th);

    cont_init_mjit_cont(&fiber->cont);
    while (p < ec->cfp->sp) {
        fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));

    while (cfp != end_of_cfp) {
        fprintf(stderr, "%2d pc: %d\n", i++, pc);
cont_capture(volatile int * volatile stat)

    volatile VALUE contval;

    cont = cont_new(rb_cContinuation);
    contval = cont->self;

#ifdef CAPTURE_JUST_VALID_VM_STACK

    cont_save_machine_stack(th, cont);
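/* cont_capture snapshots the VM stack (only the live portion, under
 * CAPTURE_JUST_VALID_VM_STACK) and then the machine stack; restoring a
 * continuation later copies both back. */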
        *entry++ = p->entry;

    value = cont->value;

    ec_switch(th, fiber);

    ec_switch(th, fiber);

#ifdef CAPTURE_JUST_VALID_VM_STACK

    fiber_restore_thread(th, new_fiber);

    cont_restore_thread(cont);
    _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
    bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
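/* Apparently a workaround for x64 SEH on Windows: the Frame field of the
 * saved jmp_buf is refreshed from a live one so that longjmp does not try to
 * unwind across the copied machine stack. */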
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
#if !STACK_GROW_DIRECTION
    if (addr_in_prev_frame > &space[0]) {

#if STACK_GROW_DIRECTION <= 0
        if (&space[0] > end) {

            cont_restore_0(cont, &space[0]);

#if !STACK_GROW_DIRECTION

#if STACK_GROW_DIRECTION >= 0

#if !STACK_GROW_DIRECTION

    cont_restore_1(cont);
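/* cont_restore_0 keeps recursing (or alloca-ing) STACK_PAD_SIZE pads until
 * the current frame is clear of the stack region about to be overwritten;
 * only then does cont_restore_1 copy the saved machine stack back. */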
rb_callcc(VALUE self)

    volatile int called;
    volatile VALUE val = cont_capture(&called);

    st_table **table_p = &GET_VM()->ensure_rollback_table;

lookup_rollback_func(e_proc *ensure_func)

    st_table *table = GET_VM()->ensure_rollback_table;
    for (p=current; p; p=p->next)

    for (entry=target; entry->marker; entry++)

    base_point = cur_size;
    while (base_point) {
        if (target_size >= base_point &&
            p->entry.marker == target[target_size - base_point].marker)

    for (i=0; i < target_size - base_point; i++) {
        if (!lookup_rollback_func(target[i].e_proc)) {

    while (cur_size > base_point) {
        current = current->next;

    for (j = 0; j < i; j++) {
        func = lookup_rollback_func(target[i - j - 1].e_proc);

        (*func)(target[i - j - 1].data2);
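/* Rolling back the ensure stack: find the deepest common entry between the
 * current and target ensure lists, check that every ensure body to be
 * re-entered has a registered rollback function, then run those rollbacks in
 * reverse order. */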
    if (cont_thread_value(cont) != th->self) {

    cont_restore_0(cont, &contval);

    {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
fiber_alloc(VALUE klass)

fiber_t_alloc(VALUE fiber_value, unsigned int blocking)

    cont_init(&fiber->cont, th);

    rb_fiber_t *fiber = fiber_t_alloc(self, blocking);

rb_fiber_pool_default(VALUE pool)

    return &shared_fiber_pool;

    rb_get_kwargs(options, fiber_initialize_keywords, 0, 2, arguments);

    if (arguments[0] != Qundef) {
        blocking = arguments[0];
    }

    if (arguments[1] != Qundef) {
        pool = arguments[1];
    }

    return fiber_initialize(self, rb_block_proc(), rb_fiber_pool_default(pool), RTEST(blocking));
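/* Fiber.new takes two optional keywords, blocking: and pool:; unset values
 * stay Qundef and fall back to non-blocking mode and the shared fiber pool. */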
    return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 1);

    if (scheduler != Qnil) {
rb_fiber_scheduler(VALUE klass)

rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)

    int need_interrupt = TRUE;

        need_interrupt = TRUE;

    rb_fiber_terminate(fiber, need_interrupt);
    VALUE fiber_value = fiber_alloc(rb_cFiber);

#ifdef COROUTINE_PRIVATE_STACK
    fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
#else
    coroutine_initialize_main(&fiber->context);
#endif

    root_fiber_alloc(rb_ec_thread_ptr(ec));
return_fiber(bool terminate)

        rb_raise(rb_eFiberError, "attempt to yield on a not resumed fiber");

        for (fiber = root_fiber;

        fiber = root_fiber_alloc(th);

    fiber_prepare_stack(next_fiber);

    fiber_setcontext(next_fiber, fiber);

    return make_passing_arg(argc, argv);
    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");

        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");

        value = rb_exc_new2(rb_eFiberError, "dead fiber called");

    cont->value = value;

    if (RTEST(resuming_fiber)) {

        fiber->prev = fiber_current();
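/* fiber_switch validates the target before transferring: it must belong to
 * the current thread, not sit behind a stack-rewinding barrier, and not be
 * dead. A resume also records the current fiber as prev, which is how
 * Fiber.yield later finds its way back. */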
    value = fiber_store(fiber, th);

    fiber_stack_release(fiber);

    if (fiber_current()->blocking) {

    return (fiber_ptr(fiber)->blocking == 0) ? Qfalse : Qtrue;

rb_f_fiber_blocking_p(VALUE klass)

    unsigned blocking = thread->blocking;
rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt)

    coroutine_destroy(&fiber->context);

    next_fiber = return_fiber(true);
        rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");

        rb_raise(rb_eFiberError, "attempt to resume a terminated fiber");

    else if (fiber == current_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume the current fiber");

        rb_raise(rb_eFiberError, "attempt to resume a resumed fiber (double resume)");

        rb_raise(rb_eFiberError, "attempt to resume a resuming fiber");

        rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");

    return fiber_switch(fiber, argc, argv, kw_splat, fiber_value, false);

    return fiber_switch(return_fiber(false), argc, argv, kw_splat, Qfalse, true);
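/* These two fiber_switch calls implement Fiber#resume (switch to the named
 * fiber, marking it as resuming) and Fiber.yield (switch back to the fiber
 * located by return_fiber(false)). */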
        rb_raise(rb_eFiberError, "attempt to raise a resuming fiber");

    return rb_fiber_transfer_kw(fiber_value, -1, &exc, RB_NO_KEYWORDS);

        rb_raise(rb_eFiberError, "attempt to transfer to a resuming fiber");

        rb_raise(rb_eFiberError, "attempt to transfer to a yielding fiber");
rb_fiber_s_current(VALUE klass)

fiber_to_s(VALUE fiber_value)

    const rb_fiber_t *fiber = fiber_ptr(fiber_value);

    char status_info[0x20];

        snprintf(status_info, 0x20, " (%s by resuming)", fiber_status_name(fiber->status));

        snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));

        strlcat(status_info, ">", sizeof(status_info));
#ifdef HAVE_WORKING_FORK

#ifdef RB_EXPERIMENTAL_FIBER_POOL
fiber_pool_free(void * ptr)

fiber_pool_memsize(const void * ptr)

    size_t size = sizeof(*fiber_pool);

    {NULL, fiber_pool_free, fiber_pool_memsize,},

fiber_pool_alloc(VALUE klass)
rb_fiber_scheduler_interface_close(VALUE self)

rb_fiber_scheduler_interface_process_wait(VALUE self)

rb_fiber_scheduler_interface_io_wait(VALUE self)

rb_fiber_scheduler_interface_kernel_sleep(VALUE self)

rb_fiber_scheduler_interface_block(VALUE self)

rb_fiber_scheduler_interface_unblock(VALUE self)

rb_fiber_scheduler_interface_fiber(VALUE self)
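/* These rb_fiber_scheduler_interface_* functions appear to be documentation
 * stubs for Fiber::SchedulerInterface; in practice any object implementing
 * these hooks can be installed as the scheduler. */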
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;

    pagesize = sysconf(_SC_PAGESIZE);

    fiber_initialize_keywords[0] = rb_intern_const("blocking");
    fiber_initialize_keywords[1] = rb_intern_const("pool");

    char * fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
    if (fiber_shared_fiber_pool_free_stacks) {
        shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
    }
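/* RUBY_SHARED_FIBER_POOL_FREE_STACKS presumably toggles whether released
 * stacks are madvised away (see fiber_pool_stack_free above); setting it to
 * 0 keeps them resident for faster reuse. */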
    rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);

    rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_interface_close, 0);
    rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_interface_process_wait, 0);
    rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_interface_io_wait, 0);
    rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_interface_kernel_sleep, 0);
    rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_interface_block, 0);
    rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_interface_unblock, 0);
    rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler_interface_fiber, 0);

#ifdef RB_EXPERIMENTAL_FIBER_POOL
    rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
#endif
RUBY_SYMBOL_EXPORT_BEGIN

#ifdef HAVE_RB_EXT_RACTOR_SAFE

RUBY_SYMBOL_EXPORT_END