Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
cont.c
/**********************************************************************

  cont.c -

  $Author$
  created at: Thu May 23 09:03:43 2007

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/internal/config.h"

#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#endif

#include COROUTINE_H

#include "eval_intern.h"
#include "gc.h"
#include "internal.h"
#include "internal/cont.h"
#include "internal/proc.h"
#include "internal/warnings.h"
#include "internal/scheduler.h"
#include "mjit.h"
#include "vm_core.h"
#include "id_table.h"
#include "ractor_core.h"

static const int DEBUG = 0;

#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;

static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;
#ifdef RB_EXPERIMENTAL_FIBER_POOL
static VALUE rb_cFiberPool;
#endif

#define CAPTURE_JUST_VALID_VM_STACK 1

// Defined in `coroutine/$arch/Context.h`:
#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
#define FIBER_POOL_ALLOCATION_FREE
#define FIBER_POOL_INITIAL_SIZE 8
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
#else
#define FIBER_POOL_INITIAL_SIZE 32
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
#endif

enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1
};

struct cont_saved_vm_stack {
    VALUE *ptr;
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t slen;  /* length of stack (head of ec->vm_stack) */
    size_t clen;  /* length of control frames (tail of ec->vm_stack) */
#endif
};

struct fiber_pool;

// Represents a single stack.
struct fiber_pool_stack {
    // A pointer to the memory allocation (lowest address) for the stack.
    void * base;

    // The current stack pointer, taking into account the direction of the stack.
    void * current;

    // The size of the stack excluding any guard pages.
    size_t size;

    // The available stack capacity w.r.t. the current stack offset.
    size_t available;

    // The pool this stack should be allocated from.
    struct fiber_pool * pool;

    // If the stack is allocated, the allocation it came from.
    struct fiber_pool_allocation * allocation;
};

// A linked list of vacant (unused) stacks.
// This structure is stored in the first page of a stack if it is not in use.
// @sa fiber_pool_vacancy_pointer
struct fiber_pool_vacancy {
    // Details about the vacant stack:
    struct fiber_pool_stack stack;

    // The vacancy linked list.
#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_vacancy * previous;
#endif
    struct fiber_pool_vacancy * next;
};

// Manages a singly linked list of mapped regions of memory which contain 1 or more stacks:
//
//  base = +-------------------------------+-----------------------+  +
//         |VM Stack       |VM Stack       |                       |  |
//         |               |               |                       |  |
//         |               |               |                       |  |
//         +-------------------------------+                       |  |
//         |Machine Stack  |Machine Stack  |                       |  |
//         |               |               |                       |  |
//         |               |               |                       |  |
//         |               |               | .  .  .  .            |  | size
//         |               |               |                       |  |
//         |               |               |                       |  |
//         |               |               |                       |  |
//         |               |               |                       |  |
//         |               |               |                       |  |
//         +-------------------------------+                       |  |
//         |Guard Page     |Guard Page     |                       |  |
//         +-------------------------------+-----------------------+  v
//
//         +------------------------------------------------------->
//
//                                  count
//
struct fiber_pool_allocation {
    // A pointer to the memory mapped region.
    void * base;

    // The size of the individual stacks.
    size_t size;

    // The stride of individual stacks (including any guard pages or other accounting details).
    size_t stride;

    // The number of stacks that were allocated.
    size_t count;

#ifdef FIBER_POOL_ALLOCATION_FREE
    // The number of stacks used in this allocation.
    size_t used;
#endif

    struct fiber_pool * pool;

    // The allocation linked list.
#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_allocation * previous;
#endif
    struct fiber_pool_allocation * next;
};

// A fiber pool manages vacant stacks to reduce the overhead of creating fibers.
struct fiber_pool {
    // A singly-linked list of allocations which contain 1 or more stacks each.
    struct fiber_pool_allocation * allocations;

    // Provides O(1) stack "allocation":
    struct fiber_pool_vacancy * vacancies;

    // The size of the stack allocations (excluding any guard page).
    size_t size;

    // The total number of stacks that have been allocated in this pool.
    size_t count;

    // The initial number of stacks to allocate.
    size_t initial_count;

    // Whether to madvise(free) the stack or not:
    int free_stacks;

    // The number of stacks that have been used in this pool.
    size_t used;

    // The amount to allocate for the vm_stack:
    size_t vm_stack_size;
};

typedef struct rb_context_struct {
    enum context_type type;
    int argc;
    int kw_splat;
    VALUE self;
    VALUE value;

    struct cont_saved_vm_stack saved_vm_stack;

    struct {
        VALUE *stack;
        VALUE *stack_src;
        size_t stack_size;
    } machine;
    rb_execution_context_t saved_ec;
    rb_jmpbuf_t jmpbuf;
    rb_ensure_entry_t *ensure_array;
    /* Pointer to MJIT info about the continuation. */
    struct mjit_cont *mjit_cont;
} rb_context_t;


/*
 * Fiber status:
 *    [Fiber.new] ------> FIBER_CREATED
 *                        | [Fiber#resume]
 *                        v
 *                   +--> FIBER_RESUMED ----+
 *    [Fiber#resume] |    | [Fiber.yield]   |
 *                   |    v                 |
 *                   +-- FIBER_SUSPENDED    | [Terminate]
 *                                          |
 *                       FIBER_TERMINATED <-+
 */
enum fiber_status {
    FIBER_CREATED,
    FIBER_RESUMED,
    FIBER_SUSPENDED,
    FIBER_TERMINATED
};

#define FIBER_CREATED_P(fiber)    ((fiber)->status == FIBER_CREATED)
#define FIBER_RESUMED_P(fiber)    ((fiber)->status == FIBER_RESUMED)
#define FIBER_SUSPENDED_P(fiber)  ((fiber)->status == FIBER_SUSPENDED)
#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
#define FIBER_RUNNABLE_P(fiber)   (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))

struct rb_fiber_struct {
    rb_context_t cont;
    VALUE first_proc;
    struct rb_fiber_struct *prev;
    VALUE resuming_fiber;

    BITFIELD(enum fiber_status, status, 2);
    /* Whether the fiber is allowed to implicitly yield. */
    unsigned int yielding : 1;
    unsigned int blocking : 1;

    struct coroutine_context context;
    struct fiber_pool_stack stack;
};

static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};

static ID fiber_initialize_keywords[2] = {0};

/*
 * FreeBSD requires that the first (i.e. addr) argument of mmap(2) be non-NULL
 * if MAP_STACK is passed.
 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
 */
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif

#define ERRNOMSG strerror(errno)

// Locates the stack vacancy details for the given stack.
// Requires that fiber_pool_vacancy fits within one page.
inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_pointer(void * base, size_t size)
{
    STACK_GROW_DIR_DETECTION;

    return (struct fiber_pool_vacancy *)(
        (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
    );
}
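
// Illustrative sketch (not part of the original source): assuming a 4 KiB page
// and a 128 KiB stack that grows downward, the vacancy record lives in the
// top-most page of the stack's memory. The helper and the numbers below are
// hypothetical, for illustration only.
#if 0
static void
fiber_pool_vacancy_pointer_example(void)
{
    static char base[128 * 1024];   // Stands in for a pooled stack allocation.
    size_t size = sizeof(base);

    // On a descending stack, STACK_DIR_UPPER(0, size - RB_PAGE_SIZE) selects its
    // second argument, so the vacancy structure is placed in the highest page:
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);

    // vacancy == (struct fiber_pool_vacancy *)(base + size - RB_PAGE_SIZE)
    (void)vacancy;
}
#endif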

// Reset the current stack pointer and available size of the given stack.
inline static void
fiber_pool_stack_reset(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
    stack->available = stack->size;
}

// A pointer to the base of the current unused portion of the stack.
inline static void *
fiber_pool_stack_base(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(stack->current);

    return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);
}

// Allocate some memory from the stack. Used to allocate vm_stack inline with machine stack.
// @sa fiber_initialize_coroutine
inline static void *
fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
{
    STACK_GROW_DIR_DETECTION;

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
    VM_ASSERT(stack->available >= offset);

    // The pointer to the memory being allocated:
    void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);

    // Move the stack pointer:
    stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
    stack->available -= offset;

    return pointer;
}
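
// Illustrative sketch (not part of the original source): how a caller carves the
// VM stack out of the top of a pooled machine stack. The 128 KiB figure is an
// assumption; fiber_initialize_coroutine below does this for real using
// fiber_pool->vm_stack_size.
#if 0
static void
fiber_pool_stack_alloca_example(struct fiber_pool * pool)
{
    // fiber_pool_stack_acquire is defined later in this file.
    struct fiber_pool_stack stack = fiber_pool_stack_acquire(pool);

    // Reserve 128 KiB at the current stack offset for the Ruby VM stack; the
    // remaining stack.available bytes stay usable as the machine stack:
    VALUE * vm_stack = fiber_pool_stack_alloca(&stack, 128 * 1024);
    (void)vm_stack;
}
#endif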

// Reset the current stack pointer and available size of the given stack.
inline static void
fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
{
    fiber_pool_stack_reset(&vacancy->stack);

    // Consume one page of the stack because it's used for the vacancy list:
    fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
}

inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
{
    vacancy->next = head;

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (head) {
        head->previous = vacancy;
        vacancy->previous = NULL;
    }
#endif

    return vacancy;
}

#ifdef FIBER_POOL_ALLOCATION_FREE
static void
fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
{
    if (vacancy->next) {
        vacancy->next->previous = vacancy->previous;
    }

    if (vacancy->previous) {
        vacancy->previous->next = vacancy->next;
    }
    else {
        // It's the head of the list:
        vacancy->stack.pool->vacancies = vacancy->next;
    }
}

inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_pop(struct fiber_pool * pool)
{
    struct fiber_pool_vacancy * vacancy = pool->vacancies;

    if (vacancy) {
        fiber_pool_vacancy_remove(vacancy);
    }

    return vacancy;
}
#else
inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_pop(struct fiber_pool * pool)
{
    struct fiber_pool_vacancy * vacancy = pool->vacancies;

    if (vacancy) {
        pool->vacancies = vacancy->next;
    }

    return vacancy;
}
#endif

// Initialize the vacant stack. The [base, size] allocation should not include the guard page.
// @param base The pointer to the lowest address of the allocated memory.
// @param size The size of the allocated memory.
inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
{
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);

    vacancy->stack.base = base;
    vacancy->stack.size = size;

    fiber_pool_vacancy_reset(vacancy);

    vacancy->stack.pool = fiber_pool;

    return fiber_pool_vacancy_push(vacancy, vacancies);
}

// Allocate a maximum of count stacks, size given by stride.
// @param count the number of stacks to allocate / were allocated.
// @param stride the size of the individual stacks.
// @return [void *] the allocated memory or NULL if allocation failed.
inline static void *
fiber_pool_allocate_memory(size_t * count, size_t stride)
{
    // We use a divide-by-2 strategy to try and allocate memory. We are trying
    // to allocate `count` stacks. In normal situations, this won't fail. But
    // if we run out of address space, or we are allocating more memory than
    // the system would allow (e.g. overcommit * physical memory + swap), we
    // divide count by two and try again. This condition should only be
    // encountered in edge cases, but we handle it here gracefully.
    while (*count > 1) {
#if defined(_WIN32)
        void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);

        if (!base) {
            *count = (*count) >> 1;
        }
        else {
            return base;
        }
#else
        errno = 0;
        void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);

        if (base == MAP_FAILED) {
            // If the allocation fails, count = count / 2, and try again.
            *count = (*count) >> 1;
        }
        else {
            return base;
        }
#endif
    }

    return NULL;
}
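
// Note (editorial, not in the original source): with the divide-by-2 strategy, a
// request for 1024 stacks that cannot be mapped is retried with 512, 256, ...,
// and the function gives up (returns NULL) once *count reaches 1. On success,
// *count reports how many stacks the returned region actually holds.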

// Given an existing fiber pool, expand it by the specified number of stacks.
// @param count the maximum number of stacks to allocate.
// @return the allocated fiber pool.
// @sa fiber_pool_allocation_free
static struct fiber_pool_allocation *
fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
{
    STACK_GROW_DIR_DETECTION;

    size_t size = fiber_pool->size;
    size_t stride = size + RB_PAGE_SIZE;

    // Allocate the memory required for the stacks:
    void * base = fiber_pool_allocate_memory(&count, stride);

    if (base == NULL) {
        rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
    }

    struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
    struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);

    // Initialize fiber pool allocation:
    allocation->base = base;
    allocation->size = size;
    allocation->stride = stride;
    allocation->count = count;
#ifdef FIBER_POOL_ALLOCATION_FREE
    allocation->used = 0;
#endif
    allocation->pool = fiber_pool;

    if (DEBUG) {
        fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
                count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
    }

    // Iterate over all stacks, initializing the vacancy list:
    for (size_t i = 0; i < count; i += 1) {
        void * base = (char*)allocation->base + (stride * i);
        void * page = (char*)base + STACK_DIR_UPPER(size, 0);

#if defined(_WIN32)
        DWORD old_protect;

        if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
            VirtualFree(allocation->base, 0, MEM_RELEASE);
            rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
        }
#else
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            munmap(allocation->base, count*stride);
            rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
        }
#endif

        vacancies = fiber_pool_vacancy_initialize(
            fiber_pool, vacancies,
            (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
            size
        );

#ifdef FIBER_POOL_ALLOCATION_FREE
        vacancies->stack.allocation = allocation;
#endif
    }

    // Insert the allocation into the head of the pool:
    allocation->next = fiber_pool->allocations;

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (allocation->next) {
        allocation->next->previous = allocation;
    }

    allocation->previous = NULL;
#endif

    fiber_pool->allocations = allocation;
    fiber_pool->vacancies = vacancies;
    fiber_pool->count += count;

    return allocation;
}

// Initialize the specified fiber pool with the given number of stacks.
// @param vm_stack_size The size of the vm stack to allocate.
static void
fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)
{
    VM_ASSERT(vm_stack_size < size);

    fiber_pool->allocations = NULL;
    fiber_pool->vacancies = NULL;
    fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
    fiber_pool->count = 0;
    fiber_pool->initial_count = count;
    fiber_pool->free_stacks = 1;
    fiber_pool->used = 0;

    fiber_pool->vm_stack_size = vm_stack_size;

    fiber_pool_expand(fiber_pool, count);
}
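
// Illustrative sketch (not part of the original source): how a pool could be set
// up. The sizes are assumptions for illustration; the real values are derived
// from the configured fiber machine-stack and VM-stack sizes at initialization.
#if 0
static void
fiber_pool_initialize_example(void)
{
    static struct fiber_pool pool;

    // 1 MiB machine stacks (rounded up by a page internally), 32 stacks mapped
    // up front, and 128 KiB of each stack reserved for the Ruby VM stack:
    fiber_pool_initialize(&pool, 1024 * 1024, 32, 128 * 1024);
}
#endif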

#ifdef FIBER_POOL_ALLOCATION_FREE
// Free the list of fiber pool allocations.
static void
fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
{
    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(allocation->used == 0);

    if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", allocation, allocation->base, allocation->count);

    size_t i;
    for (i = 0; i < allocation->count; i += 1) {
        void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);

        struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);

        // Pop the vacant stack off the free list:
        fiber_pool_vacancy_remove(vacancy);
    }

#ifdef _WIN32
    VirtualFree(allocation->base, 0, MEM_RELEASE);
#else
    munmap(allocation->base, allocation->stride * allocation->count);
#endif

    if (allocation->previous) {
        allocation->previous->next = allocation->next;
    }
    else {
        // We are the head of the list, so update the pool:
        allocation->pool->allocations = allocation->next;
    }

    if (allocation->next) {
        allocation->next->previous = allocation->previous;
    }

    allocation->pool->count -= allocation->count;

    ruby_xfree(allocation);
}
#endif

// Acquire a stack from the given fiber pool. If none are available, allocate more.
static struct fiber_pool_stack
fiber_pool_stack_acquire(struct fiber_pool * fiber_pool) {
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);

    if (!vacancy) {
        const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
        const size_t minimum = fiber_pool->initial_count;

        size_t count = fiber_pool->count;
        if (count > maximum) count = maximum;
        if (count < minimum) count = minimum;

        fiber_pool_expand(fiber_pool, count);

        // The free list should now contain some stacks:
        VM_ASSERT(fiber_pool->vacancies);

        vacancy = fiber_pool_vacancy_pop(fiber_pool);
    }

    VM_ASSERT(vacancy);
    VM_ASSERT(vacancy->stack.base);

    // Take the top item from the free list:
    fiber_pool->used += 1;

#ifdef FIBER_POOL_ALLOCATION_FREE
    vacancy->stack.allocation->used += 1;
#endif

    fiber_pool_stack_reset(&vacancy->stack);

    return vacancy->stack;
}
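
// Note (editorial, not in the original source): because `count` starts from the
// pool's current size, each expansion roughly doubles the pool, clamped to the
// range [initial_count, FIBER_POOL_ALLOCATION_MAXIMUM_SIZE]. With an initial
// count of 32, successive expansions add 32, 64, 128, ... stacks.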

// We advise the operating system that the stack memory pages are no longer being used.
// This introduces some performance overhead but allows the system to reclaim memory when there is pressure.
static inline void
fiber_pool_stack_free(struct fiber_pool_stack * stack)
{
    void * base = fiber_pool_stack_base(stack);
    size_t size = stack->available;

    // If this is not true, the vacancy information will almost certainly be destroyed:
    VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"]\n", base, size, stack->base, stack->size);

#if VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
    // This immediately discards the pages and the memory is reset to zero.
    madvise(base, size, MADV_DONTNEED);
#elif defined(MADV_FREE_REUSABLE)
    madvise(base, size, MADV_FREE_REUSABLE);
#elif defined(MADV_FREE)
    madvise(base, size, MADV_FREE);
#elif defined(MADV_DONTNEED)
    madvise(base, size, MADV_DONTNEED);
#elif defined(_WIN32)
    VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
    // Not available in all versions of Windows.
    //DiscardVirtualMemory(base, size);
#endif
}

// Release and return a stack to the vacancy list.
static void
fiber_pool_stack_release(struct fiber_pool_stack * stack)
{
    struct fiber_pool * pool = stack->pool;
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);

    // Copy the stack details into the vacancy area:
    vacancy->stack = *stack;
    // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.

    // Reset the stack pointers and reserve space for the vacancy data:
    fiber_pool_vacancy_reset(vacancy);

    // Push the vacancy into the vacancies list:
    pool->vacancies = fiber_pool_vacancy_push(vacancy, stack->pool->vacancies);
    pool->used -= 1;

#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_allocation * allocation = stack->allocation;

    allocation->used -= 1;

    // Release address space and/or dirty memory:
    if (allocation->used == 0) {
        fiber_pool_allocation_free(allocation);
    }
    else if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#else
    // This is entirely optional, but clears the dirty flag from the stack memory, so it won't get swapped to disk when there is memory pressure:
    if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#endif
}

static COROUTINE
fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
{
    rb_fiber_start();
}

// Initialize a fiber's coroutine's machine stack and vm stack.
static VALUE *
fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
{
    struct fiber_pool * fiber_pool = fiber->stack.pool;
    rb_execution_context_t *sec = &fiber->cont.saved_ec;
    void * vm_stack = NULL;

    VM_ASSERT(fiber_pool != NULL);

    fiber->stack = fiber_pool_stack_acquire(fiber_pool);
    vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
    *vm_stack_size = fiber_pool->vm_stack_size;

#ifdef COROUTINE_PRIVATE_STACK
    coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, sec->machine.stack_start);
    // The stack for this execution context is still the main machine stack, so don't adjust it.
    // If this is not managed correctly, you will fail in `rb_ec_stack_check`.

    // We limit the machine stack usage to the fiber stack size.
    if (sec->machine.stack_maxsize > fiber->stack.available) {
        sec->machine.stack_maxsize = fiber->stack.available;
    }
#else
    coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);

    // The stack for this execution context is the one we allocated:
    sec->machine.stack_start = fiber->stack.current;
    sec->machine.stack_maxsize = fiber->stack.available;
#endif

    return vm_stack;
}
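
// Note (editorial, not in the original source): on a machine stack that grows
// downward, the acquired stack is laid out roughly as follows after
// fiber_initialize_coroutine:
//
//   stack.base                                      stack.base + stack.size
//   +---------------------------------------+---------------------------+
//   | machine stack (grows down) <- current | vm_stack (vm_stack_size)  |
//   +---------------------------------------+---------------------------+
//
// The VM stack is carved from the top of the allocation, and the coroutine's
// machine stack grows down into the remaining `stack.available` bytes. The
// guard page sits just below stack.base.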

// Release the stack from the fiber, its execution context, and return it to the fiber pool.
static void
fiber_stack_release(rb_fiber_t * fiber)
{
    rb_execution_context_t *ec = &fiber->cont.saved_ec;

    if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);

    // Return the stack back to the fiber pool if it wasn't already:
    if (fiber->stack.base) {
        fiber_pool_stack_release(&fiber->stack);
        fiber->stack.base = NULL;
    }

    // The stack is no longer associated with this execution context:
    rb_ec_clear_vm_stack(ec);
}

static const char *
fiber_status_name(enum fiber_status s)
{
    switch (s) {
      case FIBER_CREATED: return "created";
      case FIBER_RESUMED: return "resumed";
      case FIBER_SUSPENDED: return "suspended";
      case FIBER_TERMINATED: return "terminated";
    }
    VM_UNREACHABLE(fiber_status_name);
    return NULL;
}

static void
fiber_verify(const rb_fiber_t *fiber)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);

    switch (fiber->status) {
      case FIBER_RESUMED:
        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
        break;
      case FIBER_SUSPENDED:
        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
        break;
      case FIBER_CREATED:
      case FIBER_TERMINATED:
        /* TODO */
        break;
      default:
        VM_UNREACHABLE(fiber_verify);
    }
#endif
}

inline static void
fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
{
    // if (DEBUG) fprintf(stderr, "fiber: %p, status: %s -> %s\n", (void *)fiber, fiber_status_name(fiber->status), fiber_status_name(s));

    VM_ASSERT(fiber->status != s);
    fiber_verify(fiber);
    fiber->status = s;
}

static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
    rb_execution_context_t *ec = &fiber->cont.saved_ec;
    rb_ractor_set_current_ec(th->ractor, th->ec = ec);
    // ruby_current_execution_context_ptr = th->ec = ec;

    /*
     * timer-thread may set trap interrupt on previous th->ec at any time;
     * ensure we do not delay (or lose) the trap interrupt handling.
     */
    if (th->vm->ractor.main_thread == th &&
        rb_signal_buff_size() > 0) {
        RUBY_VM_SET_TRAP_INTERRUPT(ec);
    }

    VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
}

static rb_context_t *
cont_ptr(VALUE obj)
{
    rb_context_t *cont;

    TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);

    return cont;
}

static rb_fiber_t *
fiber_ptr(VALUE obj)
{
    rb_fiber_t *fiber;

    TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);
    if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");

    return fiber;
}

NOINLINE(static VALUE cont_capture(volatile int *volatile stat));

#define THREAD_MUST_BE_RUNNING(th) do { \
        if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
    } while (0)

rb_thread_t *
rb_fiber_threadptr(const rb_fiber_t *fiber)
{
    return fiber->cont.saved_ec.thread_ptr;
}

static VALUE
cont_thread_value(const rb_context_t *cont)
{
    return cont->saved_ec.thread_ptr->self;
}

static void
cont_compact(void *ptr)
{
    rb_context_t *cont = ptr;

    if (cont->self) {
        cont->self = rb_gc_location(cont->self);
    }
    cont->value = rb_gc_location(cont->value);
    rb_execution_context_update(&cont->saved_ec);
}

static void
cont_mark(void *ptr)
{
    rb_context_t *cont = ptr;

    RUBY_MARK_ENTER("cont");
    if (cont->self) {
        rb_gc_mark_movable(cont->self);
    }
    rb_gc_mark_movable(cont->value);

    rb_execution_context_mark(&cont->saved_ec);
    rb_gc_mark(cont_thread_value(cont));

    if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
        rb_gc_mark_locations(cont->saved_vm_stack.ptr,
                             cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
#else
        rb_gc_mark_locations(cont->saved_vm_stack.ptr,
                             cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
#endif
    }

    if (cont->machine.stack) {
        if (cont->type == CONTINUATION_CONTEXT) {
            /* cont */
            rb_gc_mark_locations(cont->machine.stack,
                                 cont->machine.stack + cont->machine.stack_size);
        }
        else {
            /* fiber */
            const rb_fiber_t *fiber = (rb_fiber_t*)cont;

            if (!FIBER_TERMINATED_P(fiber)) {
                rb_gc_mark_locations(cont->machine.stack,
                                     cont->machine.stack + cont->machine.stack_size);
            }
        }
    }

    RUBY_MARK_LEAVE("cont");
}

#if 0
static int
fiber_is_root_p(const rb_fiber_t *fiber)
{
    return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
}
#endif

static void
cont_free(void *ptr)
{
    rb_context_t *cont = ptr;

    RUBY_FREE_ENTER("cont");

    if (cont->type == CONTINUATION_CONTEXT) {
        ruby_xfree(cont->saved_ec.vm_stack);
        ruby_xfree(cont->ensure_array);
        RUBY_FREE_UNLESS_NULL(cont->machine.stack);
    }
    else {
        rb_fiber_t *fiber = (rb_fiber_t*)cont;
        coroutine_destroy(&fiber->context);
        fiber_stack_release(fiber);
    }

    RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);

    if (mjit_enabled) {
        VM_ASSERT(cont->mjit_cont != NULL);
        mjit_cont_free(cont->mjit_cont);
    }
    /* free rb_cont_t or rb_fiber_t */
    ruby_xfree(ptr);
    RUBY_FREE_LEAVE("cont");
}

static size_t
cont_memsize(const void *ptr)
{
    const rb_context_t *cont = ptr;
    size_t size = 0;

    size = sizeof(*cont);
    if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
        size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
#else
        size_t n = cont->saved_ec.vm_stack_size;
#endif
        size += n * sizeof(*cont->saved_vm_stack.ptr);
    }

    if (cont->machine.stack) {
        size += cont->machine.stack_size * sizeof(*cont->machine.stack);
    }

    return size;
}

void
rb_fiber_update_self(rb_fiber_t *fiber)
{
    if (fiber->cont.self) {
        fiber->cont.self = rb_gc_location(fiber->cont.self);
    }
    else {
        rb_execution_context_update(&fiber->cont.saved_ec);
    }
}

void
rb_fiber_mark_self(const rb_fiber_t *fiber)
{
    if (fiber->cont.self) {
        rb_gc_mark_movable(fiber->cont.self);
    }
    else {
        rb_execution_context_mark(&fiber->cont.saved_ec);
    }
}

static void
fiber_compact(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    fiber->first_proc = rb_gc_location(fiber->first_proc);

    if (fiber->prev) rb_fiber_update_self(fiber->prev);

    cont_compact(&fiber->cont);
    fiber_verify(fiber);
}

static void
fiber_mark(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    RUBY_MARK_ENTER("cont");
    fiber_verify(fiber);
    rb_gc_mark_movable(fiber->first_proc);
    if (fiber->prev) rb_fiber_mark_self(fiber->prev);
    cont_mark(&fiber->cont);
    RUBY_MARK_LEAVE("cont");
}

static void
fiber_free(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    RUBY_FREE_ENTER("fiber");

    //if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", fiber, fiber->stack.base);

    if (fiber->cont.saved_ec.local_storage) {
        rb_id_table_free(fiber->cont.saved_ec.local_storage);
    }

    cont_free(&fiber->cont);
    RUBY_FREE_LEAVE("fiber");
}

static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fiber = ptr;
    size_t size = sizeof(*fiber);
    const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
    const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);

    /*
     * vm.c::thread_memsize already counts th->ec->local_storage
     */
    if (saved_ec->local_storage && fiber != th->root_fiber) {
        size += rb_id_table_memsize(saved_ec->local_storage);
    }
    size += cont_memsize(&fiber->cont);
    return size;
}

VALUE
rb_obj_is_fiber(VALUE obj)
{
    if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}

static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
        size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
        cont->machine.stack_src = th->ec->machine.stack_end;
    }
    else {
        size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
        cont->machine.stack_src = th->ec->machine.stack_start;
    }

    if (cont->machine.stack) {
        REALLOC_N(cont->machine.stack, VALUE, size);
    }
    else {
        cont->machine.stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
}

static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize, cont_compact},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static inline void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    rb_execution_context_t *sec = &cont->saved_ec;

    VM_ASSERT(th->status == THREAD_RUNNABLE);

    /* save thread context */
    *sec = *th->ec;

    /* saved_ec->machine.stack_end should be NULL */
    /* because GC may happen afterwards */
    sec->machine.stack_end = NULL;
}

static void
cont_init_mjit_cont(rb_context_t *cont)
{
    VM_ASSERT(cont->mjit_cont == NULL);
    if (mjit_enabled) {
        cont->mjit_cont = mjit_cont_new(&(cont->saved_ec));
    }
}

static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_ec.thread_ptr = th;
    cont->saved_ec.local_storage = NULL;
    cont->saved_ec.local_storage_recursive_hash = Qnil;
    cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
    cont_init_mjit_cont(cont);
}

static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}

VALUE rb_fiberptr_self(struct rb_fiber_struct *fiber)
{
    return fiber->cont.self;
}

unsigned int rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
{
    return fiber->blocking;
}

// This is used for root_fiber because other fibers call cont_init_mjit_cont through cont_new.
void
rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
{
    cont_init_mjit_cont(&fiber->cont);
}

#if 0
void
show_vm_stack(const rb_execution_context_t *ec)
{
    VALUE *p = ec->vm_stack;
    while (p < ec->cfp->sp) {
        fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
        rb_obj_info_dump(*p);
        p++;
    }
}

void
show_vm_pcs(const rb_control_frame_t *cfp,
            const rb_control_frame_t *end_of_cfp)
{
    int i=0;
    while (cfp != end_of_cfp) {
        int pc = 0;
        if (cfp->iseq) {
            pc = cfp->pc - cfp->iseq->body->iseq_encoded;
        }
        fprintf(stderr, "%2d pc: %d\n", i++, pc);
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
#endif
COMPILER_WARNING_PUSH
#ifdef __clang__
COMPILER_WARNING_IGNORED(-Wduplicate-decl-specifier)
#endif
static VALUE
cont_capture(volatile int *volatile stat)
{
    rb_context_t *volatile cont;
    rb_thread_t *th = GET_THREAD();
    volatile VALUE contval;
    const rb_execution_context_t *ec = th->ec;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th->ec);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
    cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
    MEMCPY(cont->saved_vm_stack.ptr,
           ec->vm_stack,
           VALUE, cont->saved_vm_stack.slen);
    MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
           (VALUE*)ec->cfp,
           VALUE,
           cont->saved_vm_stack.clen);
#else
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
    MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
#endif
    // At this point, `cfp` is valid but `vm_stack` should be cleared:
    rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
    VM_ASSERT(cont->saved_ec.cfp != NULL);
    cont_save_machine_stack(th, cont);

    /* backup ensure_list to array for search in another context */
    {
        rb_ensure_list_t *p;
        int size = 0;
        rb_ensure_entry_t *entry;
        for (p=th->ec->ensure_list; p; p=p->next)
            size++;
        entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
        for (p=th->ec->ensure_list; p; p=p->next) {
            if (!p->entry.marker)
                p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
            *entry++ = p->entry;
        }
        entry->marker = 0;
    }

    if (ruby_setjmp(cont->jmpbuf)) {
        VALUE value;

        VAR_INITIALIZED(cont);
        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        *stat = 0;
        return contval;
    }
}
COMPILER_WARNING_POP

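// Note (editorial, not in the original source): like setjmp, ruby_setjmp returns
// twice. It returns 0 when the continuation is first captured, so cont_capture
// returns the Continuation object with *stat == 0; it returns non-zero when
// rb_cont_call later longjmps back, so cont_capture returns the passed value
// with *stat == 1.
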
static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
{
    ec_switch(th, fiber);
    VM_ASSERT(th->ec->fiber_ptr == fiber);
}

static inline void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD();

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        rb_execution_context_t *sec = &cont->saved_ec;
        rb_fiber_t *fiber = NULL;

        if (sec->fiber_ptr != NULL) {
            fiber = sec->fiber_ptr;
        }
        else if (th->root_fiber) {
            fiber = th->root_fiber;
        }

        if (fiber && th->ec != &fiber->cont.saved_ec) {
            ec_switch(th, fiber);
        }

        if (th->ec->trace_arg != sec->trace_arg) {
            rb_raise(rb_eRuntimeError, "can't call across trace_func");
        }

        /* copy vm stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
        MEMCPY(th->ec->vm_stack,
               cont->saved_vm_stack.ptr,
               VALUE, cont->saved_vm_stack.slen);
        MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
               cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
               VALUE, cont->saved_vm_stack.clen);
#else
        MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
#endif
        /* other members of ec */

        th->ec->cfp = sec->cfp;
        th->ec->raised_flag = sec->raised_flag;
        th->ec->tag = sec->tag;
        th->ec->protect_tag = sec->protect_tag;
        th->ec->root_lep = sec->root_lep;
        th->ec->root_svar = sec->root_svar;
        th->ec->ensure_list = sec->ensure_list;
        th->ec->errinfo = sec->errinfo;

        VM_ASSERT(th->ec->vm_stack != NULL);
    }
    else {
        /* fiber */
        fiber_restore_thread(th, (rb_fiber_t*)cont);
    }
}

NOINLINE(static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));

static void
fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
{
    rb_thread_t *th = GET_THREAD();

    /* save old_fiber's machine stack - to ensure efficient garbage collection */
    if (!FIBER_TERMINATED_P(old_fiber)) {
        STACK_GROW_DIR_DETECTION;
        SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
            old_fiber->cont.machine.stack = th->ec->machine.stack_end;
        }
        else {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
            old_fiber->cont.machine.stack = th->ec->machine.stack_start;
        }
    }

    /* exchange machine_stack_start between old_fiber and new_fiber */
    old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;

    /* old_fiber->machine.stack_end should be NULL */
    old_fiber->cont.saved_ec.machine.stack_end = NULL;

    /* restore thread context */
    fiber_restore_thread(th, new_fiber);

    // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);

    /* swap machine context */
    coroutine_transfer(&old_fiber->context, &new_fiber->context);

    // It's possible to get here, and new_fiber is already freed.
    // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
}

NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH */
        jmp_buf buf;
        setjmp(buf);
        _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
        bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
    }
#endif
    if (cont->machine.stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine.stack_src, cont->machine.stack,
               VALUE, cont->machine.stack_size);
    }

    ruby_longjmp(cont->jmpbuf, 1);
}

NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine.stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine.stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
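
// Note (editorial, not in the original source): cont_restore_0 makes sure the
// current C frame does not overlap the machine-stack region that cont_restore_1
// is about to overwrite. If it does, it either alloca()s the difference (pushing
// the live frame past that region) or recurses to grow the stack, and only then
// copies the saved stack back and longjmps.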

/*
 *  Document-class: Continuation
 *
 *  Continuation objects are generated by Kernel#callcc,
 *  after having +require+d <i>continuation</i>. They hold
 *  a return address and execution context, allowing a nonlocal return
 *  to the end of the #callcc block from anywhere within a
 *  program. Continuations are somewhat analogous to a structured
 *  version of C's <code>setjmp/longjmp</code> (although they contain
 *  more state, so you might consider them closer to threads).
 *
 *  For instance:
 *
 *     require "continuation"
 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *     callcc{|cc| $cc = cc}
 *     puts(message = arr.shift)
 *     $cc.call unless message =~ /Max/
 *
 *  <em>produces:</em>
 *
 *     Freddie
 *     Herbie
 *     Ron
 *     Max
 *
 *  You can also call callcc from other methods:
 *
 *     require "continuation"
 *
 *     def g
 *       arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *       cc = callcc { |cc| cc }
 *       puts arr.shift
 *       return cc, arr.size
 *     end
 *
 *     def f
 *       c, size = g
 *       c.call(c) if size > 1
 *     end
 *
 *     f
 *
 *  This (somewhat contrived) example allows the inner loop to abandon
 *  processing early:
 *
 *     require "continuation"
 *     callcc {|cont|
 *       for i in 0..4
 *         print "#{i}: "
 *         for j in i*5...(i+1)*5
 *           cont.call() if j == 17
 *           printf "%3d", j
 *         end
 *       end
 *     }
 *     puts
 *
 *  <em>produces:</em>
 *
 *     0:  0  1  2  3  4
 *     1:  5  6  7  8  9
 *     2: 10 11 12 13 14
 *     3: 15 16
 */

/*
 *  call-seq:
 *     callcc {|cont| block }   ->  obj
 *
 *  Generates a Continuation object, which it passes to
 *  the associated block. You need to <code>require
 *  'continuation'</code> before using this method. Performing a
 *  <em>cont</em><code>.call</code> will cause the #callcc
 *  to return (as will falling through the end of the block). The
 *  value returned by the #callcc is the value of the
 *  block, or the value passed to <em>cont</em><code>.call</code>. See
 *  class Continuation for more details. Also see
 *  Kernel#throw for an alternative mechanism for
 *  unwinding a call stack.
 */

static VALUE
rb_callcc(VALUE self)
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        return val;
    }
    else {
        return rb_yield(val);
    }
}

static VALUE
make_passing_arg(int argc, const VALUE *argv)
{
    switch (argc) {
      case -1:
        return argv[0];
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}

typedef VALUE e_proc(VALUE);

/* CAUTION!! : Currently, errors in rollback_func are not supported. */
/* Same as rb_protect if rollback_func is set to NULL. */
void
ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
{
    st_table **table_p = &GET_VM()->ensure_rollback_table;
    if (UNLIKELY(*table_p == NULL)) {
        *table_p = st_init_numtable();
    }
    st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
}

static inline e_proc *
lookup_rollback_func(e_proc *ensure_func)
{
    st_table *table = GET_VM()->ensure_rollback_table;
    st_data_t val;
    if (table && st_lookup(table, (st_data_t)ensure_func, &val))
        return (e_proc *) val;
    return (e_proc *) Qundef;
}


static inline void
rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
{
    rb_ensure_list_t *p;
    rb_ensure_entry_t *entry;
    size_t i, j;
    size_t cur_size;
    size_t target_size;
    size_t base_point;
    e_proc *func;

    cur_size = 0;
    for (p=current; p; p=p->next)
        cur_size++;
    target_size = 0;
    for (entry=target; entry->marker; entry++)
        target_size++;

    /* search common stack point */
    p = current;
    base_point = cur_size;
    while (base_point) {
        if (target_size >= base_point &&
            p->entry.marker == target[target_size - base_point].marker)
            break;
        base_point --;
        p = p->next;
    }

    /* rollback function check */
    for (i=0; i < target_size - base_point; i++) {
        if (!lookup_rollback_func(target[i].e_proc)) {
            rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
        }
    }
    /* pop ensure stack */
    while (cur_size > base_point) {
        /* escape from ensure block */
        (*current->entry.e_proc)(current->entry.data2);
        current = current->next;
        cur_size--;
    }
    /* push ensure stack */
    for (j = 0; j < i; j++) {
        func = lookup_rollback_func(target[i - j - 1].e_proc);
        if ((VALUE)func != Qundef) {
            (*func)(target[i - j - 1].data2);
        }
    }
}
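
// Note (editorial, not in the original source): rollback_ensure_stack compares
// the currently active ensure list with the one captured by the continuation,
// finds the deepest common entry (base_point), runs the e_proc of every entry
// being unwound, and re-runs registered rollback functions for entries being
// re-entered. This is why rb_cont_call raises when asked to jump into a critical
// rb_ensure scope that has no registered rollback function.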

NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));

/*
 *  call-seq:
 *     cont.call(args, ...)
 *     cont[args, ...]
 *
 *  Invokes the continuation. The program continues from the end of
 *  the #callcc block. If no arguments are given, the original #callcc
 *  returns +nil+. If one argument is given, #callcc returns
 *  it. Otherwise, an array containing <i>args</i> is returned.
 *
 *     callcc {|cont|  cont.call }           #=> nil
 *     callcc {|cont|  cont.call 1 }         #=> 1
 *     callcc {|cont|  cont.call 1, 2, 3 }   #=> [1, 2, 3]
 */

static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont = cont_ptr(contval);
    rb_thread_t *th = GET_THREAD();

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_ec.fiber_ptr) {
        if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }
    rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    VM_UNREACHABLE(rb_cont_call);
}

/*********/
/* fiber */
/*********/

/*
 *  Document-class: Fiber
 *
 *  Fibers are primitives for implementing lightweight cooperative
 *  concurrency in Ruby. Basically they are a means of creating code blocks
 *  that can be paused and resumed, much like threads. The main difference
 *  is that they are never preempted and that the scheduling must be done by
 *  the programmer and not the VM.
 *
 *  As opposed to other stackless lightweight concurrency models, each fiber
 *  comes with a stack. This enables the fiber to be paused from deeply
 *  nested function calls within the fiber block. See the ruby(1)
 *  manpage to configure the size of the fiber stack(s).
 *
 *  When a fiber is created it will not run automatically. Rather it must
 *  be explicitly asked to run using the Fiber#resume method.
 *  The code running inside the fiber can give up control by calling
 *  Fiber.yield, in which case it yields control back to the caller (the
 *  caller of the Fiber#resume).
 *
 *  Upon yielding or termination the Fiber returns the value of the last
 *  executed expression.
 *
 *  For instance:
 *
 *    fiber = Fiber.new do
 *      Fiber.yield 1
 *      2
 *    end
 *
 *    puts fiber.resume
 *    puts fiber.resume
 *    puts fiber.resume
 *
 *  <em>produces</em>
 *
 *    1
 *    2
 *    FiberError: dead fiber called
 *
 *  The Fiber#resume method accepts an arbitrary number of parameters.
 *  If it is the first call to #resume then they will be passed as
 *  block arguments. Otherwise they will be the return value of the
 *  call to Fiber.yield.
 *
 *  Example:
 *
 *    fiber = Fiber.new do |first|
 *      second = Fiber.yield first + 2
 *    end
 *
 *    puts fiber.resume 10
 *    puts fiber.resume 1_000_000
 *    puts fiber.resume "The fiber will be dead before I can cause trouble"
 *
 *  <em>produces</em>
 *
 *    12
 *    1000000
 *    FiberError: dead fiber called
 *
 *  == Non-blocking Fibers
 *
 *  The concept of <em>non-blocking fibers</em> was introduced in Ruby 3.0.
 *  A non-blocking fiber, when reaching any potentially blocking operation (like
 *  sleep, wait for another process, wait for I/O data to be ready), instead
 *  of just freezing itself and all execution in the thread, yields control
 *  to other fibers, and allows the <em>scheduler</em> to handle waiting and waking
 *  (resuming) the fiber when it can proceed.
 *
 *  For a Fiber to behave as non-blocking, it should be created in Fiber.new with
 *  <tt>blocking: false</tt> (which is the default now), and Fiber.scheduler
 *  should be set with Fiber.set_scheduler. If Fiber.scheduler is not set in
 *  the current thread, blocking and non-blocking fibers' behavior is identical.
 *
 *  Ruby doesn't provide a scheduler class: it is expected to be implemented by
 *  the user and correspond to Fiber::SchedulerInterface.
 *
 *  There is also the Fiber.schedule method, which is expected to immediately
 *  perform the passed block in a non-blocking manner (but its actual
 *  implementation is up to the scheduler).
 *
 */

static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}

static rb_fiber_t*
fiber_t_alloc(VALUE fiber_value, unsigned int blocking)
{
    rb_fiber_t *fiber;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fiber_value) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fiber = ZALLOC(rb_fiber_t);
    fiber->cont.self = fiber_value;
    fiber->cont.type = FIBER_CONTEXT;
    fiber->blocking = blocking;
    cont_init(&fiber->cont, th);

    fiber->cont.saved_ec.fiber_ptr = fiber;
    rb_ec_clear_vm_stack(&fiber->cont.saved_ec);

    fiber->prev = NULL;

    /* fiber->status == 0 == CREATED
     * So that we don't need to set status: fiber_status_set(fiber, FIBER_CREATED); */
    VM_ASSERT(FIBER_CREATED_P(fiber));

    DATA_PTR(fiber_value) = fiber;

    return fiber;
}

static VALUE
fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool, unsigned int blocking)
{
    rb_fiber_t *fiber = fiber_t_alloc(self, blocking);

    fiber->first_proc = proc;
    fiber->stack.base = NULL;
    fiber->stack.pool = fiber_pool;

    return self;
}

static void
fiber_prepare_stack(rb_fiber_t *fiber)
{
    rb_context_t *cont = &fiber->cont;
    rb_execution_context_t *sec = &cont->saved_ec;

    size_t vm_stack_size = 0;
    VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);

    /* initialize cont */
    cont->saved_vm_stack.ptr = NULL;
    rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));

    sec->tag = NULL;
    sec->local_storage = NULL;
    sec->local_storage_recursive_hash = Qnil;
    sec->local_storage_recursive_hash_for_trace = Qnil;
}

static struct fiber_pool *
rb_fiber_pool_default(VALUE pool)
{
    return &shared_fiber_pool;
}

/* :nodoc: */
static VALUE
rb_fiber_initialize_kw(int argc, VALUE* argv, VALUE self, int kw_splat)
{
    VALUE pool = Qnil;
    VALUE blocking = Qfalse;

    if (kw_splat != RB_NO_KEYWORDS) {
        VALUE options = Qnil;
        VALUE arguments[2] = {Qundef};

        argc = rb_scan_args_kw(kw_splat, argc, argv, ":", &options);
        rb_get_kwargs(options, fiber_initialize_keywords, 0, 2, arguments);

        if (arguments[0] != Qundef) {
            blocking = arguments[0];
        }

        if (arguments[1] != Qundef) {
            pool = arguments[1];
        }
    }

    return fiber_initialize(self, rb_block_proc(), rb_fiber_pool_default(pool), RTEST(blocking));
}

/*
 *  call-seq:
 *     Fiber.new(blocking: false) { |*args| ... } -> fiber
 *
 *  Creates a new Fiber. Initially, the fiber is not running, but can be resumed
 *  with #resume. Arguments to the first #resume call will be passed to the block:
 *
 *      f = Fiber.new do |initial|
 *         current = initial
 *         loop do
 *           puts "current: #{current.inspect}"
 *           current = Fiber.yield
 *         end
 *      end
 *      f.resume(100)     # prints: current: 100
 *      f.resume(1, 2, 3) # prints: current: [1, 2, 3]
 *      f.resume          # prints: current: nil
 *      # ... and so on ...
 *
 *  If <tt>blocking: false</tt> is passed to <tt>Fiber.new</tt>, _and_ the current
 *  thread has Fiber.scheduler defined, the Fiber becomes non-blocking (see the
 *  "Non-blocking Fibers" section in class docs).
 */
static VALUE
rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
{
    return rb_fiber_initialize_kw(argc, argv, self, rb_keyword_given_p());
}

VALUE
rb_fiber_new(rb_block_call_func_t func, VALUE obj)
{
    return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 1);
}

static VALUE
rb_f_fiber_kw(int argc, VALUE* argv, int kw_splat)
{
    rb_thread_t * th = GET_THREAD();
    VALUE scheduler = th->scheduler;
    VALUE fiber = Qnil;

    if (scheduler != Qnil) {
        fiber = rb_funcall_passing_block_kw(scheduler, rb_intern("fiber"), argc, argv, kw_splat);
    } else {
        rb_raise(rb_eRuntimeError, "No scheduler is available!");
    }

    return fiber;
}

/*
 *  call-seq:
 *     Fiber.schedule { |*args| ... } -> fiber
 *
 *  The method is <em>expected</em> to immediately run the provided block of code in a
 *  separate non-blocking fiber.
 *
 *     puts "Go to sleep!"
 *
 *     Fiber.set_scheduler(MyScheduler.new)
 *
 *     Fiber.schedule do
 *       puts "Going to sleep"
 *       sleep(1)
 *       puts "I slept well"
 *     end
 *
 *     puts "Wakey-wakey, sleepyhead"
 *
 *  Assuming MyScheduler is properly implemented, this program will produce:
 *
 *     Go to sleep!
 *     Going to sleep
 *     Wakey-wakey, sleepyhead
 *     ...1 sec pause here...
 *     I slept well
 *
 *  ...e.g. on the first blocking operation inside the Fiber (<tt>sleep(1)</tt>),
 *  control is yielded to the outside code (main fiber), and <em>at the end
 *  of the execution</em>, the scheduler takes care of properly resuming all the
 *  blocked fibers.
 *
 *  Note that the behavior described above is how the method is <em>expected</em>
 *  to behave; actual behavior is up to the current scheduler's implementation of
 *  the Fiber::SchedulerInterface#fiber method. Ruby doesn't enforce this method to
 *  behave in any particular way.
 *
 *  If the scheduler is not set, the method raises
 *  <tt>RuntimeError (No scheduler is available!)</tt>.
 *
 */
static VALUE
rb_f_fiber(int argc, VALUE *argv, VALUE obj)
{
    return rb_f_fiber_kw(argc, argv, rb_keyword_given_p());
}

/*
 *  call-seq:
 *     Fiber.scheduler -> obj or nil
 *
 *  The Fiber scheduler, set in the current thread with Fiber.set_scheduler. If
 *  the scheduler is +nil+ (which is the default), non-blocking fibers' behavior
 *  is the same as blocking ones (see the "Non-blocking Fibers" section in class
 *  docs for details about the scheduler concept).
 *
 */
static VALUE
rb_fiber_scheduler(VALUE klass)
{
    return rb_scheduler_get();
}

/*
 *  call-seq:
 *     Fiber.set_scheduler(scheduler) -> scheduler
 *
 *  Sets the Fiber scheduler for the current thread. If the scheduler is set,
 *  non-blocking fibers (created by Fiber.new with <tt>blocking: false</tt>, or by
 *  Fiber.schedule) call that scheduler's hook methods on potentially blocking
 *  operations, and the current thread will call the scheduler's +close+ method on
 *  finalization (allowing the scheduler to properly manage all non-finished
 *  fibers).
 *
 *  +scheduler+ can be an object of any class corresponding to
 *  Fiber::SchedulerInterface. Its implementation is up to the user.
 *
 *  See also the "Non-blocking Fibers" section in class docs.
 *
 */
static VALUE
rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)
{
    // if (rb_scheduler_get() != Qnil) {
    //     rb_raise(rb_eFiberError, "Scheduler is already defined!");
    // }

    return rb_scheduler_set(scheduler);
}

NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt));

void
rb_fiber_start(void)
{
    rb_thread_t * volatile th = GET_THREAD();
    rb_fiber_t *fiber = th->ec->fiber_ptr;
    rb_proc_t *proc;
    enum ruby_tag_type state;
    int need_interrupt = TRUE;

    VM_ASSERT(th->ec == GET_EC());
    VM_ASSERT(FIBER_RESUMED_P(fiber));

    if (fiber->blocking) {
        th->blocking += 1;
    }

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
        int argc;
        const VALUE *argv, args = cont->value;
        GetProcPtr(fiber->first_proc, proc);
        argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
        cont->value = Qnil;
        th->ec->errinfo = Qnil;
        th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
        th->ec->root_svar = Qfalse;

        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
        cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, cont->kw_splat, VM_BLOCK_HANDLER_NONE);
    }
    EC_POP_TAG();

    if (state) {
        VALUE err = th->ec->errinfo;
        VM_ASSERT(FIBER_RESUMED_P(fiber));

        if (state == TAG_RAISE || state == TAG_FATAL) {
            rb_threadptr_pending_interrupt_enque(th, err);
        }
        else {
            err = rb_vm_make_jump_tag_but_local_jump(state, err);
            if (!NIL_P(err)) {
                rb_threadptr_pending_interrupt_enque(th, err);
            }
        }
        need_interrupt = TRUE;
    }

    rb_fiber_terminate(fiber, need_interrupt);
    VM_UNREACHABLE(rb_fiber_start);
}
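
// Note (editorial, not in the original source): rb_fiber_start never returns.
// An exception raised inside the fiber block is queued on the owning thread via
// rb_threadptr_pending_interrupt_enque, and rb_fiber_terminate then switches
// back to the return fiber, where the pending interrupt is delivered.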

static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    VALUE fiber_value = fiber_alloc(rb_cFiber);
    rb_fiber_t *fiber = th->ec->fiber_ptr;

    VM_ASSERT(DATA_PTR(fiber_value) == NULL);
    VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
    VM_ASSERT(fiber->status == FIBER_RESUMED);

    th->root_fiber = fiber;
    DATA_PTR(fiber_value) = fiber;
    fiber->cont.self = fiber_value;

#ifdef COROUTINE_PRIVATE_STACK
    fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
    coroutine_initialize_main(&fiber->context, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, th->ec->machine.stack_start);
#else
    coroutine_initialize_main(&fiber->context);
#endif

    return fiber;
}

void
rb_threadptr_root_fiber_setup(rb_thread_t *th)
{
    rb_fiber_t *fiber = ruby_mimmalloc(sizeof(rb_fiber_t));
    if (!fiber) {
        rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
    }
    MEMZERO(fiber, rb_fiber_t, 1);
    fiber->cont.type = FIBER_CONTEXT;
    fiber->cont.saved_ec.fiber_ptr = fiber;
    fiber->cont.saved_ec.thread_ptr = th;
    fiber->blocking = 1;
    fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
    th->ec = &fiber->cont.saved_ec;
    // This skips mjit_cont_new for the initial thread because mjit_enabled is always false
    // at this point. mjit_init calls rb_fiber_init_mjit_cont again for this root_fiber.
    rb_fiber_init_mjit_cont(fiber);
}
2108
2109void
2111{
2112 if (th->root_fiber) {
2113 /* ignore. A root fiber object will free th->ec */
2114 }
2115 else {
2116 rb_execution_context_t *ec = GET_EC();
2117
2118 VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
2119 VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
2120
2121 if (th->ec == ec) {
2122 rb_ractor_set_current_ec(th->ractor, NULL);
2123 }
2124 fiber_free(th->ec->fiber_ptr);
2125 th->ec = NULL;
2126 }
2127}
2128
2129void
2130rb_threadptr_root_fiber_terminate(rb_thread_t *th)
2131{
2132 rb_fiber_t *fiber = th->ec->fiber_ptr;
2133
2134 fiber->status = FIBER_TERMINATED;
2135
2136 // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
2137 rb_ec_clear_vm_stack(th->ec);
2138}
2139
2140static inline rb_fiber_t*
2141fiber_current(void)
2142{
2143 rb_execution_context_t *ec = GET_EC();
2144 if (ec->fiber_ptr->cont.self == 0) {
2145 root_fiber_alloc(rb_ec_thread_ptr(ec));
2146 }
2147 return ec->fiber_ptr;
2148}
2149
2150static inline rb_fiber_t*
2151return_fiber(bool terminate)
2152{
2153 rb_fiber_t *fiber = fiber_current();
2154 rb_fiber_t *prev = fiber->prev;
2155
2156 if (prev) {
2157 fiber->prev = NULL;
2158 prev->resuming_fiber = Qnil;
2159 return prev;
2160 }
2161 else {
2162 if (!terminate) {
2163 rb_raise(rb_eFiberError, "attempt to yield on a not resumed fiber");
2164 }
2165
2166 rb_thread_t *th = GET_THREAD();
2167 rb_fiber_t *root_fiber = th->root_fiber;
2168
2169 VM_ASSERT(root_fiber != NULL);
2170
2171 // search resuming fiber
2172 for (fiber = root_fiber;
2173 RTEST(fiber->resuming_fiber);
2174 fiber = fiber_ptr(fiber->resuming_fiber)) {
2175 }
2176
2177 return fiber;
2178 }
2179}
2180
2181VALUE
2182rb_fiber_current(void)
2183{
2184 return fiber_current()->cont.self;
2185}
2186
2187// Prepare to execute next_fiber on the given thread.
2188static inline VALUE
2189fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
2190{
2191 rb_fiber_t *fiber;
2192
2193 if (th->ec->fiber_ptr != NULL) {
2194 fiber = th->ec->fiber_ptr;
2195 }
2196 else {
2197 /* create root fiber */
2198 fiber = root_fiber_alloc(th);
2199 }
2200
2201 if (FIBER_CREATED_P(next_fiber)) {
2202 fiber_prepare_stack(next_fiber);
2203 }
2204
2205 VM_ASSERT(FIBER_RESUMED_P(fiber));
2206 VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));
2207
2208 if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);
2209
2210 fiber_status_set(next_fiber, FIBER_RESUMED);
2211 fiber_setcontext(next_fiber, fiber);
2212
2213 fiber = th->ec->fiber_ptr;
2214
2215 /* Raise an exception if that was the result of executing the fiber */
2216 if (fiber->cont.argc == -1) rb_exc_raise(fiber->cont.value);
2217
2218 return fiber->cont.value;
2219}
2220
2221static inline VALUE
2222fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, VALUE resuming_fiber, bool yielding)
2223{
2224 VALUE value;
2225 rb_context_t *cont = &fiber->cont;
2226 rb_thread_t *th = GET_THREAD();
2227
2228 /* make sure the root_fiber object is available */
2229 if (th->root_fiber == NULL) root_fiber_alloc(th);
2230
2231 if (th->ec->fiber_ptr == fiber) {
2232 /* ignore fiber context switch
2233 * because the destination fiber is the same as the current fiber
2234 */
2235 return make_passing_arg(argc, argv);
2236 }
2237
2238 if (cont_thread_value(cont) != th->self) {
2239 rb_raise(rb_eFiberError, "fiber called across threads");
2240 }
2241 else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
2242 rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
2243 }
2244 else if (FIBER_TERMINATED_P(fiber)) {
2245 value = rb_exc_new2(rb_eFiberError, "dead fiber called");
2246
2247 if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
2248 rb_exc_raise(value);
2249 VM_UNREACHABLE(fiber_switch);
2250 }
2251 else {
2252 /* th->ec->fiber_ptr is also dead => switch to root fiber */
2253 /* (this means we're being called from rb_fiber_terminate, */
2254 /* and the terminated fiber's return_fiber() is already dead) */
2255 VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
2256
2257 cont = &th->root_fiber->cont;
2258 cont->argc = -1;
2259 cont->value = value;
2260
2261 fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
2262
2263 VM_UNREACHABLE(fiber_switch);
2264 }
2265 }
2266
2267 VM_ASSERT(FIBER_RUNNABLE_P(fiber));
2268
2269 rb_fiber_t *current_fiber = fiber_current();
2270
2271 VM_ASSERT(!RTEST(current_fiber->resuming_fiber));
2272 if (RTEST(resuming_fiber)) {
2273 current_fiber->resuming_fiber = resuming_fiber;
2274 fiber->prev = fiber_current();
2275 fiber->yielding = 0;
2276 }
2277
2278 VM_ASSERT(!current_fiber->yielding);
2279 if (yielding) {
2280 current_fiber->yielding = 1;
2281 }
2282
2283 if (current_fiber->blocking) {
2284 th->blocking -= 1;
2285 }
2286
2287 cont->argc = argc;
2288 cont->kw_splat = kw_splat;
2289 cont->value = make_passing_arg(argc, argv);
2290
2291 value = fiber_store(fiber, th);
2292
2293 if (RTEST(resuming_fiber) && FIBER_TERMINATED_P(fiber)) {
2294 fiber_stack_release(fiber);
2295 }
2296
2297 if (fiber_current()->blocking) {
2298 th->blocking += 1;
2299 }
2300
2301 RUBY_VM_CHECK_INTS(th->ec);
2302
2303 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
2304
2305 return value;
2306}
2307
2308VALUE
2309rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
2310{
2311 return fiber_switch(fiber_ptr(fiber_value), argc, argv, RB_NO_KEYWORDS, Qfalse, false);
2312}
2313
2314/*
2315 * call-seq:
2316 * fiber.blocking? -> true or false
2317 *
2318 * Returns +true+ if +fiber+ is blocking and +false+ otherwise.
2319 * A fiber is non-blocking if it was created by passing <tt>blocking: false</tt>
2320 * to Fiber.new, or via Fiber.schedule.
2321 *
2322 * Note that even if the method returns +false+, the fiber behaves differently
2323 * only if Fiber.scheduler is set in the current thread.
2324 *
2325 * See the "Non-blocking fibers" section in class docs for details.
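 *
 * For example (in Ruby 3.0, fibers created by Fiber.new are blocking by default):
 *
 *     Fiber.new { }.blocking?                  #=> true
 *     Fiber.new(blocking: false) { }.blocking? #=> false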
2326 *
2327 */
2328VALUE
2329rb_fiber_blocking_p(VALUE fiber)
2330{
2331 return (fiber_ptr(fiber)->blocking == 0) ? Qfalse : Qtrue;
2332}
2333
2334/*
2335 * call-seq:
2336 * Fiber.blocking? -> false or number
2337 *
2338 * Returns +false+ if the current fiber is non-blocking.
2339 * A fiber is non-blocking if it was created by passing <tt>blocking: false</tt>
2340 * to Fiber.new, or via Fiber.schedule.
2341 *
2342 * If the current fiber is blocking, the method, unlike usual
2343 * predicate methods, returns the *number* of blocking fibers currently
2344 * running (TBD: always 1?).
2345 *
2346 * Note that even if the method returns +false+, the fiber behaves differently
2347 * only if Fiber.scheduler is set in the current thread.
2348 *
2349 * See the "Non-blocking fibers" section in class docs for details.
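 *
 * For example, on the main (blocking) fiber:
 *
 *     Fiber.blocking?                                        #=> 1
 *     Fiber.new(blocking: false) { Fiber.blocking? }.resume  #=> false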
2350 *
2351 */
2352static VALUE
2353rb_f_fiber_blocking_p(VALUE klass)
2354{
2355 rb_thread_t *thread = GET_THREAD();
2356 unsigned blocking = thread->blocking;
2357
2358 if (blocking == 0)
2359 return Qfalse;
2360
2361 return INT2NUM(blocking);
2362}
2363
2364void
2365rb_fiber_close(rb_fiber_t *fiber)
2366{
2367 fiber_status_set(fiber, FIBER_TERMINATED);
2368}
2369
2370static void
2371rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt)
2372{
2373 VALUE value = fiber->cont.value;
2374 rb_fiber_t *next_fiber;
2375
2376 VM_ASSERT(FIBER_RESUMED_P(fiber));
2377 rb_fiber_close(fiber);
2378
2379 coroutine_destroy(&fiber->context);
2380
2381 fiber->cont.machine.stack = NULL;
2382 fiber->cont.machine.stack_size = 0;
2383
2384 next_fiber = return_fiber(true);
2385 if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
2386 fiber_switch(next_fiber, 1, &value, RB_NO_KEYWORDS, Qfalse, false);
2387 ruby_stop(0);
2388}
2389
2390VALUE
2391rb_fiber_resume_kw(VALUE fiber_value, int argc, const VALUE *argv, int kw_splat)
2392{
2393 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2394 rb_fiber_t *current_fiber = fiber_current();
2395
2396 if (argc == -1 && FIBER_CREATED_P(fiber)) {
2397 rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
2398 }
2399 else if (FIBER_TERMINATED_P(fiber)) {
2400 rb_raise(rb_eFiberError, "attempt to resume a terminated fiber");
2401 }
2402 else if (fiber == current_fiber) {
2403 rb_raise(rb_eFiberError, "attempt to resume the current fiber");
2404 }
2405 else if (fiber->prev != NULL) {
2406 rb_raise(rb_eFiberError, "attempt to resume a resumed fiber (double resume)");
2407 }
2408 else if (RTEST(fiber->resuming_fiber)) {
2409 rb_raise(rb_eFiberError, "attempt to resume a resuming fiber");
2410 }
2411 else if (fiber->prev == NULL &&
2412 (!fiber->yielding && fiber->status != FIBER_CREATED)) {
2413 rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");
2414 }
2415
2416 return fiber_switch(fiber, argc, argv, kw_splat, fiber_value, false);
2417}
2418
2419VALUE
2420rb_fiber_resume(VALUE fiber_value, int argc, const VALUE *argv)
2421{
2422 return rb_fiber_resume_kw(fiber_value, argc, argv, RB_NO_KEYWORDS);
2423}
2424
2425VALUE
2426rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
2427{
2428 return fiber_switch(return_fiber(false), argc, argv, kw_splat, Qfalse, true);
2429}
2430
2431VALUE
2432rb_fiber_yield(int argc, const VALUE *argv)
2433{
2434 return fiber_switch(return_fiber(false), argc, argv, RB_NO_KEYWORDS, Qfalse, true);
2435}
2436
2437void
2438rb_fiber_reset_root_local_storage(rb_thread_t *th)
2439{
2440 if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
2441 th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
2442 }
2443}
2444
2445/*
2446 * call-seq:
2447 * fiber.alive? -> true or false
2448 *
2449 * Returns true if the fiber can still be resumed (or transferred
2450 * to). After finishing execution of the fiber block this method will
2451 * always return false. You need to <code>require 'fiber'</code>
2452 * before using this method.
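 *
 * For example:
 *
 *     require 'fiber'
 *
 *     fiber = Fiber.new { Fiber.yield }
 *     fiber.alive? #=> true
 *     fiber.resume # suspends at Fiber.yield
 *     fiber.alive? #=> true
 *     fiber.resume # runs to completion
 *     fiber.alive? #=> false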
2453 */
2454VALUE
2455rb_fiber_alive_p(VALUE fiber_value)
2456{
2457 return FIBER_TERMINATED_P(fiber_ptr(fiber_value)) ? Qfalse : Qtrue;
2458}
2459
2460/*
2461 * call-seq:
2462 * fiber.resume(args, ...) -> obj
2463 *
2464 * Resumes the fiber from the point at which the last Fiber.yield was
2465 * called, or starts running it if it is the first call to
2466 * #resume. Arguments passed to resume will be the value of the
2467 * Fiber.yield expression or will be passed as block parameters to
2468 * the fiber's block if this is the first #resume.
2469 *
2470 * Alternatively, when resume is called it evaluates to the arguments passed
2471 * to the next Fiber.yield statement inside the fiber's block,
2472 * or to the block's value if it runs to completion without any
2473 * Fiber.yield.
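 *
 * For example:
 *
 *     fiber = Fiber.new do |first|
 *       second = Fiber.yield(first + 1)
 *       second + 1
 *     end
 *
 *     fiber.resume(1)  #=> 2
 *     fiber.resume(10) #=> 11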
2474 */
2475static VALUE
2476rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
2477{
2478 return rb_fiber_resume_kw(fiber, argc, argv, rb_keyword_given_p());
2479}
2480
2481static VALUE rb_fiber_transfer_kw(VALUE fiber_value, int argc, VALUE *argv, int kw_splat);
2482
2483/*
2484 * call-seq:
2485 * fiber.raise -> obj
2486 * fiber.raise(string) -> obj
2487 * fiber.raise(exception [, string [, array]]) -> obj
2488 *
2489 * Raises an exception in the fiber at the point at which the last
2490 * +Fiber.yield+ was called. If the fiber has not been started or has
2491 * already run to completion, raises +FiberError+. If the fiber is
2492 * yielding, it is resumed. If it is transferring, it is transferred into.
2493 * But if it is resuming, raises +FiberError+.
2494 *
2495 * With no arguments, raises a +RuntimeError+. With a single +String+
2496 * argument, raises a +RuntimeError+ with the string as a message. Otherwise,
2497 * the first parameter should be the name of an +Exception+ class (or an
2498 * object that returns an +Exception+ object when sent an +exception+
2499 * message). The optional second parameter sets the message associated with
2500 * the exception, and the third parameter is an array of callback information.
2501 * Exceptions are caught by the +rescue+ clause of <code>begin...end</code>
2502 * blocks.
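 *
 * For example (here the fiber rescues the injected exception itself):
 *
 *     fiber = Fiber.new { Fiber.yield rescue puts "rescued: #{$!.message}" }
 *     fiber.resume
 *     fiber.raise("oops") # prints "rescued: oops"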
2503 */
2504static VALUE
2505rb_fiber_raise(int argc, VALUE *argv, VALUE fiber_value)
2506{
2507 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2508 VALUE exc = rb_make_exception(argc, argv);
2509 if (RTEST(fiber->resuming_fiber)) {
2510 rb_raise(rb_eFiberError, "attempt to raise a resuming fiber");
2511 }
2512 else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {
2513 return rb_fiber_transfer_kw(fiber_value, -1, &exc, RB_NO_KEYWORDS);
2514 }
2515 else {
2516 return rb_fiber_resume_kw(fiber_value, -1, &exc, RB_NO_KEYWORDS);
2517 }
2518}
2519
2520/*
2521 * call-seq:
2522 * fiber.backtrace -> array
2523 * fiber.backtrace(start) -> array
2524 * fiber.backtrace(start, count) -> array
2525 * fiber.backtrace(start..end) -> array
2526 *
2527 * Returns the current execution stack of the fiber. +start+, +count+ and +end+ allow
2528 * selecting only parts of the backtrace.
2529 *
2530 * def level3
2531 * Fiber.yield
2532 * end
2533 *
2534 * def level2
2535 * level3
2536 * end
2537 *
2538 * def level1
2539 * level2
2540 * end
2541 *
2542 * f = Fiber.new { level1 }
2543 *
2544 * # It is empty before the fiber started
2545 * f.backtrace
2546 * #=> []
2547 *
2548 * f.resume
2549 *
2550 * f.backtrace
2551 * #=> ["test.rb:2:in `yield'", "test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'", "test.rb:13:in `block in <main>'"]
2552 * p f.backtrace(1) # start from the item 1
2553 * #=> ["test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'", "test.rb:13:in `block in <main>'"]
2554 * p f.backtrace(2, 2) # start from item 2, take 2
2555 * #=> ["test.rb:6:in `level2'", "test.rb:10:in `level1'"]
2556 * p f.backtrace(1..3) # take items from 1 to 3
2557 * #=> ["test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'"]
2558 *
2559 * f.resume
2560 *
2561 * # It is nil after the fiber is finished
2562 * f.backtrace
2563 * #=> nil
2564 *
2565 */
2566static VALUE
2567rb_fiber_backtrace(int argc, VALUE *argv, VALUE fiber)
2568{
2569 return rb_vm_backtrace(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
2570}
2571
2572/*
2573 * call-seq:
2574 * fiber.backtrace_locations -> array
2575 * fiber.backtrace_locations(start) -> array
2576 * fiber.backtrace_locations(start, count) -> array
2577 * fiber.backtrace_locations(start..end) -> array
2578 *
2579 * Like #backtrace, but returns each line of the execution stack as a
2580 * Thread::Backtrace::Location. Accepts the same arguments as #backtrace.
2581 *
2582 * f = Fiber.new { Fiber.yield }
2583 * f.resume
2584 * loc = f.backtrace_locations.first
2585 * loc.label #=> "yield"
2586 * loc.path #=> "test.rb"
2587 * loc.lineno #=> 1
2588 *
2589 *
2590 */
2591static VALUE
2592rb_fiber_backtrace_locations(int argc, VALUE *argv, VALUE fiber)
2593{
2594 return rb_vm_backtrace_locations(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
2595}
2596
2597/*
2598 * call-seq:
2599 * fiber.transfer(args, ...) -> obj
2600 *
2601 * Transfer control to another fiber, resuming it from where it last
2602 * stopped or starting it if it was not resumed before. The calling
2603 * fiber will be suspended much like in a call to
2604 * Fiber.yield. You need to <code>require 'fiber'</code>
2605 * before using this method.
2606 *
2607 * The fiber which receives the transfer call treats it much like
2608 * a resume call. Arguments passed to transfer are treated like those
2609 * passed to resume.
2610 *
2611 * The two styles of control passing to and from a fiber (one is #resume and
2612 * Fiber::yield, the other is #transfer to and from the fiber) can't be freely
2613 * mixed.
2614 *
2615 * * If the fiber's lifecycle started with transfer, it will never
2616 * be able to yield or be resumed; it can only
2617 * finish or transfer back. (It can still resume other fibers that
2618 * are allowed to be resumed.)
2619 * * If the fiber's lifecycle started with resume, it can yield
2620 * or transfer to another fiber, but can receive control back only
2621 * in the way compatible with how it was given away: if it had
2622 * transferred, it can only be transferred back, and if it had
2623 * yielded, it can only be resumed back. After that, it can again
2624 * transfer or yield.
2625 *
2626 * If those rules are broken, FiberError is raised.
2627 *
2628 * For an individual fiber, yield/resume is the easier style to use
2629 * (the fiber just gives away control; it doesn't need to know
2630 * who the control is given to), while transfer is more flexible
2631 * for complex cases, allowing arbitrary graphs of fibers
2632 * dependent on each other to be built.
2633 *
2634 *
2635 * Example:
2636 *
2637 * require 'fiber'
2638 *
2639 * manager = nil # For local var to be visible inside worker block
2640 *
2641 * # This fiber would be started with transfer
2642 * # It can't yield, and can't be resumed
2643 * worker = Fiber.new { |work|
2644 * puts "Worker: starts"
2645 * puts "Worker: Performed #{work.inspect}, transferring back"
2646 * # Fiber.yield # this would raise FiberError: attempt to yield on a not resumed fiber
2647 * # manager.resume # this would raise FiberError: attempt to resume a resumed fiber (double resume)
2648 * manager.transfer(work.capitalize)
2649 * }
2650 *
2651 * # This fiber would be started with resume
2652 * # It can yield or transfer, and can be transferred
2653 * # back or resumed
2654 * manager = Fiber.new {
2655 * puts "Manager: starts"
2656 * puts "Manager: transferring 'something' to worker"
2657 * result = worker.transfer('something')
2658 * puts "Manager: worker returned #{result.inspect}"
2659 * # worker.resume # this would raise FiberError: attempt to resume a transferring fiber
2660 * Fiber.yield # this is OK, the fiber transferred from and to, now it can yield
2661 * puts "Manager: finished"
2662 * }
2663 *
2664 * puts "Starting the manager"
2665 * manager.resume
2666 * puts "Resuming the manager"
2667 * # manager.transfer # this would raise FiberError: attempt to transfer to a yielding fiber
2668 * manager.resume
2669 *
2670 * <em>produces</em>
2671 *
2672 * Starting the manager
2673 * Manager: starts
2674 * Manager: transferring 'something' to worker
2675 * Worker: starts
2676 * Worker: Performed "something", transferring back
2677 * Manager: worker returned "Something"
2678 * Resuming the manager
2679 * Manager: finished
2680 *
2681 */
2682static VALUE
2683rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fiber_value)
2684{
2685 return rb_fiber_transfer_kw(fiber_value, argc, argv, rb_keyword_given_p());
2686}
2687
2688static VALUE
2689rb_fiber_transfer_kw(VALUE fiber_value, int argc, VALUE *argv, int kw_splat)
2690{
2691 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2692 if (RTEST(fiber->resuming_fiber)) {
2693 rb_raise(rb_eFiberError, "attempt to transfer to a resuming fiber");
2694 }
2695 if (fiber->yielding) {
2696 rb_raise(rb_eFiberError, "attempt to transfer to a yielding fiber");
2697 }
2698 return fiber_switch(fiber, argc, argv, kw_splat, Qfalse, false);
2699}
2700
2701/*
2702 * call-seq:
2703 * Fiber.yield(args, ...) -> obj
2704 *
2705 * Yields control back to the context that resumed the fiber, passing
2706 * along any arguments that were passed to it. The fiber will resume
2707 * processing at this point when #resume is called next.
2708 * Any arguments passed to the next #resume will be the value that
2709 * this Fiber.yield expression evaluates to.
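 *
 * For example:
 *
 *     fiber = Fiber.new do
 *       Fiber.yield 1
 *       Fiber.yield 2
 *       3
 *     end
 *
 *     fiber.resume #=> 1
 *     fiber.resume #=> 2
 *     fiber.resume #=> 3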
2710 */
2711static VALUE
2712rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
2713{
2714 return rb_fiber_yield_kw(argc, argv, rb_keyword_given_p());
2715}
2716
2717/*
2718 * call-seq:
2719 * Fiber.current() -> fiber
2720 *
2721 * Returns the current fiber. You need to <code>require 'fiber'</code>
2722 * before using this method. If you are not running in the context of
2723 * a fiber this method will return the root fiber.
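 *
 * For example:
 *
 *     require 'fiber'
 *
 *     fiber = Fiber.new { Fiber.current == fiber }
 *     fiber.resume  #=> true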
2724 */
2725static VALUE
2726rb_fiber_s_current(VALUE klass)
2727{
2728 return rb_fiber_current();
2729}
2730
2731/*
2732 * call-seq:
2733 * fiber.to_s -> string
2734 *
2735 * Returns a string containing status information about the fiber.
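 *
 * For example (the address and source location vary):
 *
 *     Fiber.new { }.to_s
 *     #=> "#<Fiber:0x000055d93c792bc8 example.rb:1 (created)>"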
2736 *
2737 */
2738
2739static VALUE
2740fiber_to_s(VALUE fiber_value)
2741{
2742 const rb_fiber_t *fiber = fiber_ptr(fiber_value);
2743 const rb_proc_t *proc;
2744 char status_info[0x20];
2745
2746 if (RTEST(fiber->resuming_fiber)) {
2747 snprintf(status_info, 0x20, " (%s by resuming)", fiber_status_name(fiber->status));
2748 }
2749 else {
2750 snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
2751 }
2752
2753 if (!rb_obj_is_proc(fiber->first_proc)) {
2754 VALUE str = rb_any_to_s(fiber_value);
2755 strlcat(status_info, ">", sizeof(status_info));
2756 rb_str_set_len(str, RSTRING_LEN(str)-1);
2757 rb_str_cat_cstr(str, status_info);
2758 return str;
2759 }
2760 GetProcPtr(fiber->first_proc, proc);
2761 return rb_block_to_s(fiber_value, &proc->block, status_info);
2762}
2763
2764#ifdef HAVE_WORKING_FORK
2765void
2766rb_fiber_atfork(rb_thread_t *th)
2767{
2768 if (th->root_fiber) {
2769 if (&th->root_fiber->cont.saved_ec != th->ec) {
2770 th->root_fiber = th->ec->fiber_ptr;
2771 }
2772 th->root_fiber->prev = 0;
2773 }
2774}
2775#endif
2776
2777#ifdef RB_EXPERIMENTAL_FIBER_POOL
2778static void
2779fiber_pool_free(void *ptr)
2780{
2781 struct fiber_pool * fiber_pool = ptr;
2782 RUBY_FREE_ENTER("fiber_pool");
2783
2784 fiber_pool_free_allocations(fiber_pool->allocations);
2785 ruby_xfree(fiber_pool);
2786
2787 RUBY_FREE_LEAVE("fiber_pool");
2788}
2789
2790static size_t
2791fiber_pool_memsize(const void *ptr)
2792{
2793 const struct fiber_pool * fiber_pool = ptr;
2794 size_t size = sizeof(*fiber_pool);
2795
2796 size += fiber_pool->count * fiber_pool->size;
2797
2798 return size;
2799}
2800
2801static const rb_data_type_t FiberPoolDataType = {
2802 "fiber_pool",
2803 {NULL, fiber_pool_free, fiber_pool_memsize,},
2804 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
2805};
2806
2807static VALUE
2808fiber_pool_alloc(VALUE klass)
2809{
2810 struct fiber_pool * fiber_pool = RB_ALLOC(struct fiber_pool);
2811
2812 return TypedData_Wrap_Struct(klass, &FiberPoolDataType, fiber_pool);
2813}
2814
2815static VALUE
2816rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
2817{
2818 rb_thread_t *th = GET_THREAD();
2819 VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
2820 struct fiber_pool * fiber_pool = NULL;
2821
2822 // Maybe these should be keyword arguments.
2823 rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);
2824
2825 if (NIL_P(size)) {
2826 size = SIZET2NUM(th->vm->default_params.fiber_machine_stack_size);
2827 }
2828
2829 if (NIL_P(count)) {
2830 count = INT2NUM(128);
2831 }
2832
2833 if (NIL_P(vm_stack_size)) {
2834 vm_stack_size = SIZET2NUM(th->vm->default_params.fiber_vm_stack_size);
2835 }
2836
2837 TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);
2838
2839 fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), NUM2SIZET(vm_stack_size));
2840
2841 return self;
2842}
2843#endif
2844
2845/*
2846 * Document-class: FiberError
2847 *
2848 * Raised when an invalid operation is attempted on a Fiber, in
2849 * particular when attempting to call/resume a dead fiber,
2850 * attempting to yield from the root fiber, or calling a fiber across
2851 * threads.
2852 *
2853 * fiber = Fiber.new{}
2854 * fiber.resume #=> nil
2855 * fiber.resume #=> FiberError: dead fiber called
2856 */
2857
2858/*
2859 * Document-class: Fiber::SchedulerInterface
2860 *
2861 * This is not an existing class, but documentation of the interface that a scheduler
2862 * object should comply with in order to be used as Fiber.scheduler and handle non-blocking
2863 * fibers. See also the "Non-blocking fibers" section in Fiber class docs for explanations
2864 * of some concepts.
2865 *
2866 * Scheduler's behavior and usage are expected to be as follows:
2867 *
2868 * * When the execution in the non-blocking Fiber reaches some blocking operation (like
2869 * sleep, wait for a process, or a non-ready I/O), it calls some of the scheduler's
2870 * hook methods, listed below.
2871 * * The scheduler registers what the current fiber is waiting for, and yields control
2872 * to other fibers with Fiber.yield (so the fiber is suspended while waiting for
2873 * its wait to end, and other fibers in the same thread can run)
2874 * * At the end of the current thread's execution, the scheduler's method #close is called
2875 * * The scheduler runs a wait loop, checking all the blocked fibers (which it has
2876 * registered on hook calls) and resuming them when the awaited resource is ready (I/O
2877 * ready, sleep time elapsed).
2878 *
2879 * A typical implementation would probably rely on a gem like
2880 * EventMachine[https://github.com/eventmachine/eventmachine] or
2881 * Async[https://github.com/socketry/async] for this closing loop.
2882 *
2883 * This way, concurrent execution is achieved transparently to every
2884 * individual fiber's code.
2885 *
2886 * Hook methods are:
2887 *
2888 * * #io_wait
2889 * * #process_wait
2890 * * #kernel_sleep
2891 * * #block and #unblock
2892 * * (the list is expected to grow as Ruby developers add non-blocking support to more methods)
2893 *
2894 * Unless specified otherwise, the hook implementations are mandatory: if they are not
2895 * implemented, the methods trying to call the hook will fail. To provide backward compatibility,
2896 * hooks added in the future will be optional (if a hook is not implemented because the scheduler
2897 * was created for an older Ruby version, the code which needs this hook will not fail,
2898 * and will just behave in a blocking fashion).
2899 *
2900 * It is also strongly suggested that the scheduler implement the #fiber method, which is
2901 * delegated to by Fiber.schedule.
2902 *
2903 * A sample _toy_ implementation of the scheduler can be found in Ruby's source tree, in
2904 * <tt>test/fiber/scheduler.rb</tt>.
2905 *
2906 */
2907
2908#if 0 /* for RDoc */
2909/*
2910 *
2911 * Document-method: Fiber::SchedulerInterface#close
2912 *
2913 * Called when the current thread exits. The scheduler is expected to implement this
2914 * method in order to allow all waiting fibers to finalize their execution.
2915 *
2916 * The suggested pattern is to implement the main event loop in the #close method.
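 *
 * A rough sketch of that pattern (+@ready+ and +@waiting+ are hypothetical
 * registries filled by the other hooks, and +wake_up_ready_fibers+ is a
 * hypothetical helper; this is an illustration, not Ruby's API):
 *
 *     def close
 *       until @waiting.empty? && @ready.empty?
 *         wake_up_ready_fibers # poll I/O, check timers, fill @ready
 *         @ready.shift&.resume
 *       end
 *     end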
2917 *
2918 */
2919static VALUE
2920rb_fiber_scheduler_interface_close(VALUE self)
2921{
2922}
2923
2924/*
2925 * Document-method: SchedulerInterface#process_wait
2926 * call-seq: process_wait(pid, flags)
2927 *
2928 * Invoked by Process::Status.wait in order to wait for a specified process.
2929 * See that method's description for the meaning of the arguments.
2930 *
2931 * Suggested minimal implementation:
2932 *
2933 * Thread.new do
2934 * Process::Status.wait(pid, flags)
2935 * end.value
2936 *
2937 * This hook is optional: if it is not present in the current scheduler,
2938 * Process::Status.wait will behave as a blocking method.
2939 *
2940 * Expected to return a Process::Status instance.
2941 */
2942static VALUE
2943rb_fiber_scheduler_interface_process_wait(VALUE self)
2944{
2945}
2946
2947/*
2948 * Document-method: SchedulerInterface#io_wait
2949 * call-seq: io_wait(io, events, timeout)
2950 *
2951 * Invoked by IO#wait, IO#wait_readable, IO#wait_writable to ask whether the
2952 * specified descriptor is ready for the specified events within
2953 * the specified +timeout+.
2954 *
2955 * +events+ is a bit mask of <tt>IO::READABLE</tt>, <tt>IO::WRITABLE</tt>, and
2956 * <tt>IO::PRIORITY</tt>.
2957 *
2958 * A suggested implementation registers which fiber is waiting for which
2959 * resources and immediately calls Fiber.yield to pass control to other
2960 * fibers. Then, in the #close method, the scheduler might dispatch all the
2961 * ready I/O resources to the fibers waiting for them.
2962 *
2963 * Expected to return the subset of events that are ready immediately.
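 *
 * A rough sketch of that approach (+@waiting_io+ is a hypothetical registry
 * maintained by the scheduler; this is an illustration, not Ruby's API):
 *
 *     def io_wait(io, events, timeout)
 *       @waiting_io[Fiber.current] = [io, events]
 *       Fiber.yield # the wait loop in #close resumes us with the ready events
 *     end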
2964 *
2965 */
2966static VALUE
2967rb_fiber_scheduler_interface_io_wait(VALUE self)
2968{
2969}
2970
2971/*
2972 * Document-method: SchedulerInterface#kernel_sleep
2973 * call-seq: kernel_sleep(duration = nil)
2974 *
2975 * Invoked by Kernel#sleep and Mutex#sleep and is expected to provide
2976 * an implementation of sleeping in a non-blocking way. The implementation might
2977 * register the current fiber in some list of "which fiber waits until which
2978 * moment", call Fiber.yield to pass control, and then in #close resume
2979 * the fibers whose wait period has ended.
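 *
 * A rough sketch of that approach (+@waiting+ and +current_time+ are
 * hypothetical helpers of the scheduler; this is an illustration, not Ruby's API):
 *
 *     def kernel_sleep(duration = nil)
 *       @waiting[Fiber.current] = current_time + duration if duration
 *       Fiber.yield
 *     end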
2980 *
2981 */
2982static VALUE
2983rb_fiber_scheduler_interface_kernel_sleep(VALUE self)
2984{
2985}
2986
2987/*
2988 * Document-method: SchedulerInterface#block
2989 * call-seq: block(blocker, timeout = nil)
2990 *
2991 * Invoked by methods like Thread.join, and by Mutex, to signify that the current
2992 * fiber is blocked until further notice (e.g. #unblock) or until +timeout+
2993 * passes.
2994 *
2995 * +blocker+ is what we are waiting on, informational only (for debugging and
2996 * logging). There are no guarantees about its value.
2997 *
2998 * Expected to return a boolean, specifying whether the blocking operation was
2999 * successful.
3000 */
3001static VALUE
3002rb_fiber_scheduler_interface_block(VALUE self)
3003{
3004}
3005
3006/*
3007 * Document-method: SchedulerInterface#unblock
3008 * call-seq: unblock(blocker, fiber)
3009 *
3010 * Invoked to wake up a fiber previously blocked with #block (for example, Mutex#lock
3011 * calls #block and Mutex#unlock calls #unblock). The scheduler should use
3012 * the +fiber+ parameter to understand which fiber is unblocked.
3013 *
3014 * +blocker+ is what was being awaited, but it is informational only (for debugging
3015 * and logging), and it is not guaranteed to be the same value as the +blocker+ for
3016 * #block.
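 *
 * A rough sketch of the #block/#unblock pair (+@blocked+ and +@ready+ are
 * hypothetical scheduler registries; this is an illustration, not Ruby's API):
 *
 *     def block(blocker, timeout = nil)
 *       @blocked << Fiber.current
 *       Fiber.yield # suspended until #unblock makes this fiber runnable again
 *     end
 *
 *     def unblock(blocker, fiber)
 *       @blocked.delete(fiber)
 *       @ready << fiber # the wait loop will resume it
 *     end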
3017 *
3018 */
3019static VALUE
3020rb_fiber_scheduler_interface_unblock(VALUE self)
3021{
3022}
3023
3024/*
3025 * Document-method: SchedulerInterface#fiber
3026 * call-seq: fiber(&block)
3027 *
3028 * Implementation of Fiber.schedule. The method is <em>expected</em> to immediately
3029 * run the passed block of code in a separate non-blocking fiber, and to return that fiber.
3030 *
3031 * Minimal suggested implementation is:
3032 *
3033 * def fiber(&block)
3034 * Fiber.new(blocking: false, &block).tap(&:resume)
3035 * end
3036 */
3037static VALUE
3038rb_fiber_scheduler_interface_fiber(VALUE self)
3039{
3040}
3041#endif
3042
3043void
3044Init_Cont(void)
3045{
3046 rb_thread_t *th = GET_THREAD();
3047 size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
3048 size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
3049 size_t stack_size = machine_stack_size + vm_stack_size;
3050
3051#ifdef _WIN32
3052 SYSTEM_INFO info;
3053 GetSystemInfo(&info);
3054 pagesize = info.dwPageSize;
3055#else /* not WIN32 */
3056 pagesize = sysconf(_SC_PAGESIZE);
3057#endif
3058 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
3059
3060 fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);
3061
3062 fiber_initialize_keywords[0] = rb_intern_const("blocking");
3063 fiber_initialize_keywords[1] = rb_intern_const("pool");
3064
3065 char * fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
3066 if (fiber_shared_fiber_pool_free_stacks) {
3067 shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
3068 }
3069
3070 rb_cFiber = rb_define_class("Fiber", rb_cObject);
3071 rb_define_alloc_func(rb_cFiber, fiber_alloc);
3072 rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
3073 rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
3074 rb_define_method(rb_cFiber, "initialize", rb_fiber_initialize, -1);
3075 rb_define_method(rb_cFiber, "blocking?", rb_fiber_blocking_p, 0);
3076 rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
3077 rb_define_method(rb_cFiber, "raise", rb_fiber_raise, -1);
3078 rb_define_method(rb_cFiber, "backtrace", rb_fiber_backtrace, -1);
3079 rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);
3080 rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
3081 rb_define_alias(rb_cFiber, "inspect", "to_s");
3082
3083 rb_define_singleton_method(rb_cFiber, "blocking?", rb_f_fiber_blocking_p, 0);
3084 rb_define_singleton_method(rb_cFiber, "scheduler", rb_fiber_scheduler, 0);
3085 rb_define_singleton_method(rb_cFiber, "set_scheduler", rb_fiber_set_scheduler, 1);
3086
3087 rb_define_singleton_method(rb_cFiber, "schedule", rb_f_fiber, -1);
3088 //rb_define_global_function("Fiber", rb_f_fiber, -1);
3089
3090#if 0 /* for RDoc */
3091 rb_cFiberScheduler = rb_define_class_under(rb_cFiber, "SchedulerInterface", rb_cObject);
3092 rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_interface_close, 0);
3093 rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_interface_process_wait, 0);
3094 rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_interface_io_wait, 0);
3095 rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_interface_kernel_sleep, 0);
3096 rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_interface_block, 0);
3097 rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_interface_unblock, 0);
3098 rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler_interface_fiber, 0);
3099#endif
3100
3101#ifdef RB_EXPERIMENTAL_FIBER_POOL
3102 rb_cFiberPool = rb_define_class("Pool", rb_cFiber);
3103 rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
3104 rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
3105#endif
3106}
3107
3108RUBY_SYMBOL_EXPORT_BEGIN
3109
3110void
3111ruby_Init_Continuation_body(void)
3112{
3113 rb_cContinuation = rb_define_class("Continuation", rb_cObject);
3114 rb_undef_alloc_func(rb_cContinuation);
3115 rb_undef_method(CLASS_OF(rb_cContinuation), "new");
3116 rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
3117 rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
3118 rb_define_global_function("callcc", rb_callcc, 0);
3119}
3120
3121void
3122ruby_Init_Fiber_as_Coroutine(void)
3123{
3124#ifdef HAVE_RB_EXT_RACTOR_SAFE
3125 rb_ext_ractor_safe(true);
3126#endif
3127 rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
3128 rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
3129 rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
3130}
3131
3132RUBY_SYMBOL_EXPORT_END
#define COROUTINE
Definition: Context.h:15
struct coroutine_context * coroutine_transfer(struct coroutine_context *current, struct coroutine_context *target)
Definition: Context.c:136
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:846
#define NOINLINE(x)
Definition: attributes.h:82
#define NORETURN(x)
Definition: attributes.h:152
#define UNREACHABLE_RETURN
Definition: assume.h:31
rb_thread_t * rb_fiber_threadptr(const rb_fiber_t *fiber)
Definition: cont.c:852
#define RB_PAGE_SIZE
Definition: cont.c:35
void rb_threadptr_root_fiber_setup(rb_thread_t *th)
Definition: cont.c:2091
void rb_fiber_update_self(rb_fiber_t *fiber)
Definition: cont.c:980
#define THREAD_MUST_BE_RUNNING(th)
Definition: cont.c:847
void rb_fiber_reset_root_local_storage(rb_thread_t *th)
Definition: cont.c:2438
#define ERRNOMSG
Definition: cont.c:264
VALUE rb_fiberptr_self(struct rb_fiber_struct *fiber)
Definition: cont.c:1153
void ruby_Init_Fiber_as_Coroutine(void)
Definition: cont.c:3122
#define STACK_PAD_SIZE
void ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
Definition: cont.c:1563
void ruby_Init_Continuation_body(void)
Definition: cont.c:3111
#define FIBER_RUNNABLE_P(fiber)
Definition: cont.c:232
VALUE rb_fiber_current(void)
Definition: cont.c:2182
VALUE rb_fiber_resume_kw(VALUE fiber_value, int argc, const VALUE *argv, int kw_splat)
Definition: cont.c:2391
fiber_status
Definition: cont.c:221
@ FIBER_TERMINATED
Definition: cont.c:225
@ FIBER_SUSPENDED
Definition: cont.c:224
@ FIBER_CREATED
Definition: cont.c:222
@ FIBER_RESUMED
Definition: cont.c:223
VALUE rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
Definition: cont.c:2309
VALUE rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
Definition: cont.c:2426
#define FIBER_POOL_INITIAL_SIZE
Definition: cont.c:55
VALUE rb_fiber_resume(VALUE fiber_value, int argc, const VALUE *argv)
Definition: cont.c:2420
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE
Definition: cont.c:56
VALUE rb_fiber_alive_p(VALUE fiber_value)
Definition: cont.c:2455
void rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
Definition: cont.c:1165
context_type
Definition: cont.c:59
@ FIBER_CONTEXT
Definition: cont.c:61
@ CONTINUATION_CONTEXT
Definition: cont.c:60
VALUE rb_fiber_blocking_p(VALUE fiber)
Definition: cont.c:2329
VALUE e_proc(VALUE)
Definition: cont.c:1558
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:1060
void rb_fiber_close(rb_fiber_t *fiber)
Definition: cont.c:2365
#define FIBER_RESUMED_P(fiber)
Definition: cont.c:229
#define FIBER_STACK_FLAGS
Definition: cont.c:261
VALUE rb_fiber_yield(int argc, const VALUE *argv)
Definition: cont.c:2432
void Init_Cont(void)
Definition: cont.c:3044
void rb_fiber_mark_self(const rb_fiber_t *fiber)
Definition: cont.c:991
VALUE rb_fiber_new(rb_block_call_func_t func, VALUE obj)
Definition: cont.c:1902
#define FIBER_CREATED_P(fiber)
Definition: cont.c:228
struct rb_context_struct rb_context_t
void rb_threadptr_root_fiber_release(rb_thread_t *th)
Definition: cont.c:2110
#define FIBER_SUSPENDED_P(fiber)
Definition: cont.c:230
unsigned int rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
Definition: cont.c:1158
void rb_threadptr_root_fiber_terminate(rb_thread_t *th)
Definition: cont.c:2130
#define FIBER_TERMINATED_P(fiber)
Definition: cont.c:231
void rb_fiber_start(void)
Definition: cont.c:2014
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
Definition: cxxanyargs.hpp:653
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
Definition: cxxanyargs.hpp:668
#define rb_define_global_function(mid, func, arity)
Defines rb_mKernel #mid.
Definition: cxxanyargs.hpp:678
struct RIMemo * ptr
Definition: debug.c:88
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:106
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
int rb_keyword_given_p(void)
Definition: eval.c:948
#define ruby_longjmp(env, val)
Definition: eval_intern.h:59
#define EC_EXEC_TAG()
Definition: eval_intern.h:193
#define VAR_FROM_MEMORY(var)
Definition: eval_intern.h:152
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
#define EC_POP_TAG()
Definition: eval_intern.h:138
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1718
#define VAR_INITIALIZED(var)
Definition: eval_intern.h:153
#define ruby_setjmp(env)
Definition: eval_intern.h:58
#define RUBY_EVENT_FIBER_SWITCH
Definition: event.h:45
#define RSTRING_LEN(string)
Definition: fbuffer.h:22
#define UNLIKELY(x)
Definition: ffi_common.h:126
void ruby_xfree(void *x)
Deallocates a storage instance.
Definition: gc.c:10914
VALUE rb_gc_location(VALUE value)
Definition: gc.c:9003
void rb_gc_mark_movable(VALUE ptr)
Definition: gc.c:6106
void * ruby_mimmalloc(size_t size)
Definition: gc.c:10951
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:5580
void rb_gc_mark(VALUE ptr)
Definition: gc.c:6112
void rb_obj_info_dump(VALUE obj)
Definition: gc.c:12505
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:65
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:64
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:66
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:108
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:15
#define RUBY_FREE_UNLESS_NULL(ptr)
Definition: gc.h:79
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:107
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:67
#define CLASS_OF
Definition: globals.h:153
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:748
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition: class.c:797
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1777
int rb_scan_args_kw(int kw_flag, int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:2309
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition: class.c:1999
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:2296
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Definition: class.c:2085
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:327
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2917
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:712
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:1007
void rb_bug(const char *fmt,...)
Definition: error.c:768
VALUE rb_eStandardError
Definition: error.c:1054
VALUE rb_make_exception(int argc, const VALUE *argv)
Make an Exception object from the list of arguments in a manner similar to Kernel#raise.
Definition: eval.c:888
VALUE rb_eRuntimeError
Definition: error.c:1055
VALUE rb_cObject
Object class.
Definition: object.c:49
VALUE rb_any_to_s(VALUE)
Default implementation of #to_s.
Definition: object.c:561
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
Definition: id_table.c:124
void rb_id_table_free(struct rb_id_table *tbl)
Definition: id_table.c:103
Thin wrapper to ruby/config.h.
VALUE rb_funcall_passing_block_kw(VALUE, ID, int, const VALUE *, int)
Definition: vm_eval.c:1156
#define rb_ary_new4
Definition: array.h:74
#define rb_exc_new2
Definition: error.h:30
void rb_ext_ractor_safe(bool flag)
Definition: load.c:1058
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
VALUE rb_block_proc(void)
Definition: proc.c:826
void rb_str_set_len(VALUE, long)
Definition: string.c:2842
#define rb_str_cat_cstr(buf, str)
Definition: string.h:266
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:954
ID rb_intern(const char *)
Definition: symbol.c:785
char * strerror(int)
Definition: strerror.c:11
size_t strlcat(char *, const char *, size_t)
Definition: strlcat.c:31
#define INT2NUM
Definition: int.h:43
Internal header for Fiber.
Internal header for GC.
Internal header for Proc.
VALUE rb_block_to_s(VALUE self, const struct rb_block *block, const char *additional_info)
Definition: proc.c:1491
VALUE rb_vm_backtrace_locations(int argc, const VALUE *argv, struct rb_execution_context_struct *ec)
VALUE rb_vm_backtrace(int argc, const VALUE *argv, struct rb_execution_context_struct *ec)
#define bp()
Definition: internal.h:105
#define PRIuSIZE
Definition: inttypes.h:127
voidpf void uLong size
Definition: ioapi.h:138
voidpf uLong offset
Definition: ioapi.h:144
voidpf void * buf
Definition: ioapi.h:138
rb_block_call_func * rb_block_call_func_t
Definition: iterator.h:34
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1341
#define MEMCPY(p1, p2, type, n)
Definition: memory.h:129
#define REALLOC_N
Definition: memory.h:137
#define ALLOCA_N(type, n)
Definition: memory.h:112
#define RB_ALLOC(type)
Definition: memory.h:106
#define MEMZERO(p, type, n)
Definition: memory.h:128
#define ALLOC_N
Definition: memory.h:133
#define mjit_enabled
Definition: mjit.h:210
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
Definition: cxxanyargs.hpp:392
int count
Definition: nkf.c:5055
#define TRUE
Definition: nkf.h:175
#define RARRAY_CONST_PTR(s)
Definition: psych_emitter.c:4
#define DATA_PTR(obj)
Definition: rdata.h:56
#define NULL
Definition: regenc.h:69
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: rtypeddata.h:130
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: rtypeddata.h:101
@ RUBY_TYPED_FREE_IMMEDIATELY
Definition: rtypeddata.h:62
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: rtypeddata.h:122
int argc
Definition: ruby.c:240
char ** argv
Definition: ruby.c:241
#define RB_NO_KEYWORDS
Definition: scan_args.h:46
Internal header for Scheduler.
VALUE rb_scheduler_get()
Definition: scheduler.c:44
VALUE rb_scheduler_set(VALUE scheduler)
Definition: scheduler.c:73
int rb_signal_buff_size(void)
Definition: signal.c:747
#define NUM2SIZET
Definition: size_t.h:51
#define Qundef
#define Qtrue
#define RTEST
#define Qnil
#define Qfalse
#define NIL_P
unsigned long st_data_t
Definition: st.h:22
#define st_init_numtable
Definition: st.h:106
#define st_lookup
Definition: st.h:128
#define st_insert
Definition: st.h:124
VALUE * ptr
Definition: cont.c:65
size_t slen
Definition: cont.c:67
size_t clen
Definition: cont.c:68
struct fiber_pool * pool
Definition: cont.c:151
struct fiber_pool_allocation * next
Definition: cont.c:157
void * current
Definition: cont.c:80
size_t size
Definition: cont.c:83
struct fiber_pool * pool
Definition: cont.c:89
void * base
Definition: cont.c:77
struct fiber_pool_allocation * allocation
Definition: cont.c:92
size_t available
Definition: cont.c:86
struct fiber_pool_vacancy * next
Definition: cont.c:106
struct fiber_pool_stack stack
Definition: cont.c:100
size_t used
Definition: cont.c:181
struct fiber_pool_allocation * allocations
Definition: cont.c:163
int free_stacks
Definition: cont.c:178
size_t size
Definition: cont.c:169
struct fiber_pool_vacancy * vacancies
Definition: cont.c:166
size_t vm_stack_size
Definition: cont.c:184
size_t initial_count
Definition: cont.c:175
size_t count
Definition: cont.c:172
rb_execution_context_t saved_ec
Definition: cont.c:201
struct mjit_cont * mjit_cont
Definition: cont.c:205
rb_ensure_entry_t * ensure_array
Definition: cont.c:203
rb_jmpbuf_t jmpbuf
Definition: cont.c:202
VALUE self
Definition: cont.c:191
VALUE value
Definition: cont.c:192
size_t stack_size
Definition: cont.c:199
enum context_type type
Definition: cont.c:188
struct cont_saved_vm_stack saved_vm_stack
Definition: cont.c:194
VALUE * stack_src
Definition: cont.c:198
VALUE * stack
Definition: cont.c:197
struct rb_context_struct::@3 machine
const rb_iseq_t * iseq
Definition: vm_core.h:772
const VALUE * pc
Definition: vm_core.h:770
Definition: vm_core.h:833
VALUE(* e_proc)(VALUE)
Definition: vm_core.h:835
VALUE marker
Definition: vm_core.h:834
VALUE data2
Definition: vm_core.h:836
struct rb_ensure_list * next
Definition: vm_core.h:840
struct rb_ensure_entry entry
Definition: vm_core.h:841
VALUE local_storage_recursive_hash_for_trace
Definition: vm_core.h:876
struct rb_execution_context_struct::@200 machine
struct rb_id_table * local_storage
Definition: vm_core.h:874
rb_ensure_list_t * ensure_list
Definition: vm_core.h:883
struct rb_vm_protect_tag * protect_tag
Definition: vm_core.h:861
struct rb_thread_struct * thread_ptr
Definition: vm_core.h:871
rb_control_frame_t * cfp
Definition: vm_core.h:858
rb_fiber_t * fiber_ptr
Definition: vm_core.h:870
struct rb_vm_tag * tag
Definition: vm_core.h:860
const VALUE * root_lep
Definition: vm_core.h:879
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:886
struct coroutine_context context
Definition: cont.c:245
VALUE first_proc
Definition: cont.c:236
unsigned int yielding
Definition: cont.c:242
BITFIELD(enum fiber_status, status, 2)
unsigned int blocking
Definition: cont.c:243
rb_context_t cont
Definition: cont.c:235
struct fiber_pool_stack stack
Definition: cont.c:246
struct rb_fiber_struct * prev
Definition: cont.c:237
VALUE resuming_fiber
Definition: cont.c:238
VALUE * iseq_encoded
Definition: vm_core.h:319
struct rb_iseq_constant_body * body
Definition: vm_core.h:448
const struct rb_block block
Definition: vm_core.h:1087
rb_execution_context_t * ec
Definition: vm_core.h:941
rb_vm_t * vm
Definition: vm_core.h:939
rb_ractor_t * ractor
Definition: vm_core.h:938
unsigned blocking
Definition: vm_core.h:1016
rb_fiber_t * root_fiber
Definition: vm_core.h:1012
struct rb_vm_struct::@194 ractor
struct rb_thread_struct * main_thread
Definition: vm_core.h:573
size_t fiber_vm_stack_size
Definition: vm_core.h:674
size_t fiber_machine_stack_size
Definition: vm_core.h:675
struct rb_vm_struct::@196 default_params
Definition: st.h:79
Definition: blast.c:41
#define snprintf
Definition: subst.h:14
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1987
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:675
unsigned long VALUE
Definition: value.h:38
unsigned long ID
Definition: value.h:39
void rb_ec_clear_vm_stack(rb_execution_context_t *ec)
Definition: vm.c:3047
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3031
void rb_vm_stack_to_heap(rb_execution_context_t *ec)
Definition: vm.c:820
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:3024
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1475
void rb_execution_context_update(const rb_execution_context_t *ec)
Definition: vm.c:2785
void rb_execution_context_mark(const rb_execution_context_t *ec)
Definition: vm.c:2835
@ THREAD_RUNNABLE
Definition: vm_core.h:792
#define TAG_RAISE
Definition: vm_core.h:204
#define RUBY_VM_SET_INTERRUPT(ec)
Definition: vm_core.h:1876
#define TAG_NONE
Definition: vm_core.h:198
ruby_tag_type
Definition: vm_core.h:185
#define VM_ASSERT(expr)
Definition: vm_core.h:61
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:2001
void * rb_jmpbuf_t[5]
Definition: vm_core.h:801
#define TAG_FATAL
Definition: vm_core.h:206
#define RUBY_VM_CHECK_INTS(ec)
Definition: vm_core.h:1921
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1299
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:1083
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)
Definition: vm_core.h:1392
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)
Definition: vm_core.h:1878
#define VM_UNREACHABLE(func)
Definition: vm_core.h:62
Internal header to suppress / mandate warnings.
#define COMPILER_WARNING_PUSH
Definition: warnings.h:13
#define COMPILER_WARNING_POP
Definition: warnings.h:14
#define COMPILER_WARNING_IGNORED(flag)
Definition: warnings.h:16
#define getenv(name)
Definition: win32.c:80
int err
Definition: win32.c:142
#define stat
Definition: win32.h:195
IUnknown DWORD
Definition: win32ole.c:33
#define ZALLOC(strm, items, size)
Definition: zutil.h:266