Ruby 3.0.5p211 (2022-11-24 revision ba5cf0f7c52d4d35cc6a173c89eda98ceffa2dcf)
thread_sync.c
1/* included by thread.c */
2#include "ccan/list/list.h"
3#include "coroutine/Stack.h"
4
5static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
6static VALUE rb_eClosedQueueError;
7
8/* Mutex */
9typedef struct rb_mutex_struct {
10 rb_fiber_t *fiber;
11 struct rb_mutex_struct *next_mutex;
12 struct list_head waitq; /* protected by GVL */
13} rb_mutex_t;
14
15/* sync_waiter is always on-stack */
16struct sync_waiter {
17 VALUE self;
18 rb_thread_t *th;
19 rb_fiber_t *fiber;
20 struct list_node node;
21};
22
23#define MUTEX_ALLOW_TRAP FL_USER1
24
25static void
26sync_wakeup(struct list_head *head, long max)
27{
28 struct sync_waiter *cur = 0, *next;
29
30 list_for_each_safe(head, cur, next, node) {
31 list_del_init(&cur->node);
32
33 if (cur->th->status != THREAD_KILLED) {
34
35 if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) {
36 rb_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
37 } else {
38 rb_threadptr_interrupt(cur->th);
39 cur->th->status = THREAD_RUNNABLE;
40 }
41
42 if (--max == 0) return;
43 }
44 }
45}
46
47static void
48wakeup_one(struct list_head *head)
49{
50 sync_wakeup(head, 1);
51}
52
53static void
54wakeup_all(struct list_head *head)
55{
56 sync_wakeup(head, LONG_MAX);
57}
58
59#if defined(HAVE_WORKING_FORK)
60static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
61static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
62static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
63#endif
64static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber);
65
66/*
67 * Document-class: Mutex
68 *
69 * Mutex implements a simple semaphore that can be used to coordinate access to
70 * shared data from multiple concurrent threads.
71 *
72 * Example:
73 *
74 * semaphore = Mutex.new
75 *
76 * a = Thread.new {
77 * semaphore.synchronize {
78 * # access shared resource
79 * }
80 * }
81 *
82 * b = Thread.new {
83 * semaphore.synchronize {
84 * # access shared resource
85 * }
86 * }
87 *
88 */
89
90#define mutex_mark ((void(*)(void*))0)
91
92static size_t
93rb_mutex_num_waiting(rb_mutex_t *mutex)
94{
95 struct sync_waiter *w = 0;
96 size_t n = 0;
97
98 list_for_each(&mutex->waitq, w, node) {
99 n++;
100 }
101
102 return n;
103}
104
105rb_thread_t* rb_fiber_threadptr(const rb_fiber_t *fiber);
106
107static void
108mutex_free(void *ptr)
109{
110 rb_mutex_t *mutex = ptr;
111 if (mutex->fiber) {
112 /* rb_warn("free locked mutex"); */
113 const char *err = rb_mutex_unlock_th(mutex, rb_fiber_threadptr(mutex->fiber), mutex->fiber);
114 if (err) rb_bug("%s", err);
115 }
116 ruby_xfree(ptr);
117}
118
119static size_t
120mutex_memsize(const void *ptr)
121{
122 return sizeof(rb_mutex_t);
123}
124
125static const rb_data_type_t mutex_data_type = {
126 "mutex",
127 {mutex_mark, mutex_free, mutex_memsize,},
128 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
129};
130
131static rb_mutex_t *
132mutex_ptr(VALUE obj)
133{
134 rb_mutex_t *mutex;
135
136 TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
137
138 return mutex;
139}
140
141VALUE
142rb_obj_is_mutex(VALUE obj)
143{
144 if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
145 return Qtrue;
146 }
147 else {
148 return Qfalse;
149 }
150}
151
152static VALUE
153mutex_alloc(VALUE klass)
154{
155 VALUE obj;
156 rb_mutex_t *mutex;
157
158 obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
159
160 list_head_init(&mutex->waitq);
161 return obj;
162}
163
164/*
165 * call-seq:
166 * Mutex.new -> mutex
167 *
168 * Creates a new Mutex
169 */
170static VALUE
171mutex_initialize(VALUE self)
172{
173 return self;
174}
175
176VALUE
177rb_mutex_new(void)
178{
179 return mutex_alloc(rb_cMutex);
180}
181
182/*
183 * call-seq:
184 * mutex.locked? -> true or false
185 *
186 * Returns +true+ if this lock is currently held by some thread.
187 */
188VALUE
189rb_mutex_locked_p(VALUE self)
190{
191 rb_mutex_t *mutex = mutex_ptr(self);
192
193 return mutex->fiber ? Qtrue : Qfalse;
194}
195
196static void
197thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex) {
198 if (thread->keeping_mutexes) {
199 mutex->next_mutex = thread->keeping_mutexes;
200 }
201
202 thread->keeping_mutexes = mutex;
203}
204
205static void
206thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex) {
207 rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
208
209 while (*keeping_mutexes && *keeping_mutexes != mutex) {
210 // Move to the next mutex in the list:
211 keeping_mutexes = &(*keeping_mutexes)->next_mutex;
212 }
213
214 if (*keeping_mutexes) {
215 *keeping_mutexes = mutex->next_mutex;
216 mutex->next_mutex = NULL;
217 }
218}
219
220static void
221mutex_locked(rb_thread_t *th, VALUE self)
222{
223 rb_mutex_t *mutex = mutex_ptr(self);
224
225 thread_mutex_insert(th, mutex);
226}
227
228/*
229 * call-seq:
230 * mutex.try_lock -> true or false
231 *
232 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
233 * lock was granted.
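 *
 * A minimal usage sketch of the non-blocking path (variable names are
 * illustrative, not taken from this file):
 *
 *    m = Mutex.new
 *    if m.try_lock
 *      begin
 *        # ... access the shared resource ...
 *      ensure
 *        m.unlock
 *      end
 *    else
 *      # lock not available right now; do something else
 *    end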
234 */
235VALUE
236rb_mutex_trylock(VALUE self)
237{
238 rb_mutex_t *mutex = mutex_ptr(self);
239
240 if (mutex->fiber == 0) {
241 rb_fiber_t *fiber = GET_EC()->fiber_ptr;
242 rb_thread_t *th = GET_THREAD();
243 mutex->fiber = fiber;
244
245 mutex_locked(th, self);
246 return Qtrue;
247 }
248
249 return Qfalse;
250}
251
252/*
253 * At maximum, only one thread can use cond_timedwait and watch deadlock
254 * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
255 * introduce new race conditions. [Bug #6278] [ruby-core:44275]
256 */
257static const rb_thread_t *patrol_thread = NULL;
258
259static VALUE
260mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
261{
262 if (mutex->fiber == fiber) {
263 return Qtrue;
264 }
265 else {
266 return Qfalse;
267 }
268}
269
270static VALUE call_rb_scheduler_block(VALUE mutex) {
271 return rb_scheduler_block(rb_scheduler_current(), mutex, Qnil);
272}
273
274static VALUE
275delete_from_waitq(VALUE v)
276{
277 struct sync_waiter *w = (void *)v;
278 list_del(&w->node);
279
280 COROUTINE_STACK_FREE(w);
281
282 return Qnil;
283}
284
285static VALUE
286do_mutex_lock(VALUE self, int interruptible_p)
287{
288 rb_execution_context_t *ec = GET_EC();
289 rb_thread_t *th = ec->thread_ptr;
290 rb_fiber_t *fiber = ec->fiber_ptr;
291 rb_mutex_t *mutex = mutex_ptr(self);
292
293 /* When running trap handler */
294 if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
295 th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
296 rb_raise(rb_eThreadError, "can't be called from trap context");
297 }
298
299 if (rb_mutex_trylock(self) == Qfalse) {
300 if (mutex->fiber == fiber) {
301 rb_raise(rb_eThreadError, "deadlock; recursive locking");
302 }
303
304 while (mutex->fiber != fiber) {
305 VALUE scheduler = rb_scheduler_current();
306 if (scheduler != Qnil) {
307 COROUTINE_STACK_LOCAL(struct sync_waiter, w);
308 w->self = self;
309 w->th = th;
310 w->fiber = fiber;
311
312 list_add_tail(&mutex->waitq, &w->node);
313
314 rb_ensure(call_rb_scheduler_block, self, delete_from_waitq, (VALUE)w);
315
316 if (!mutex->fiber) {
317 mutex->fiber = fiber;
318 }
319 } else {
320 enum rb_thread_status prev_status = th->status;
321 rb_hrtime_t *timeout = 0;
322 rb_hrtime_t rel = rb_msec2hrtime(100);
323
324 th->status = THREAD_STOPPED_FOREVER;
325 th->locking_mutex = self;
326 rb_ractor_sleeper_threads_inc(th->ractor);
327 /*
328 * Careful! While some contended threads are in native_sleep(),
329 * ractor->sleeper is an unstable value. We have to avoid both deadlock
330 * and busy looping.
331 */
332 if ((rb_ractor_living_thread_num(th->ractor) == rb_ractor_sleeper_thread_num(th->ractor)) &&
333 !patrol_thread) {
334 timeout = &rel;
335 patrol_thread = th;
336 }
337
338 COROUTINE_STACK_LOCAL(struct sync_waiter, w);
339 w->self = self;
340 w->th = th;
341 w->fiber = fiber;
342
343 list_add_tail(&mutex->waitq, &w->node);
344
345 native_sleep(th, timeout); /* release GVL */
346
347 list_del(&w->node);
348
349 COROUTINE_STACK_FREE(w);
350
351 if (!mutex->fiber) {
352 mutex->fiber = fiber;
353 }
354
355 if (patrol_thread == th)
356 patrol_thread = NULL;
357
358 th->locking_mutex = Qfalse;
359 if (mutex->fiber && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
360 rb_check_deadlock(th->ractor);
361 }
362 if (th->status == THREAD_STOPPED_FOREVER) {
363 th->status = prev_status;
364 }
365 rb_ractor_sleeper_threads_dec(th->ractor);
366 }
367
368 if (interruptible_p) {
369 /* release mutex before checking for interrupts...as interrupt checking
370 * code might call rb_raise() */
371 if (mutex->fiber == fiber) mutex->fiber = 0;
372 RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
373 if (!mutex->fiber) {
374 mutex->fiber = fiber;
375 }
376 }
377 }
378
379 if (mutex->fiber == fiber) mutex_locked(th, self);
380 }
381
382 // assertion
383 if (mutex_owned_p(fiber, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
384
385 return self;
386}
387
388static VALUE
389mutex_lock_uninterruptible(VALUE self)
390{
391 return do_mutex_lock(self, 0);
392}
393
394/*
395 * call-seq:
396 * mutex.lock -> self
397 *
398 * Attempts to grab the lock and waits if it isn't available.
399 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
400 */
401VALUE
402rb_mutex_lock(VALUE self)
403{
404 return do_mutex_lock(self, 1);
405}
406
407/*
408 * call-seq:
409 * mutex.owned? -> true or false
410 *
411 * Returns +true+ if this lock is currently held by the current thread.
412 */
413VALUE
414rb_mutex_owned_p(VALUE self)
415{
416 rb_fiber_t *fiber = GET_EC()->fiber_ptr;
417 rb_mutex_t *mutex = mutex_ptr(self);
418
419 return mutex_owned_p(fiber, mutex);
420}
421
422static const char *
423rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
424{
425 const char *err = NULL;
426
427 if (mutex->fiber == 0) {
428 err = "Attempt to unlock a mutex which is not locked";
429 }
430 else if (mutex->fiber != fiber) {
431 err = "Attempt to unlock a mutex which is locked by another thread/fiber";
432 }
433 else {
434 struct sync_waiter *cur = 0, *next;
435
436 mutex->fiber = 0;
437 list_for_each_safe(&mutex->waitq, cur, next, node) {
438 list_del_init(&cur->node);
439
440 if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) {
441 rb_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
442 goto found;
443 } else {
444 switch (cur->th->status) {
445 case THREAD_RUNNABLE: /* from someone else calling Thread#run */
446 case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
447 rb_threadptr_interrupt(cur->th);
448 goto found;
449 case THREAD_STOPPED: /* probably impossible */
450 rb_bug("unexpected THREAD_STOPPED");
451 case THREAD_KILLED:
452 /* not sure about this, possible in exit GC? */
453 rb_bug("unexpected THREAD_KILLED");
454 continue;
455 }
456 }
457 }
458
459 found:
460 thread_mutex_remove(th, mutex);
461 }
462
463 return err;
464}
465
466/*
467 * call-seq:
468 * mutex.unlock -> self
469 *
470 * Releases the lock.
471 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
472 */
473VALUE
474rb_mutex_unlock(VALUE self)
475{
476 const char *err;
477 rb_mutex_t *mutex = mutex_ptr(self);
478 rb_thread_t *th = GET_THREAD();
479
480 err = rb_mutex_unlock_th(mutex, th, GET_EC()->fiber_ptr);
481 if (err) rb_raise(rb_eThreadError, "%s", err);
482
483 return self;
484}
485
486#if defined(HAVE_WORKING_FORK)
487static void
488rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
489{
490 rb_mutex_abandon_all(th->keeping_mutexes);
491 th->keeping_mutexes = NULL;
492}
493
494static void
495rb_mutex_abandon_locking_mutex(rb_thread_t *th)
496{
497 if (th->locking_mutex) {
498 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
499
500 list_head_init(&mutex->waitq);
501 th->locking_mutex = Qfalse;
502 }
503}
504
505static void
506rb_mutex_abandon_all(rb_mutex_t *mutexes)
507{
508 rb_mutex_t *mutex;
509
510 while (mutexes) {
511 mutex = mutexes;
512 mutexes = mutex->next_mutex;
513 mutex->fiber = 0;
514 mutex->next_mutex = 0;
515 list_head_init(&mutex->waitq);
516 }
517}
518#endif
519
520static VALUE
521rb_mutex_sleep_forever(VALUE self)
522{
523 rb_thread_sleep_deadly_allow_spurious_wakeup(self);
524 return Qnil;
525}
526
527static VALUE
528rb_mutex_wait_for(VALUE time)
529{
530 rb_hrtime_t *rel = (rb_hrtime_t *)time;
531 /* permit spurious check */
532 sleep_hrtime(GET_THREAD(), *rel, 0);
533 return Qnil;
534}
535
536VALUE
537rb_mutex_sleep(VALUE self, VALUE timeout)
538{
539 struct timeval t;
540
541 if (!NIL_P(timeout)) {
542 t = rb_time_interval(timeout);
543 }
544
545 rb_mutex_unlock(self);
546 time_t beg = time(0);
547
548 VALUE scheduler = rb_scheduler_current();
549 if (scheduler != Qnil) {
550 rb_scheduler_kernel_sleep(scheduler, timeout);
551 mutex_lock_uninterruptible(self);
552 } else {
553 if (NIL_P(timeout)) {
554 rb_ensure(rb_mutex_sleep_forever, self, mutex_lock_uninterruptible, self);
555 } else {
556 rb_hrtime_t rel = rb_timeval2hrtime(&t);
557 rb_ensure(rb_mutex_wait_for, (VALUE)&rel, mutex_lock_uninterruptible, self);
558 }
559 }
560
561 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
562 time_t end = time(0) - beg;
563 return TIMET2NUM(end);
564}
565
566/*
567 * call-seq:
568 * mutex.sleep(timeout = nil) -> number
569 *
570 * Releases the lock and sleeps +timeout+ seconds if it is given and
571 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
572 * the current thread.
573 *
574 * When the thread is next woken up, it will attempt to reacquire
575 * the lock.
576 *
577 * Note that this method can wake up without an explicit Thread#wakeup call;
578 * for example, it may be woken when the thread receives a signal.
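 *
 * A small sketch of typical use, assuming the mutex is already held
 * (identifiers are illustrative, not from this file):
 *
 *    m = Mutex.new
 *    m.synchronize do
 *      # releases the lock, waits up to 2 seconds, then reacquires the lock;
 *      # the return value is the number of seconds actually slept
 *      slept = m.sleep(2)
 *    end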
579 */
580static VALUE
581mutex_sleep(int argc, VALUE *argv, VALUE self)
582{
583 VALUE timeout;
584
585 timeout = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
586 return rb_mutex_sleep(self, timeout);
587}
588
589/*
590 * call-seq:
591 * mutex.synchronize { ... } -> result of the block
592 *
593 * Obtains a lock, runs the block, and releases the lock when the block
594 * completes. See the example under +Mutex+.
595 */
596
597VALUE
598rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
599{
600 rb_mutex_lock(mutex);
601 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
602}
603
604/*
605 * call-seq:
606 * mutex.synchronize { ... } -> result of the block
607 *
608 * Obtains a lock, runs the block, and releases the lock when the block
609 * completes. See the example under +Mutex+.
610 */
611static VALUE
612rb_mutex_synchronize_m(VALUE self)
613{
614 if (!rb_block_given_p()) {
615 rb_raise(rb_eThreadError, "must be called with a block");
616 }
617
618 return rb_mutex_synchronize(self, rb_yield, Qundef);
619}
620
621void rb_mutex_allow_trap(VALUE self, int val)
622{
623 Check_TypedStruct(self, &mutex_data_type);
624
625 if (val)
626 FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
627 else
628 FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
629}
630
631/* Queue */
632
633#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
634PACKED_STRUCT_UNALIGNED(struct rb_queue {
635 struct list_head waitq;
636 rb_serial_t fork_gen;
637 const VALUE que;
638 int num_waiting;
639});
640
641#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
642#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
643PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
644 struct rb_queue q;
645 int num_waiting_push;
646 struct list_head pushq;
647 long max;
648});
649
650static void
651queue_mark(void *ptr)
652{
653 struct rb_queue *q = ptr;
654
655 /* no need to mark threads in waitq, they are on stack */
656 rb_gc_mark(q->que);
657}
658
659static size_t
660queue_memsize(const void *ptr)
661{
662 return sizeof(struct rb_queue);
663}
664
665static const rb_data_type_t queue_data_type = {
666 "queue",
667 {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
668 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
669};
670
671static VALUE
672queue_alloc(VALUE klass)
673{
674 VALUE obj;
675 struct rb_queue *q;
676
677 obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
678 list_head_init(queue_waitq(q));
679 return obj;
680}
681
682static int
683queue_fork_check(struct rb_queue *q)
684{
685 rb_serial_t fork_gen = GET_VM()->fork_gen;
686
687 if (q->fork_gen == fork_gen) {
688 return 0;
689 }
690 /* forked children can't reach into parent thread stacks */
691 q->fork_gen = fork_gen;
692 list_head_init(queue_waitq(q));
693 q->num_waiting = 0;
694 return 1;
695}
696
697static struct rb_queue *
698queue_ptr(VALUE obj)
699{
700 struct rb_queue *q;
701
702 TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
703 queue_fork_check(q);
704
705 return q;
706}
707
708#define QUEUE_CLOSED FL_USER5
709
710static void
711szqueue_mark(void *ptr)
712{
713 struct rb_szqueue *sq = ptr;
714
715 queue_mark(&sq->q);
716}
717
718static size_t
719szqueue_memsize(const void *ptr)
720{
721 return sizeof(struct rb_szqueue);
722}
723
724static const rb_data_type_t szqueue_data_type = {
725 "sized_queue",
726 {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
727 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
728};
729
730static VALUE
731szqueue_alloc(VALUE klass)
732{
733 struct rb_szqueue *sq;
734 VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
735 &szqueue_data_type, sq);
736 list_head_init(szqueue_waitq(sq));
737 list_head_init(szqueue_pushq(sq));
738 return obj;
739}
740
741static struct rb_szqueue *
742szqueue_ptr(VALUE obj)
743{
744 struct rb_szqueue *sq;
745
746 TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
747 if (queue_fork_check(&sq->q)) {
748 list_head_init(szqueue_pushq(sq));
749 sq->num_waiting_push = 0;
750 }
751
752 return sq;
753}
754
755static VALUE
756ary_buf_new(void)
757{
758 return rb_ary_tmp_new(1);
759}
760
761static VALUE
762check_array(VALUE obj, VALUE ary)
763{
764 if (!RB_TYPE_P(ary, T_ARRAY)) {
765 rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
766 }
767 return ary;
768}
769
770static long
771queue_length(VALUE self, struct rb_queue *q)
772{
773 return RARRAY_LEN(check_array(self, q->que));
774}
775
776static int
777queue_closed_p(VALUE self)
778{
779 return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
780}
781
782/*
783 * Document-class: ClosedQueueError
784 *
785 * The exception class which will be raised when pushing into a closed
786 * Queue. See Queue#close and SizedQueue#close.
787 */
788
789NORETURN(static void raise_closed_queue_error(VALUE self));
790
791static void
792raise_closed_queue_error(VALUE self)
793{
794 rb_raise(rb_eClosedQueueError, "queue closed");
795}
796
797static VALUE
798queue_closed_result(VALUE self, struct rb_queue *q)
799{
800 assert(queue_length(self, q) == 0);
801 return Qnil;
802}
803
804/*
805 * Document-class: Queue
806 *
807 * The Queue class implements multi-producer, multi-consumer queues.
808 * It is especially useful in threaded programming when information
809 * must be exchanged safely between multiple threads. The Queue class
810 * implements all the required locking semantics.
811 *
812 * The class implements a FIFO (first in, first out) type of queue. In a FIFO queue, the first
813 * tasks added are the first retrieved.
814 *
815 * Example:
816 *
817 * queue = Queue.new
818 *
819 * producer = Thread.new do
820 * 5.times do |i|
821 * sleep rand(i) # simulate expense
822 * queue << i
823 * puts "#{i} produced"
824 * end
825 * end
826 *
827 * consumer = Thread.new do
828 * 5.times do |i|
829 * value = queue.pop
830 * sleep rand(i/2) # simulate expense
831 * puts "consumed #{value}"
832 * end
833 * end
834 *
835 * consumer.join
836 *
837 */
838
839/*
840 * Document-method: Queue::new
841 *
842 * Creates a new queue instance.
843 */
844
845static VALUE
846rb_queue_initialize(VALUE self)
847{
848 struct rb_queue *q = queue_ptr(self);
849 RB_OBJ_WRITE(self, &q->que, ary_buf_new());
850 list_head_init(queue_waitq(q));
851 return self;
852}
853
854static VALUE
855queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
856{
857 if (queue_closed_p(self)) {
858 raise_closed_queue_error(self);
859 }
860 rb_ary_push(check_array(self, q->que), obj);
861 wakeup_one(queue_waitq(q));
862 return self;
863}
864
865/*
866 * Document-method: Queue#close
867 * call-seq:
868 * close
869 *
870 * Closes the queue. A closed queue cannot be re-opened.
871 *
872 * After the call to close completes, the following are true:
873 *
874 * - +closed?+ will return true
875 *
876 * - +close+ will be ignored.
877 *
878 * - calling enq/push/<< will raise a +ClosedQueueError+.
879 *
880 * - when +empty?+ is false, calling deq/pop/shift will return an object
881 * from the queue as usual.
882 * - when +empty?+ is true, deq(false) will not suspend the thread and will return nil.
883 * deq(true) will raise a +ThreadError+.
884 *
885 * ClosedQueueError inherits from StopIteration, so you can break out of a loop block.
886 *
887 * Example:
888 *
889 * q = Queue.new
890 * Thread.new{
891 * while e = q.deq # wait for nil to break loop
892 * # ...
893 * end
894 * }
895 * q.close
896 */
897
898static VALUE
899rb_queue_close(VALUE self)
900{
901 struct rb_queue *q = queue_ptr(self);
902
903 if (!queue_closed_p(self)) {
904 FL_SET(self, QUEUE_CLOSED);
905
906 wakeup_all(queue_waitq(q));
907 }
908
909 return self;
910}
911
912/*
913 * Document-method: Queue#closed?
914 * call-seq: closed?
915 *
916 * Returns +true+ if the queue is closed.
917 */
918
919static VALUE
920rb_queue_closed_p(VALUE self)
921{
922 return queue_closed_p(self) ? Qtrue : Qfalse;
923}
924
925/*
926 * Document-method: Queue#push
927 * call-seq:
928 * push(object)
929 * enq(object)
930 * <<(object)
931 *
932 * Pushes the given +object+ to the queue.
933 */
934
935static VALUE
936rb_queue_push(VALUE self, VALUE obj)
937{
938 return queue_do_push(self, queue_ptr(self), obj);
939}
940
941static VALUE
942queue_sleep(VALUE self)
943{
944 rb_thread_sleep_deadly_allow_spurious_wakeup(self);
945 return Qnil;
946}
947
948struct queue_waiter {
949 struct sync_waiter w;
950 union {
951 struct rb_queue *q;
952 struct rb_szqueue *sq;
953 } as;
954};
955
956static VALUE
957queue_sleep_done(VALUE p)
958{
959 struct queue_waiter *qw = (struct queue_waiter *)p;
960
961 list_del(&qw->w.node);
962 qw->as.q->num_waiting--;
963
964 COROUTINE_STACK_FREE(qw);
965
966 return Qfalse;
967}
968
969static VALUE
970szqueue_sleep_done(VALUE p)
971{
972 struct queue_waiter *qw = (struct queue_waiter *)p;
973
974 list_del(&qw->w.node);
975 qw->as.sq->num_waiting_push--;
976
977 COROUTINE_STACK_FREE(qw);
978
979 return Qfalse;
980}
981
982static VALUE
983queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
984{
985 check_array(self, q->que);
986
987 while (RARRAY_LEN(q->que) == 0) {
988 if (!should_block) {
989 rb_raise(rb_eThreadError, "queue empty");
990 }
991 else if (queue_closed_p(self)) {
992 return queue_closed_result(self, q);
993 }
994 else {
995 rb_execution_context_t *ec = GET_EC();
996
997 assert(RARRAY_LEN(q->que) == 0);
998 assert(queue_closed_p(self) == 0);
999
1000 COROUTINE_STACK_LOCAL(struct queue_waiter, qw);
1001
1002 qw->w.self = self;
1003 qw->w.th = ec->thread_ptr;
1004 qw->w.fiber = ec->fiber_ptr;
1005
1006 qw->as.q = q;
1007 list_add_tail(queue_waitq(qw->as.q), &qw->w.node);
1008 qw->as.q->num_waiting++;
1009
1010 rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)qw);
1011 }
1012 }
1013
1014 return rb_ary_shift(q->que);
1015}
1016
1017static int
1018queue_pop_should_block(int argc, const VALUE *argv)
1019{
1020 int should_block = 1;
1021 rb_check_arity(argc, 0, 1);
1022 if (argc > 0) {
1023 should_block = !RTEST(argv[0]);
1024 }
1025 return should_block;
1026}
1027
1028/*
1029 * Document-method: Queue#pop
1030 * call-seq:
1031 * pop(non_block=false)
1032 * deq(non_block=false)
1033 * shift(non_block=false)
1034 *
1035 * Retrieves data from the queue.
1036 *
1037 * If the queue is empty, the calling thread is suspended until data is pushed
1038 * onto the queue. If +non_block+ is true, the thread isn't suspended, and
1039 * +ThreadError+ is raised.
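 *
 * Illustrative sketch of both forms (not part of this file):
 *
 *    q = Queue.new
 *    q << 1
 *    q.pop          #=> 1 (would block here if the queue were empty)
 *    begin
 *      q.pop(true)  # non_block = true on an empty queue
 *    rescue ThreadError
 *      # raised instead of blocking
 *    end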
1040 */
1041
1042static VALUE
1043rb_queue_pop(int argc, VALUE *argv, VALUE self)
1044{
1045 int should_block = queue_pop_should_block(argc, argv);
1046 return queue_do_pop(self, queue_ptr(self), should_block);
1047}
1048
1049/*
1050 * Document-method: Queue#empty?
1051 * call-seq: empty?
1052 *
1053 * Returns +true+ if the queue is empty.
1054 */
1055
1056static VALUE
1057rb_queue_empty_p(VALUE self)
1058{
1059 return queue_length(self, queue_ptr(self)) == 0 ? Qtrue : Qfalse;
1060}
1061
1062/*
1063 * Document-method: Queue#clear
1064 *
1065 * Removes all objects from the queue.
1066 */
1067
1068static VALUE
1069rb_queue_clear(VALUE self)
1070{
1071 struct rb_queue *q = queue_ptr(self);
1072
1073 rb_ary_clear(check_array(self, q->que));
1074 return self;
1075}
1076
1077/*
1078 * Document-method: Queue#length
1079 * call-seq:
1080 * length
1081 * size
1082 *
1083 * Returns the length of the queue.
1084 */
1085
1086static VALUE
1087rb_queue_length(VALUE self)
1088{
1089 return LONG2NUM(queue_length(self, queue_ptr(self)));
1090}
1091
1092/*
1093 * Document-method: Queue#num_waiting
1094 *
1095 * Returns the number of threads waiting on the queue.
1096 */
1097
1098static VALUE
1099rb_queue_num_waiting(VALUE self)
1100{
1101 struct rb_queue *q = queue_ptr(self);
1102
1103 return INT2NUM(q->num_waiting);
1104}
1105
1106/*
1107 * Document-class: SizedQueue
1108 *
1109 * This class represents queues of specified size capacity. The push operation
1110 * may block if the queue is at capacity.
1111 *
1112 * See Queue for an example of how a SizedQueue works.
1113 */
1114
1115/*
1116 * Document-method: SizedQueue::new
1117 * call-seq: new(max)
1118 *
1119 * Creates a fixed-length queue with a maximum size of +max+.
1120 */
1121
1122static VALUE
1123rb_szqueue_initialize(VALUE self, VALUE vmax)
1124{
1125 long max;
1126 struct rb_szqueue *sq = szqueue_ptr(self);
1127
1128 max = NUM2LONG(vmax);
1129 if (max <= 0) {
1130 rb_raise(rb_eArgError, "queue size must be positive");
1131 }
1132
1133 RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
1134 list_head_init(szqueue_waitq(sq));
1135 list_head_init(szqueue_pushq(sq));
1136 sq->max = max;
1137
1138 return self;
1139}
1140
1141/*
1142 * Document-method: SizedQueue#close
1143 * call-seq:
1144 * close
1145 *
1146 * Similar to Queue#close.
1147 *
1148 * The difference is behavior with waiting enqueuing threads.
1149 *
1150 * If there are waiting enqueuing threads, they are interrupted by
1151 * raising ClosedQueueError('queue closed').
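 *
 * Sketch of a blocked producer being interrupted (names are illustrative):
 *
 *    sq = SizedQueue.new(1)
 *    sq << :a
 *    producer = Thread.new { sq << :b }   # blocks: the queue is full
 *    sq.close                             # producer raises ClosedQueueError
 *    producer.join rescue nil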
1152 */
1153static VALUE
1154rb_szqueue_close(VALUE self)
1155{
1156 if (!queue_closed_p(self)) {
1157 struct rb_szqueue *sq = szqueue_ptr(self);
1158
1159 FL_SET(self, QUEUE_CLOSED);
1160 wakeup_all(szqueue_waitq(sq));
1161 wakeup_all(szqueue_pushq(sq));
1162 }
1163 return self;
1164}
1165
1166/*
1167 * Document-method: SizedQueue#max
1168 *
1169 * Returns the maximum size of the queue.
1170 */
1171
1172static VALUE
1173rb_szqueue_max_get(VALUE self)
1174{
1175 return LONG2NUM(szqueue_ptr(self)->max);
1176}
1177
1178/*
1179 * Document-method: SizedQueue#max=
1180 * call-seq: max=(number)
1181 *
1182 * Sets the maximum size of the queue to the given +number+.
1183 */
1184
1185static VALUE
1186rb_szqueue_max_set(VALUE self, VALUE vmax)
1187{
1188 long max = NUM2LONG(vmax);
1189 long diff = 0;
1190 struct rb_szqueue *sq = szqueue_ptr(self);
1191
1192 if (max <= 0) {
1193 rb_raise(rb_eArgError, "queue size must be positive");
1194 }
1195 if (max > sq->max) {
1196 diff = max - sq->max;
1197 }
1198 sq->max = max;
1199 sync_wakeup(szqueue_pushq(sq), diff);
1200 return vmax;
1201}
1202
1203static int
1204szqueue_push_should_block(int argc, const VALUE *argv)
1205{
1206 int should_block = 1;
1207 rb_check_arity(argc, 1, 2);
1208 if (argc > 1) {
1209 should_block = !RTEST(argv[1]);
1210 }
1211 return should_block;
1212}
1213
1214/*
1215 * Document-method: SizedQueue#push
1216 * call-seq:
1217 * push(object, non_block=false)
1218 * enq(object, non_block=false)
1219 * <<(object)
1220 *
1221 * Pushes +object+ to the queue.
1222 *
1223 * If there is no space left in the queue, waits until space becomes
1224 * available, unless +non_block+ is true. If +non_block+ is true, the
1225 * thread isn't suspended, and +ThreadError+ is raised.
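 *
 * Hedged example of the non-blocking form (values are illustrative):
 *
 *    sq = SizedQueue.new(1)
 *    sq.push(:a)
 *    begin
 *      sq.push(:b, true)   # non_block = true while the queue is full
 *    rescue ThreadError
 *      # raised instead of waiting for space
 *    end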
1226 */
1227
1228static VALUE
1229rb_szqueue_push(int argc, VALUE *argv, VALUE self)
1230{
1231 struct rb_szqueue *sq = szqueue_ptr(self);
1232 int should_block = szqueue_push_should_block(argc, argv);
1233
1234 while (queue_length(self, &sq->q) >= sq->max) {
1235 if (!should_block) {
1236 rb_raise(rb_eThreadError, "queue full");
1237 }
1238 else if (queue_closed_p(self)) {
1239 break;
1240 }
1241 else {
1242 rb_execution_context_t *ec = GET_EC();
1243 COROUTINE_STACK_LOCAL(struct queue_waiter, qw);
1244 struct list_head *pushq = szqueue_pushq(sq);
1245
1246 qw->w.self = self;
1247 qw->w.th = ec->thread_ptr;
1248 qw->w.fiber = ec->fiber_ptr;
1249
1250 qw->as.sq = sq;
1251 list_add_tail(pushq, &qw->w.node);
1252 sq->num_waiting_push++;
1253
1254 rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)qw);
1255 }
1256 }
1257
1258 if (queue_closed_p(self)) {
1259 raise_closed_queue_error(self);
1260 }
1261
1262 return queue_do_push(self, &sq->q, argv[0]);
1263}
1264
1265static VALUE
1266szqueue_do_pop(VALUE self, int should_block)
1267{
1268 struct rb_szqueue *sq = szqueue_ptr(self);
1269 VALUE retval = queue_do_pop(self, &sq->q, should_block);
1270
1271 if (queue_length(self, &sq->q) < sq->max) {
1272 wakeup_one(szqueue_pushq(sq));
1273 }
1274
1275 return retval;
1276}
1277
1278/*
1279 * Document-method: SizedQueue#pop
1280 * call-seq:
1281 * pop(non_block=false)
1282 * deq(non_block=false)
1283 * shift(non_block=false)
1284 *
1285 * Retrieves data from the queue.
1286 *
1287 * If the queue is empty, the calling thread is suspended until data is pushed
1288 * onto the queue. If +non_block+ is true, the thread isn't suspended, and
1289 * +ThreadError+ is raised.
1290 */
1291
1292static VALUE
1293rb_szqueue_pop(int argc, VALUE *argv, VALUE self)
1294{
1295 int should_block = queue_pop_should_block(argc, argv);
1296 return szqueue_do_pop(self, should_block);
1297}
1298
1299/*
1300 * Document-method: SizedQueue#clear
1301 *
1302 * Removes all objects from the queue.
1303 */
1304
1305static VALUE
1306rb_szqueue_clear(VALUE self)
1307{
1308 struct rb_szqueue *sq = szqueue_ptr(self);
1309
1310 rb_ary_clear(check_array(self, sq->q.que));
1311 wakeup_all(szqueue_pushq(sq));
1312 return self;
1313}
1314
1315/*
1316 * Document-method: SizedQueue#length
1317 * call-seq:
1318 * length
1319 * size
1320 *
1321 * Returns the length of the queue.
1322 */
1323
1324static VALUE
1325rb_szqueue_length(VALUE self)
1326{
1327 struct rb_szqueue *sq = szqueue_ptr(self);
1328
1329 return LONG2NUM(queue_length(self, &sq->q));
1330}
1331
1332/*
1333 * Document-method: SizedQueue#num_waiting
1334 *
1335 * Returns the number of threads waiting on the queue.
1336 */
1337
1338static VALUE
1339rb_szqueue_num_waiting(VALUE self)
1340{
1341 struct rb_szqueue *sq = szqueue_ptr(self);
1342
1343 return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
1344}
1345
1346/*
1347 * Document-method: SizedQueue#empty?
1348 * call-seq: empty?
1349 *
1350 * Returns +true+ if the queue is empty.
1351 */
1352
1353static VALUE
1354rb_szqueue_empty_p(VALUE self)
1355{
1356 struct rb_szqueue *sq = szqueue_ptr(self);
1357
1358 return queue_length(self, &sq->q) == 0 ? Qtrue : Qfalse;
1359}
1360
1361
1362/* ConditionalVariable */
1363struct rb_condvar {
1364 struct list_head waitq;
1365 rb_serial_t fork_gen;
1366};
1367
1368/*
1369 * Document-class: ConditionVariable
1370 *
1371 * ConditionVariable objects augment class Mutex. Using condition variables,
1372 * it is possible to suspend while in the middle of a critical section until a
1373 * resource becomes available.
1374 *
1375 * Example:
1376 *
1377 * mutex = Mutex.new
1378 * resource = ConditionVariable.new
1379 *
1380 * a = Thread.new {
1381 * mutex.synchronize {
1382 * # Thread 'a' now needs the resource
1383 * resource.wait(mutex)
1384 * # 'a' can now have the resource
1385 * }
1386 * }
1387 *
1388 * b = Thread.new {
1389 * mutex.synchronize {
1390 * # Thread 'b' has finished using the resource
1391 * resource.signal
1392 * }
1393 * }
1394 */
1395
1396static size_t
1397condvar_memsize(const void *ptr)
1398{
1399 return sizeof(struct rb_condvar);
1400}
1401
1402static const rb_data_type_t cv_data_type = {
1403 "condvar",
1404 {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
1405 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
1406};
1407
1408static struct rb_condvar *
1409condvar_ptr(VALUE self)
1410{
1411 struct rb_condvar *cv;
1412 rb_serial_t fork_gen = GET_VM()->fork_gen;
1413
1414 TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
1415
1416 /* forked children can't reach into parent thread stacks */
1417 if (cv->fork_gen != fork_gen) {
1418 cv->fork_gen = fork_gen;
1419 list_head_init(&cv->waitq);
1420 }
1421
1422 return cv;
1423}
1424
1425static VALUE
1426condvar_alloc(VALUE klass)
1427{
1428 struct rb_condvar *cv;
1429 VALUE obj;
1430
1431 obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
1432 list_head_init(&cv->waitq);
1433
1434 return obj;
1435}
1436
1437/*
1438 * Document-method: ConditionVariable::new
1439 *
1440 * Creates a new condition variable instance.
1441 */
1442
1443static VALUE
1444rb_condvar_initialize(VALUE self)
1445{
1446 struct rb_condvar *cv = condvar_ptr(self);
1447 list_head_init(&cv->waitq);
1448 return self;
1449}
1450
1451struct sleep_call {
1452 VALUE mutex;
1453 VALUE timeout;
1454};
1455
1456static ID id_sleep;
1457
1458static VALUE
1459do_sleep(VALUE args)
1460{
1461 struct sleep_call *p = (struct sleep_call *)args;
1462 return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
1463}
1464
1465/*
1466 * Document-method: ConditionVariable#wait
1467 * call-seq: wait(mutex, timeout=nil)
1468 *
1469 * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
1470 *
1471 * If +timeout+ is given, this method returns after +timeout+ seconds have passed,
1472 * even if no other thread has signalled.
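 *
 * Typical wait loop, shown as a sketch (identifiers are illustrative):
 *
 *    mutex = Mutex.new
 *    cond  = ConditionVariable.new
 *    ready = false
 *
 *    mutex.synchronize do
 *      # wait releases the mutex while blocked and reacquires it on return;
 *      # the predicate is rechecked because wakeups may be spurious
 *      cond.wait(mutex) until ready
 *    end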
1473 */
1474
1475static VALUE
1476rb_condvar_wait(int argc, VALUE *argv, VALUE self)
1477{
1478 rb_execution_context_t *ec = GET_EC();
1479
1480 struct rb_condvar *cv = condvar_ptr(self);
1481 struct sleep_call args;
1482
1483 rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);
1484
1485 COROUTINE_STACK_LOCAL(struct sync_waiter, w);
1486 w->self = args.mutex;
1487 w->th = ec->thread_ptr;
1488 w->fiber = ec->fiber_ptr;
1489
1490 list_add_tail(&cv->waitq, &w->node);
1491 rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)w);
1492
1493 return self;
1494}
1495
1496/*
1497 * Document-method: ConditionVariable#signal
1498 *
1499 * Wakes up the first thread in line waiting on this condition variable.
1500 */
1501
1502static VALUE
1503rb_condvar_signal(VALUE self)
1504{
1505 struct rb_condvar *cv = condvar_ptr(self);
1506 wakeup_one(&cv->waitq);
1507 return self;
1508}
1509
1510/*
1511 * Document-method: ConditionVariable#broadcast
1512 *
1513 * Wakes up all threads waiting on this condition variable.
1514 */
1515
1516static VALUE
1517rb_condvar_broadcast(VALUE self)
1518{
1519 struct rb_condvar *cv = condvar_ptr(self);
1520 wakeup_all(&cv->waitq);
1521 return self;
1522}
1523
1524NORETURN(static VALUE undumpable(VALUE obj));
1525/* :nodoc: */
1526static VALUE
1527undumpable(VALUE obj)
1528{
1529 rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
1530 UNREACHABLE_RETURN(Qnil);
1531}
1532
1533static VALUE
1534define_thread_class(VALUE outer, const char *name, VALUE super)
1535{
1536 VALUE klass = rb_define_class_under(outer, name, super);
1537 rb_define_const(rb_cObject, name, klass);
1538 return klass;
1539}
1540
1541static void
1542Init_thread_sync(void)
1543{
1544#undef rb_intern
1545#if 0
1546 rb_cMutex = rb_define_class("Mutex", rb_cObject); /* teach rdoc Mutex */
1547 rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject); /* teach rdoc ConditionVariable */
1548 rb_cQueue = rb_define_class("Queue", rb_cObject); /* teach rdoc Queue */
1549 rb_cSizedQueue = rb_define_class("SizedQueue", rb_cObject); /* teach rdoc SizedQueue */
1550#endif
1551
1552#define DEFINE_CLASS(name, super) \
1553 rb_c##name = define_thread_class(rb_cThread, #name, rb_c##super)
1554
1555 /* Mutex */
1556 DEFINE_CLASS(Mutex, Object);
1557 rb_define_alloc_func(rb_cMutex, mutex_alloc);
1558 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
1559 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
1560 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
1561 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
1562 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
1563 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
1564 rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
1565 rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);
1566
1567 /* Queue */
1568 DEFINE_CLASS(Queue, Object);
1569 rb_define_alloc_func(rb_cQueue, queue_alloc);
1570
1571 rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);
1572
1573 rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, 0);
1574 rb_undef_method(rb_cQueue, "initialize_copy");
1575 rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
1576 rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
1577 rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
1578 rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
1579 rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
1580 rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
1581 rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
1582 rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
1583 rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
1584
1585 rb_define_alias(rb_cQueue, "enq", "push");
1586 rb_define_alias(rb_cQueue, "<<", "push");
1587 rb_define_alias(rb_cQueue, "deq", "pop");
1588 rb_define_alias(rb_cQueue, "shift", "pop");
1589 rb_define_alias(rb_cQueue, "size", "length");
1590
1591 DEFINE_CLASS(SizedQueue, Queue);
1592 rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);
1593
1594 rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
1595 rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
1596 rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
1597 rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
1598 rb_define_method(rb_cSizedQueue, "push", rb_szqueue_push, -1);
1599 rb_define_method(rb_cSizedQueue, "pop", rb_szqueue_pop, -1);
1600 rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
1601 rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
1602 rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
1603 rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
1604
1605 rb_define_alias(rb_cSizedQueue, "enq", "push");
1606 rb_define_alias(rb_cSizedQueue, "<<", "push");
1607 rb_define_alias(rb_cSizedQueue, "deq", "pop");
1608 rb_define_alias(rb_cSizedQueue, "shift", "pop");
1609 rb_define_alias(rb_cSizedQueue, "size", "length");
1610
1611 /* CVar */
1612 DEFINE_CLASS(ConditionVariable, Object);
1613 rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);
1614
1615 id_sleep = rb_intern("sleep");
1616
1617 rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
1618 rb_undef_method(rb_cConditionVariable, "initialize_copy");
1619 rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
1620 rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
1621 rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
1622 rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
1623
1624 rb_provide("thread.rb");
1625}
Definition: win32.c:142