#include "ccan/list/list.h"

static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;

#define MUTEX_ALLOW_TRAP FL_USER1
sync_wakeup(struct list_head *head, long max)
    list_for_each_safe(head, cur, next, node) {
        list_del_init(&cur->node);
        if (--max == 0)
            return;
wakeup_one(struct list_head *head)

wakeup_all(struct list_head *head)
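The waiter queues used throughout this file are intrusive ccan/list lists: every sleeping thread or fiber embeds a list_node, the lock/wait paths append that node to a waitq head, and the wakeup paths walk the list with the *_safe iterator so nodes can be unlinked during traversal. A minimal standalone sketch of that pattern (the struct and variable names here are illustrative, not the ones in thread_sync.c):

#include <stdio.h>
#include "ccan/list/list.h"

struct waiter {
    int id;
    struct list_node node;   /* intrusive link embedded in the waiter */
};

int main(void)
{
    struct list_head waitq;
    struct waiter a = { .id = 1 }, b = { .id = 2 };
    struct waiter *cur, *next;

    list_head_init(&waitq);              /* empty waitq, as in mutex_alloc() below */
    list_add_tail(&waitq, &a.node);      /* enqueue a waiter, as in do_mutex_lock() below */
    list_add_tail(&waitq, &b.node);

    /* unlink each waiter while iterating, as in sync_wakeup() */
    list_for_each_safe(&waitq, cur, next, node) {
        list_del_init(&cur->node);
        printf("woke waiter %d\n", cur->id);
    }
    return 0;
}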
#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);

#define mutex_mark ((void(*)(void*))0)

mutex_memsize(const void *ptr)

mutex_alloc(VALUE klass)
    list_head_init(&mutex->waitq);

    return mutex_alloc(rb_cMutex);
    while (*keeping_mutexes && *keeping_mutexes != mutex) {
        keeping_mutexes = &(*keeping_mutexes)->next_mutex;
    if (*keeping_mutexes) {
    thread_mutex_insert(th, mutex);
    if (mutex->fiber == 0) {
static VALUE call_rb_scheduler_block(VALUE mutex) {

delete_from_waitq(VALUE v)

do_mutex_lock(VALUE self, int interruptible_p)
    if (scheduler != Qnil) {
        list_add_tail(&mutex->waitq, &w->node);
    rb_ractor_sleeper_threads_inc(th->ractor);
    list_add_tail(&mutex->waitq, &w->node);
    native_sleep(th, timeout);
    if (patrol_thread == th)
        patrol_thread = NULL;
    th->status = prev_status;
    rb_ractor_sleeper_threads_dec(th->ractor);
    if (interruptible_p) {
    if (mutex_owned_p(fiber, mutex) == Qfalse)
        rb_bug("do_mutex_lock: mutex is not owned.");
    return do_mutex_lock(self, 0);

    return do_mutex_lock(self, 1);

    return mutex_owned_p(fiber, mutex);
    if (mutex->fiber == 0) {
        err = "Attempt to unlock a mutex which is not locked";
        err = "Attempt to unlock a mutex which is locked by another thread/fiber";
    list_for_each_safe(&mutex->waitq, cur, next, node) {
        list_del_init(&cur->node);
        switch (cur->th->status) {
            rb_bug("unexpected THREAD_STOPPED");
            rb_bug("unexpected THREAD_KILLED");
    thread_mutex_remove(th, mutex);
    err = rb_mutex_unlock_th(mutex, th, GET_EC()->fiber_ptr);
#if defined(HAVE_WORKING_FORK)
    list_head_init(&mutex->waitq);
    list_head_init(&mutex->waitq);

    rb_thread_sleep_deadly_allow_spurious_wakeup(self);

rb_mutex_wait_for(VALUE time)
    sleep_hrtime(GET_THREAD(), *rel, 0);

    if (!NIL_P(timeout)) {
    time_t beg = time(0);
    if (scheduler != Qnil) {
        mutex_lock_uninterruptible(self);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, self, mutex_lock_uninterruptible, self);
    rb_ensure(rb_mutex_wait_for, (VALUE)&rel, mutex_lock_uninterruptible, self);
    time_t end = time(0) - beg;
    return TIMET2NUM(end);
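The two rb_ensure calls above come from the Mutex#sleep path: the body proc sleeps (forever or for *rel), and the ensure proc re-acquires the mutex no matter how the sleep ends. rb_ensure is the C-level counterpart of begin/ensure; a hedged sketch of the same shape using only public C API calls (the helper names here are placeholders, not the real thread_sync.c helpers):

#include "ruby.h"

/* placeholder body proc: may raise or be interrupted */
static VALUE
do_sleep(VALUE seconds)
{
    rb_thread_sleep(NUM2INT(seconds));
    return Qnil;
}

/* placeholder ensure proc: runs no matter how do_sleep exits */
static VALUE
reacquire(VALUE mutex)
{
    return rb_mutex_lock(mutex);
}

static VALUE
sleep_with_mutex_released(VALUE mutex, VALUE seconds)
{
    rb_mutex_unlock(mutex);
    /* reacquire() is guaranteed to run, like an ensure clause */
    return rb_ensure(do_sleep, seconds, reacquire, mutex);
}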
rb_mutex_synchronize_m(VALUE self)
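rb_mutex_synchronize_m is the C implementation behind Mutex#synchronize. The same primitives are exported to extensions through the public C API (rb_mutex_new, rb_mutex_lock, rb_mutex_unlock, rb_mutex_synchronize); a small sketch, where the callback and function names are illustrative:

#include "ruby.h"

/* callback invoked while the mutex is held; the name is illustrative */
static VALUE
guarded_work(VALUE arg)
{
    return rb_funcall(arg, rb_intern("inspect"), 0);
}

static VALUE
run_guarded(VALUE obj)
{
    VALUE mutex = rb_mutex_new();

    /* explicit lock/unlock ... */
    rb_mutex_lock(mutex);
    rb_mutex_unlock(mutex);

    /* ... or the synchronize form, which releases the lock even if
     * guarded_work raises */
    return rb_mutex_synchronize(mutex, guarded_work, obj);
}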
#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
    struct list_head waitq;

#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
    int num_waiting_push;
    struct list_head pushq;
    struct rb_queue *q = ptr;

queue_memsize(const void *ptr)
    return sizeof(struct rb_queue);

queue_alloc(VALUE klass)

queue_fork_check(struct rb_queue *q)
    if (q->fork_gen == fork_gen) {
    q->fork_gen = fork_gen;

static struct rb_queue *

#define QUEUE_CLOSED FL_USER5
szqueue_mark(void *ptr)
    struct rb_szqueue *sq = ptr;

szqueue_memsize(const void *ptr)
    return sizeof(struct rb_szqueue);

szqueue_alloc(VALUE klass)
    struct rb_szqueue *sq;
        &szqueue_data_type, sq);
static struct rb_szqueue *
szqueue_ptr(VALUE obj)
    struct rb_szqueue *sq;
    if (queue_fork_check(&sq->q)) {
        sq->num_waiting_push = 0;
    if (!RB_TYPE_P(ary, T_ARRAY)) {

queue_length(VALUE self, struct rb_queue *q)

queue_closed_p(VALUE self)

raise_closed_queue_error(VALUE self)
    rb_raise(rb_eClosedQueueError, "queue closed");

queue_closed_result(VALUE self, struct rb_queue *q)
    assert(queue_length(self, q) == 0);
rb_queue_initialize(VALUE self)
    struct rb_queue *q = queue_ptr(self);

queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);

rb_queue_close(VALUE self)
    struct rb_queue *q = queue_ptr(self);
    if (!queue_closed_p(self)) {

rb_queue_closed_p(VALUE self)

    return queue_do_push(self, queue_ptr(self), obj);

queue_sleep(VALUE self)
    rb_thread_sleep_deadly_allow_spurious_wakeup(self);
    struct rb_szqueue *sq;

queue_sleep_done(VALUE p)
    list_del(&qw->w.node);
    qw->as.q->num_waiting--;

szqueue_sleep_done(VALUE p)
    list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;
queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
    check_array(self, q->que);
    else if (queue_closed_p(self)) {
        return queue_closed_result(self, q);
    assert(queue_closed_p(self) == 0);
    list_add_tail(queue_waitq(qw->as.q), &qw->w.node);
    qw->as.q->num_waiting++;
    int should_block = 1;
    return should_block;

    int should_block = queue_pop_should_block(argc, argv);
    return queue_do_pop(self, queue_ptr(self), should_block);
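queue_pop_should_block turns Queue#pop's optional non_block argument into the should_block flag that queue_do_pop receives. A hedged sketch of that argument handling (not necessarily the exact body used in this file):

static int
queue_pop_should_block(int argc, const VALUE *argv)
{
    int should_block = 1;                 /* default: block while the queue is empty */
    rb_check_arity(argc, 0, 1);
    if (argc > 0) {
        should_block = !RTEST(argv[0]);   /* pop(true) means "do not block" */
    }
    return should_block;
}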
rb_queue_empty_p(VALUE self)
    return queue_length(self, queue_ptr(self)) == 0 ? Qtrue : Qfalse;

rb_queue_clear(VALUE self)
    struct rb_queue *q = queue_ptr(self);

rb_queue_length(VALUE self)
    return LONG2NUM(queue_length(self, queue_ptr(self)));

rb_queue_num_waiting(VALUE self)
    struct rb_queue *q = queue_ptr(self);
    return INT2NUM(q->num_waiting);
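The rb_queue_* functions above are registered as Ruby methods rather than exported C entry points, so C code normally drives a queue through the ordinary method-call interface. A sketch, assuming the class is reachable as Thread::Queue (which is where the DEFINE_CLASS macro in Init_thread_sync below defines it):

#include "ruby.h"

static VALUE
queue_roundtrip(void)
{
    VALUE q = rb_funcall(rb_path2class("Thread::Queue"), rb_intern("new"), 0);

    rb_funcall(q, rb_intern("push"), 1, INT2FIX(42));   /* enqueue */
    return rb_funcall(q, rb_intern("pop"), 0);          /* dequeue; blocks while empty */
}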
    struct rb_szqueue *sq = szqueue_ptr(self);

rb_szqueue_close(VALUE self)
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

rb_szqueue_max_get(VALUE self)

    struct rb_szqueue *sq = szqueue_ptr(self);
    if (max > sq->max) {
        diff = max - sq->max;

    int should_block = 1;
    return should_block;
    struct rb_szqueue *sq = szqueue_ptr(self);
    int should_block = szqueue_push_should_block(argc, argv);
    while (queue_length(self, &sq->q) >= sq->max) {
        if (!should_block) {
        else if (queue_closed_p(self)) {
        list_add_tail(pushq, &qw->w.node);
        sq->num_waiting_push++;
    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);
    return queue_do_push(self, &sq->q, argv[0]);

szqueue_do_pop(VALUE self, int should_block)
    struct rb_szqueue *sq = szqueue_ptr(self);
    VALUE retval = queue_do_pop(self, &sq->q, should_block);
    if (queue_length(self, &sq->q) < sq->max) {
    int should_block = queue_pop_should_block(argc, argv);
    return szqueue_do_pop(self, should_block);

rb_szqueue_clear(VALUE self)
    struct rb_szqueue *sq = szqueue_ptr(self);

rb_szqueue_length(VALUE self)
    struct rb_szqueue *sq = szqueue_ptr(self);
    return LONG2NUM(queue_length(self, &sq->q));

rb_szqueue_num_waiting(VALUE self)
    struct rb_szqueue *sq = szqueue_ptr(self);
    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);

rb_szqueue_empty_p(VALUE self)
    struct rb_szqueue *sq = szqueue_ptr(self);
    return queue_length(self, &sq->q) == 0 ? Qtrue : Qfalse;
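The SizedQueue fragments above show the capacity handshake: the push path loops while the queue is at sq->max and parks the caller on the pushq waiter list, and szqueue_do_pop checks queue_length(...) < sq->max after a pop, the point at which a blocked pusher can be woken. As an analogy only, using POSIX threads rather than the VM-internal waitq/native_sleep machinery used here, the same shape looks like this:

#include <pthread.h>

#define MAX_ITEMS 4

static int items[MAX_ITEMS];
static int head, count;
static pthread_mutex_t lock      = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  not_full  = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  not_empty = PTHREAD_COND_INITIALIZER;

/* Blocking push: re-check capacity after every wakeup, like the
 * while (queue_length(...) >= sq->max) loop above. */
static void
bounded_push(int v)
{
    pthread_mutex_lock(&lock);
    while (count >= MAX_ITEMS)
        pthread_cond_wait(&not_full, &lock);
    items[(head + count) % MAX_ITEMS] = v;
    count++;
    pthread_cond_signal(&not_empty);
    pthread_mutex_unlock(&lock);
}

/* Blocking pop: removing an element makes room again, so one blocked
 * pusher is woken, which is the role played by the pushq waiter list. */
static int
bounded_pop(void)
{
    int v;
    pthread_mutex_lock(&lock);
    while (count == 0)
        pthread_cond_wait(&not_empty, &lock);
    v = items[head];
    head = (head + 1) % MAX_ITEMS;
    count--;
    pthread_cond_signal(&not_full);
    pthread_mutex_unlock(&lock);
    return v;
}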
condvar_memsize(const void *ptr)

condvar_ptr(VALUE self)
    list_head_init(&cv->waitq);

condvar_alloc(VALUE klass)
    list_head_init(&cv->waitq);

rb_condvar_initialize(VALUE self)
    list_head_init(&cv->waitq);

    w->self = args.mutex;
    list_add_tail(&cv->waitq, &w->node);

rb_condvar_signal(VALUE self)
    wakeup_one(&cv->waitq);

rb_condvar_broadcast(VALUE self)
    wakeup_all(&cv->waitq);

undumpable(VALUE obj)
Init_thread_sync(void)

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, #name, rb_c##super)
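So DEFINE_CLASS(Mutex, Object), for example, expands to:

rb_cMutex = define_thread_class(rb_cThread, "Mutex", rb_cObject);

Each synchronization class is thus created under Thread and cached in the corresponding rb_c* static declared at the top of the file.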
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
Referenced declarations:

#define COROUTINE_STACK_LOCAL(type, name)
#define COROUTINE_STACK_FREE(name)
VALUE rb_ary_shift(VALUE ary)
VALUE rb_ary_push(VALUE ary, VALUE item)
VALUE rb_ary_tmp_new(long capa)
VALUE rb_ary_clear(VALUE ary)
#define PACKED_STRUCT_UNALIGNED(x)
#define UNREACHABLE_RETURN
VALUE rb_fiberptr_self(struct rb_fiber_struct *fiber)
unsigned int rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
void ruby_xfree(void *x)
Deallocates a storage instance.
void rb_gc_mark(VALUE ptr)
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
void rb_undef_method(VALUE klass, const char *name)
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
int rb_block_given_p(void)
Determines if the current method is given a block.
void rb_raise(VALUE exc, const char *fmt,...)
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
void rb_bug(const char *fmt,...)
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
The C-level equivalent of an ensure clause.
VALUE rb_cObject
Object class.
VALUE rb_obj_class(VALUE)
void rb_provide(const char *)
struct timeval rb_time_interval(VALUE num)
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
ID rb_intern(const char *)
void rb_define_const(VALUE, const char *, VALUE)
int rb_ractor_living_thread_num(const rb_ractor_t *r)
#define RB_OBJ_WRITE(a, slot, b)
Write barrier (WB) for a new reference from ‘a’ to ‘b’.
#define RUBY_TYPED_DEFAULT_FREE
#define TypedData_Get_Struct(obj, type, data_type, sval)
RUBY_TYPED_FREE_IMMEDIATELY
RUBY_TYPED_WB_PROTECTED
#define TypedData_Make_Struct(klass, type, data_type, sval)
#define Check_TypedStruct(v, t)
VALUE rb_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
VALUE rb_scheduler_current()
VALUE rb_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
VALUE rb_scheduler_kernel_sleep(VALUE scheduler, VALUE duration)
unsigned LONG_LONG rb_serial_t
union queue_waiter::&lt;anonymous&gt; as
rb_atomic_t interrupt_mask
struct rb_thread_struct * thread_ptr
struct rb_mutex_struct * next_mutex
rb_execution_context_t * ec
struct rb_mutex_struct * keeping_mutexes
void rb_threadptr_interrupt(rb_thread_t *th)
#define RUBY_VM_CHECK_INTS_BLOCKING(ec)
rb_thread_t * rb_fiber_threadptr(const rb_fiber_t *fiber)
#define DEFINE_CLASS(name, super)
void rb_mutex_allow_trap(VALUE self, int val)
#define szqueue_waitq(sq)
VALUE rb_mutex_owned_p(VALUE self)
struct rb_mutex_struct rb_mutex_t
#define szqueue_pushq(sq)
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
VALUE rb_mutex_unlock(VALUE self)
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
VALUE rb_mutex_lock(VALUE self)
VALUE rb_mutex_trylock(VALUE self)
VALUE rb_mutex_locked_p(VALUE self)
VALUE rb_obj_is_mutex(VALUE obj)
#define RUBY_VM_INTERRUPTED(ec)