#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#ifdef HAVE_THR_STKSEGMENT
#ifdef HAVE_SYS_PRCTL_H
#if defined(HAVE_SYS_TIME_H)
#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
# define USE_EVENTFD (0)
#if defined(SIGVTALRM) && !defined(__CYGWIN__)
# define USE_UBF_LIST 1
#define UBF_TIMER_NONE 0
#define UBF_TIMER_POSIX 1
#define UBF_TIMER_PTHREAD 2
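/*
 * UBF_TIMER selects how threads blocked with an unblocking function get
 * periodically re-woken: a POSIX per-process timer (timer_create/
 * timer_settime), a dedicated helper pthread sleeping on a pipe, or
 * nothing at all.  The POSIX variant is preferred below when
 * timer_create/timer_settime and CLOCK_MONOTONIC are all available.
 */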
# if defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_CREATE) && \
     defined(CLOCK_MONOTONIC) && defined(USE_UBF_LIST)
# define UBF_TIMER UBF_TIMER_POSIX
# elif defined(USE_UBF_LIST)
# define UBF_TIMER UBF_TIMER_PTHREAD
# define UBF_TIMER UBF_TIMER_NONE
#if UBF_TIMER == UBF_TIMER_POSIX
static const struct itimerspec zero;
#elif UBF_TIMER == UBF_TIMER_PTHREAD
static void *timer_pthread_fn(void *);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
static void clear_thread_cache_altstack(void);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);
#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())
#define BUSY_WAIT_SIGNALS (0)
#define THREAD_INVALID ((const rb_thread_t *)-1)
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#define native_thread_yield() ((void)0)
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
static const void *const condattr_monotonic = NULL;
#define TIME_QUANTUM_MSEC (100)
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
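/*
 * TIME_QUANTUM_MSEC is the scheduling quantum: roughly every 100 msec a
 * waiting thread acting as the GVL timer (see do_gvl_timer below) wakes
 * up and asks the current GVL owner to yield.  The same value, expressed
 * in nanoseconds, is used as the interval of the UBF timer (ubf_timer_arm).
 */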
    abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
    ubf_wakeup_all_threads();
           "we must not be in ubf_list and GVL waitq at the same time");
            do_gvl_timer(gvl, th);
    } while (gvl->owner);
    if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
    gvl_acquire_common(gvl, th);
    gvl_release_common(gvl);
    ubf_wakeup_all_threads();
    next = gvl_release_common(gvl);
        native_thread_yield();
    gvl_acquire_common(gvl, th);
    list_head_init(&gvl->waitq);
    clear_thread_cache_altstack();
#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);
    thread_cache_reset();
    gvl_acquire(gvl, GET_THREAD());
#define NATIVE_MUTEX_LOCK_DEBUG 0
mutex_debug(const char *msg, void *lock)
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    int r = pthread_cond_init(cond, condattr_monotonic);
    int r = pthread_cond_destroy(cond);
        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);
        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);
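/*
 * pthread_cond_signal/pthread_cond_broadcast are not documented to return
 * EAGAIN, but some implementations have been reported to do so (older
 * macOS in particular); retrying the call here is harmless on conforming
 * systems.
 */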
    int r = pthread_cond_wait(cond, mutex);
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);
    native_cond_timedwait(cond, mutex, &hrmsec);
    if (condattr_monotonic) {
    return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop pthread_cleanup_pop
#ifdef RB_THREAD_LOCAL_SPECIFIER
static pthread_key_t ruby_native_thread_key;
ruby_thread_from_native(void)
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
    return pthread_getspecific(ruby_native_thread_key);
    rb_ractor_set_current_ec(th->ractor, th->ec);
#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
        if (r) condattr_monotonic = NULL;
#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");
    ruby_thread_set_native(th);
    native_thread_init(th);
    posix_signal(SIGVTALRM, null_func);
#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 1
    if (USE_THREAD_CACHE)
        ruby_thread_set_native(0);
static rb_thread_t *register_cached_thread_and_wait(void *);
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
# define MAINSTACKADDR_AVAILABLE 1
# define MAINSTACKADDR_AVAILABLE 0
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#ifdef STACKADDR_AVAILABLE
get_stack(void **addr, size_t *size)
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= getpagesize();
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    int regsiz=sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
#elif defined __HAIKU__
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
#error STACKADDR_AVAILABLE is defined but not implemented.
    rb_nativethread_id_t id;
    size_t stack_maxsize;
#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5
space_size(size_t stack_size)
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
reserve_stack(volatile char *limit, size_t size)
# error needs alloca()
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000};
    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
    if (size < stack_check_margin) return;
    size -= stack_check_margin;
    const volatile char *end = buf + sizeof(buf);
        size_t sz = limit - end;
        size_t sz = buf - limit;
# define reserve_stack(limit, size) ((void)(limit), (void)(size))
#undef ruby_init_stack
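/*
 * ruby_init_stack records the main thread's stack base and size.  Where
 * the platform can report the main stack directly (MAINSTACKADDR_AVAILABLE),
 * get_main_stack() is used; otherwise the bounds are estimated below from
 * the address of a stack-local variable and RLIMIT_STACK.
 */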
    native_main_thread.id = pthread_self();
#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            reserve_stack(stackaddr, size);
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
    if (!native_main_thread.stack_start ||
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
    size_t size = PTHREAD_STACK_DEFAULT;
    int pagesize = getpagesize();
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
        size = (size_t)rlim.rlim_cur;
    addr = native_main_thread.stack_start;
        space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
    native_main_thread.stack_maxsize = space;
#if MAINSTACKADDR_AVAILABLE
        start = native_main_thread.stack_start;
        end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
        end = native_main_thread.stack_start;
    if ((void *)addr < start || (void *)addr > end) {
        native_main_thread.stack_start = (VALUE *)addr;
        native_main_thread.stack_maxsize = 0;
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
    rb_nativethread_id_t curr = pthread_self();
    if (pthread_equal(curr, native_main_thread.id)) {
#ifdef STACKADDR_AVAILABLE
        if (get_stack(&start, &size) == 0) {
#define USE_NATIVE_THREAD_INIT 1
thread_start_func_1(void *th_ptr)
#if !defined USE_NATIVE_THREAD_INIT
#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
        native_thread_init(th);
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, &stack_start);
    if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {
struct cached_thread_entry {
    rb_nativethread_id_t thread_id;
    struct list_node node;
static LIST_HEAD(cached_thread_head);
# if defined(HAVE_WORKING_FORK)
thread_cache_reset(void)
    list_head_init(&cached_thread_head);
#ifndef THREAD_CACHE_TIME
# define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
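/*
 * With USE_THREAD_CACHE, a native thread whose Ruby thread has finished
 * parks itself in register_cached_thread_and_wait() for up to
 * THREAD_CACHE_TIME (3 seconds here), hoping to be reused by the next
 * Thread.new and thereby avoid a fresh pthread_create().
 */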
register_cached_thread_and_wait(void *altstack)
    struct cached_thread_entry entry;
    entry.altstack = altstack;
    entry.thread_id = pthread_self();
    end = native_cond_timeout(&entry.cond, end);
    list_add(&cached_thread_head, &entry.node);
        native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);
        if (entry.th == NULL) {
            list_del(&entry.node);
# if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void) { }
    struct cached_thread_entry *entry;
        entry = list_pop(&cached_thread_head, struct cached_thread_entry, node);
clear_thread_cache_altstack(void)
    struct cached_thread_entry *entry;
    list_for_each(&cached_thread_head, entry, node) {
        entry->altstack = 0;
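/*
 * Thread creation below first tries to hand the new Ruby thread to a
 * cached native thread (use_cached_thread); only when none is waiting
 * does it fall back to pthread_create() with an explicit stack size and
 * PTHREAD_CREATE_DETACHED, so finished threads never need pthread_join.
 */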
    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
        pthread_attr_t attr;
        const size_t space = space_size(stack_size);
#ifdef USE_SIGALTSTACK
        th->altstack = rb_allocate_sigaltstack();
        CHECK_ERR(pthread_attr_init(&attr));
# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        CHECK_ERR(pthread_attr_destroy(&attr));
#if USE_NATIVE_THREAD_PRIORITY
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);
    if (min > priority) {
    else if (max < priority) {
    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
ubf_pthread_cond_signal(void *ptr)
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
        thread_debug("native_sleep: interrupted before sleep\n");
            end = native_cond_timeout(cond, *rel);
            native_cond_timedwait(cond, lock, &end);
static LIST_HEAD(ubf_list_head);
ubf_list_atfork(void)
    list_head_init(&ubf_list_head);
    if (list_empty((struct list_head*)node)) {
        list_add(&ubf_list_head, node);
    if (!list_empty((struct list_head*)node)) {
        list_del_init(node);
ubf_select(void *ptr)
    const rb_thread_t *cur = ruby_thread_from_native();
    register_ubf_list(th);
    if (cur != gvl->timer && cur != sigwait_th) {
    ubf_wakeup_thread(th);
ubf_threads_empty(void)
    return list_empty(&ubf_list_head);
ubf_wakeup_all_threads(void)
    if (!ubf_threads_empty()) {
        list_for_each(&ubf_list_head, dat, node.ubf) {
            th = container_of(dat, rb_thread_t, native_thread_data);
            ubf_wakeup_thread(th);
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
static void ubf_wakeup_all_threads(void) { return; }
static int ubf_threads_empty(void) { return 1; }
#define ubf_list_atfork() do {} while (0)
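/*
 * With USE_UBF_LIST, every thread sleeping under ubf_select registers
 * itself on ubf_list_head; ubf_wakeup_all_threads() walks that list so the
 * timer can keep re-signaling threads until they actually wake up.  On
 * platforms without SIGVTALRM (e.g. Cygwin) the stubs above make all of
 * this a no-op.
 */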
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
    volatile rb_pid_t owner_process;
} signal_self_pipe = {
rb_thread_wakeup_timer_thread_fd(int fd)
    const char buff = '!';
        if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
          case EINTR: goto retry;
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
            async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
ubf_timer_arm(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_POSIX
    if ((!current || timer_posix.owner == current) &&
        !ATOMIC_CAS(timer_posix.state, RTIMER_DISARM, RTIMER_ARMING)) {
        struct itimerspec it;
        it.it_interval.tv_sec = it.it_value.tv_sec = 0;
        it.it_interval.tv_nsec = it.it_value.tv_nsec = TIME_QUANTUM_NSEC;
        if (timer_settime(timer_posix.timerid, 0, &it, 0))
        switch (ATOMIC_CAS(timer_posix.state, RTIMER_ARMING, RTIMER_ARMED)) {
            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
          case RTIMER_ARMING: return;
            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    if (!current || current == timer_pthread.owner) {
        rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
    if (signal_self_pipe.owner_process == current) {
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
        if (system_working > 0) {
    if (!mth || system_working <= 0) return;
    ubf_timer_arm(current);
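/*
 * The POSIX UBF timer is driven by a small state machine updated with
 * ATOMIC_CAS: RTIMER_DISARM -> RTIMER_ARMING -> RTIMER_ARMED, with
 * RTIMER_DEAD reserved for teardown.  ubf_timer_arm() above only programs
 * the kernel timer after winning the DISARM->ARMING transition, so
 * concurrent callers cannot double-arm it; ubf_timer_disarm() and
 * ubf_timer_destroy() below undo it with the matching transitions.
 */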
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)
close_invalidate(int *fdp, const char *msg)
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
close_invalidate_pair(int fds[2], const char *msg)
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[1], msg);
    oflags = fcntl(fd, F_GETFL);
setup_communication_pipe_internal(int pipes[2])
    if (pipes[0] >= 0 || pipes[1] >= 0) {
#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
    if (pipes[0] >= 0) {
        rb_warn("pipe creation failed for timer: %s, scheduling broken",
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
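/*
 * setup_communication_pipe_internal() prefers a single non-blocking
 * eventfd, in which case both "ends" share one descriptor, and otherwise
 * falls back to a regular pipe with both ends set non-blocking.
 */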
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#if defined(__linux__)
#elif defined(__APPLE__)
#ifdef SET_CURRENT_THREAD_NAME
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char buf[THREAD_NAME_MAX];
        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        SET_CURRENT_THREAD_NAME(buf);
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
        if (n >= (int)sizeof(buf)) {
            buf[sizeof(buf)-1] = '\0';
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
ubf_timer_invalidate(void)
#if UBF_TIMER == UBF_TIMER_PTHREAD
    CLOSE_INVALIDATE_PAIR(timer_pthread.low);
ubf_timer_pthread_create(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (timer_pthread.owner == current)
    if (setup_communication_pipe_internal(timer_pthread.low) < 0)
    err = pthread_create(&timer_pthread.thid, 0, timer_pthread_fn, GET_VM());
        timer_pthread.owner = current;
        rb_warn("pthread_create failed for timer: %s, signals racy",
ubf_timer_create(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_POSIX
# define UBF_TIMER_CLOCK CLOCK_REALTIME
# define UBF_TIMER_CLOCK CLOCK_MONOTONIC
    struct sigevent sev;
    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGVTALRM;
    sev.sigev_value.sival_ptr = &timer_posix;
    if (!timer_create(UBF_TIMER_CLOCK, &sev, &timer_posix.timerid)) {
        if (prev != RTIMER_DEAD) {
            rb_bug("timer_posix was not dead: %u\n", (unsigned)prev);
        timer_posix.owner = current;
    if (UBF_TIMER == UBF_TIMER_PTHREAD)
        ubf_timer_pthread_create(current);
rb_thread_create_timer_thread(void)
    rb_pid_t current = getpid();
    rb_pid_t owner = signal_self_pipe.owner_process;
    if (owner && owner != current) {
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
        ubf_timer_invalidate();
    if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
    if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;
    ubf_timer_create(current);
    if (owner != current) {
        sigwait_th = THREAD_INVALID;
        signal_self_pipe.owner_process = current;
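/*
 * owner_process remembers which PID created the self-pipes and the UBF
 * timer.  After fork(), the child sees owner != getpid(), closes the
 * inherited descriptors, and re-creates its own pipes and timer, so parent
 * and child never share a self-pipe.
 */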
ubf_timer_disarm(void)
#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner && timer_posix.owner != getpid()) return;
    prev = ATOMIC_CAS(timer_posix.state, RTIMER_ARMED, RTIMER_DISARM);
      case RTIMER_DISARM: return;
      case RTIMER_ARMING: return;
        if (timer_settime(timer_posix.timerid, 0, &zero, 0)) {
            if (err == EINVAL) {
                prev = ATOMIC_CAS(timer_posix.state, RTIMER_DISARM, RTIMER_DISARM);
                if (prev == RTIMER_DEAD) return;
      case RTIMER_DEAD: return;
        rb_bug("UBF_TIMER_POSIX bad state: %u\n", (unsigned)prev);
#elif UBF_TIMER == UBF_TIMER_PTHREAD
ubf_timer_destroy(void)
#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner == getpid()) {
        rb_atomic_t expect = RTIMER_DISARM;
        size_t i, max = 10000000;
        for (i = 0; i < max; i++) {
            switch (ATOMIC_CAS(timer_posix.state, expect, RTIMER_DEAD)) {
                if (expect == RTIMER_DISARM) goto done;
                expect = RTIMER_DISARM;
                native_thread_yield();
                expect = RTIMER_ARMED;
                if (expect == RTIMER_ARMED) {
                    if (timer_settime(timer_posix.timerid, 0, &zero, 0))
                    expect = RTIMER_ARMED;
                rb_bug("RTIMER_DEAD unexpected");
        rb_bug("timed out waiting for timer to arm");
        if (timer_delete(timer_posix.timerid) < 0)
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    timer_pthread.owner = 0;
    rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    err = pthread_join(timer_pthread.thid, 0);
native_stop_timer_thread(void)
    stopped = --system_working <= 0;
        ubf_timer_destroy();
    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
native_reset_timer_thread(void)
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
#ifdef HAVE_SIGALTSTACK
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
    const size_t water_mark = 1024 * 1024;
#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
        if (pthread_equal(th->thread_id, native_main_thread.id)) {
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (fd == timer_pthread.low[0] || fd == timer_pthread.low[1])
    if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])
    if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])
    if (signal_self_pipe.owner_process == getpid())
    return pthread_self();
    void (*worker_func)(void) = (void(*)(void))arg;
#ifdef SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME("ruby-mjitworker");
rb_thread_create_mjit_thread(void (*worker_func)(void))
    pthread_attr_t attr;
    pthread_t worker_pid;
    if (pthread_attr_init(&attr) != 0) return ret;
    if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0
        && pthread_create(&worker_pid, &attr, mjit_worker, (void *)worker_func) == 0) {
    pthread_attr_destroy(&attr);
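/*
 * rb_sigwait_fd_get()/rb_sigwait_fd_put(): at most one thread at a time
 * "owns" the read end of signal_self_pipe.normal.  Ownership is claimed by
 * a compare-and-swap of sigwait_th from THREAD_INVALID to the calling
 * thread and released by swapping it back.
 */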
    if (signal_self_pipe.normal[0] >= 0) {
        VM_ASSERT(signal_self_pipe.owner_process == getpid());
        if (ATOMIC_PTR_CAS(sigwait_th, THREAD_INVALID, th) == THREAD_INVALID) {
            return signal_self_pipe.normal[0];
    VM_ASSERT(signal_self_pipe.normal[0] == fd);
    if (old != th) assert(old == th);
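/*
 * ruby_ppoll() emulates ppoll(2) with plain poll(2) on platforms that lack
 * it: the timespec timeout is converted to milliseconds, rounding tv_nsec
 * up so the call never returns early, and saturating at INT_MAX.  The
 * sigmask argument is ignored in this fallback.
 */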
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)
        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
                timeout_ms = (int)(tmp + tmp2);
    return poll(fds, nfds, timeout_ms);
# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
    pfd.fd = sigwait_fd;
    pfd.events = POLLIN;
    (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
    check_signals_nogvl(th, sigwait_fd);
        const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &to, &n);
            n = ppoll(&pfd, 1, rb_hrtime2timespec(&ts, sto), 0);
            if (check_signals_nogvl(th, sigwait_fd))
        if (rel && hrtime_update_expire(&to, end))
ubf_ppoll_sleep(void *ignore)
    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);
#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
    const native_thread_data_t *next; \
    rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor); \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    rb_native_mutex_lock(&gvl->lock); \
    next = gvl_release_common(gvl); \
    rb_native_mutex_unlock(&gvl->lock); \
    if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \
    GVL_UNLOCK_BEGIN_YIELD(th);
        struct pollfd pfd[2];
        pfd[0].fd = signal_self_pipe.normal[0];
        pfd[1].fd = signal_self_pipe.ub_main[0];
        pfd[0].events = pfd[1].events = POLLIN;
        if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
            if (pfd[1].revents & POLLIN) {
                (void)consume_communication_pipe(pfd[1].fd);
    unblock_function_clear(th);
    if (sigwait_fd >= 0) {
        GVL_UNLOCK_BEGIN_YIELD(th);
        check_signals_nogvl(th, sigwait_fd);
        unblock_function_clear(th);
        native_ppoll_sleep(th, rel);
        native_cond_sleep(th, rel);
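/*
 * timer_pthread_fn() is the helper thread used when UBF_TIMER ==
 * UBF_TIMER_PTHREAD: it polls timer_pthread.low[0] and, while the VM is
 * alive, periodically sends SIGVTALRM to the main thread every
 * TIME_QUANTUM_MSEC so interrupts and the UBF list keep getting serviced.
 */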
#if UBF_TIMER == UBF_TIMER_PTHREAD
timer_pthread_fn(void *p)
    pfd.fd = timer_pthread.low[0];
    pfd.events = POLLIN;
    while (system_working > 0) {
        (void)poll(&pfd, 1, timeout);
        ccp = consume_communication_pipe(pfd.fd);
        if (system_working > 0) {
            pthread_kill(main_thread_id, SIGVTALRM);
            timeout = TIME_QUANTUM_MSEC;
    pthread_kill(main_thread_id, SIGVTALRM);
ubf_caller(void *ignore)
rb_thread_start_unblock_thread(void)