Ruby 2.0.0p353 (2013-11-22 revision 43784)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: nagachika $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  A thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
27  When thread scheduling, running thread release GVL. If running thread
28  try blocking operation, this thread must release GVL and another
29  thread can continue this flow. After blocking operation, thread
30  must check interrupt (RUBY_VM_CHECK_INTS).
31 
32  Every VM can run parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  Every threads run concurrent or parallel and to access shared object
40  exclusive access control is needed. For example, to access String
41  object or Array object, fine grain lock must be locked every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later and set _FORTIFY_SOURCE > 0.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
50  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
51  * it doesn't work correctly and makes program abort. Therefore we need to
52  * disable _FORTIFY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "internal.h"
63 #include "ruby/io.h"
64 #include "ruby/thread.h"
65 
66 #ifndef USE_NATIVE_THREAD_PRIORITY
67 #define USE_NATIVE_THREAD_PRIORITY 0
68 #define RUBY_THREAD_PRIORITY_MAX 3
69 #define RUBY_THREAD_PRIORITY_MIN -3
70 #endif
71 
72 #ifndef THREAD_DEBUG
73 #define THREAD_DEBUG 0
74 #endif
75 
76 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
77 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
78 
81 
85 
86 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
87 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
88 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
89 static double timeofday(void);
90 static int rb_threadptr_dead(rb_thread_t *th);
91 static void rb_check_deadlock(rb_vm_t *vm);
93 
94 #define eKillSignal INT2FIX(0)
95 #define eTerminateSignal INT2FIX(1)
96 static volatile int system_working = 1;
97 
98 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
99 
100 inline static void
102 {
103  st_delete(table, &key, 0);
104 }
105 
106 /********************************************************************************/
107 
108 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
109 
113 };
114 
116  struct rb_unblock_callback *old, int fail_if_interrupted);
117 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
118 
119 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
120  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
121 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
122 
123 #ifdef __ia64
124 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
125  do{(th)->machine_register_stack_end = rb_ia64_bsp();}while(0)
126 #else
127 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
128 #endif
129 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
130  do { \
131  FLUSH_REGISTER_WINDOWS; \
132  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
133  setjmp((th)->machine_regs); \
134  SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
135  } while (0)
136 
137 #define GVL_UNLOCK_BEGIN() do { \
138  rb_thread_t *_th_stored = GET_THREAD(); \
139  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
140  gvl_release(_th_stored->vm);
141 
142 #define GVL_UNLOCK_END() \
143  gvl_acquire(_th_stored->vm, _th_stored); \
144  rb_thread_set_current(_th_stored); \
145 } while(0)
146 
147 #ifdef __GNUC__
148 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
149 #else
150 #define only_if_constant(expr, notconst) notconst
151 #endif
152 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
153  rb_thread_t *__th = GET_THREAD(); \
154  struct rb_blocking_region_buffer __region; \
155  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
156  /* always return true unless fail_if_interrupted */ \
157  !only_if_constant(fail_if_interrupted, TRUE)) { \
158  exec; \
159  blocking_region_end(__th, &__region); \
160  }; \
161 } while(0)
162 
163 #if THREAD_DEBUG
164 #ifdef HAVE_VA_ARGS_MACRO
165 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
166 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
167 #define POSITION_FORMAT "%s:%d:"
168 #define POSITION_ARGS ,file, line
169 #else
170 void rb_thread_debug(const char *fmt, ...);
171 #define thread_debug rb_thread_debug
172 #define POSITION_FORMAT
173 #define POSITION_ARGS
174 #endif
175 
176 # if THREAD_DEBUG < 0
177 static int rb_thread_debug_enabled;
178 
179 /*
180  * call-seq:
181  * Thread.DEBUG -> num
182  *
183  * Returns the thread debug level. Available only if compiled with
184  * THREAD_DEBUG=-1.
185  */
186 
187 static VALUE
188 rb_thread_s_debug(void)
189 {
190  return INT2NUM(rb_thread_debug_enabled);
191 }
192 
193 /*
194  * call-seq:
195  * Thread.DEBUG = num
196  *
197  * Sets the thread debug level. Available only if compiled with
198  * THREAD_DEBUG=-1.
199  */
200 
201 static VALUE
202 rb_thread_s_debug_set(VALUE self, VALUE val)
203 {
204  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
205  return val;
206 }
207 # else
208 # define rb_thread_debug_enabled THREAD_DEBUG
209 # endif
210 #else
211 #define thread_debug if(0)printf
212 #endif
213 
214 #ifndef __ia64
215 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
216 #endif
217 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
218  VALUE *register_stack_start));
219 static void timer_thread_function(void *);
220 
221 #if defined(_WIN32)
222 #include "thread_win32.c"
223 
224 #define DEBUG_OUT() \
225  WaitForSingleObject(&debug_mutex, INFINITE); \
226  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
227  fflush(stdout); \
228  ReleaseMutex(&debug_mutex);
229 
230 #elif defined(HAVE_PTHREAD_H)
231 #include "thread_pthread.c"
232 
233 #define DEBUG_OUT() \
234  pthread_mutex_lock(&debug_mutex); \
235  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
236  fflush(stdout); \
237  pthread_mutex_unlock(&debug_mutex);
238 
239 #else
240 #error "unsupported thread type"
241 #endif
242 
243 #if THREAD_DEBUG
244 static int debug_mutex_initialized = 1;
245 static rb_thread_lock_t debug_mutex;
246 
247 void
248 rb_thread_debug(
249 #ifdef HAVE_VA_ARGS_MACRO
250  const char *file, int line,
251 #endif
252  const char *fmt, ...)
253 {
254  va_list args;
255  char buf[BUFSIZ];
256 
257  if (!rb_thread_debug_enabled) return;
258 
259  if (debug_mutex_initialized == 1) {
260  debug_mutex_initialized = 0;
261  native_mutex_initialize(&debug_mutex);
262  }
263 
264  va_start(args, fmt);
265  vsnprintf(buf, BUFSIZ, fmt, args);
266  va_end(args);
267 
268  DEBUG_OUT();
269 }
270 #endif
271 
272 void
274 {
275  gvl_release(vm);
276  gvl_destroy(vm);
277  native_mutex_destroy(&vm->thread_destruct_lock);
278 }
279 
280 void
282 {
283  native_mutex_unlock(lock);
284 }
285 
286 void
288 {
289  native_mutex_destroy(lock);
290 }
291 
292 static int
294  struct rb_unblock_callback *old, int fail_if_interrupted)
295 {
296  check_ints:
297  if (fail_if_interrupted) {
298  if (RUBY_VM_INTERRUPTED_ANY(th)) {
299  return FALSE;
300  }
301  }
302  else {
303  RUBY_VM_CHECK_INTS(th);
304  }
305 
306  native_mutex_lock(&th->interrupt_lock);
307  if (RUBY_VM_INTERRUPTED_ANY(th)) {
308  native_mutex_unlock(&th->interrupt_lock);
309  goto check_ints;
310  }
311  else {
312  if (old) *old = th->unblock;
313  th->unblock.func = func;
314  th->unblock.arg = arg;
315  }
316  native_mutex_unlock(&th->interrupt_lock);
317 
318  return TRUE;
319 }
320 
321 static void
323 {
324  native_mutex_lock(&th->interrupt_lock);
325  th->unblock = *old;
326  native_mutex_unlock(&th->interrupt_lock);
327 }
328 
329 static void
331 {
332  native_mutex_lock(&th->interrupt_lock);
333  if (trap)
335  else
337  if (th->unblock.func) {
338  (th->unblock.func)(th->unblock.arg);
339  }
340  else {
341  /* none */
342  }
343  native_mutex_unlock(&th->interrupt_lock);
344 }
345 
346 void
348 {
350 }
351 
352 void
354 {
356 }
357 
358 static int
360 {
361  VALUE thval = key;
362  rb_thread_t *th;
363  GetThreadPtr(thval, th);
364 
365  if (th != main_thread) {
366  thread_debug("terminate_i: %p\n", (void *)th);
369  }
370  else {
371  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
372  }
373  return ST_CONTINUE;
374 }
375 
376 typedef struct rb_mutex_struct
377 {
380  struct rb_thread_struct volatile *th;
384 } rb_mutex_t;
385 
386 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
387 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
388 
389 void
391 {
392  const char *err;
393  rb_mutex_t *mutex;
394  rb_mutex_t *mutexes = th->keeping_mutexes;
395 
396  while (mutexes) {
397  mutex = mutexes;
398  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
399  mutexes); */
400  mutexes = mutex->next_mutex;
401  err = rb_mutex_unlock_th(mutex, th);
402  if (err) rb_bug("invalid keeping_mutexes: %s", err);
403  }
404 }
405 
406 void
408 {
409  rb_thread_t *th = GET_THREAD(); /* main thread */
410  rb_vm_t *vm = th->vm;
411 
412  if (vm->main_thread != th) {
413  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
414  (void *)vm->main_thread, (void *)th);
415  }
416 
417  /* unlock all locking mutexes */
419 
420  retry:
421  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
423 
424  while (!rb_thread_alone()) {
425  int state;
426 
427  TH_PUSH_TAG(th);
428  if ((state = TH_EXEC_TAG()) == 0) {
429  native_sleep(th, 0);
431  }
432  TH_POP_TAG();
433 
434  if (state) {
435  goto retry;
436  }
437  }
438 }
439 
440 static void
442 {
443  rb_thread_t *th = th_ptr;
444  th->status = THREAD_KILLED;
446 #ifdef __ia64
447  th->machine_register_stack_start = th->machine_register_stack_end = 0;
448 #endif
449 }
450 
451 static void
452 thread_cleanup_func(void *th_ptr, int atfork)
453 {
454  rb_thread_t *th = th_ptr;
455 
456  th->locking_mutex = Qfalse;
458 
459  /*
460  * Unfortunately, we can't release native threading resource at fork
461  * because libc may have unstable locking state therefore touching
462  * a threading resource may cause a deadlock.
463  */
464  if (atfork)
465  return;
466 
467  native_mutex_destroy(&th->interrupt_lock);
468  native_thread_destroy(th);
469 }
470 
471 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
472 
473 void
475 {
476  native_thread_init_stack(th);
477 }
478 
479 static int
480 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
481 {
482  int state;
483  VALUE args = th->first_args;
484  rb_proc_t *proc;
485  rb_thread_list_t *join_list;
486  rb_thread_t *main_th;
487  VALUE errinfo = Qnil;
488 # ifdef USE_SIGALTSTACK
489  void rb_register_sigaltstack(rb_thread_t *th);
490 
491  rb_register_sigaltstack(th);
492 # endif
493 
494  if (th == th->vm->main_thread)
495  rb_bug("thread_start_func_2 must not used for main thread");
496 
497  ruby_thread_set_native(th);
498 
499  th->machine_stack_start = stack_start;
500 #ifdef __ia64
501  th->machine_register_stack_start = register_stack_start;
502 #endif
503  thread_debug("thread start: %p\n", (void *)th);
504 
505  gvl_acquire(th->vm, th);
506  {
507  thread_debug("thread start (get lock): %p\n", (void *)th);
509 
510  TH_PUSH_TAG(th);
511  if ((state = EXEC_TAG()) == 0) {
512  SAVE_ROOT_JMPBUF(th, {
513  if (!th->first_func) {
514  GetProcPtr(th->first_proc, proc);
515  th->errinfo = Qnil;
516  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
517  th->root_svar = Qnil;
518  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
519  th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
520  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
521  }
522  else {
523  th->value = (*th->first_func)((void *)args);
524  }
525  });
526  }
527  else {
528  errinfo = th->errinfo;
529  if (state == TAG_FATAL) {
530  /* fatal error within this thread, need to stop whole script */
531  }
532  else if (th->safe_level >= 4) {
533  /* Ignore it. Main thread shouldn't be harmed from untrusted thread. */
534  errinfo = Qnil;
535  }
536  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
537  /* exit on main_thread. */
538  }
539  else if (th->vm->thread_abort_on_exception ||
541  /* exit on main_thread */
542  }
543  else {
544  errinfo = Qnil;
545  }
546  th->value = Qnil;
547  }
548 
549  th->status = THREAD_KILLED;
550  thread_debug("thread end: %p\n", (void *)th);
551 
552  main_th = th->vm->main_thread;
553  if (RB_TYPE_P(errinfo, T_OBJECT)) {
554  /* treat with normal error object */
555  rb_threadptr_raise(main_th, 1, &errinfo);
556  }
557  TH_POP_TAG();
558 
559  /* locking_mutex must be Qfalse */
560  if (th->locking_mutex != Qfalse) {
561  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
562  (void *)th, th->locking_mutex);
563  }
564 
565  /* delete self other than main thread from living_threads */
567  if (rb_thread_alone()) {
568  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
569  rb_threadptr_interrupt(main_th);
570  }
571 
572  /* wake up joining threads */
573  join_list = th->join_list;
574  while (join_list) {
575  rb_threadptr_interrupt(join_list->th);
576  switch (join_list->th->status) {
578  join_list->th->status = THREAD_RUNNABLE;
579  default: break;
580  }
581  join_list = join_list->next;
582  }
583 
585  rb_check_deadlock(th->vm);
586 
587  if (!th->root_fiber) {
589  th->stack = 0;
590  }
591  }
592  native_mutex_lock(&th->vm->thread_destruct_lock);
593  /* make sure vm->running_thread never point me after this point.*/
594  th->vm->running_thread = NULL;
595  native_mutex_unlock(&th->vm->thread_destruct_lock);
597  gvl_release(th->vm);
598 
599  return 0;
600 }
601 
602 static VALUE
604 {
605  rb_thread_t *th, *current_th = GET_THREAD();
606  int err;
607 
608  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
610  "can't start a new thread (frozen ThreadGroup)");
611  }
612  GetThreadPtr(thval, th);
613 
614  /* setup thread environment */
615  th->first_func = fn;
616  th->first_proc = fn ? Qfalse : rb_block_proc();
617  th->first_args = args; /* GC: shouldn't put before above line */
618 
619  th->priority = current_th->priority;
620  th->thgroup = current_th->thgroup;
621 
625  RBASIC(th->pending_interrupt_mask_stack)->klass = 0;
626 
627  th->interrupt_mask = 0;
628 
629  native_mutex_initialize(&th->interrupt_lock);
630 
631  /* kick thread */
632  err = native_thread_create(th);
633  if (err) {
634  th->status = THREAD_KILLED;
635  rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
636  }
637  st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
638  return thval;
639 }
640 
641 /*
642  * call-seq:
643  * Thread.new { ... } -> thread
644  * Thread.new(*args, &proc) -> thread
645  * Thread.new(*args) { |args| ... } -> thread
646  *
647  * Creates a new thread executing the given block.
648  *
649  * Any +args+ given to ::new will be passed to the block:
650  *
651  * arr = []
652  * a, b, c = 1, 2, 3
653  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
654  * arr #=> [1, 2, 3]
655  *
656  * A ThreadError exception is raised if ::new is called without a block.
657  *
658  * If you're going to subclass Thread, be sure to call super in your
659  * +initialize+ method, otherwise a ThreadError will be raised.
660  */
661 static VALUE
663 {
664  rb_thread_t *th;
665  VALUE thread = rb_thread_alloc(klass);
666 
667  if (GET_VM()->main_thread->status == THREAD_KILLED)
668  rb_raise(rb_eThreadError, "can't alloc thread");
669 
670  rb_obj_call_init(thread, argc, argv);
671  GetThreadPtr(thread, th);
672  if (!th->first_args) {
673  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
674  rb_class2name(klass));
675  }
676  return thread;
677 }
678 
679 /*
680  * call-seq:
681  * Thread.start([args]*) {|args| block } -> thread
682  * Thread.fork([args]*) {|args| block } -> thread
683  *
684  * Basically the same as ::new. However, if class Thread is subclassed, then
685  * calling +start+ in that subclass will not invoke the subclass's
686  * +initialize+ method.
687  */
688 
689 static VALUE
691 {
692  return thread_create_core(rb_thread_alloc(klass), args, 0);
693 }
694 
695 /* :nodoc: */
696 static VALUE
698 {
699  rb_thread_t *th;
700  if (!rb_block_given_p()) {
701  rb_raise(rb_eThreadError, "must be called with a block");
702  }
703  GetThreadPtr(thread, th);
704  if (th->first_args) {
705  VALUE proc = th->first_proc, line, loc;
706  const char *file;
707  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
708  rb_raise(rb_eThreadError, "already initialized thread");
709  }
710  file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
711  if (NIL_P(line = RARRAY_PTR(loc)[1])) {
712  rb_raise(rb_eThreadError, "already initialized thread - %s",
713  file);
714  }
715  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
716  file, NUM2INT(line));
717  }
718  return thread_create_core(thread, args, 0);
719 }
720 
721 VALUE
722 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
723 {
725 }
726 
727 
728 /* +infty, for this purpose */
729 #define DELAY_INFTY 1E30
730 
731 struct join_arg {
733  double limit;
734  int forever;
735 };
736 
737 static VALUE
739 {
740  struct join_arg *p = (struct join_arg *)arg;
741  rb_thread_t *target_th = p->target, *th = p->waiting;
742 
743  if (target_th->status != THREAD_KILLED) {
744  rb_thread_list_t **p = &target_th->join_list;
745 
746  while (*p) {
747  if ((*p)->th == th) {
748  *p = (*p)->next;
749  break;
750  }
751  p = &(*p)->next;
752  }
753  }
754 
755  return Qnil;
756 }
757 
758 static VALUE
760 {
761  struct join_arg *p = (struct join_arg *)arg;
762  rb_thread_t *target_th = p->target, *th = p->waiting;
763  double now, limit = p->limit;
764 
765  while (target_th->status != THREAD_KILLED) {
766  if (p->forever) {
767  sleep_forever(th, 1, 0);
768  }
769  else {
770  now = timeofday();
771  if (now > limit) {
772  thread_debug("thread_join: timeout (thid: %p)\n",
773  (void *)target_th->thread_id);
774  return Qfalse;
775  }
776  sleep_wait_for_interrupt(th, limit - now, 0);
777  }
778  thread_debug("thread_join: interrupted (thid: %p)\n",
779  (void *)target_th->thread_id);
780  }
781  return Qtrue;
782 }
783 
784 static VALUE
785 thread_join(rb_thread_t *target_th, double delay)
786 {
787  rb_thread_t *th = GET_THREAD();
788  struct join_arg arg;
789 
790  if (th == target_th) {
791  rb_raise(rb_eThreadError, "Target thread must not be current thread");
792  }
793  if (GET_VM()->main_thread == target_th) {
794  rb_raise(rb_eThreadError, "Target thread must not be main thread");
795  }
796 
797  arg.target = target_th;
798  arg.waiting = th;
799  arg.limit = timeofday() + delay;
800  arg.forever = delay == DELAY_INFTY;
801 
802  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
803 
804  if (target_th->status != THREAD_KILLED) {
806  list.next = target_th->join_list;
807  list.th = th;
808  target_th->join_list = &list;
809  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
810  remove_from_join_list, (VALUE)&arg)) {
811  return Qnil;
812  }
813  }
814 
815  thread_debug("thread_join: success (thid: %p)\n",
816  (void *)target_th->thread_id);
817 
818  if (target_th->errinfo != Qnil) {
819  VALUE err = target_th->errinfo;
820 
821  if (FIXNUM_P(err)) {
822  /* */
823  }
824  else if (RB_TYPE_P(target_th->errinfo, T_NODE)) {
827  }
828  else {
829  /* normal exception */
830  rb_exc_raise(err);
831  }
832  }
833  return target_th->self;
834 }
835 
836 /*
837  * call-seq:
838  * thr.join -> thr
839  * thr.join(limit) -> thr
840  *
841  * The calling thread will suspend execution and run <i>thr</i>. Does not
842  * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
843  * the time limit expires, <code>nil</code> will be returned, otherwise
844  * <i>thr</i> is returned.
845  *
846  * Any threads not joined will be killed when the main program exits. If
847  * <i>thr</i> had previously raised an exception and the
848  * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
849  * (so the exception has not yet been processed) it will be processed at this
850  * time.
851  *
852  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
853  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
854  * x.join # Let x thread finish, a will be killed on exit.
855  *
856  * <em>produces:</em>
857  *
858  * axyz
859  *
860  * The following example illustrates the <i>limit</i> parameter.
861  *
862  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
863  * puts "Waiting" until y.join(0.15)
864  *
865  * <em>produces:</em>
866  *
867  * tick...
868  * Waiting
869  * tick...
870  * Waitingtick...
871  *
872  *
873  * tick...
874  */
875 
876 static VALUE
878 {
879  rb_thread_t *target_th;
880  double delay = DELAY_INFTY;
881  VALUE limit;
882 
883  GetThreadPtr(self, target_th);
884 
885  rb_scan_args(argc, argv, "01", &limit);
886  if (!NIL_P(limit)) {
887  delay = rb_num2dbl(limit);
888  }
889 
890  return thread_join(target_th, delay);
891 }
892 
893 /*
894  * call-seq:
895  * thr.value -> obj
896  *
897  * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
898  * its value.
899  *
900  * a = Thread.new { 2 + 2 }
901  * a.value #=> 4
902  */
903 
904 static VALUE
906 {
907  rb_thread_t *th;
908  GetThreadPtr(self, th);
910  return th->value;
911 }
912 
913 /*
914  * Thread Scheduling
915  */
916 
917 static struct timeval
918 double2timeval(double d)
919 {
920  struct timeval time;
921 
922  if (isinf(d)) {
923  time.tv_sec = TIMET_MAX;
924  time.tv_usec = 0;
925  return time;
926  }
927 
928  time.tv_sec = (int)d;
929  time.tv_usec = (int)((d - (int)d) * 1e6);
930  if (time.tv_usec < 0) {
931  time.tv_usec += (int)1e6;
932  time.tv_sec -= 1;
933  }
934  return time;
935 }
936 
937 static void
938 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
939 {
940  enum rb_thread_status prev_status = th->status;
941  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
942 
943  th->status = status;
945  while (th->status == status) {
946  if (deadlockable) {
947  th->vm->sleeper++;
948  rb_check_deadlock(th->vm);
949  }
950  native_sleep(th, 0);
951  if (deadlockable) {
952  th->vm->sleeper--;
953  }
955  if (!spurious_check)
956  break;
957  }
958  th->status = prev_status;
959 }
960 
961 static void
963 {
964 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
965  struct timespec ts;
966 
967  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
968  tp->tv_sec = ts.tv_sec;
969  tp->tv_usec = ts.tv_nsec / 1000;
970  } else
971 #endif
972  {
973  gettimeofday(tp, NULL);
974  }
975 }
976 
977 static void
978 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
979 {
980  struct timeval to, tvn;
981  enum rb_thread_status prev_status = th->status;
982 
983  getclockofday(&to);
984  if (TIMET_MAX - tv.tv_sec < to.tv_sec)
985  to.tv_sec = TIMET_MAX;
986  else
987  to.tv_sec += tv.tv_sec;
988  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
989  if (to.tv_sec == TIMET_MAX)
990  to.tv_usec = 999999;
991  else {
992  to.tv_sec++;
993  to.tv_usec -= 1000000;
994  }
995  }
996 
997  th->status = THREAD_STOPPED;
999  while (th->status == THREAD_STOPPED) {
1000  native_sleep(th, &tv);
1002  getclockofday(&tvn);
1003  if (to.tv_sec < tvn.tv_sec) break;
1004  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1005  thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
1006  (long)to.tv_sec, (long)to.tv_usec,
1007  (long)tvn.tv_sec, (long)tvn.tv_usec);
1008  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1009  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1010  --tv.tv_sec;
1011  tv.tv_usec += 1000000;
1012  }
1013  if (!spurious_check)
1014  break;
1015  }
1016  th->status = prev_status;
1017 }
1018 
1019 void
1021 {
1022  thread_debug("rb_thread_sleep_forever\n");
1023  sleep_forever(GET_THREAD(), 0, 1);
1024 }
1025 
1026 static void
1028 {
1029  thread_debug("rb_thread_sleep_deadly\n");
1030  sleep_forever(GET_THREAD(), 1, 1);
1031 }
1032 
1033 static double
1035 {
1036 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1037  struct timespec tp;
1038 
1039  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1040  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
1041  } else
1042 #endif
1043  {
1044  struct timeval tv;
1045  gettimeofday(&tv, NULL);
1046  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
1047  }
1048 }
1049 
1050 static void
1051 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1052 {
1053  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1054 }
1055 
1056 static void
1058 {
1059  struct timeval time;
1060  time.tv_sec = 0;
1061  time.tv_usec = 100 * 1000; /* 0.1 sec */
1062  sleep_timeval(th, time, 1);
1063 }
1064 
1065 void
1067 {
1068  rb_thread_t *th = GET_THREAD();
1069  sleep_timeval(th, time, 1);
1070 }
1071 
1072 void
1074 {
1075  if (!rb_thread_alone()) {
1076  rb_thread_t *th = GET_THREAD();
1078  sleep_for_polling(th);
1079  }
1080 }
1081 
1082 /*
1083  * CAUTION: This function causes thread switching.
1084  * rb_thread_check_ints() check ruby's interrupts.
1085  * some interrupt needs thread switching/invoke handlers,
1086  * and so on.
1087  */
1088 
1089 void
1091 {
1093 }
1094 
1095 /*
1096  * Hidden API for tcl/tk wrapper.
1097  * There is no guarantee to perpetuate it.
1098  */
1099 int
1101 {
1102  return rb_signal_buff_size() != 0;
1103 }
1104 
1105 /* This function can be called in blocking region. */
1106 int
1108 {
1109  rb_thread_t *th;
1110  GetThreadPtr(thval, th);
1111  return (int)RUBY_VM_INTERRUPTED(th);
1112 }
1113 
1114 void
1116 {
1118 }
1119 
1120 static void
1121 rb_thread_schedule_limits(unsigned long limits_us)
1122 {
1123  thread_debug("rb_thread_schedule\n");
1124  if (!rb_thread_alone()) {
1125  rb_thread_t *th = GET_THREAD();
1126 
1127  if (th->running_time_us >= limits_us) {
1128  thread_debug("rb_thread_schedule/switch start\n");
1130  gvl_yield(th->vm, th);
1132  thread_debug("rb_thread_schedule/switch done\n");
1133  }
1134  }
1135 }
1136 
1137 void
1139 {
1140  rb_thread_t *cur_th = GET_THREAD();
1142 
1143  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(cur_th))) {
1145  }
1146 }
1147 
1148 /* blocking region */
1149 
1150 static inline int
1152  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1153 {
1154  region->prev_status = th->status;
1155  if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
1156  th->blocking_region_buffer = region;
1157  th->status = THREAD_STOPPED;
1158  thread_debug("enter blocking region (%p)\n", (void *)th);
1160  gvl_release(th->vm);
1161  return TRUE;
1162  }
1163  else {
1164  return FALSE;
1165  }
1166 }
1167 
1168 static inline void
1170 {
1171  gvl_acquire(th->vm, th);
1173  thread_debug("leave blocking region (%p)\n", (void *)th);
1174  remove_signal_thread_list(th);
1175  th->blocking_region_buffer = 0;
1176  reset_unblock_function(th, &region->oldubf);
1177  if (th->status == THREAD_STOPPED) {
1178  th->status = region->prev_status;
1179  }
1180 }
1181 
1184 {
1185  rb_thread_t *th = GET_THREAD();
1187  blocking_region_begin(th, region, ubf_select, th, FALSE);
1188  return region;
1189 }
1190 
1191 void
1193 {
1194  int saved_errno = errno;
1195  rb_thread_t *th = ruby_thread_from_native();
1196  blocking_region_end(th, region);
1197  xfree(region);
1199  errno = saved_errno;
1200 }
1201 
1202 static void *
1203 call_without_gvl(void *(*func)(void *), void *data1,
1204  rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
1205 {
1206  void *val = 0;
1207 
1208  rb_thread_t *th = GET_THREAD();
1209  int saved_errno = 0;
1210 
1211  th->waiting_fd = -1;
1212  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1213  ubf = ubf_select;
1214  data2 = th;
1215  }
1216 
1217  BLOCKING_REGION({
1218  val = func(data1);
1219  saved_errno = errno;
1220  }, ubf, data2, fail_if_interrupted);
1221 
1222  if (!fail_if_interrupted) {
1224  }
1225 
1226  errno = saved_errno;
1227 
1228  return val;
1229 }
1230 
1231 /*
1232  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1233  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1234  * without interrupt processing.
1235  *
1236  * rb_thread_call_without_gvl() does:
1237  * (1) Check interrupts.
1238  * (2) release GVL.
1239  * Other Ruby threads may run in parallel.
1240  * (3) call func with data1
1241  * (4) acquire GVL.
1242  * Other Ruby threads can not run in parallel any more.
1243  * (5) Check interrupts.
1244  *
1245  * rb_thread_call_without_gvl2() does:
1246  * (1) Check interrupt and return if interrupted.
1247  * (2) release GVL.
1248  * (3) call func with data1 and a pointer to the flags.
1249  * (4) acquire GVL.
1250  *
1251  * If another thread interrupts this thread (Thread#kill, signal delivery,
1252  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1253  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1254  * toggling a cancellation flag, canceling the invocation of a call inside
1255  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1256  *
1257  * There are built-in ubfs and you can specify these ubfs:
1258  *
1259  * * RUBY_UBF_IO: ubf for IO operation
1260  * * RUBY_UBF_PROCESS: ubf for process operation
1261  *
1262  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1263  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1264  * provide proper ubf(), your program will not stop for Control+C or other
1265  * shutdown events.
1266  *
1267  * "Check interrupts" on above list means that check asynchronous
1268  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1269  * request, and so on) and call corresponding procedures
1270  * (such as `trap' for signals, raise an exception for Thread#raise).
1271  * If `func()' finished and receive interrupts, you may skip interrupt
1272  * checking. For example, assume the following func() it read data from file.
1273  *
1274  * read_func(...) {
1275  * // (a) before read
1276  * read(buffer); // (b) reading
1277  * // (c) after read
1278  * }
1279  *
1280  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1281  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1282  * at (c), after the *read* operation is completed, checking interrupts is harmful
1283  * because it causes an irrevocable side effect: the read data will vanish. To
1284  * avoid such problem, the `read_func()' should be used with
1285  * `rb_thread_call_without_gvl2()'.
1286  *
1287  * If `rb_thread_call_without_gvl2()' detects interrupt, return its execution
1288  * immediately. This function does not show when the execution was interrupted.
1289  * For example, there are 4 possible timing (a), (b), (c) and before calling
1290  * read_func(). You need to record progress of a read_func() and check
1291  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1292  * `rb_thread_check_ints()' correctly or your program will not be able to
1293  * perform proper processing, such as `trap' handlers, and so on.
1294  *
1295  * NOTE: You can not execute most of Ruby C API and touch Ruby
1296  * objects in `func()' and `ubf()', including raising an
1297  * exception, because current thread doesn't acquire GVL
1298  * (it causes synchronization problems). If you need to
1299  * call ruby functions either use rb_thread_call_with_gvl()
1300  * or read source code of C APIs and confirm safety by
1301  * yourself.
1302  *
1303  * NOTE: In short, this API is difficult to use safely. I recommend you
1304  * use other ways if you have. We lack experiences to use this API.
1305  * Please report your problem related on it.
1306  *
1307  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1308  * for a short running `func()'. Be sure to benchmark and use this
1309  * mechanism when `func()' consumes enough time.
1310  *
1311  * Safe C API:
1312  * * rb_thread_interrupted() - check interrupt flag
1313  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1314  * they will work without GVL, and may acquire GVL when GC is needed.
1315  */
1316 void *
1317 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1318  rb_unblock_function_t *ubf, void *data2)
1319 {
1320  return call_without_gvl(func, data1, ubf, data2, TRUE);
1321 }
1322 
1323 void *
1324 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1325  rb_unblock_function_t *ubf, void *data2)
1326 {
1327  return call_without_gvl(func, data1, ubf, data2, FALSE);
1328 }
1329 
/* NOTE(review): the extraction dropped the line carrying this function's
 * name (source line 1331).  Judging by the body it is the fd-tracking
 * blocking-region helper (rb_thread_io_blocking_region in this release)
 * -- TODO confirm against the original thread.c. */
1330 VALUE
1332 {
1333  VALUE val = Qundef; /* shouldn't be used */
1334  rb_thread_t *th = GET_THREAD();
1335  int saved_errno = 0;
1336  int state;
1337 
 /* record the fd being waited on so other code (e.g. fd-close handling)
  * can find and interrupt this thread */
1338  th->waiting_fd = fd;
1339 
1340  TH_PUSH_TAG(th);
1341  if ((state = EXEC_TAG()) == 0) {
 /* run func(data1) with the GVL released; errno is captured inside the
  * region because reacquiring the GVL may clobber it */
1342  BLOCKING_REGION({
1343  val = func(data1);
1344  saved_errno = errno;
1345  }, ubf_select, th, FALSE);
1346  }
1347  TH_POP_TAG();
1348 
1349  /* clear waiting_fd on any exit path (normal or non-local) */
1350  th->waiting_fd = -1;
1351 
1352  if (state) {
 /* re-raise the non-local exit captured by EXEC_TAG() */
1353  JUMP_TAG(state);
1354  }
1355  /* TODO: check func() */
 /* NOTE(review): source line 1356 was lost in extraction (presumably an
  * interrupt check) -- TODO confirm. */
1357 
1358  errno = saved_errno;
1359 
1360  return val;
1361 }
1362 
1363 VALUE
1365  rb_blocking_function_t *func, void *data1,
1366  rb_unblock_function_t *ubf, void *data2)
1367 {
1368  void *(*f)(void*) = (void *(*)(void*))func;
1369  return (VALUE)rb_thread_call_without_gvl(f, data1, ubf, data2);
1370 }
1371 
1372 /*
1373  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1374  *
1375  * After releasing GVL using rb_thread_blocking_region() or
1376  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1377  * methods. If you need to access Ruby you must use this function
1378  * rb_thread_call_with_gvl().
1379  *
1380  * This function rb_thread_call_with_gvl() does:
1381  * (1) acquire GVL.
1382  * (2) call passed function `func'.
1383  * (3) release GVL.
1384  * (4) return a value which is returned at (2).
1385  *
1386  * NOTE: You should not return a Ruby object at (2) because such an object
1387  * will not be marked.
1388  *
1389  * NOTE: If an exception is raised in `func', this function DOES NOT
1390  * protect (catch) the exception. If you have any resources
1391  * which should be freed before throwing the exception, you need to use
1392  * rb_protect() in `func' and return a value which represents
1393  * exception is raised.
1394  *
1395  * NOTE: This function should not be called by a thread which was not
1396  * created as Ruby thread (created by Thread.new or so). In other
1397  * words, this function *DOES NOT* associate or convert a NON-Ruby
1398  * thread to a Ruby thread.
1399  */
1400 void *
1401 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1402 {
1403  rb_thread_t *th = ruby_thread_from_native();
1404  struct rb_blocking_region_buffer *brb;
1405  struct rb_unblock_callback prev_unblock;
1406  void *r;
1407 
1408  if (th == 0) {
1409  /* Error is occurred, but we can't use rb_bug()
1410  * because this thread is not Ruby's thread.
1411  * What should we do?
1412  */
1413 
1414  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1415  exit(EXIT_FAILURE);
1416  }
1417 
1419  prev_unblock = th->unblock;
1420 
1421  if (brb == 0) {
1422  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1423  }
1424 
1425  blocking_region_end(th, brb);
1426  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1427  r = (*func)(data1);
1428  /* leave from Ruby world: You can not access Ruby values, etc. */
1429  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1430  return r;
1431 }
1432 
1433 /*
1434  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1435  *
1436  ***
1437  *** This API is EXPERIMENTAL!
1438  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1439  ***
1440  */
1441 
/* ruby_thread_has_gvl_p (the function-name line was lost in extraction;
 * the name comes from the comment block above): returns 1 iff the
 * calling native thread is a Ruby thread that is not inside a blocking
 * region -- i.e. it currently holds the GVL -- and 0 otherwise
 * (including for non-Ruby native threads, where th is NULL). */
1442 int
1444 {
1445  rb_thread_t *th = ruby_thread_from_native();
1446 
 /* a NULL blocking_region_buffer means the GVL was not released */
1447  if (th && th->blocking_region_buffer == 0) {
1448  return 1;
1449  }
1450  else {
1451  return 0;
1452  }
1453 }
1454 
1455 /*
1456  * call-seq:
1457  * Thread.pass -> nil
1458  *
1459  * Give the thread scheduler a hint to pass execution to another thread.
1460  * A running thread may or may not switch, it depends on OS and processor.
1461  */
1462 
1463 static VALUE
1465 {
1467  return Qnil;
1468 }
1469 
1470 /*****************************************************/
1471 
1472 /*
1473  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1474  *
1475  * Async events such as an exception thrown by Thread#raise,
1476  * Thread#kill and thread termination (after main thread termination)
1477  * will be queued to th->pending_interrupt_queue.
1478  * - clear: clear the queue.
1479  * - enque: enque err object into queue.
1480  * - deque: deque err object from queue.
1481  * - active_p: return 1 if the queue should be checked.
1482  *
1483  * All rb_threadptr_pending_interrupt_* functions are called by
1484  * a GVL acquired thread, of course.
1485  * Note that all "rb_" prefix APIs need GVL to call.
1486  */
1487 
1488 void
1490 {
1492 }
1493 
1494 void
1496 {
1499 }
1500 
1506 };
1507 
/* Pending-interrupt mask check (the function-name line was lost in
 * extraction -- TODO confirm): decide how a pending interrupt whose
 * class is `err' should be delivered, by consulting the mask stack
 * built by Thread.handle_interrupt.  Returns one of the
 * handle_interrupt_timing values, or INTERRUPT_NONE when no mask
 * mentions err or any of its ancestors. */
1508 static enum handle_interrupt_timing
1510 {
1511  VALUE mask;
1512  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1513  VALUE *mask_stack = RARRAY_PTR(th->pending_interrupt_mask_stack);
1514  VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
1515  long ancestors_len = RARRAY_LEN(ancestors);
1516  VALUE *ancestors_ptr = RARRAY_PTR(ancestors);
1517  int i, j;
1518 
 /* walk masks from the innermost (top of stack) outward */
1519  for (i=0; i<mask_stack_len; i++) {
1520  mask = mask_stack[mask_stack_len-(i+1)];
1521 
 /* check err itself first, then its ancestors, so the most specific
  * class entry in this mask wins */
1522  for (j=0; j<ancestors_len; j++) {
1523  VALUE klass = ancestors_ptr[j];
1524  VALUE sym;
1525 
1526  /* TODO: remove rb_intern() */
1527  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1528  if (sym == sym_immediate) {
1529  return INTERRUPT_IMMEDIATE;
1530  }
1531  else if (sym == sym_on_blocking) {
1532  return INTERRUPT_ON_BLOCKING;
1533  }
1534  else if (sym == sym_never) {
1535  return INTERRUPT_NEVER;
1536  }
1537  else {
1538  rb_raise(rb_eThreadError, "unknown mask signature");
1539  }
1540  }
1541  }
1542  /* no entry in this mask; try the next (outer) one */
1543  }
1544  return INTERRUPT_NONE;
1545 }
1546 
1547 static int
1549 {
1550  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1551 }
1552 
1553 static int
1555 {
1556  int i;
1557  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1559  if (rb_class_inherited_p(e, err)) {
1560  return TRUE;
1561  }
1562  }
1563  return FALSE;
1564 }
1565 
1566 static VALUE
1568 {
1569 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1570  int i;
1571 
1572  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1574 
1576 
1577  switch (mask_timing) {
1578  case INTERRUPT_ON_BLOCKING:
1579  if (timing != INTERRUPT_ON_BLOCKING) {
1580  break;
1581  }
1582  /* fall through */
1583  case INTERRUPT_NONE: /* default: IMMEDIATE */
1584  case INTERRUPT_IMMEDIATE:
1586  return err;
1587  case INTERRUPT_NEVER:
1588  break;
1589  }
1590  }
1591 
1593  return Qundef;
1594 #else
1598  }
1599  return err;
1600 #endif
1601 }
1602 
/* Pending-interrupt activity check (the function-name line was lost in
 * extraction): returns 1 when the pending-interrupt queue should be
 * examined now, 0 otherwise.
 * NOTE(review): the two guard conditions (source lines 1611 and 1615)
 * were lost in extraction -- only their early-return bodies remain
 * below, so the conditions cannot be documented from this view. */
1603 int
1605 {
1606  /*
1607  * For optimization, we don't check async errinfo queue
1608  * if neither it nor the thread interrupt mask has been changed
1609  * since the last check.
1610  */
1612  return 0;
1613  }
1614 
1616  return 0;
1617  }
1618 
1619  return 1;
1620 }
1621 
1622 static int
1624 {
1625  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1626  rb_raise(rb_eArgError, "unknown mask signature");
1627  }
1628 
1629  return ST_CONTINUE;
1630 }
1631 
1632 /*
1633  * call-seq:
1634  * Thread.handle_interrupt(hash) { ... } -> result of the block
1635  *
1636  * Changes asynchronous interrupt timing.
1637  *
1638  * _interrupt_ means asynchronous event and corresponding procedure
1639  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1640  * and main thread termination (if main thread terminates, then all
1641  * other thread will be killed).
1642  *
1643  * The given +hash+ has pairs like <code>ExceptionClass =>
1644  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1645  * the given block. The TimingSymbol can be one of the following symbols:
1646  *
1647  * [+:immediate+] Invoke interrupts immediately.
1648  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1649  * [+:never+] Never invoke all interrupts.
1650  *
1651  * _BlockingOperation_ means that the operation will block the calling thread,
1652  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1653  * operation executed without GVL.
1654  *
1655  * Masked asynchronous interrupts are delayed until they are enabled.
1656  * This method is similar to sigprocmask(3).
1657  *
1658  * === NOTE
1659  *
1660  * Asynchronous interrupts are difficult to use.
1661  *
1662  * If you need to communicate between threads, please consider to use another way such as Queue.
1663  *
1664  * Or use them with deep understanding about this method.
1665  *
1666  * === Usage
1667  *
1668  * In this example, we can guard from Thread#raise exceptions.
1669  *
1670  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1671  * ignored in the first block of the main thread. In the second
1672  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1673  *
1674  * th = Thread.new do
1675  * Thread.handle_interrupt(RuntimeError => :never) {
1676  * begin
1677  * # You can write resource allocation code safely.
1678  * Thread.handle_interrupt(RuntimeError => :immediate) {
1679  * # ...
1680  * }
1681  * ensure
1682  * # You can write resource deallocation code safely.
1683  * end
1684  * }
1685  * end
1686  * Thread.pass
1687  * # ...
1688  * th.raise "stop"
1689  *
1690  * While we are ignoring the RuntimeError exception, it's safe to write our
1691  * resource allocation code. Then, the ensure block is where we can safely
1692  * deallocate your resources.
1693  *
1694  * ==== Guarding from TimeoutError
1695  *
1696  * In the next example, we will guard from the TimeoutError exception. This
1697  * will help prevent from leaking resources when TimeoutError exceptions occur
1698  * during normal ensure clause. For this example we use the help of the
1699  * standard library Timeout, from lib/timeout.rb
1700  *
1701  * require 'timeout'
1702  * Thread.handle_interrupt(TimeoutError => :never) {
1703  * timeout(10){
1704  * # TimeoutError doesn't occur here
1705  * Thread.handle_interrupt(TimeoutError => :on_blocking) {
1706  * # possible to be killed by TimeoutError
1707  * # while blocking operation
1708  * }
1709  * # TimeoutError doesn't occur here
1710  * }
1711  * }
1712  *
1713  * In the first part of the +timeout+ block, we can rely on TimeoutError being
1714  * ignored. Then in the <code>TimeoutError => :on_blocking</code> block, any
1715  * operation that will block the calling thread is susceptible to a
1716  * TimeoutError exception being raised.
1717  *
1718  * ==== Stack control settings
1719  *
1720  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1721  * to control more than one ExceptionClass and TimingSymbol at a time.
1722  *
1723  * Thread.handle_interrupt(FooError => :never) {
1724  * Thread.handle_interrupt(BarError => :never) {
1725  * # FooError and BarError are prohibited.
1726  * }
1727  * }
1728  *
1729  * ==== Inheritance with ExceptionClass
1730  *
1731  * All exceptions inherited from the ExceptionClass parameter will be considered.
1732  *
1733  * Thread.handle_interrupt(Exception => :never) {
1734  * # all exceptions inherited from Exception are prohibited.
1735  * }
1736  *
1737  */
1738 static VALUE
1740 {
1741  VALUE mask;
1742  rb_thread_t *th = GET_THREAD();
1743  VALUE r = Qnil;
1744  int state;
1745 
1746  if (!rb_block_given_p()) {
1747  rb_raise(rb_eArgError, "block is needed.");
1748  }
1749 
1750  mask = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
1756  }
1757 
1758  TH_PUSH_TAG(th);
1759  if ((state = EXEC_TAG()) == 0) {
1760  r = rb_yield(Qnil);
1761  }
1762  TH_POP_TAG();
1763 
1768  }
1769 
1770  RUBY_VM_CHECK_INTS(th);
1771 
1772  if (state) {
1773  JUMP_TAG(state);
1774  }
1775 
1776  return r;
1777 }
1778 
1779 /*
1780  * call-seq:
1781  * target_thread.pending_interrupt?(error = nil) -> true/false
1782  *
1783  * Returns whether or not the asynchronous queue is empty for the target thread.
1784  *
1785  * If +error+ is given, then check only for +error+ type deferred events.
1786  *
1787  * See ::pending_interrupt? for more information.
1788  */
/* rb_thread_pending_interrupt_p (the function-name line was lost in
 * extraction): implementation of Thread#pending_interrupt?.  With no
 * argument, reports whether the target thread's async-event queue is
 * non-empty; with a class/module argument, reports whether a queued
 * event matches that class. */
1789 static VALUE
1791 {
1792  rb_thread_t *target_th;
1793 
1794  GetThreadPtr(target_thread, target_th);
1795 
1796  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
1797  return Qfalse;
1798  }
1799  else {
1800  if (argc == 1) {
1801  VALUE err;
1802  rb_scan_args(argc, argv, "01", &err);
 /* only a class or module makes sense as a rescue-style filter */
1803  if (!rb_obj_is_kind_of(err, rb_cModule)) {
1804  rb_raise(rb_eTypeError, "class or module required for rescue clause");
1805  }
1806  if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
1807  return Qtrue;
1808  }
1809  else {
1810  return Qfalse;
1811  }
1812  }
1813  return Qtrue;
1814  }
1815 }
1816 
1817 /*
1818  * call-seq:
1819  * Thread.pending_interrupt?(error = nil) -> true/false
1820  *
1821  * Returns whether or not the asynchronous queue is empty.
1822  *
1823  * Since Thread::handle_interrupt can be used to defer asynchronous events,
1824  * this method can be used to determine if there are any deferred events.
1825  *
1826  * If you find this method returns true, then you may finish +:never+ blocks.
1827  *
1828  * For example, the following method processes deferred asynchronous events
1829  * immediately.
1830  *
1831  * def Thread.kick_interrupt_immediately
1832  * Thread.handle_interrupt(Object => :immediate) {
1833  * Thread.pass
1834  * }
1835  * end
1836  *
1837  * If +error+ is given, then check only for +error+ type deferred events.
1838  *
1839  * === Usage
1840  *
1841  * th = Thread.new{
1842  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1843  * while true
1844  * ...
1845  * # reach safe point to invoke interrupt
1846  * if Thread.pending_interrupt?
1847  * Thread.handle_interrupt(Object => :immediate){}
1848  * end
1849  * ...
1850  * end
1851  * }
1852  * }
1853  * ...
1854  * th.raise # stop thread
1855  *
1856  * This example can also be written as the following, which you should use to
1857  * avoid asynchronous interrupts.
1858  *
1859  * flag = true
1860  * th = Thread.new{
1861  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1862  * while true
1863  * ...
1864  * # reach safe point to invoke interrupt
1865  * break if flag == false
1866  * ...
1867  * end
1868  * }
1869  * }
1870  * ...
1871  * flag = false # stop thread
1872  */
1873 
1874 static VALUE
1876 {
1877  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
1878 }
1879 
1880 static void
1882 {
1884  th->status = THREAD_RUNNABLE;
1885  th->to_kill = 1;
1886  th->errinfo = INT2FIX(TAG_FATAL);
1887  TH_JUMP_TAG(th, TAG_FATAL);
1888 }
1889 
/* Interrupt delivery loop (the function-name line, source line 1891, was
 * lost in extraction -- TODO confirm): processes all deliverable
 * interrupts for `th': trap handlers, cross-thread exceptions,
 * finalizers and timer/scheduling interrupts.
 * NOTE(review): several statement lines (source lines 1930, 1939, 1944
 * and 1951) were lost in extraction, so some branches below appear
 * empty or truncated. */
1890 void
1892 {
1893  if (th->raised_flag) return;
1894 
1895  while (1) {
1896  rb_atomic_t interrupt;
1897  rb_atomic_t old;
1898  int sig;
1899  int timer_interrupt;
1900  int pending_interrupt;
1901  int finalizer_interrupt;
1902  int trap_interrupt;
1903 
 /* atomically consume the unmasked interrupt bits, leaving the
  * masked bits pending in th->interrupt_flag */
1904  do {
1905  interrupt = th->interrupt_flag;
1906  old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
1907  } while (old != interrupt);
1908 
1909  interrupt &= (rb_atomic_t)~th->interrupt_mask;
1910  if (!interrupt)
1911  return;
1912 
1913  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
1914  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
1915  finalizer_interrupt = interrupt & FINALIZER_INTERRUPT_MASK;
1916  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
1917 
1918  /* signal handling (only the main thread runs trap handlers) */
1919  if (trap_interrupt && (th == th->vm->main_thread)) {
1920  enum rb_thread_status prev_status = th->status;
1921  th->status = THREAD_RUNNABLE;
1922  while ((sig = rb_get_next_signal()) != 0) {
1923  rb_signal_exec(th, sig);
1924  }
1925  th->status = prev_status;
1926  }
1927 
1928  /* exception from another thread */
1929  if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
 /* NOTE(review): the dequeue of `err' (source line 1930) was lost
  * in extraction. */
1931  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1932 
1933  if (err == Qundef) {
1934  /* no error */
1935  }
1936  else if (err == eKillSignal /* Thread#kill received */ ||
1937  err == eTerminateSignal /* Terminate thread */ ||
1938  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
1940  }
1941  else {
1942  /* set runnable if th was sleeping. */
1943  if (th->status == THREAD_STOPPED ||
1945  th->status = THREAD_RUNNABLE;
1946  rb_exc_raise(err);
1947  }
1948  }
1949 
1950  if (finalizer_interrupt) {
1952  }
1953 
 /* time-quantum expiry: charge the thread and maybe reschedule */
1954  if (timer_interrupt) {
1955  unsigned long limits_us = TIME_QUANTUM_USEC;
1956 
 /* higher priority => longer quantum, lower => shorter */
1957  if (th->priority > 0)
1958  limits_us <<= th->priority;
1959  else
1960  limits_us >>= -th->priority;
1961 
1962  if (th->status == THREAD_RUNNABLE)
1963  th->running_time_us += TIME_QUANTUM_USEC;
1964 
1965  EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0, Qundef);
1966 
1967  rb_thread_schedule_limits(limits_us);
1968  }
1969  }
1970 }
1971 
1972 void
1974 {
1975  rb_thread_t *th;
1976  GetThreadPtr(thval, th);
1978 }
1979 
1980 static void
1982 {
1984 }
1985 
1986 static VALUE
1988 {
1989  VALUE exc;
1990 
1991  if (rb_threadptr_dead(th)) {
1992  return Qnil;
1993  }
1994 
1995  if (argc == 0) {
1996  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
1997  }
1998  else {
1999  exc = rb_make_exception(argc, argv);
2000  }
2003  return Qnil;
2004 }
2005 
2006 void
2008 {
2009  VALUE argv[2];
2010 
2011  argv[0] = rb_eSignal;
2012  argv[1] = INT2FIX(sig);
2013  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2014 }
2015 
2016 void
2018 {
2019  VALUE argv[2];
2020 
2021  argv[0] = rb_eSystemExit;
2022  argv[1] = rb_str_new2("exit");
2023  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2024 }
2025 
2026 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2027 #define USE_SIGALTSTACK
2028 #endif
2029 
2030 void
2032 {
2033  th->raised_flag = 0;
2034 #ifdef USE_SIGALTSTACK
2036 #else
2037  th->errinfo = sysstack_error;
2038  TH_JUMP_TAG(th, TAG_RAISE);
2039 #endif
2040 }
2041 
2042 int
2044 {
2045  if (th->raised_flag & RAISED_EXCEPTION) {
2046  return 1;
2047  }
2049  return 0;
2050 }
2051 
2052 int
2054 {
2055  if (!(th->raised_flag & RAISED_EXCEPTION)) {
2056  return 0;
2057  }
2058  th->raised_flag &= ~RAISED_EXCEPTION;
2059  return 1;
2060 }
2061 
2062 static int
2064 {
2065  int fd = (int)data;
2066  rb_thread_t *th;
2067  GetThreadPtr((VALUE)key, th);
2068 
2069  if (th->waiting_fd == fd) {
2073  }
2074  return ST_CONTINUE;
2075 }
2076 
2077 void
2079 {
2080  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
2081 }
2082 
2083 /*
2084  * call-seq:
2085  * thr.raise
2086  * thr.raise(string)
2087  * thr.raise(exception [, string [, array]])
2088  *
2089  * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
2090  * caller does not have to be <i>thr</i>.
2091  *
2092  * Thread.abort_on_exception = true
2093  * a = Thread.new { sleep(200) }
2094  * a.raise("Gotcha")
2095  *
2096  * <em>produces:</em>
2097  *
2098  * prog.rb:3: Gotcha (RuntimeError)
2099  * from prog.rb:2:in `initialize'
2100  * from prog.rb:2:in `new'
2101  * from prog.rb:2
2102  */
2103 
/* thread_raise_m (the function-name line was lost in extraction):
 * implementation of Thread#raise -- queues an exception on the target
 * thread; see the call-seq comment above. */
2104 static VALUE
2106 {
2107  rb_thread_t *target_th;
2108  rb_thread_t *th = GET_THREAD();
2109  GetThreadPtr(self, target_th);
2110  rb_threadptr_raise(target_th, argc, argv);
2111 
2112  /* To perform Thread.current.raise as Kernel.raise */
2113  if (th == target_th) {
2114  RUBY_VM_CHECK_INTS(th);
2115  }
2116  return Qnil;
2117 }
2118 
2119 
2120 /*
2121  * call-seq:
2122  * thr.exit -> thr or nil
2123  * thr.kill -> thr or nil
2124  * thr.terminate -> thr or nil
2125  *
2126  * Terminates <i>thr</i> and schedules another thread to be run. If this thread
2127  * is already marked to be killed, <code>exit</code> returns the
2128  * <code>Thread</code>. If this is the main thread, or the last thread, exits
2129  * the process.
2130  */
2131 
/* rb_thread_kill (the function-name line was lost in extraction):
 * implementation of Thread#kill/#exit/#terminate (see comment above).
 * NOTE(review): source lines 2146, 2153 and 2156-2157 (the main-thread
 * exit call and the self/other kill actions) were lost in extraction,
 * leaving the branches below empty. */
2132 VALUE
2134 {
2135  rb_thread_t *th;
2136 
2137  GetThreadPtr(thread, th);
2138 
 /* NOTE(review): rb_secure(4) guards killing another thread whose
  * safe_level is below 4 -- confirm exact $SAFE semantics. */
2139  if (th != GET_THREAD() && th->safe_level < 4) {
2140  rb_secure(4);
2141  }
 /* already dying or dead: nothing to do, return the Thread */
2142  if (th->to_kill || th->status == THREAD_KILLED) {
2143  return thread;
2144  }
2145  if (th == th->vm->main_thread) {
2147  }
2148 
2149  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
2150 
2151  if (th == GET_THREAD()) {
2152  /* kill myself immediately */
2154  }
2155  else {
2158  }
2159  return thread;
2160 }
2161 
2162 
2163 /*
2164  * call-seq:
2165  * Thread.kill(thread) -> thread
2166  *
2167  * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
2168  *
2169  * count = 0
2170  * a = Thread.new { loop { count += 1 } }
2171  * sleep(0.1) #=> 0
2172  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2173  * count #=> 93947
2174  * a.alive? #=> false
2175  */
2176 
2177 static VALUE
2179 {
2180  return rb_thread_kill(th);
2181 }
2182 
2183 
2184 /*
2185  * call-seq:
2186  * Thread.exit -> thread
2187  *
2188  * Terminates the currently running thread and schedules another thread to be
2189  * run. If this thread is already marked to be killed, <code>exit</code>
2190  * returns the <code>Thread</code>. If this is the main thread, or the last
2191  * thread, exit the process.
2192  */
2193 
2194 static VALUE
2196 {
2197  rb_thread_t *th = GET_THREAD();
2198  return rb_thread_kill(th->self);
2199 }
2200 
2201 
2202 /*
2203  * call-seq:
2204  * thr.wakeup -> thr
2205  *
2206  * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
2207  * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
2208  *
2209  * c = Thread.new { Thread.stop; puts "hey!" }
2210  * sleep 0.1 while c.status!='sleep'
2211  * c.wakeup
2212  * c.join
2213  *
2214  * <em>produces:</em>
2215  *
2216  * hey!
2217  */
2218 
2219 VALUE
2221 {
2222  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2223  rb_raise(rb_eThreadError, "killed thread");
2224  }
2225  return thread;
2226 }
2227 
/* rb_thread_wakeup_alive (the function-name line was lost in
 * extraction): mark `thread' runnable if it is sleeping; returns the
 * thread, or Qnil if it is already dead (unlike rb_thread_wakeup just
 * above, which raises ThreadError in that case). */
2228 VALUE
2230 {
2231  rb_thread_t *th;
2232  GetThreadPtr(thread, th);
2233 
2234  if (th->status == THREAD_KILLED) {
2235  return Qnil;
2236  }
2237  rb_threadptr_ready(th);
 /* only flip the status for threads stopped via Thread.stop / sleep */
2238  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2239  th->status = THREAD_RUNNABLE;
2240  return thread;
2241 }
2242 
2243 
2244 /*
2245  * call-seq:
2246  * thr.run -> thr
2247  *
2248  * Wakes up <i>thr</i>, making it eligible for scheduling.
2249  *
2250  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2251  * sleep 0.1 while a.status!='sleep'
2252  * puts "Got here"
2253  * a.run
2254  * a.join
2255  *
2256  * <em>produces:</em>
2257  *
2258  * a
2259  * Got here
2260  * c
2261  */
2262 
2263 VALUE
2265 {
2266  rb_thread_wakeup(thread);
2268  return thread;
2269 }
2270 
2271 
2272 /*
2273  * call-seq:
2274  * Thread.stop -> nil
2275  *
2276  * Stops execution of the current thread, putting it into a ``sleep'' state,
2277  * and schedules execution of another thread.
2278  *
2279  * a = Thread.new { print "a"; Thread.stop; print "c" }
2280  * sleep 0.1 while a.status!='sleep'
2281  * print "b"
2282  * a.run
2283  * a.join
2284  *
2285  * <em>produces:</em>
2286  *
2287  * abc
2288  */
2289 
2290 VALUE
2292 {
2293  if (rb_thread_alone()) {
2295  "stopping only thread\n\tnote: use sleep to stop forever");
2296  }
2298  return Qnil;
2299 }
2300 
2301 static int
2303 {
2304  VALUE ary = (VALUE)data;
2305  rb_thread_t *th;
2306  GetThreadPtr((VALUE)key, th);
2307 
2308  switch (th->status) {
2309  case THREAD_RUNNABLE:
2310  case THREAD_STOPPED:
2312  rb_ary_push(ary, th->self);
2313  default:
2314  break;
2315  }
2316  return ST_CONTINUE;
2317 }
2318 
2319 /********************************************************************/
2320 
2321 /*
2322  * call-seq:
2323  * Thread.list -> array
2324  *
2325  * Returns an array of <code>Thread</code> objects for all threads that are
2326  * either runnable or stopped.
2327  *
2328  * Thread.new { sleep(200) }
2329  * Thread.new { 1000000.times {|i| i*i } }
2330  * Thread.new { Thread.stop }
2331  * Thread.list.each {|t| p t}
2332  *
2333  * <em>produces:</em>
2334  *
2335  * #<Thread:0x401b3e84 sleep>
2336  * #<Thread:0x401b3f38 run>
2337  * #<Thread:0x401b3fb0 sleep>
2338  * #<Thread:0x401bdf4c run>
2339  */
2340 
2341 VALUE
2343 {
2344  VALUE ary = rb_ary_new();
2345  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
2346  return ary;
2347 }
2348 
2349 VALUE
2351 {
2352  return GET_THREAD()->self;
2353 }
2354 
2355 /*
2356  * call-seq:
2357  * Thread.current -> thread
2358  *
2359  * Returns the currently executing thread.
2360  *
2361  * Thread.current #=> #<Thread:0x401bdf4c run>
2362  */
2363 
2364 static VALUE
2366 {
2367  return rb_thread_current();
2368 }
2369 
2370 VALUE
2372 {
2373  return GET_THREAD()->vm->main_thread->self;
2374 }
2375 
2376 /*
2377  * call-seq:
2378  * Thread.main -> thread
2379  *
2380  * Returns the main thread.
2381  */
2382 
2383 static VALUE
2385 {
2386  return rb_thread_main();
2387 }
2388 
2389 
2390 /*
2391  * call-seq:
2392  * Thread.abort_on_exception -> true or false
2393  *
2394  * Returns the status of the global ``abort on exception'' condition. The
2395  * default is <code>false</code>. When set to <code>true</code>, or if the
2396  * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
2397  * command line option <code>-d</code> was specified) all threads will abort
2398  * (the process will <code>exit(0)</code>) if an exception is raised in any
2399  * thread. See also <code>Thread::abort_on_exception=</code>.
2400  */
2401 
2402 static VALUE
2404 {
2406 }
2407 
2408 
2409 /*
2410  * call-seq:
2411  * Thread.abort_on_exception= boolean -> true or false
2412  *
2413  * When set to <code>true</code>, all threads will abort if an exception is
2414  * raised. Returns the new state.
2415  *
2416  * Thread.abort_on_exception = true
2417  * t1 = Thread.new do
2418  * puts "In new thread"
2419  * raise "Exception from thread"
2420  * end
2421  * sleep(1)
2422  * puts "not reached"
2423  *
2424  * <em>produces:</em>
2425  *
2426  * In new thread
2427  * prog.rb:4: Exception from thread (RuntimeError)
2428  * from prog.rb:2:in `initialize'
2429  * from prog.rb:2:in `new'
2430  * from prog.rb:2
2431  */
2432 
2433 static VALUE
2435 {
2436  rb_secure(4);
2438  return val;
2439 }
2440 
2441 
2442 /*
2443  * call-seq:
2444  * thr.abort_on_exception -> true or false
2445  *
2446  * Returns the status of the thread-local ``abort on exception'' condition for
2447  * <i>thr</i>. The default is <code>false</code>. See also
2448  * <code>Thread::abort_on_exception=</code>.
2449  */
2450 
2451 static VALUE
2453 {
2454  rb_thread_t *th;
2455  GetThreadPtr(thread, th);
2456  return th->abort_on_exception ? Qtrue : Qfalse;
2457 }
2458 
2459 
2460 /*
2461  * call-seq:
2462  * thr.abort_on_exception= boolean -> true or false
2463  *
2464  * When set to <code>true</code>, causes all threads (including the main
2465  * program) to abort if an exception is raised in <i>thr</i>. The process will
2466  * effectively <code>exit(0)</code>.
2467  */
2468 
2469 static VALUE
2471 {
2472  rb_thread_t *th;
2473  rb_secure(4);
2474 
2475  GetThreadPtr(thread, th);
2476  th->abort_on_exception = RTEST(val);
2477  return val;
2478 }
2479 
2480 
2481 /*
2482  * call-seq:
2483  * thr.group -> thgrp or nil
2484  *
2485  * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
2486  * the thread is not a member of any group.
2487  *
2488  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2489  */
2490 
2491 VALUE
2493 {
2494  rb_thread_t *th;
2495  VALUE group;
2496  GetThreadPtr(thread, th);
2497  group = th->thgroup;
2498 
2499  if (!group) {
2500  group = Qnil;
2501  }
2502  return group;
2503 }
2504 
/* thread_status_name (the function-name line was lost in extraction):
 * map a thread's status to the string used by Thread#status and
 * Thread#inspect.
 * NOTE(review): a case label (source line 2515, presumably
 * THREAD_STOPPED_FOREVER sharing the "sleep" branch) was lost in
 * extraction -- TODO confirm. */
2505 static const char *
2507 {
2508  switch (th->status) {
2509  case THREAD_RUNNABLE:
 /* runnable but marked for death reads as "aborting" */
2510  if (th->to_kill)
2511  return "aborting";
2512  else
2513  return "run";
2514  case THREAD_STOPPED:
2516  return "sleep";
2517  case THREAD_KILLED:
2518  return "dead";
2519  default:
2520  return "unknown";
2521  }
2522 }
2523 
/* rb_threadptr_dead (the function-name line was lost in extraction):
 * nonzero iff the thread has terminated (status THREAD_KILLED). */
2524 static int
2526 {
2527  return th->status == THREAD_KILLED;
2528 }
2529 
2530 
2531 /*
2532  * call-seq:
2533  * thr.status -> string, false or nil
2534  *
2535  * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
2536  * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
2537  * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
2538  * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
2539  * terminated with an exception.
2540  *
2541  * a = Thread.new { raise("die now") }
2542  * b = Thread.new { Thread.stop }
2543  * c = Thread.new { Thread.exit }
2544  * d = Thread.new { sleep }
2545  * d.kill #=> #<Thread:0x401b3678 aborting>
2546  * a.status #=> nil
2547  * b.status #=> "sleep"
2548  * c.status #=> false
2549  * d.status #=> "aborting"
2550  * Thread.current.status #=> "run"
2551  */
2552 
/* thread_status (the function-name line was lost in extraction):
 * implementation of Thread#status -- a status string for live threads,
 * false for normal termination, nil for termination by exception
 * (see the call-seq comment above). */
2553 static VALUE
2555 {
2556  rb_thread_t *th;
2557  GetThreadPtr(thread, th);
2558 
2559  if (rb_threadptr_dead(th)) {
 /* a non-nil, non-Fixnum errinfo means the thread died raising */
2560  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
2561  /* TODO */ ) {
2562  return Qnil;
2563  }
2564  return Qfalse;
2565  }
2566  return rb_str_new2(thread_status_name(th));
2567 }
2568 
2569 
2570 /*
2571  * call-seq:
2572  * thr.alive? -> true or false
2573  *
2574  * Returns <code>true</code> if <i>thr</i> is running or sleeping.
2575  *
2576  * thr = Thread.new { }
2577  * thr.join #=> #<Thread:0x401b3fb0 dead>
2578  * Thread.current.alive? #=> true
2579  * thr.alive? #=> false
2580  */
2581 
2582 static VALUE
2584 {
2585  rb_thread_t *th;
2586  GetThreadPtr(thread, th);
2587 
2588  if (rb_threadptr_dead(th))
2589  return Qfalse;
2590  return Qtrue;
2591 }
2592 
2593 /*
2594  * call-seq:
2595  * thr.stop? -> true or false
2596  *
2597  * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
2598  *
2599  * a = Thread.new { Thread.stop }
2600  * b = Thread.current
2601  * a.stop? #=> true
2602  * b.stop? #=> false
2603  */
2604 
2605 static VALUE
2607 {
2608  rb_thread_t *th;
2609  GetThreadPtr(thread, th);
2610 
2611  if (rb_threadptr_dead(th))
2612  return Qtrue;
2613  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2614  return Qtrue;
2615  return Qfalse;
2616 }
2617 
2618 /*
2619  * call-seq:
2620  * thr.safe_level -> integer
2621  *
2622  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2623  * levels can help when implementing sandboxes which run insecure code.
2624  *
2625  * thr = Thread.new { $SAFE = 3; sleep }
2626  * Thread.current.safe_level #=> 0
2627  * thr.safe_level #=> 3
2628  */
2629 
2630 static VALUE
2632 {
2633  rb_thread_t *th;
2634  GetThreadPtr(thread, th);
2635 
2636  return INT2NUM(th->safe_level);
2637 }
2638 
2639 /*
2640  * call-seq:
2641  * thr.inspect -> string
2642  *
2643  * Dump the name, id, and status of _thr_ to a string.
2644  */
2645 
2646 static VALUE
2648 {
2649  const char *cname = rb_obj_classname(thread);
2650  rb_thread_t *th;
2651  const char *status;
2652  VALUE str;
2653 
2654  GetThreadPtr(thread, th);
2655  status = thread_status_name(th);
2656  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2657  OBJ_INFECT(str, thread);
2658 
2659  return str;
2660 }
2661 
2662 VALUE
2664 {
2665  rb_thread_t *th;
2666  st_data_t val;
2667 
2668  GetThreadPtr(thread, th);
2669  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2670  rb_raise(rb_eSecurityError, "Insecure: thread locals");
2671  }
2672  if (!th->local_storage) {
2673  return Qnil;
2674  }
2675  if (st_lookup(th->local_storage, id, &val)) {
2676  return (VALUE)val;
2677  }
2678  return Qnil;
2679 }
2680 
2681 /*
2682  * call-seq:
2683  * thr[sym] -> obj or nil
2684  *
2685  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
2686  * if not explicitely inside a Fiber), using either a symbol or a string name.
2687  * If the specified variable does not exist, returns <code>nil</code>.
2688  *
2689  * [
2690  * Thread.new { Thread.current["name"] = "A" },
2691  * Thread.new { Thread.current[:name] = "B" },
2692  * Thread.new { Thread.current["name"] = "C" }
2693  * ].each do |th|
2694  * th.join
2695  * puts "#{th.inspect}: #{th[:name]}"
2696  * end
2697  *
2698  * <em>produces:</em>
2699  *
2700  * #<Thread:0x00000002a54220 dead>: A
2701  * #<Thread:0x00000002a541a8 dead>: B
2702  * #<Thread:0x00000002a54130 dead>: C
2703  *
2704  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
2705  * This confusion did not exist in Ruby 1.8 because
2706  * fibers were only available since Ruby 1.9.
2707  * Ruby 1.9 chooses that the methods behaves fiber-local to save
2708  * following idiom for dynamic scope.
2709  *
2710  * def meth(newvalue)
2711  * begin
2712  * oldvalue = Thread.current[:name]
2713  * Thread.current[:name] = newvalue
2714  * yield
2715  * ensure
2716  * Thread.current[:name] = oldvalue
2717  * end
2718  * end
2719  *
2720  * The idiom may not work as dynamic scope if the methods are thread-local
2721  * and a given block switches fiber.
2722  *
2723  * f = Fiber.new {
2724  * meth(1) {
2725  * Fiber.yield
2726  * }
2727  * }
2728  * meth(2) {
2729  * f.resume
2730  * }
2731  * f.resume
2732  * p Thread.current[:name]
2733  * #=> nil if fiber-local
2734  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
2735  *
2736  * For thread-local variables, please see <code>Thread#thread_local_get</code>
2737  * and <code>Thread#thread_local_set</code>.
2738  *
2739  */
2740 
2741 static VALUE
2743 {
2744  return rb_thread_local_aref(thread, rb_to_id(id));
2745 }
2746 
2747 VALUE
2749 {
2750  rb_thread_t *th;
2751  GetThreadPtr(thread, th);
2752 
2753  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2754  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2755  }
2756  if (OBJ_FROZEN(thread)) {
2757  rb_error_frozen("thread locals");
2758  }
2759  if (!th->local_storage) {
2761  }
2762  if (NIL_P(val)) {
2763  st_delete_wrap(th->local_storage, id);
2764  return Qnil;
2765  }
2766  st_insert(th->local_storage, id, val);
2767  return val;
2768 }
2769 
2770 /*
2771  * call-seq:
2772  * thr[sym] = obj -> obj
2773  *
2774  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
2775  * using either a symbol or a string. See also <code>Thread#[]</code>. For
2776  * thread-local variables, please see <code>Thread#thread_variable_set</code>
2777  * and <code>Thread#thread_variable_get</code>.
2778  */
2779 
2780 static VALUE
2782 {
2783  return rb_thread_local_aset(self, rb_to_id(id), val);
2784 }
2785 
2786 /*
2787  * call-seq:
2788  * thr.thread_variable_get(key) -> obj or nil
2789  *
2790  * Returns the value of a thread local variable that has been set. Note that
2791  * these are different than fiber local values. For fiber local values,
2792  * please see Thread#[] and Thread#[]=.
2793  *
2794  * Thread local values are carried along with threads, and do not respect
2795  * fibers. For example:
2796  *
2797  * Thread.new {
2798  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
2799  * Thread.current["foo"] = "bar" # set a fiber local
2800  *
2801  * Fiber.new {
2802  * Fiber.yield [
2803  * Thread.current.thread_variable_get("foo"), # get the thread local
2804  * Thread.current["foo"], # get the fiber local
2805  * ]
2806  * }.resume
2807  * }.join.value # => ['bar', nil]
2808  *
2809  * The value "bar" is returned for the thread local, where nil is returned
2810  * for the fiber local. The fiber is executed in the same thread, so the
2811  * thread local values are available.
2812  *
2813  * See also Thread#[]
2814  */
2815 
2816 static VALUE
2818 {
2819  VALUE locals;
2820  rb_thread_t *th;
2821 
2822  GetThreadPtr(thread, th);
2823 
2824  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2825  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2826  }
2827 
2828  locals = rb_iv_get(thread, "locals");
2829  return rb_hash_aref(locals, ID2SYM(rb_to_id(id)));
2830 }
2831 
2832 /*
2833  * call-seq:
2834  * thr.thread_variable_set(key, value)
2835  *
2836  * Sets a thread local with +key+ to +value+. Note that these are local to
2837  * threads, and not to fibers. Please see Thread#thread_variable_get and
2838  * Thread#[] for more information.
2839  */
2840 
2841 static VALUE
2843 {
2844  VALUE locals;
2845  rb_thread_t *th;
2846 
2847  GetThreadPtr(thread, th);
2848 
2849  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2850  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2851  }
2852  if (OBJ_FROZEN(thread)) {
2853  rb_error_frozen("thread locals");
2854  }
2855 
2856  locals = rb_iv_get(thread, "locals");
2857  return rb_hash_aset(locals, ID2SYM(rb_to_id(id)), val);
2858 }
2859 
2860 /*
2861  * call-seq:
2862  * thr.key?(sym) -> true or false
2863  *
2864  * Returns <code>true</code> if the given string (or symbol) exists as a
2865  * fiber-local variable.
2866  *
2867  * me = Thread.current
2868  * me[:oliver] = "a"
2869  * me.key?(:oliver) #=> true
2870  * me.key?(:stanley) #=> false
2871  */
2872 
2873 static VALUE
2875 {
2876  rb_thread_t *th;
2877  ID id = rb_to_id(key);
2878 
2879  GetThreadPtr(self, th);
2880 
2881  if (!th->local_storage) {
2882  return Qfalse;
2883  }
2884  if (st_lookup(th->local_storage, id, 0)) {
2885  return Qtrue;
2886  }
2887  return Qfalse;
2888 }
2889 
2890 static int
2892 {
2893  rb_ary_push(ary, ID2SYM(key));
2894  return ST_CONTINUE;
2895 }
2896 
2897 static int
2899 {
2900  return (int)vm->living_threads->num_entries;
2901 }
2902 
2903 int
2905 {
2906  int num = 1;
2907  if (GET_THREAD()->vm->living_threads) {
2908  num = vm_living_thread_num(GET_THREAD()->vm);
2909  thread_debug("rb_thread_alone: %d\n", num);
2910  }
2911  return num == 1;
2912 }
2913 
2914 /*
2915  * call-seq:
2916  * thr.keys -> array
2917  *
2918  * Returns an an array of the names of the fiber-local variables (as Symbols).
2919  *
2920  * thr = Thread.new do
2921  * Thread.current[:cat] = 'meow'
2922  * Thread.current["dog"] = 'woof'
2923  * end
2924  * thr.join #=> #<Thread:0x401b3f10 dead>
2925  * thr.keys #=> [:dog, :cat]
2926  */
2927 
2928 static VALUE
2930 {
2931  rb_thread_t *th;
2932  VALUE ary = rb_ary_new();
2933  GetThreadPtr(self, th);
2934 
2935  if (th->local_storage) {
2937  }
2938  return ary;
2939 }
2940 
2941 static int
2943 {
2944  rb_ary_push(ary, key);
2945  return ST_CONTINUE;
2946 }
2947 
2948 /*
2949  * call-seq:
2950  * thr.thread_variables -> array
2951  *
2952  * Returns an an array of the names of the thread-local variables (as Symbols).
2953  *
2954  * thr = Thread.new do
2955  * Thread.current.thread_variable_set(:cat, 'meow')
2956  * Thread.current.thread_variable_set("dog", 'woof')
2957  * end
2958  * thr.join #=> #<Thread:0x401b3f10 dead>
2959  * thr.thread_variables #=> [:dog, :cat]
2960  *
2961  * Note that these are not fiber local variables. Please see Thread#[] and
2962  * Thread#thread_variable_get for more details.
2963  */
2964 
2965 static VALUE
2967 {
2968  VALUE locals;
2969  VALUE ary;
2970 
2971  locals = rb_iv_get(thread, "locals");
2972  ary = rb_ary_new();
2973  rb_hash_foreach(locals, keys_i, ary);
2974 
2975  return ary;
2976 }
2977 
2978 /*
2979  * call-seq:
2980  * thr.thread_variable?(key) -> true or false
2981  *
2982  * Returns <code>true</code> if the given string (or symbol) exists as a
2983  * thread-local variable.
2984  *
2985  * me = Thread.current
2986  * me.thread_variable_set(:oliver, "a")
2987  * me.thread_variable?(:oliver) #=> true
2988  * me.thread_variable?(:stanley) #=> false
2989  *
2990  * Note that these are not fiber local variables. Please see Thread#[] and
2991  * Thread#thread_variable_get for more details.
2992  */
2993 
2994 static VALUE
2996 {
2997  VALUE locals;
2998 
2999  locals = rb_iv_get(thread, "locals");
3000 
3001  if (!RHASH(locals)->ntbl)
3002  return Qfalse;
3003 
3004  if (st_lookup(RHASH(locals)->ntbl, ID2SYM(rb_to_id(key)), 0)) {
3005  return Qtrue;
3006  }
3007 
3008  return Qfalse;
3009 }
3010 
3011 /*
3012  * call-seq:
3013  * thr.priority -> integer
3014  *
3015  * Returns the priority of <i>thr</i>. Default is inherited from the
3016  * current thread which creating the new thread, or zero for the
3017  * initial main thread; higher-priority thread will run more frequently
3018  * than lower-priority threads (but lower-priority threads can also run).
3019  *
3020  * This is just hint for Ruby thread scheduler. It may be ignored on some
3021  * platform.
3022  *
3023  * Thread.current.priority #=> 0
3024  */
3025 
3026 static VALUE
3028 {
3029  rb_thread_t *th;
3030  GetThreadPtr(thread, th);
3031  return INT2NUM(th->priority);
3032 }
3033 
3034 
3035 /*
3036  * call-seq:
3037  * thr.priority= integer -> thr
3038  *
3039  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3040  * will run more frequently than lower-priority threads (but lower-priority
3041  * threads can also run).
3042  *
3043  * This is just hint for Ruby thread scheduler. It may be ignored on some
3044  * platform.
3045  *
3046  * count1 = count2 = 0
3047  * a = Thread.new do
3048  * loop { count1 += 1 }
3049  * end
3050  * a.priority = -1
3051  *
3052  * b = Thread.new do
3053  * loop { count2 += 1 }
3054  * end
3055  * b.priority = -2
3056  * sleep 1 #=> 1
3057  * count1 #=> 622504
3058  * count2 #=> 5832
3059  */
3060 
3061 static VALUE
3063 {
3064  rb_thread_t *th;
3065  int priority;
3066  GetThreadPtr(thread, th);
3067 
3068  rb_secure(4);
3069 
3070 #if USE_NATIVE_THREAD_PRIORITY
3071  th->priority = NUM2INT(prio);
3072  native_thread_apply_priority(th);
3073 #else
3074  priority = NUM2INT(prio);
3075  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3076  priority = RUBY_THREAD_PRIORITY_MAX;
3077  }
3078  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3079  priority = RUBY_THREAD_PRIORITY_MIN;
3080  }
3081  th->priority = priority;
3082 #endif
3083  return INT2NUM(th->priority);
3084 }
3085 
3086 /* for IO */
3087 
3088 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3089 
3090 /*
3091  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3092  * in select(2) system call.
3093  *
3094  * - Linux 2.2.12 (?)
3095  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3096  * select(2) documents how to allocate fd_set dynamically.
3097  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3098  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3099  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3100  * select(2) documents how to allocate fd_set dynamically.
3101  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3102  * - HP-UX documents how to allocate fd_set dynamically.
3103  * http://docs.hp.com/en/B2355-60105/select.2.html
3104  * - Solaris 8 has select_large_fdset
3105  * - Mac OS X 10.7 (Lion)
3106  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3107  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3108  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3109  *
3110  * When fd_set is not big enough to hold big file descriptors,
3111  * it should be allocated dynamically.
3112  * Note that this assumes fd_set is structured as bitmap.
3113  *
3114  * rb_fd_init allocates the memory.
3115  * rb_fd_term free the memory.
3116  * rb_fd_set may re-allocates bitmap.
3117  *
3118  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3119  */
3120 
3121 void
3122 rb_fd_init(rb_fdset_t *fds)
3123 {
3124  fds->maxfd = 0;
3125  fds->fdset = ALLOC(fd_set);
3126  FD_ZERO(fds->fdset);
3127 }
3128 
3129 void
3131 {
3132  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3133 
3134  if (size < sizeof(fd_set))
3135  size = sizeof(fd_set);
3136  dst->maxfd = src->maxfd;
3137  dst->fdset = xmalloc(size);
3138  memcpy(dst->fdset, src->fdset, size);
3139 }
3140 
3141 void
3142 rb_fd_term(rb_fdset_t *fds)
3143 {
3144  if (fds->fdset) xfree(fds->fdset);
3145  fds->maxfd = 0;
3146  fds->fdset = 0;
3147 }
3148 
3149 void
3150 rb_fd_zero(rb_fdset_t *fds)
3151 {
3152  if (fds->fdset)
3153  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3154 }
3155 
3156 static void
3157 rb_fd_resize(int n, rb_fdset_t *fds)
3158 {
3159  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3160  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3161 
3162  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3163  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3164 
3165  if (m > o) {
3166  fds->fdset = xrealloc(fds->fdset, m);
3167  memset((char *)fds->fdset + o, 0, m - o);
3168  }
3169  if (n >= fds->maxfd) fds->maxfd = n + 1;
3170 }
3171 
3172 void
3173 rb_fd_set(int n, rb_fdset_t *fds)
3174 {
3175  rb_fd_resize(n, fds);
3176  FD_SET(n, fds->fdset);
3177 }
3178 
3179 void
3180 rb_fd_clr(int n, rb_fdset_t *fds)
3181 {
3182  if (n >= fds->maxfd) return;
3183  FD_CLR(n, fds->fdset);
3184 }
3185 
3186 int
3187 rb_fd_isset(int n, const rb_fdset_t *fds)
3188 {
3189  if (n >= fds->maxfd) return 0;
3190  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3191 }
3192 
3193 void
3194 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3195 {
3196  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3197 
3198  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3199  dst->maxfd = max;
3200  dst->fdset = xrealloc(dst->fdset, size);
3201  memcpy(dst->fdset, src, size);
3202 }
3203 
3204 static void
3205 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3206 {
3207  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3208 
3209  if (size > sizeof(fd_set)) {
3210  rb_raise(rb_eArgError, "too large fdsets");
3211  }
3212  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
3213 }
3214 
3215 void
3216 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3217 {
3218  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3219 
3220  if (size < sizeof(fd_set))
3221  size = sizeof(fd_set);
3222  dst->maxfd = src->maxfd;
3223  dst->fdset = xrealloc(dst->fdset, size);
3224  memcpy(dst->fdset, src->fdset, size);
3225 }
3226 
3227 #ifdef __native_client__
3228 int select(int nfds, fd_set *readfds, fd_set *writefds,
3229  fd_set *exceptfds, struct timeval *timeout);
3230 #endif
3231 
3232 int
3233 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3234 {
3235  fd_set *r = NULL, *w = NULL, *e = NULL;
3236  if (readfds) {
3237  rb_fd_resize(n - 1, readfds);
3238  r = rb_fd_ptr(readfds);
3239  }
3240  if (writefds) {
3241  rb_fd_resize(n - 1, writefds);
3242  w = rb_fd_ptr(writefds);
3243  }
3244  if (exceptfds) {
3245  rb_fd_resize(n - 1, exceptfds);
3246  e = rb_fd_ptr(exceptfds);
3247  }
3248  return select(n, r, w, e, timeout);
3249 }
3250 
3251 #undef FD_ZERO
3252 #undef FD_SET
3253 #undef FD_CLR
3254 #undef FD_ISSET
3255 
3256 #define FD_ZERO(f) rb_fd_zero(f)
3257 #define FD_SET(i, f) rb_fd_set((i), (f))
3258 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3259 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3260 
3261 #elif defined(_WIN32)
3262 
3263 void
3264 rb_fd_init(rb_fdset_t *set)
3265 {
3266  set->capa = FD_SETSIZE;
3267  set->fdset = ALLOC(fd_set);
3268  FD_ZERO(set->fdset);
3269 }
3270 
3271 void
3273 {
3274  rb_fd_init(dst);
3275  rb_fd_dup(dst, src);
3276 }
3277 
3278 static void
3279 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3280 {
3281  int max = rb_fd_max(src);
3282 
3283  /* we assume src is the result of select() with dst, so dst should be
3284  * larger or equal than src. */
3285  if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
3286  rb_raise(rb_eArgError, "too large fdsets");
3287  }
3288 
3289  memcpy(dst->fd_array, src->fdset->fd_array, max);
3290  dst->fd_count = max;
3291 }
3292 
3293 void
3294 rb_fd_term(rb_fdset_t *set)
3295 {
3296  xfree(set->fdset);
3297  set->fdset = NULL;
3298  set->capa = 0;
3299 }
3300 
3301 void
3302 rb_fd_set(int fd, rb_fdset_t *set)
3303 {
3304  unsigned int i;
3305  SOCKET s = rb_w32_get_osfhandle(fd);
3306 
3307  for (i = 0; i < set->fdset->fd_count; i++) {
3308  if (set->fdset->fd_array[i] == s) {
3309  return;
3310  }
3311  }
3312  if (set->fdset->fd_count >= (unsigned)set->capa) {
3313  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3314  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
3315  }
3316  set->fdset->fd_array[set->fdset->fd_count++] = s;
3317 }
3318 
3319 #undef FD_ZERO
3320 #undef FD_SET
3321 #undef FD_CLR
3322 #undef FD_ISSET
3323 
3324 #define FD_ZERO(f) rb_fd_zero(f)
3325 #define FD_SET(i, f) rb_fd_set((i), (f))
3326 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3327 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3328 
3329 #else
3330 #define rb_fd_rcopy(d, s) (*(d) = *(s))
3331 #endif
3332 
3333 static int
3334 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
3335  struct timeval *timeout)
3336 {
3338  int lerrno;
3339  rb_fdset_t UNINITIALIZED_VAR(orig_read);
3340  rb_fdset_t UNINITIALIZED_VAR(orig_write);
3341  rb_fdset_t UNINITIALIZED_VAR(orig_except);
3342  double limit = 0;
3343  struct timeval wait_rest;
3344  rb_thread_t *th = GET_THREAD();
3345 
3346  if (timeout) {
3347  limit = timeofday();
3348  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3349  wait_rest = *timeout;
3350  timeout = &wait_rest;
3351  }
3352 
3353  if (read)
3354  rb_fd_init_copy(&orig_read, read);
3355  if (write)
3356  rb_fd_init_copy(&orig_write, write);
3357  if (except)
3358  rb_fd_init_copy(&orig_except, except);
3359 
3360  retry:
3361  lerrno = 0;
3362 
3363  BLOCKING_REGION({
3364  result = native_fd_select(n, read, write, except, timeout, th);
3365  if (result < 0) lerrno = errno;
3366  }, ubf_select, th, FALSE);
3367 
3369 
3370  errno = lerrno;
3371 
3372  if (result < 0) {
3373  switch (errno) {
3374  case EINTR:
3375 #ifdef ERESTART
3376  case ERESTART:
3377 #endif
3378  if (read)
3379  rb_fd_dup(read, &orig_read);
3380  if (write)
3381  rb_fd_dup(write, &orig_write);
3382  if (except)
3383  rb_fd_dup(except, &orig_except);
3384 
3385  if (timeout) {
3386  double d = limit - timeofday();
3387 
3388  wait_rest.tv_sec = (time_t)d;
3389  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
3390  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
3391  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
3392  }
3393 
3394  goto retry;
3395  default:
3396  break;
3397  }
3398  }
3399 
3400  if (read)
3401  rb_fd_term(&orig_read);
3402  if (write)
3403  rb_fd_term(&orig_write);
3404  if (except)
3405  rb_fd_term(&orig_except);
3406 
3407  return result;
3408 }
3409 
3410 static void
3411 rb_thread_wait_fd_rw(int fd, int read)
3412 {
3413  int result = 0;
3414  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3415 
3416  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3417 
3418  if (fd < 0) {
3419  rb_raise(rb_eIOError, "closed stream");
3420  }
3421 
3422  result = rb_wait_for_single_fd(fd, events, NULL);
3423  if (result < 0) {
3424  rb_sys_fail(0);
3425  }
3426 
3427  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3428 }
3429 
/* Public API: block the current thread until fd is readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
3435 
3436 int
3438 {
3439  rb_thread_wait_fd_rw(fd, 0);
3440  return TRUE;
3441 }
3442 
3443 int
3444 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
3445  struct timeval *timeout)
3446 {
3447  rb_fdset_t fdsets[3];
3448  rb_fdset_t *rfds = NULL;
3449  rb_fdset_t *wfds = NULL;
3450  rb_fdset_t *efds = NULL;
3451  int retval;
3452 
3453  if (read) {
3454  rfds = &fdsets[0];
3455  rb_fd_init(rfds);
3456  rb_fd_copy(rfds, read, max);
3457  }
3458  if (write) {
3459  wfds = &fdsets[1];
3460  rb_fd_init(wfds);
3461  rb_fd_copy(wfds, write, max);
3462  }
3463  if (except) {
3464  efds = &fdsets[2];
3465  rb_fd_init(efds);
3466  rb_fd_copy(efds, except, max);
3467  }
3468 
3469  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
3470 
3471  if (rfds) {
3472  rb_fd_rcopy(read, rfds);
3473  rb_fd_term(rfds);
3474  }
3475  if (wfds) {
3476  rb_fd_rcopy(write, wfds);
3477  rb_fd_term(wfds);
3478  }
3479  if (efds) {
3480  rb_fd_rcopy(except, efds);
3481  rb_fd_term(efds);
3482  }
3483 
3484  return retval;
3485 }
3486 
3487 int
3488 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
3489  struct timeval *timeout)
3490 {
3491  if (!read && !write && !except) {
3492  if (!timeout) {
3494  return 0;
3495  }
3496  rb_thread_wait_for(*timeout);
3497  return 0;
3498  }
3499 
3500  if (read) {
3501  rb_fd_resize(max - 1, read);
3502  }
3503  if (write) {
3504  rb_fd_resize(max - 1, write);
3505  }
3506  if (except) {
3507  rb_fd_resize(max - 1, except);
3508  }
3509  return do_select(max, read, write, except, timeout);
3510 }
3511 
3512 /*
3513  * poll() is supported by many OSes, but so far Linux is the only
3514  * one we know of that supports using poll() in all places select()
3515  * would work.
3516  */
3517 #if defined(HAVE_POLL) && defined(__linux__)
3518 # define USE_POLL
3519 #endif
3520 
3521 #ifdef USE_POLL
3522 
3523 /* The same with linux kernel. TODO: make platform independent definition. */
3524 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3525 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3526 #define POLLEX_SET (POLLPRI)
3527 
3528 #ifndef HAVE_PPOLL
3529 /* TODO: don't ignore sigmask */
3530 int
3531 ppoll(struct pollfd *fds, nfds_t nfds,
3532  const struct timespec *ts, const sigset_t *sigmask)
3533 {
3534  int timeout_ms;
3535 
3536  if (ts) {
3537  int tmp, tmp2;
3538 
3539  if (ts->tv_sec > TIMET_MAX/1000)
3540  timeout_ms = -1;
3541  else {
3542  tmp = ts->tv_sec * 1000;
3543  tmp2 = ts->tv_nsec / (1000 * 1000);
3544  if (TIMET_MAX - tmp < tmp2)
3545  timeout_ms = -1;
3546  else
3547  timeout_ms = tmp + tmp2;
3548  }
3549  }
3550  else
3551  timeout_ms = -1;
3552 
3553  return poll(fds, nfds, timeout_ms);
3554 }
3555 #endif
3556 
3557 /*
3558  * returns a mask of events
3559  */
3560 int
3561 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3562 {
3563  struct pollfd fds;
3564  int result = 0, lerrno;
3565  double limit = 0;
3566  struct timespec ts;
3567  struct timespec *timeout = NULL;
3568  rb_thread_t *th = GET_THREAD();
3569 
3570  if (tv) {
3571  ts.tv_sec = tv->tv_sec;
3572  ts.tv_nsec = tv->tv_usec * 1000;
3573  limit = timeofday();
3574  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
3575  timeout = &ts;
3576  }
3577 
3578  fds.fd = fd;
3579  fds.events = (short)events;
3580 
3581 retry:
3582  lerrno = 0;
3583  BLOCKING_REGION({
3584  result = ppoll(&fds, 1, timeout, NULL);
3585  if (result < 0) lerrno = errno;
3586  }, ubf_select, th, FALSE);
3587 
3589 
3590  if (result < 0) {
3591  errno = lerrno;
3592  switch (errno) {
3593  case EINTR:
3594 #ifdef ERESTART
3595  case ERESTART:
3596 #endif
3597  if (timeout) {
3598  double d = limit - timeofday();
3599 
3600  ts.tv_sec = (long)d;
3601  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
3602  if (ts.tv_sec < 0)
3603  ts.tv_sec = 0;
3604  if (ts.tv_nsec < 0)
3605  ts.tv_nsec = 0;
3606  }
3607  goto retry;
3608  }
3609  return -1;
3610  }
3611 
3612  if (fds.revents & POLLNVAL) {
3613  errno = EBADF;
3614  return -1;
3615  }
3616 
3617  /*
3618  * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
3619  * Therefore we need fix it up.
3620  */
3621  result = 0;
3622  if (fds.revents & POLLIN_SET)
3623  result |= RB_WAITFD_IN;
3624  if (fds.revents & POLLOUT_SET)
3625  result |= RB_WAITFD_OUT;
3626  if (fds.revents & POLLEX_SET)
3627  result |= RB_WAITFD_PRI;
3628 
3629  return result;
3630 }
3631 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
3632 static rb_fdset_t *
3634 {
3635  rb_fd_init(fds);
3636  rb_fd_set(fd, fds);
3637 
3638  return fds;
3639 }
3640 
3641 struct select_args {
3642  union {
3643  int fd;
3644  int error;
3645  } as;
3649  struct timeval *tv;
3650 };
3651 
3652 static VALUE
3654 {
3655  struct select_args *args = (struct select_args *)ptr;
3656  int r;
3657 
3658  r = rb_thread_fd_select(args->as.fd + 1,
3659  args->read, args->write, args->except, args->tv);
3660  if (r == -1)
3661  args->as.error = errno;
3662  if (r > 0) {
3663  r = 0;
3664  if (args->read && rb_fd_isset(args->as.fd, args->read))
3665  r |= RB_WAITFD_IN;
3666  if (args->write && rb_fd_isset(args->as.fd, args->write))
3667  r |= RB_WAITFD_OUT;
3668  if (args->except && rb_fd_isset(args->as.fd, args->except))
3669  r |= RB_WAITFD_PRI;
3670  }
3671  return (VALUE)r;
3672 }
3673 
3674 static VALUE
3676 {
3677  struct select_args *args = (struct select_args *)ptr;
3678 
3679  if (args->read) rb_fd_term(args->read);
3680  if (args->write) rb_fd_term(args->write);
3681  if (args->except) rb_fd_term(args->except);
3682 
3683  return (VALUE)-1;
3684 }
3685 
3686 int
3687 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3688 {
3689  rb_fdset_t rfds, wfds, efds;
3690  struct select_args args;
3691  int r;
3692  VALUE ptr = (VALUE)&args;
3693 
3694  args.as.fd = fd;
3695  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
3696  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
3697  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
3698  args.tv = tv;
3699 
3700  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
3701  if (r == -1)
3702  errno = args.as.error;
3703 
3704  return r;
3705 }
3706 #endif /* ! USE_POLL */
3707 
3708 /*
3709  * for GC
3710  */
3711 
3712 #ifdef USE_CONSERVATIVE_STACK_END
3713 void
3715 {
3716  VALUE stack_end;
3717  *stack_end_p = &stack_end;
3718 }
3719 #endif
3720 
3721 
3722 /*
3723  *
3724  */
3725 
3726 void
3728 {
3729  /* mth must be main_thread */
3730  if (rb_signal_buff_size() > 0) {
3731  /* wakeup main thread */
3733  }
3734 }
3735 
3736 static void
3738 {
3739  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3740 
3741  /*
3742  * Tricky: thread_destruct_lock doesn't close a race against
3743  * vm->running_thread switch. however it guarantee th->running_thread
3744  * point to valid pointer or NULL.
3745  */
3746  native_mutex_lock(&vm->thread_destruct_lock);
3747  /* for time slice */
3748  if (vm->running_thread)
3750  native_mutex_unlock(&vm->thread_destruct_lock);
3751 
3752  /* check signal */
3754 
3755 #if 0
3756  /* prove profiler */
3757  if (vm->prove_profile.enable) {
3758  rb_thread_t *th = vm->running_thread;
3759 
3760  if (vm->during_gc) {
3761  /* GC prove profiling */
3762  }
3763  }
3764 #endif
3765 }
3766 
3767 void
3769 {
3770  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3771  native_reset_timer_thread();
3772  }
3773 }
3774 
/* Reset timer-thread state without stopping it (used after fork). */
void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}
3780 
3781 void
3783 {
3784  system_working = 1;
3785  rb_thread_create_timer_thread();
3786 }
3787 
3788 static int
3790 {
3791  int i;
3792  VALUE lines = (VALUE)val;
3793 
3794  for (i = 0; i < RARRAY_LEN(lines); i++) {
3795  if (RARRAY_PTR(lines)[i] != Qnil) {
3796  RARRAY_PTR(lines)[i] = INT2FIX(0);
3797  }
3798  }
3799  return ST_CONTINUE;
3800 }
3801 
3802 static void
3804 {
3805  VALUE coverages = rb_get_coverages();
3806  if (RTEST(coverages)) {
3807  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3808  }
3809 }
3810 
3811 static void
3813 {
3814  rb_thread_t *th = GET_THREAD();
3815  rb_vm_t *vm = th->vm;
3816  VALUE thval = th->self;
3817  vm->main_thread = th;
3818 
3819  gvl_atfork(th->vm);
3820  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3821  st_clear(vm->living_threads);
3822  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3823  vm->sleeper = 0;
3824  clear_coverage();
3825 }
3826 
3827 static int
3829 {
3830  VALUE thval = key;
3831  rb_thread_t *th;
3832  GetThreadPtr(thval, th);
3833 
3834  if (th != (rb_thread_t *)current_th) {
3835  if (th->keeping_mutexes) {
3837  }
3838  th->keeping_mutexes = NULL;
3840  }
3841  return ST_CONTINUE;
3842 }
3843 
3844 void
3846 {
3848  GET_THREAD()->join_list = NULL;
3849 
3850  /* We don't want reproduce CVE-2003-0900. */
3852 }
3853 
3854 static int
3856 {
3857  VALUE thval = key;
3858  rb_thread_t *th;
3859  GetThreadPtr(thval, th);
3860 
3861  if (th != (rb_thread_t *)current_th) {
3863  }
3864  return ST_CONTINUE;
3865 }
3866 
3867 void
3869 {
3871 }
3872 
3873 struct thgroup {
3876 };
3877 
3878 static size_t
3879 thgroup_memsize(const void *ptr)
3880 {
3881  return ptr ? sizeof(struct thgroup) : 0;
3882 }
3883 
3885  "thgroup",
3887 };
3888 
3889 /*
3890  * Document-class: ThreadGroup
3891  *
3892  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3893  * threads as a group. A <code>Thread</code> can belong to only one
3894  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3895  * remove it from any previous group.
3896  *
3897  * Newly created threads belong to the same group as the thread from which they
3898  * were created.
3899  */
3900 
3901 /*
3902  * Document-const: Default
3903  *
3904  * The default ThreadGroup created when Ruby starts; all Threads belong to it
3905  * by default.
3906  */
3907 static VALUE
3909 {
3910  VALUE group;
3911  struct thgroup *data;
3912 
3913  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
3914  data->enclosed = 0;
3915  data->group = group;
3916 
3917  return group;
3918 }
3919 
3923 };
3924 
3925 static int
3927 {
3928  VALUE thread = (VALUE)key;
3929  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3930  VALUE group = ((struct thgroup_list_params *)data)->group;
3931  rb_thread_t *th;
3932  GetThreadPtr(thread, th);
3933 
3934  if (th->thgroup == group) {
3935  rb_ary_push(ary, thread);
3936  }
3937  return ST_CONTINUE;
3938 }
3939 
3940 /*
3941  * call-seq:
3942  * thgrp.list -> array
3943  *
3944  * Returns an array of all existing <code>Thread</code> objects that belong to
3945  * this group.
3946  *
3947  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3948  */
3949 
3950 static VALUE
3952 {
3953  VALUE ary = rb_ary_new();
3954  struct thgroup_list_params param;
3955 
3956  param.ary = ary;
3957  param.group = group;
3958  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3959  return ary;
3960 }
3961 
3962 
3963 /*
3964  * call-seq:
3965  * thgrp.enclose -> thgrp
3966  *
3967  * Prevents threads from being added to or removed from the receiving
3968  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3969  * <code>ThreadGroup</code>.
3970  *
3971  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3972  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3973  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3974  * tg.add thr
3975  *
3976  * <em>produces:</em>
3977  *
3978  * ThreadError: can't move from the enclosed thread group
3979  */
3980 
3981 static VALUE
3983 {
3984  struct thgroup *data;
3985 
3986  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3987  data->enclosed = 1;
3988 
3989  return group;
3990 }
3991 
3992 
3993 /*
3994  * call-seq:
3995  * thgrp.enclosed? -> true or false
3996  *
3997  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
3998  * ThreadGroup#enclose.
3999  */
4000 
4001 static VALUE
4003 {
4004  struct thgroup *data;
4005 
4006  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4007  if (data->enclosed)
4008  return Qtrue;
4009  return Qfalse;
4010 }
4011 
4012 
4013 /*
4014  * call-seq:
4015  * thgrp.add(thread) -> thgrp
4016  *
4017  * Adds the given <em>thread</em> to this group, removing it from any other
4018  * group to which it may have previously belonged.
4019  *
4020  * puts "Initial group is #{ThreadGroup::Default.list}"
4021  * tg = ThreadGroup.new
4022  * t1 = Thread.new { sleep }
4023  * t2 = Thread.new { sleep }
4024  * puts "t1 is #{t1}"
4025  * puts "t2 is #{t2}"
4026  * tg.add(t1)
4027  * puts "Initial group now #{ThreadGroup::Default.list}"
4028  * puts "tg group now #{tg.list}"
4029  *
4030  * <em>produces:</em>
4031  *
4032  * Initial group is #<Thread:0x401bdf4c>
4033  * t1 is #<Thread:0x401b3c90>
4034  * t2 is #<Thread:0x401b3c18>
4035  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4036  * tg group now #<Thread:0x401b3c90>
4037  */
4038 
static VALUE
/* thgroup_add(VALUE group, VALUE thread) -- signature line lost in this
 * extraction; name/arity taken from the file's cross-reference index. */
{
    rb_thread_t *th;
    struct thgroup *data;

    rb_secure(4);
    GetThreadPtr(thread, th);

    /* the destination group must be mutable and not enclosed */
    if (OBJ_FROZEN(group)) {
	rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
	rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    /* thread has no current group (e.g. during bootstrap): nothing to move */
    if (!th->thgroup) {
	return Qnil;
    }

    /* the source group (th->thgroup) must also allow removal */
    if (OBJ_FROZEN(th->thgroup)) {
	rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    /* NOTE(review): a line is missing here in this extraction -- upstream
     * re-reads `data` from th->thgroup before the next check; as shown,
     * the test would redundantly re-check the destination group.  Verify. */
    if (data->enclosed) {
	/* NOTE(review): the rb_raise(...) call start was lost in the
	 * extraction; only its message argument remains below. */
	"can't move from the enclosed thread group");
    }

    th->thgroup = group;
    return group;
}
4072 
4073 
4074 /*
4075  * Document-class: Mutex
4076  *
4077  * Mutex implements a simple semaphore that can be used to coordinate access to
4078  * shared data from multiple concurrent threads.
4079  *
4080  * Example:
4081  *
4082  * require 'thread'
4083  * semaphore = Mutex.new
4084  *
4085  * a = Thread.new {
4086  * semaphore.synchronize {
4087  * # access shared resource
4088  * }
4089  * }
4090  *
4091  * b = Thread.new {
4092  * semaphore.synchronize {
4093  * # access shared resource
4094  * }
4095  * }
4096  *
4097  */
4098 
4099 #define GetMutexPtr(obj, tobj) \
4100  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
4101 
4102 static const char *rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
4103 
4104 #define mutex_mark NULL
4105 
4106 static void
4107 mutex_free(void *ptr)
4108 {
4109  if (ptr) {
4110  rb_mutex_t *mutex = ptr;
4111  if (mutex->th) {
4112  /* rb_warn("free locked mutex"); */
4113  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
4114  if (err) rb_bug("%s", err);
4115  }
4116  native_mutex_destroy(&mutex->lock);
4117  native_cond_destroy(&mutex->cond);
4118  }
4119  ruby_xfree(ptr);
4120 }
4121 
4122 static size_t
4123 mutex_memsize(const void *ptr)
4124 {
4125  return ptr ? sizeof(rb_mutex_t) : 0;
4126 }
4127 
4129  "mutex",
4131 };
4132 
4133 VALUE
4135 {
4136  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
4137  return Qtrue;
4138  }
4139  else {
4140  return Qfalse;
4141  }
4142 }
4143 
static VALUE
/* mutex_alloc(VALUE klass) -- signature line lost in extraction; TODO
 * confirm against original thread.c */
{
    VALUE volatile obj;
    rb_mutex_t *mutex;

    /* Wrap a fresh rb_mutex_t and initialize its native lock plus a
     * monotonic-clock condition variable (used for timed waits). */
    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
    native_mutex_initialize(&mutex->lock);
    native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
    return obj;
}
4155 
4156 /*
4157  * call-seq:
4158  * Mutex.new -> mutex
4159  *
4160  * Creates a new Mutex
4161  */
4162 static VALUE
4164 {
4165  return self;
4166 }
4167 
4168 VALUE
4170 {
4171  return mutex_alloc(rb_cMutex);
4172 }
4173 
4174 /*
4175  * call-seq:
4176  * mutex.locked? -> true or false
4177  *
4178  * Returns +true+ if this lock is currently held by some thread.
4179  */
VALUE
/* rb_mutex_locked_p(VALUE self) -- signature line lost in extraction;
 * TODO confirm */
{
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);
    /* locked iff some thread is recorded as the owner */
    return mutex->th ? Qtrue : Qfalse;
}
4187 
static void
/* mutex_locked(rb_thread_t *th, VALUE self) -- signature line lost in
 * extraction; TODO confirm */
{
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    /* Push this mutex onto th's singly-linked list of held mutexes
     * (keeping_mutexes) so it can be found and released later. */
    if (th->keeping_mutexes) {
	mutex->next_mutex = th->keeping_mutexes;
    }
    th->keeping_mutexes = mutex;
}
4199 
4200 /*
4201  * call-seq:
4202  * mutex.try_lock -> true or false
4203  *
4204  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
4205  * lock was granted.
4206  */
VALUE
/* rb_mutex_trylock(VALUE self) -- signature line lost in extraction;
 * TODO confirm */
{
    rb_mutex_t *mutex;
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    /* Non-blocking acquire: hold the native lock only long enough to
     * test-and-set the owner field. */
    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
	mutex->th = GET_THREAD();
	locked = Qtrue;

	/* record ownership on the acquiring thread's held-mutex list */
	mutex_locked(GET_THREAD(), self);
    }
    native_mutex_unlock(&mutex->lock);

    return locked;
}
4225 
/*
 * Body of the contended-lock wait.  Entered with mutex->lock held;
 * blocks on mutex->cond until the mutex becomes free, the thread is
 * interrupted, or (when timeout_ms != 0) the timed wait expires.
 *
 * Returns 0 on successful acquisition, 1 when interrupted, 2 when the
 * timed wait expired (the caller then runs a deadlock check).
 */
static int
lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
{
    int interrupted = 0;
    int err = 0;

    mutex->cond_waiting++;
    for (;;) {
	if (!mutex->th) {
	    /* mutex is free: claim it */
	    mutex->th = th;
	    break;
	}
	if (RUBY_VM_INTERRUPTED(th)) {
	    interrupted = 1;
	    break;
	}
	if (err == ETIMEDOUT) {
	    /* previous timed wait expired */
	    interrupted = 2;
	    break;
	}

	if (timeout_ms) {
	    struct timespec timeout_rel;
	    struct timespec timeout;

	    /* NOTE: assumes timeout_ms < 1000 so tv_nsec stays in range;
	     * the caller in this file passes at most 100. */
	    timeout_rel.tv_sec = 0;
	    timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
	    timeout = native_cond_timeout(&mutex->cond, timeout_rel);
	    err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
	}
	else {
	    native_cond_wait(&mutex->cond, &mutex->lock);
	    err = 0;
	}
    }
    mutex->cond_waiting--;

    return interrupted;
}
4265 
4266 static void
4267 lock_interrupt(void *ptr)
4268 {
4269  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
4270  native_mutex_lock(&mutex->lock);
4271  if (mutex->cond_waiting > 0)
4272  native_cond_broadcast(&mutex->cond);
4273  native_mutex_unlock(&mutex->lock);
4274 }
4275 
4276 /*
4277  * At maximum, only one thread can use cond_timedwait and watch deadlock
4278  * periodically. Multiple polling thread (i.e. concurrent deadlock check)
4279  * introduces new race conditions. [Bug #6278] [ruby-core:44275]
4280  */
4282 
4283 /*
4284  * call-seq:
4285  * mutex.lock -> self
4286  *
4287  * Attempts to grab the lock and waits if it isn't available.
4288  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
4289  */
VALUE
/* rb_mutex_lock(VALUE self) -- signature line lost in extraction; TODO
 * confirm */
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    /* When running trap handler */
    if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
	rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
	if (mutex->th == GET_THREAD()) {
	    rb_raise(rb_eThreadError, "deadlock; recursive locking");
	}

	/* contended path: loop until this thread owns the mutex */
	while (mutex->th != th) {
	    int interrupted;
	    enum rb_thread_status prev_status = th->status;
	    volatile int timeout_ms = 0;
	    struct rb_unblock_callback oldubf;

	    set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
	    /* NOTE(review): a line is missing here in this extraction --
	     * presumably marks th as sleeping (its status is restored from
	     * prev_status below); confirm against original thread.c. */
	    th->locking_mutex = self;

	    native_mutex_lock(&mutex->lock);
	    th->vm->sleeper++;
	    /*
	     * Carefully! while some contended threads are in lock_func(),
	     * vm->sleepr is unstable value. we have to avoid both deadlock
	     * and busy loop.
	     */
	    if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
		!patrol_thread) {
		/* every thread is asleep: become the patrol thread and use
		 * a 100ms timed wait so deadlock can be detected */
		timeout_ms = 100;
		patrol_thread = th;
	    }

	    GVL_UNLOCK_BEGIN();
	    interrupted = lock_func(th, mutex, (int)timeout_ms);
	    native_mutex_unlock(&mutex->lock);
	    GVL_UNLOCK_END();

	    if (patrol_thread == th)
		patrol_thread = NULL;

	    reset_unblock_function(th, &oldubf);

	    th->locking_mutex = Qfalse;
	    if (mutex->th && interrupted == 2) {
		/* timed wait expired and the mutex is still held: check
		 * whether the whole VM is deadlocked */
		rb_check_deadlock(th->vm);
	    }
	    if (th->status == THREAD_STOPPED_FOREVER) {
		th->status = prev_status;
	    }
	    th->vm->sleeper--;

	    if (mutex->th == th) mutex_locked(th, self);

	    if (interrupted) {
		/* NOTE(review): line missing in extraction -- presumably
		 * processes pending interrupts here; confirm. */
	    }
	}
    }
    return self;
}
4358 
4359 /*
4360  * call-seq:
4361  * mutex.owned? -> true or false
4362  *
4363  * Returns +true+ if this lock is currently held by current thread.
4364  * <em>This API is experimental, and subject to change.</em>
4365  */
VALUE
/* rb_mutex_owned_p(VALUE self) -- signature line lost in extraction;
 * TODO confirm */
{
    VALUE owned = Qfalse;
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;

    GetMutexPtr(self, mutex);

    /* owned iff the recorded holder is the calling thread */
    if (mutex->th == th)
	owned = Qtrue;

    return owned;
}
4380 
4381 static const char *
4383 {
4384  const char *err = NULL;
4385 
4386  native_mutex_lock(&mutex->lock);
4387 
4388  if (mutex->th == 0) {
4389  err = "Attempt to unlock a mutex which is not locked";
4390  }
4391  else if (mutex->th != th) {
4392  err = "Attempt to unlock a mutex which is locked by another thread";
4393  }
4394  else {
4395  mutex->th = 0;
4396  if (mutex->cond_waiting > 0)
4397  native_cond_signal(&mutex->cond);
4398  }
4399 
4400  native_mutex_unlock(&mutex->lock);
4401 
4402  if (!err) {
4403  rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
4404  while (*th_mutex != mutex) {
4405  th_mutex = &(*th_mutex)->next_mutex;
4406  }
4407  *th_mutex = mutex->next_mutex;
4408  mutex->next_mutex = NULL;
4409  }
4410 
4411  return err;
4412 }
4413 
4414 /*
4415  * call-seq:
4416  * mutex.unlock -> self
4417  *
4418  * Releases the lock.
4419  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
4420  */
4421 VALUE
4423 {
4424  const char *err;
4425  rb_mutex_t *mutex;
4426  GetMutexPtr(self, mutex);
4427 
4428  err = rb_mutex_unlock_th(mutex, GET_THREAD());
4429  if (err) rb_raise(rb_eThreadError, "%s", err);
4430 
4431  return self;
4432 }
4433 
4434 static void
4436 {
4437  rb_mutex_t *mutex;
4438 
4439  while (mutexes) {
4440  mutex = mutexes;
4441  mutexes = mutex->next_mutex;
4442  mutex->th = 0;
4443  mutex->next_mutex = 0;
4444  }
4445 }
4446 
4447 static VALUE
4449 {
4450  sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
4451  return Qnil;
4452 }
4453 
4454 static VALUE
4456 {
4457  struct timeval *t = (struct timeval *)time;
4458  sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
4459  return Qnil;
4460 }
4461 
4462 VALUE
4464 {
4465  time_t beg, end;
4466  struct timeval t;
4467 
4468  if (!NIL_P(timeout)) {
4469  t = rb_time_interval(timeout);
4470  }
4471  rb_mutex_unlock(self);
4472  beg = time(0);
4473  if (NIL_P(timeout)) {
4475  }
4476  else {
4478  }
4479  end = time(0) - beg;
4480  return INT2FIX(end);
4481 }
4482 
4483 /*
4484  * call-seq:
4485  * mutex.sleep(timeout = nil) -> number
4486  *
4487  * Releases the lock and sleeps +timeout+ seconds if it is given and
4488  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
4489  * the current thread.
4490  *
4491  * Note that this method can wakeup without explicit Thread#wakeup call.
4492  * For example, receiving signal and so on.
4493  */
4494 static VALUE
4496 {
4497  VALUE timeout;
4498 
4499  rb_scan_args(argc, argv, "01", &timeout);
4500  return rb_mutex_sleep(self, timeout);
4501 }
4502 
4503 /*
4504  * call-seq:
4505  * mutex.synchronize { ... } -> result of the block
4506  *
4507  * Obtains a lock, runs the block, and releases the lock when the block
4508  * completes. See the example under +Mutex+.
4509  */
4510 
4511 VALUE
4513 {
4514  rb_mutex_lock(mutex);
4515  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
4516 }
4517 
4518 /*
4519  * call-seq:
4520  * mutex.synchronize { ... } -> result of the block
4521  *
4522  * Obtains a lock, runs the block, and releases the lock when the block
4523  * completes. See the example under +Mutex+.
4524  */
4525 static VALUE
4527 {
4528  if (!rb_block_given_p()) {
4529  rb_raise(rb_eThreadError, "must be called with a block");
4530  }
4531 
4532  return rb_mutex_synchronize(self, rb_yield, Qundef);
4533 }
4534 
4535 void rb_mutex_allow_trap(VALUE self, int val)
4536 {
4537  rb_mutex_t *m;
4538  GetMutexPtr(self, m);
4539 
4540  m->allow_trap = val;
4541 }
4542 
4543 /*
4544  * Document-class: ThreadShield
4545  */
4546 static void
4548 {
4549  rb_gc_mark((VALUE)ptr);
4550 }
4551 
4553  "thread_shield",
4554  {thread_shield_mark, 0, 0,},
4555 };
4556 
4557 static VALUE
4559 {
4560  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4561 }
4562 
4563 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4564 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4565 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4566 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4567 
4568 static inline void
4570 {
4571  unsigned int w = rb_thread_shield_waiting(b);
4572  w++;
4574  rb_raise(rb_eRuntimeError, "waiting count overflow");
4575  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4576  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4577 }
4578 
static inline void
/* rb_thread_shield_waiting_dec(VALUE b) -- signature line lost in
 * extraction; TODO confirm */
{
    /* Decrement the waiter count packed into b's object flags (see
     * THREAD_SHIELD_WAITING_MASK / _SHIFT above). */
    unsigned int w = rb_thread_shield_waiting(b);
    if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
    w--;
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
}
4588 
4589 VALUE
4591 {
4592  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4593  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4594  return thread_shield;
4595 }
4596 
4597 /*
4598  * Wait a thread shield.
4599  *
4600  * Returns
4601  * true: acquired the thread shield
4602  * false: the thread shield was destroyed and no other threads waiting
4603  * nil: the thread shield was destroyed but still in use
4604  */
4605 VALUE
4607 {
4608  VALUE mutex = GetThreadShieldPtr(self);
4609  rb_mutex_t *m;
4610 
4611  if (!mutex) return Qfalse;
4612  GetMutexPtr(mutex, m);
4613  if (m->th == GET_THREAD()) return Qnil;
4615  rb_mutex_lock(mutex);
4617  if (DATA_PTR(self)) return Qtrue;
4618  rb_mutex_unlock(mutex);
4619  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4620 }
4621 
4622 /*
4623  * Release a thread shield, and return true if it has waiting threads.
4624  */
4625 VALUE
4627 {
4628  VALUE mutex = GetThreadShieldPtr(self);
4629  rb_mutex_unlock(mutex);
4630  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4631 }
4632 
4633 /*
4634  * Release and destroy a thread shield, and return true if it has waiting threads.
4635  */
4636 VALUE
4638 {
4639  VALUE mutex = GetThreadShieldPtr(self);
4640  DATA_PTR(self) = 0;
4641  rb_mutex_unlock(mutex);
4642  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4643 }
4644 
4645 /* variables for recursive traversals */
4647 
4648 /*
4649  * Returns the current "recursive list" used to detect recursion.
4650  * This list is a hash table, unique for the current thread and for
4651  * the current __callee__.
4652  */
4653 
static VALUE
/* recursive_list_access(void) -- signature line lost in extraction;
 * TODO confirm */
{
    /* Per-thread hash stored under recursive_key; each value is itself
     * a hash (the "recursive list") keyed by the current callee symbol. */
    volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    /* NOTE(review): line missing in extraction -- presumably derives `sym`
     * from the current method name; confirm against original thread.c. */
    VALUE list;
    if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
	/* first use on this thread: create and install the outer hash */
	hash = rb_hash_new();
	OBJ_UNTRUST(hash);
	rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
	list = Qnil;
    }
    else {
	list = rb_hash_aref(hash, sym);
    }
    if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
	/* no list yet for this callee: create one lazily */
	list = rb_hash_new();
	OBJ_UNTRUST(list);
	rb_hash_aset(hash, sym, list);
    }
    return list;
}
4676 
4677 /*
4678  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
4679  * in the recursion list.
4680  * Assumes the recursion list is valid.
4681  */
4682 
/*
 * Qtrue iff obj_id (or the pair <obj_id, paired_obj_id>) is already
 * recorded in `list` (a recursion list from recursive_list_access).
 */
static VALUE
recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
  /* object ids fit in a Fixnum: plain VALUE comparison suffices */
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  /* ids may be Bignums on this platform: compare by value then */
  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
    if (pair_list == Qundef)
	return Qfalse;
    if (paired_obj_id) {
	if (!RB_TYPE_P(pair_list, T_HASH)) {
	    /* a single paired id is stored directly as the value */
	    if (!OBJ_ID_EQL(paired_obj_id, pair_list))
		return Qfalse;
	}
	else {
	    /* several paired ids: stored as a sub-hash (see recursive_push) */
	    if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
		return Qfalse;
	}
    }
    return Qtrue;
}
4708 
4709 /*
4710  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
4711  * For a single obj_id, it sets list[obj_id] to Qtrue.
4712  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4713  * otherwise list[obj_id] becomes a hash like:
4714  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4715  * Assumes the recursion list is valid.
4716  */
4717 
static void
/* recursive_push(VALUE list, VALUE obj, VALUE paired_obj) -- signature
 * line lost in extraction; TODO confirm */
{
    VALUE pair_list;

    if (!paired_obj) {
	/* single-object form: just flag obj as in-progress */
	rb_hash_aset(list, obj, Qtrue);
    }
    else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
	/* first pair for obj: store the paired id directly */
	rb_hash_aset(list, obj, paired_obj);
    }
    else {
	if (!RB_TYPE_P(pair_list, T_HASH)){
	    /* second distinct pair: promote the direct entry to a sub-hash */
	    VALUE other_paired_obj = pair_list;
	    pair_list = rb_hash_new();
	    OBJ_UNTRUST(pair_list);
	    rb_hash_aset(pair_list, other_paired_obj, Qtrue);
	    rb_hash_aset(list, obj, pair_list);
	}
	rb_hash_aset(pair_list, paired_obj, Qtrue);
    }
}
4740 
4741 /*
4742  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4743  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4744  * removed from the hash and no attempt is made to simplify
4745  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
4746  * Assumes the recursion list is valid.
4747  */
4748 
static void
/* recursive_pop(VALUE list, VALUE obj, VALUE paired_obj) -- signature
 * line lost in extraction; TODO confirm */
{
    if (paired_obj) {
	VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
	if (pair_list == Qundef) {
	    /* pop without a matching push: internal inconsistency */
	    VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
	    VALUE thrname = rb_inspect(rb_thread_current());
	    rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
		     StringValuePtr(symname), StringValuePtr(thrname));
	}
	if (RB_TYPE_P(pair_list, T_HASH)) {
	    rb_hash_delete(pair_list, paired_obj);
	    if (!RHASH_EMPTY_P(pair_list)) {
		return; /* keep hash until is empty */
	    }
	}
    }
    rb_hash_delete(list, obj);
}
4769 
4771  VALUE (*func) (VALUE, VALUE, int);
4772  VALUE list;
4773  VALUE obj;
4774  VALUE objid;
4775  VALUE pairid;
4776  VALUE arg;
4777 };
4778 
4779 static VALUE
4781 {
4782  VALUE result = Qundef;
4783  int state;
4784 
4785  recursive_push(p->list, p->objid, p->pairid);
4786  PUSH_TAG();
4787  if ((state = EXEC_TAG()) == 0) {
4788  result = (*p->func)(p->obj, p->arg, FALSE);
4789  }
4790  POP_TAG();
4791  recursive_pop(p->list, p->objid, p->pairid);
4792  if (state)
4793  JUMP_TAG(state);
4794  return result;
4795 }
4796 
4797 /*
4798  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4799  * current method is called recursively on obj, or on the pair <obj, pairid>
4800  * If outer is 0, then the innermost func will be called with recursive set
4801  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4802  * all inner func are short-circuited by throw.
4803  * Implementation details: the value thrown is the recursive list which is
4804  * proper to the current method and unlikely to be catched anywhere else.
4805  * list[recursive_key] is used as a flag for the outermost call.
4806  */
4807 
/*
 * Core of rb_exec_recursive*: call func(obj, arg, recursive) with
 * recursion detection on obj (or the pair <obj, pairid>).  When `outer`
 * is non-zero, the OUTERMOST call reports the recursion; inner calls
 * are short-circuited by throwing the recursion list itself.
 */
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    struct exec_recursive_params p;
    int outermost;
    /* NOTE(review): line missing in extraction -- upstream initializes
     * p.list with the current recursion list here; confirm. */
    p.objid = rb_obj_id(obj);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* outermost iff requested AND no enclosing call set the flag yet;
     * list[recursive_key] marks the outermost frame */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
	/* recursion detected */
	if (outer && !outermost) {
	    /* unwind to the outermost call; the list doubles as throw tag */
	    rb_throw_obj(p.list, p.list);
	}
	return (*func)(obj, arg, TRUE);
    }
    else {
	p.func = func;

	if (outermost) {
	    recursive_push(p.list, ID2SYM(recursive_key), 0);
	    result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
	    recursive_pop(p.list, ID2SYM(recursive_key), 0);
	    if (result == p.list) {
		/* an inner call threw: report the recursion out here */
		result = (*func)(obj, arg, TRUE);
	    }
	}
	else {
	    result = exec_recursive_i(0, &p);
	}
    }
    /* keep p (and thus its contents) alive across the calls above */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
4845 
4846 /*
4847  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4848  * current method is called recursively on obj
4849  */
4850 
4851 VALUE
4853 {
4854  return exec_recursive(func, obj, 0, arg, 0);
4855 }
4856 
4857 /*
4858  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4859  * current method is called recursively on the ordered pair <obj, paired_obj>
4860  */
4861 
4862 VALUE
4864 {
4865  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4866 }
4867 
4868 /*
4869  * If recursion is detected on the current method and obj, the outermost
4870  * func will be called with (obj, arg, Qtrue). All inner func will be
4871  * short-circuited using throw.
4872  */
4873 
4874 VALUE
4876 {
4877  return exec_recursive(func, obj, 0, arg, 1);
4878 }
4879 
4880 /*
4881  * If recursion is detected on the current method, obj and paired_obj,
4882  * the outermost func will be called with (obj, arg, Qtrue). All inner
4883  * func will be short-circuited using throw.
4884  */
4885 
4886 VALUE
4888 {
4889  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
4890 }
4891 
4892 /*
4893  * call-seq:
4894  * thr.backtrace -> array
4895  *
4896  * Returns the current backtrace of the target thread.
4897  *
4898  */
4899 
4900 static VALUE
4902 {
4903  return vm_thread_backtrace(argc, argv, thval);
4904 }
4905 
4906 /* call-seq:
4907  * thr.backtrace_locations(*args) -> array or nil
4908  *
4909  * Returns the execution stack for the target thread---an array containing
4910  * backtrace location objects.
4911  *
4912  * See Thread::Backtrace::Location for more information.
4913  *
4914  * This method behaves similarly to Kernel#caller_locations except it applies
4915  * to a specific thread.
4916  */
4917 static VALUE
4919 {
4920  return vm_thread_backtrace_locations(argc, argv, thval);
4921 }
4922 
4923 /*
4924  * Document-class: ThreadError
4925  *
4926  * Raised when an invalid operation is attempted on a thread.
4927  *
4928  * For example, when no other thread has been started:
4929  *
4930  * Thread.stop
4931  *
4932  * <em>raises the exception:</em>
4933  *
4934  * ThreadError: stopping only thread
4935  */
4936 
4937 /*
4938  * +Thread+ encapsulates the behavior of a thread of
4939  * execution, including the main thread of the Ruby script.
4940  *
4941  * In the descriptions of the methods in this class, the parameter _sym_
4942  * refers to a symbol, which is either a quoted string or a
4943  * +Symbol+ (such as <code>:name</code>).
4944  */
4945 
4946 void
4948 {
4949 #undef rb_intern
4950 #define rb_intern(str) rb_intern_const(str)
4951 
4952  VALUE cThGroup;
4953  rb_thread_t *th = GET_THREAD();
4954 
4955  sym_never = ID2SYM(rb_intern("never"));
4956  sym_immediate = ID2SYM(rb_intern("immediate"));
4957  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
4958 
4969  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4971 #if THREAD_DEBUG < 0
4972  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4973  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4974 #endif
4977  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
4978 
4979  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
4984  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
4995  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
4996  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
4997  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
4998  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5001  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5002  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5006  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5007 
5009 
5010  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
5013 
5014  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5016  rb_define_method(cThGroup, "list", thgroup_list, 0);
5017  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5018  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5019  rb_define_method(cThGroup, "add", thgroup_add, 1);
5020 
5021  {
5022  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
5023  rb_define_const(cThGroup, "Default", th->thgroup);
5024  }
5025 
5026  rb_cMutex = rb_define_class("Mutex", rb_cObject);
5028  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
5030  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
5033  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
5036 
5037  recursive_key = rb_intern("__recursive_key__");
5039 
5040  /* init thread core */
5041  {
5042  /* main thread setting */
5043  {
5044  /* acquire global vm lock */
5045  gvl_init(th->vm);
5046  gvl_acquire(th->vm, th);
5047  native_mutex_initialize(&th->vm->thread_destruct_lock);
5048  native_mutex_initialize(&th->interrupt_lock);
5049 
5053 
5054  th->interrupt_mask = 0;
5055  }
5056  }
5057 
5058  rb_thread_create_timer_thread();
5059 
5060  /* suppress warnings on cygwin, mingw and mswin.*/
5061  (void)native_mutex_trylock;
5062 }
5063 
5064 int
5066 {
5067  rb_thread_t *th = ruby_thread_from_native();
5068 
5069  return th != 0;
5070 }
5071 
5072 static int
5074 {
5075  VALUE thval = key;
5076  rb_thread_t *th;
5077  GetThreadPtr(thval, th);
5078 
5080  *found = 1;
5081  }
5082  else if (th->locking_mutex) {
5083  rb_mutex_t *mutex;
5084  GetMutexPtr(th->locking_mutex, mutex);
5085 
5086  native_mutex_lock(&mutex->lock);
5087  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
5088  *found = 1;
5089  }
5090  native_mutex_unlock(&mutex->lock);
5091  }
5092 
5093  return (*found) ? ST_STOP : ST_CONTINUE;
5094 }
5095 
5096 #ifdef DEBUG_DEADLOCK_CHECK
/*
 * st_foreach callback dumping per-thread state (status, interrupt flag,
 * and the owner/waiter state of any mutex the thread is blocked on) to
 * stdout.  Compiled only under DEBUG_DEADLOCK_CHECK.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d", th, th->status, th->interrupt_flag);
    if (th->locking_mutex) {
	rb_mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	/* take the mutex's own lock so owner/waiter fields are stable */
	native_mutex_lock(&mutex->lock);
	printf(" %p %d\n", mutex->th, mutex->cond_waiting);
	native_mutex_unlock(&mutex->lock);
    }
    else
	puts("");

    return ST_CONTINUE;
}
5118 #endif
5119 
static void
/* rb_check_deadlock(rb_vm_t *vm) -- signature line lost in extraction;
 * TODO confirm */
{
    int found = 0;

    /* cheap early exits: some thread is still runnable, or the patrol
     * thread (not us) is responsible for checking */
    if (vm_living_thread_num(vm) > vm->sleeper) return;
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
    if (patrol_thread && patrol_thread != GET_THREAD()) return;

    /* NOTE(review): line missing in extraction -- upstream scans
     * vm->living_threads with check_deadlock_i to set `found`; confirm. */

    if (!found) {
	/* every thread is asleep with no wakeup source: raise fatal on
	 * the main thread */
	VALUE argv[2];
	argv[0] = rb_eFatal;
	argv[1] = rb_str_new2("No live threads left. Deadlock?");
#ifdef DEBUG_DEADLOCK_CHECK
	printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
	st_foreach(vm->living_threads, debug_i, (st_data_t)0);
#endif
	vm->sleeper--;
	rb_threadptr_raise(vm->main_thread, 2, argv);
    }
}
5143 
static void
update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    /* NOTE(review): line missing in extraction -- upstream fetches the
     * per-file `coverage` array for the current frame here; confirm. */
    if (coverage && RBASIC(coverage)->klass == 0) {
	long line = rb_sourceline() - 1;
	long count;
	/* nil entry: this line is not coverage-instrumented */
	if (RARRAY_PTR(coverage)[line] == Qnil) {
	    return;
	}
	count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
	/* saturate at Fixnum max rather than rolling into Bignum */
	if (POSFIXABLE(count)) {
	    RARRAY_PTR(coverage)[line] = LONG2FIX(count);
	}
    }
}
5160 
5161 VALUE
5163 {
5164  return GET_VM()->coverages;
5165 }
5166 
5167 void
5169 {
5170  GET_VM()->coverages = coverages;
5172 }
5173 
5174 void
5176 {
5177  GET_VM()->coverages = Qfalse;
5179 }
5180 
5181 VALUE
5183 {
5184  VALUE interrupt_mask = rb_hash_new();
5185  rb_thread_t *cur_th = GET_THREAD();
5186 
5187  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5188  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5189 
5190  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5191 }
static int vm_living_thread_num(rb_vm_t *vm)
Definition: thread.c:2898
struct timeval rb_time_interval(VALUE num)
Definition: time.c:2492
rb_control_frame_t * cfp
Definition: vm_core.h:500
void rb_gc_finalize_deferred(void)
Definition: gc.c:1455
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:390
#define RUBY_EVENT_SWITCH
Definition: ruby.h:1600
rb_thread_list_t * join_list
Definition: vm_core.h:581
#define T_OBJECT
Definition: ruby.h:485
static VALUE sym_never
Definition: thread.c:84
static VALUE thgroup_enclose(VALUE group)
Definition: thread.c:3982
VALUE rb_eStandardError
Definition: error.c:509
static VALUE rb_thread_variable_p(VALUE thread, VALUE key)
Definition: thread.c:2995
#define eKillSignal
Definition: thread.c:94
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:948
unsigned long running_time_us
Definition: vm_core.h:617
rb_vm_t * vm
Definition: vm_core.h:495
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:536
static VALUE thgroup_add(VALUE group, VALUE thread)
Definition: thread.c:4040
static int check_deadlock_i(st_data_t key, st_data_t val, int *found)
Definition: thread.c:5073
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Definition: thread.c:4463
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1443
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:879
static VALUE rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
Definition: thread.c:1739
static const rb_thread_t * patrol_thread
Definition: thread.c:4281
struct rb_mutex_struct * next_mutex
Definition: thread.c:382
void ruby_thread_stack_overflow(rb_thread_t *th)
Definition: thread.c:2031
#define RARRAY_LEN(a)
Definition: ruby.h:899
void rb_bug(const char *fmt,...)
Definition: error.c:290
static VALUE rb_thread_priority(VALUE thread)
Definition: thread.c:3027
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4013
#define FALSE
Definition: nkf.h:174
#define rb_hash_lookup
Definition: tcltklib.c:268
#define mutex_mark
Definition: thread.c:4104
static int lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
Definition: thread.c:4227
static const char * rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
Definition: thread.c:4382
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:1688
static void thread_cleanup_func_before_exec(void *th_ptr)
Definition: thread.c:441
#define INT2NUM(x)
Definition: ruby.h:1178
static VALUE trap(int sig, sighandler_t func, VALUE command)
Definition: signal.c:900
struct rb_thread_struct * running_thread
Definition: vm_core.h:344
int i
Definition: win32ole.c:784
VALUE rb_make_exception(int argc, VALUE *argv)
Definition: eval.c:642
void rb_mutex_allow_trap(VALUE self, int val)
Definition: thread.c:4535
struct timeval * tv
Definition: thread.c:3649
Definition: st.h:77
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:916
static VALUE rb_thread_abort_exc_set(VALUE thread, VALUE val)
Definition: thread.c:2470
VALUE rb_mutex_owned_p(VALUE self)
Definition: thread.c:4367
st_table * local_storage
Definition: vm_core.h:579
double limit
Definition: thread.c:733
void rb_thread_lock_unlock(rb_thread_lock_t *lock)
Definition: thread.c:281
Definition: st.h:108
int pending_interrupt_queue_checked
Definition: vm_core.h:551
VALUE rb_eSignal
Definition: error.c:507
static void rb_mutex_abandon_all(rb_mutex_t *mutexes)
Definition: thread.c:4435
struct rb_blocking_region_buffer * rb_thread_blocking_region_begin(void)
Definition: thread.c:1183
rb_fdset_t * read
Definition: thread.c:3646
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4875
#define NUM2INT(x)
Definition: ruby.h:622
int count
Definition: encoding.c:51
static int max(int a, int b)
Definition: strftime.c:141
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1497
static VALUE thgroup_enclosed_p(VALUE group)
Definition: thread.c:4002
int rb_thread_check_trap_pending(void)
Definition: thread.c:1100
if(dispIdMember==DISPID_VALUE)
Definition: win32ole.c:791
void rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:1891
VALUE rb_thread_list(void)
Definition: thread.c:2342
static VALUE thread_join_sleep(VALUE arg)
Definition: thread.c:759
rb_thread_lock_t interrupt_lock
Definition: vm_core.h:556
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4852
pthread_mutex_t rb_thread_lock_t
rb_thread_lock_t thread_destruct_lock
Definition: vm_core.h:341
#define CLASS_OF(v)
Definition: ruby.h:448
static int terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3855
void rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
Definition: thread.c:1192
static VALUE rb_thread_variables(VALUE thread)
Definition: thread.c:2966
struct rb_thread_struct * th
Definition: vm_core.h:489
void rb_unblock_function_t(void *)
Definition: intern.h:835
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:2813
static VALUE recursive_list_access(void)
Definition: thread.c:4655
rb_unblock_function_t * func
Definition: vm_core.h:480
#define Qtrue
Definition: ruby.h:434
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:194
int st_insert(st_table *, st_data_t, st_data_t)
static void update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
Definition: thread.c:5145
static VALUE thread_s_new(int argc, VALUE *argv, VALUE klass)
Definition: thread.c:662
void rb_error_frozen(const char *what)
Definition: error.c:1972
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1016
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:552
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:929
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1030
VALUE rb_mod_ancestors(VALUE mod)
Definition: class.c:920
static VALUE mutex_initialize(VALUE self)
Definition: thread.c:4163
VALUE coverage
Definition: vm_core.h:219
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2007
long tv_sec
Definition: ossl_asn1.c:17
struct rb_thread_struct volatile * th
Definition: thread.c:380
static struct timeval double2timeval(double d)
Definition: thread.c:918
ID rb_frame_this_func(void)
Definition: eval.c:902
#define sysstack_error
Definition: vm_core.h:861
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:958
VALUE rb_eTypeError
Definition: error.c:511
VALUE rb_thread_stop(void)
Definition: thread.c:2291
#define TH_JUMP_TAG(th, st)
Definition: eval_intern.h:116
static VALUE mutex_alloc(VALUE klass)
Definition: thread.c:4145
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Definition: thread.c:4512
VALUE rb_catch_obj(VALUE, VALUE(*)(ANYARGS), VALUE)
static const rb_data_type_t mutex_data_type
Definition: thread.c:4128
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:822
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1066
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:108
st_table * living_threads
Definition: vm_core.h:346
void rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:712
static int handle_interrupt_arg_check_i(VALUE key, VALUE val)
Definition: thread.c:1623
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:465
#define rb_fd_zero(f)
Definition: intern.h:327
static VALUE rb_thread_safe_level(VALUE thread)
Definition: thread.c:2631
static VALUE rb_thread_aset(VALUE self, VALUE id, VALUE val)
Definition: thread.c:2781
VALUE rb_thread_current(void)
Definition: thread.c:2350
#define PRIxVALUE
Definition: ruby.h:145
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1495
#define OBJ_ID_EQL(obj_id, other)
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1780
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2002
static VALUE rb_mutex_sleep_forever(VALUE time)
Definition: thread.c:4448
static VALUE rb_thread_abort_exc(VALUE thread)
Definition: thread.c:2452
static void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
Definition: thread.c:1169
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1324
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3220
static void clear_coverage(void)
Definition: thread.c:3803
int rb_thread_alone(void)
Definition: thread.c:2904
VALUE rb_convert_type(VALUE, int, const char *, const char *)
Definition: object.c:2368
#define TH_EXEC_TAG()
Definition: eval_intern.h:111
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Definition: object.c:582
#define T_HASH
Definition: ruby.h:493
static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check)
Definition: thread.c:938
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:2663
VALUE rb_eSecurityError
Definition: error.c:520
#define DATA_PTR(dta)
Definition: ruby.h:985
#define RUBY_VM_SET_TRAP_INTERRUPT(th)
Definition: vm_core.h:918
static size_t thgroup_memsize(const void *ptr)
Definition: thread.c:3879
rb_thread_cond_t cond
Definition: thread.c:379
static VALUE sym_immediate
Definition: thread.c:82
static int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region, rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
Definition: thread.c:1151
void rb_gc_mark(VALUE ptr)
Definition: gc.c:2598
static void thread_shield_mark(void *ptr)
Definition: thread.c:4547
st_data_t st_index_t
Definition: st.h:63
#define TAG_RAISE
Definition: eval_intern.h:140
#define PUSH_TAG()
Definition: eval_intern.h:108
static size_t mutex_memsize(const void *ptr)
Definition: thread.c:4123
static volatile int system_working
Definition: thread.c:96
static VALUE thread_join(rb_thread_t *target_th, double delay)
Definition: thread.c:785
static VALUE remove_from_join_list(VALUE arg)
Definition: thread.c:738
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2133
VALUE rb_mutex_locked_p(VALUE self)
Definition: thread.c:4181
static int rb_threadptr_dead(rb_thread_t *th)
Definition: thread.c:2525
#define FIXNUM_P(f)
Definition: ruby.h:355
static VALUE rb_thread_alive_p(VALUE thread)
Definition: thread.c:2583
rb_fdset_t * write
Definition: thread.c:3647
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4863
#define rb_fd_rcopy(d, s)
Definition: thread.c:3330
static VALUE exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
Definition: thread.c:4780
void rb_thread_start_timer_thread(void)
Definition: thread.c:3782
static rb_fdset_t * init_set_fd(int fd, rb_fdset_t *fds)
Definition: thread.c:3633
const char * rb_obj_classname(VALUE)
Definition: variable.c:396
VALUE rb_cMutex
Definition: thread.c:79
#define RHASH_TBL(h)
Definition: ruby.h:928
int allow_trap
Definition: thread.c:383
#define RB_WAITFD_OUT
Definition: io.h:49
VALUE thgroup_default
Definition: vm_core.h:347
#define rb_fd_set(n, f)
Definition: intern.h:328
time_t tv_sec
Definition: missing.h:46
#define sym(x)
Definition: date_core.c:3715
static VALUE rb_thread_stop_p(VALUE thread)
Definition: thread.c:2606
static void thread_cleanup_func(void *th_ptr, int atfork)
Definition: thread.c:452
static double timeofday(void)
Definition: thread.c:1034
#define TAG_FATAL
Definition: eval_intern.h:142
int ruby_native_thread_p(void)
Definition: thread.c:5065
static VALUE rb_thread_s_abort_exc_set(VALUE self, VALUE val)
Definition: thread.c:2434
#define rb_fd_isset(n, f)
Definition: intern.h:330
Win32OLEIDispatch * p
Definition: win32ole.c:786
void rb_hash_foreach(VALUE hash, int(*func)(ANYARGS), VALUE farg)
Definition: hash.c:200
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2220
static VALUE rb_thread_s_main(VALUE klass)
Definition: thread.c:2384
void rb_exc_raise(VALUE mesg)
Definition: eval.c:527
static void rb_thread_wait_fd_rw(int fd, int read)
Definition: thread.c:3411
static VALUE sym_on_blocking
Definition: thread.c:83
int args
Definition: win32ole.c:785
VALUE * stack
Definition: vm_core.h:498
static void rb_thread_schedule_limits(unsigned long limits_us)
Definition: thread.c:1121
#define RB_TYPE_P(obj, type)
Definition: ruby.h:1537
void rb_reset_random_seed(void)
Definition: random.c:1443
int rb_thread_fd_writable(int fd)
Definition: thread.c:3437
#define RHASH(obj)
Definition: ruby.h:1102
static void rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
Definition: thread.c:330
static VALUE thgroup_s_alloc(VALUE klass)
Definition: thread.c:3908
#define POSFIXABLE(f)
Definition: ruby.h:356
#define RUBY_VM_INTERRUPTED_ANY(th)
Definition: vm_core.h:920
#define TH_POP_TAG()
Definition: eval_intern.h:101
int st_lookup(st_table *, st_data_t, st_data_t *)
static int thread_list_i(st_data_t key, st_data_t val, void *data)
Definition: thread.c:2302
#define MEMZERO(p, type, n)
Definition: ruby.h:1241
#define closed_stream_error
Definition: thread.c:98
static const char * thread_status_name(rb_thread_t *th)
Definition: thread.c:2506
rb_thread_t * target
Definition: thread.c:732
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:68
fd_set rb_fdset_t
Definition: intern.h:326
#define rb_fd_term(f)
Definition: intern.h:337
static VALUE rb_thread_priority_set(VALUE thread, VALUE prio)
Definition: thread.c:3062
double rb_num2dbl(VALUE)
Definition: object.c:2716
static int do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3334
static void sleep_for_polling(rb_thread_t *th)
Definition: thread.c:1057
int rb_block_given_p(void)
Definition: eval.c:672
#define EXEC_TAG()
Definition: eval_intern.h:113
VALUE locking_mutex
Definition: vm_core.h:558
static const rb_data_type_t thread_shield_data_type
Definition: thread.c:4552
#define val
long tv_usec
Definition: ossl_asn1.c:18
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1426
VALUE rb_eRuntimeError
Definition: error.c:510
rb_thread_lock_t lock
Definition: thread.c:378
static VALUE rb_thread_inspect(VALUE thread)
Definition: thread.c:2647
#define RB_WAITFD_PRI
Definition: io.h:48
#define PRIdVALUE
Definition: ruby.h:142
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *)
Definition: thread.c:1987
#define rb_fd_ptr(f)
Definition: intern.h:334
VALUE rb_mutex_trylock(VALUE self)
Definition: thread.c:4208
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:473
VALUE rb_ary_new(void)
Definition: array.c:424
void * blocking_region_buffer
Definition: vm_core.h:536
static VALUE exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
Definition: thread.c:4809
static VALUE thread_create_core(VALUE thval, VALUE args, VALUE(*fn)(ANYARGS))
Definition: thread.c:603
void Init_Thread(void)
Definition: thread.c:4947
VALUE rb_iv_get(VALUE, const char *)
Definition: variable.c:2583
#define JUMP_TAG(st)
Definition: eval_intern.h:120
rb_iseq_t * iseq
Definition: vm_core.h:428
VALUE vm_thread_backtrace(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:861
#define NIL_P(v)
Definition: ruby.h:446
static int rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
Definition: thread.c:1554
long tv_nsec
Definition: missing.h:47
void rb_thread_stop_timer_thread(int close_anyway)
Definition: thread.c:3768
#define UNLIKELY(x)
Definition: vm_core.h:115
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:499
static void rb_threadptr_ready(rb_thread_t *th)
Definition: thread.c:1981
int st_delete(st_table *, st_data_t *, st_data_t *)
int enclosed
Definition: thread.c:3874
#define rb_intern(str)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2202
void rb_thread_atfork_before_exec(void)
Definition: thread.c:3868
#define thread_debug
Definition: thread.c:211
static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
Definition: thread.c:1548
#define OBJ_FROZEN(x)
Definition: ruby.h:1163
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:3727
#define OBJ_UNTRUST(x)
Definition: ruby.h:1156
VALUE rb_class_inherited_p(VALUE, VALUE)
Definition: object.c:1480
int thread_abort_on_exception
Definition: vm_core.h:350
int argc
Definition: ruby.c:130
rb_thread_status
Definition: vm_core.h:455
#define Qfalse
Definition: ruby.h:433
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:474
VALUE rb_proc_location(VALUE self)
Definition: proc.c:758
static VALUE rb_thread_exit(void)
Definition: thread.c:2195
RUBY_EXTERN VALUE rb_cModule
Definition: ruby.h:1445
void rb_thread_check_ints(void)
Definition: thread.c:1090
#define RUBY_UBF_PROCESS
Definition: intern.h:844
void rb_exit(int status)
Definition: process.c:3530
void rb_thread_fd_close(int fd)
Definition: thread.c:2078
static VALUE coverage(const char *f, int n)
Definition: ripper.c:11865
RUBY_EXTERN int isinf(double)
Definition: isinf.c:56
#define T_NODE
Definition: ruby.h:506
VALUE rb_thread_shield_new(void)
Definition: thread.c:4590
volatile int sleeper
Definition: vm_core.h:352
VALUE rb_obj_alloc(VALUE)
Definition: object.c:1721
int err
Definition: win32.c:87
#define OBJ_FREEZE(x)
Definition: ruby.h:1164
#define EXIT_FAILURE
Definition: eval_intern.h:24
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4626
void rb_thread_atfork(void)
Definition: thread.c:3845
#define POP_TAG()
Definition: eval_intern.h:109
VALUE * machine_stack_start
Definition: vm_core.h:588
#define GVL_UNLOCK_BEGIN()
Definition: thread.c:137
static const rb_data_type_t thgroup_data_type
Definition: thread.c:3884
VALUE rb_thread_create(VALUE(*fn)(ANYARGS), void *arg)
Definition: thread.c:722
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:1723
static VALUE thread_s_current(VALUE klass)
Definition: thread.c:2365
#define FD_SET(fd, set)
Definition: win32.h:594
VALUE rb_cThreadShield
Definition: thread.c:80
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
Definition: thread.c:1051
#define TIMET_MAX
Definition: thread.c:76
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:105
#define ALLOC(type)
Definition: ruby.h:1224
void rb_thread_polling(void)
Definition: thread.c:1073
VALUE read
Definition: io.c:8222
#define GetMutexPtr(obj, tobj)
Definition: thread.c:4099
static VALUE rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4918
int rb_thread_select(int max, fd_set *read, fd_set *write, fd_set *except, struct timeval *timeout)
Definition: thread.c:3444
VALUE rb_yield(VALUE)
Definition: vm_eval.c:934
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1317
struct rb_unblock_callback oldubf
Definition: thread.c:112
#define rb_thread_set_current(th)
Definition: vm_core.h:896
int errno
#define TRUE
Definition: nkf.h:175
VALUE rb_uninterruptible(VALUE(*b_proc)(ANYARGS), VALUE data)
Definition: thread.c:5182
static int thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:3926
#define EXIT_SUCCESS
Definition: error.c:31
VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:357
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:559
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4606
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1270
int rb_get_next_signal(void)
Definition: signal.c:590
VALUE rb_hash_delete(VALUE hash, VALUE key)
Definition: hash.c:859
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3488
#define rb_fd_copy(d, s, n)
Definition: intern.h:331
static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg, struct rb_unblock_callback *old, int fail_if_interrupted)
Definition: thread.c:293
#define const
Definition: strftime.c:102
static int thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:2063
VALUE rb_hash_new(void)
Definition: hash.c:234
void ruby_xfree(void *x)
Definition: gc.c:3651
#define DELAY_INFTY
Definition: thread.c:729
int rb_threadptr_reset_raised(rb_thread_t *th)
Definition: thread.c:2053
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:1570
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
#define RUBY_VM_SET_TIMER_INTERRUPT(th)
Definition: vm_core.h:915
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:135
unsigned long ID
Definition: ruby.h:105
static VALUE thread_initialize(VALUE thread, VALUE args)
Definition: thread.c:697
handle_interrupt_timing
Definition: thread.c:1501
static void rb_check_deadlock(rb_vm_t *vm)
Definition: thread.c:5121
static VALUE rb_mutex_synchronize_m(VALUE self, VALUE args)
Definition: thread.c:4526
#define GVL_UNLOCK_END()
Definition: thread.c:142
#define Qnil
Definition: ruby.h:435
void rb_thread_sleep_forever(void)
Definition: thread.c:1020
VALUE rb_exc_new2(VALUE etype, const char *s)
Definition: error.c:542
static VALUE thread_shield_alloc(VALUE klass)
Definition: thread.c:4558
VALUE group
Definition: thread.c:3875
#define OBJ_TAINT(x)
Definition: ruby.h:1154
unsigned long VALUE
Definition: ruby.h:104
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4564
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:86
static VALUE result
Definition: nkf.c:40
RUBY_EXTERN VALUE rb_cThread
Definition: ruby.h:1459
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:3687
static int keys_i(VALUE key, VALUE value, VALUE ary)
Definition: thread.c:2942
#define UNINITIALIZED_VAR(x)
Definition: vm_core.h:121
#define RBASIC(obj)
Definition: ruby.h:1094
union select_args::@118 as
const char * rb_class2name(VALUE)
Definition: variable.c:389
struct rb_thread_struct * main_thread
Definition: vm_core.h:343
static int clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
Definition: thread.c:3789
int error
Definition: thread.c:3644
static VALUE rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
Definition: thread.c:1875
VALUE first_proc
Definition: vm_core.h:583
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1489
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:3714
static void rb_thread_shield_waiting_dec(VALUE b)
Definition: thread.c:4580
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:94
void rb_thread_schedule(void)
Definition: thread.c:1138
VALUE rb_mutex_new(void)
Definition: thread.c:4169
static VALUE rb_thread_variable_get(VALUE thread, VALUE id)
Definition: thread.c:2817
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
Definition: eval.c:804
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4887
static VALUE thread_value(VALUE self)
Definition: thread.c:905
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
Definition: thread.c:322
rb_atomic_t interrupt_flag
Definition: vm_core.h:554
static void timer_thread_function(void *)
Definition: thread.c:3737
void rb_thread_wait_fd(int fd)
Definition: thread.c:3431
st_table * st_init_numtable(void)
Definition: st.c:272
VALUE rb_blocking_function_t(void *)
Definition: intern.h:836
void rb_sys_fail(const char *mesg)
Definition: error.c:1899
VALUE rb_thread_main(void)
Definition: thread.c:2371
void xfree(void *)
static VALUE rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4901
int abort_on_exception
Definition: vm_core.h:613
static VALUE rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
Definition: thread.c:1790
VALUE(* first_func)(ANYARGS)
Definition: vm_core.h:585
enum rb_thread_status status
Definition: vm_core.h:531
static void st_delete_wrap(st_table *table, st_data_t key)
Definition: thread.c:101
void rb_thread_sleep(int sec)
Definition: thread.c:1115
#define rb_fd_max(f)
Definition: intern.h:338
static VALUE thread_s_pass(VALUE klass)
Definition: thread.c:1464
static VALUE thread_join_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:877
#define RSTRING_PTR(str)
Definition: ruby.h:866
#define thread_start_func_2(th, st, rst)
Definition: thread.c:215
static void rb_thread_sleep_deadly(void)
Definition: thread.c:1027
enum rb_thread_status prev_status
Definition: thread.c:111
static VALUE mutex_sleep(int argc, VALUE *argv, VALUE self)
Definition: thread.c:4495
VALUE * machine_stack_end
Definition: vm_core.h:589
VALUE first_args
Definition: vm_core.h:584
void rb_thread_recycle_stack_release(VALUE *)
Definition: vm.c:1766
void rb_thread_terminate_all(void)
Definition: thread.c:407
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4565
static void rb_threadptr_to_kill(rb_thread_t *th)
Definition: thread.c:1881
int size
Definition: encoding.c:52
void rb_reset_coverages(void)
Definition: thread.c:5175
#define f
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Definition: hash.c:571
#define INT2FIX(i)
Definition: ruby.h:241
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:1973
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4771
int rb_sourceline(void)
Definition: vm.c:816
void rb_thread_lock_destroy(rb_thread_lock_t *lock)
Definition: thread.c:287
static VALUE thgroup_list(VALUE group)
Definition: thread.c:3951
unsigned long interrupt_mask
Definition: vm_core.h:555
VALUE rb_block_proc(void)
Definition: proc.c:479
#define xmalloc
Definition: defines.h:64
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:69
#define ANYARGS
Definition: defines.h:57
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2492
struct rb_unblock_callback unblock
Definition: vm_core.h:557
static VALUE rb_thread_aref(VALUE thread, VALUE id)
Definition: thread.c:2742
unsigned long rb_event_flag_t
Definition: ruby.h:1603
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:152
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:560
#define RARRAY_PTR(a)
Definition: ruby.h:904
#define rb_fd_select(n, rfds, wfds, efds, timeout)
Definition: intern.h:339
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:129
void rb_thread_reset_timer_thread(void)
Definition: thread.c:3776
static VALUE rb_thread_status(VALUE thread)
Definition: thread.c:2554
int rb_signal_buff_size(void)
Definition: signal.c:560
static void rb_thread_shield_waiting_inc(VALUE b)
Definition: thread.c:4569
uint8_t key[16]
Definition: random.c:1370
#define rb_fd_clr(n, f)
Definition: intern.h:329
#define LONG2FIX(i)
Definition: ruby.h:242
#define RTEST(v)
Definition: ruby.h:445
#define FD_CLR(f, s)
Definition: win32.h:612
VALUE root_fiber
Definition: vm_core.h:608
rb_thread_t * waiting
Definition: thread.c:732
#define OBJ_INFECT(x, s)
Definition: ruby.h:1157
struct rb_encoding_entry * list
Definition: encoding.c:50
#define ETIMEDOUT
Definition: win32.h:549
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4637
static VALUE rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
Definition: thread.c:1567
static void recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4719
v
Definition: win32ole.c:798
static VALUE thread_start(VALUE klass, VALUE args)
Definition: thread.c:690
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1019
static VALUE rb_mutex_wait_for(VALUE time)
Definition: thread.c:4455
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:1778
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:452
static unsigned int hash(const char *str, unsigned int len)
Definition: lex.c:56
int rb_atomic_t
Definition: ruby_atomic.h:93
static VALUE thread_raise_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:2105
#define rb_safe_level()
Definition: tcltklib.c:94
#define rb_fd_resize(n, f)
Definition: intern.h:333
#define rb_thread_shield_waiting(b)
Definition: thread.c:4566
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_)
Definition: vm_core.h:993
static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check)
Definition: thread.c:978
#define ruby_debug
Definition: ruby.h:1364
#define RUBY_EVENT_COVERAGE
Definition: ruby.h:1601
#define xrealloc
Definition: defines.h:67
RUBY_EXTERN VALUE rb_eIOError
Definition: ruby.h:1476
#define ID2SYM(x)
Definition: ruby.h:363
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1331
void rb_threadptr_trap_interrupt(rb_thread_t *th)
Definition: thread.c:353
unsigned long st_data_t
Definition: st.h:35
#define StringValuePtr(v)
Definition: ruby.h:547
VALUE rb_eFatal
Definition: error.c:508
int forever
Definition: thread.c:734
#define rb_fd_init_copy(d, s)
Definition: intern.h:336
static int terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
Definition: thread.c:359
struct rb_thread_list_struct * next
Definition: vm_core.h:488
#define RUBY_VM_INTERRUPTED(th)
Definition: vm_core.h:919
#define rb_fd_init(f)
Definition: intern.h:335
VALUE rb_inspect(VALUE)
Definition: object.c:402
static VALUE rb_thread_s_abort_exc(void)
Definition: thread.c:2403
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:2748
static void recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4750
#define rb_fd_dup(d, s)
Definition: intern.h:332
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:273
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
Definition: thread.c:1604
void st_clear(st_table *)
Definition: st.c:308
void rb_secure(int)
Definition: safe.c:79
rb_fdset_t * except
Definition: thread.c:3648
static void mutex_locked(rb_thread_t *th, VALUE self)
Definition: thread.c:4189
#define FD_ISSET(f, s)
Definition: win32.h:615
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1004
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start))
VALUE vm_thread_backtrace_locations(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:867
static VALUE rb_thread_keys(VALUE self)
Definition: thread.c:2929
#define GetThreadShieldPtr(obj)
Definition: thread.c:4563
#define vsnprintf
Definition: subst.h:7
#define RB_WAITFD_IN
Definition: io.h:47
VALUE pending_interrupt_queue
Definition: vm_core.h:550
#define RHASH_EMPTY_P(h)
Definition: ruby.h:932
VALUE write
Definition: io.c:8222
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1401
static void getclockofday(struct timeval *tp)
Definition: thread.c:962
static VALUE select_single_cleanup(VALUE ptr)
Definition: thread.c:3675
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:907
static VALUE select_single(VALUE ptr)
Definition: thread.c:3653
struct rb_mutex_struct rb_mutex_t
#define eTerminateSignal
Definition: thread.c:95
int cond_waiting
Definition: thread.c:381
VALUE rb_get_coverages(void)
Definition: thread.c:5162
VALUE except
Definition: io.c:8222
VALUE rb_eSystemExit
Definition: error.c:505
#define NULL
Definition: _sdbm.c:103
#define FIX2LONG(x)
Definition: ruby.h:353
#define Qundef
Definition: ruby.h:436
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
static int thread_keys_i(ID key, VALUE value, VALUE ary)
Definition: thread.c:2891
static void * call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
Definition: thread.c:1203
#define GET_THROWOBJ_STATE(obj)
Definition: eval_intern.h:154
static VALUE rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
Definition: thread.c:2842
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:347
st_index_t num_entries
Definition: st.h:93
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2229
VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1364
void rb_obj_call_init(VALUE obj, int argc, VALUE *argv)
Definition: eval.c:1227
static void mutex_free(void *ptr)
Definition: thread.c:4107
VALUE rb_mutex_unlock(VALUE self)
Definition: thread.c:4422
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:883
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1344
int st_foreach(st_table *, int(*)(ANYARGS), st_data_t)
Definition: st.c:1000
VALUE rb_str_new2(const char *)
#define GET_THROWOBJ_VAL(obj)
Definition: eval_intern.h:152
void rb_set_coverages(VALUE coverages)
Definition: thread.c:5168
ID rb_to_id(VALUE)
Definition: string.c:8154
int select(int num_fds, fd_set *in_fds, fd_set *out_fds, fd_set *ex_fds, struct timeval *timeout)
VALUE rb_eThreadError
Definition: eval.c:690
static VALUE rb_thread_key_p(VALUE self, VALUE key)
Definition: thread.c:2874
VALUE rb_eArgError
Definition: error.c:512
#define RUBY_VM_CHECK_INTS_BLOCKING(th)
Definition: vm_core.h:937
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread.c:4134
static VALUE rb_thread_s_kill(VALUE obj, VALUE th)
Definition: thread.c:2178
static VALUE recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
Definition: thread.c:4684
rb_thread_id_t thread_id
Definition: vm_core.h:530
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2264
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2017
static void lock_interrupt(void *ptr)
Definition: thread.c:4267
static void rb_thread_atfork_internal(int(*atfork)(st_data_t, st_data_t, st_data_t))
Definition: thread.c:3812
char ** argv
Definition: ruby.c:131
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1107
struct timeval rb_time_timeval(VALUE)
Definition: time.c:2498
VALUE rb_mutex_lock(VALUE self)
Definition: thread.c:4291
int rb_threadptr_set_raised(rb_thread_t *th)
Definition: thread.c:2043
#define RUBY_UBF_IO
Definition: intern.h:843
static enum handle_interrupt_timing rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
Definition: thread.c:1509
static int terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3828
#define GET_VM()
Definition: vm_core.h:876
static ID recursive_key
Definition: thread.c:4646