/* Extracted from an HTML source listing of Ruby 2.0.0p353
 * (2013-11-22, revision 43784), file cont.c.  The embedded line
 * numbers and missing lines below are extraction artifacts. */
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author: nagachika $
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "ruby/ruby.h"
13 #include "internal.h"
14 #include "vm_core.h"
15 #include "gc.h"
16 #include "eval_intern.h"
17 
18 /* FIBER_USE_NATIVE enables Fiber performance improvement using system
19  * dependent method such as make/setcontext on POSIX system or
20  * CreateFiber() API on Windows.
21  * This hack make Fiber context switch faster (x2 or more).
22  * However, it decrease maximum number of Fiber. For example, on the
23  * 32bit POSIX OS, ten or twenty thousands Fiber can be created.
24  *
25  * Details is reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
26  * in Proc. of 51th Programming Symposium, pp.21--28 (2010) (in Japanese).
27  */
28 
29 #if !defined(FIBER_USE_NATIVE)
30 # if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
31 # if 0
32 # elif defined(__NetBSD__)
33 /* On our experience, NetBSD doesn't support using setcontext() and pthread
34  * simultaneously. This is because pthread_self(), TLS and other information
35  * are represented by stack pointer (higher bits of stack pointer).
36  * TODO: check such constraint on configure.
37  */
38 # define FIBER_USE_NATIVE 0
39 # elif defined(__sun)
40 /* On Solaris because resuming any Fiber caused SEGV, for some reason.
41  */
42 # define FIBER_USE_NATIVE 0
43 # elif defined(__ia64)
44 /* At least, Linux/ia64's getcontext(3) doesn't save register window.
45  */
46 # define FIBER_USE_NATIVE 0
47 # elif defined(__GNU__)
48 /* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
49  * and swapcontext functions. Disabling their usage till support is
50  * implemented. More info at
51  * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
52  */
53 # define FIBER_USE_NATIVE 0
54 # else
55 # define FIBER_USE_NATIVE 1
56 # endif
57 # elif defined(_WIN32)
58 # if _WIN32_WINNT >= 0x0400
59 /* only when _WIN32_WINNT >= 0x0400 on Windows because Fiber APIs are
60  * supported only such building (and running) environments.
61  * [ruby-dev:41192]
62  */
63 # define FIBER_USE_NATIVE 1
64 # endif
65 # endif
66 #endif
67 #if !defined(FIBER_USE_NATIVE)
68 #define FIBER_USE_NATIVE 0
69 #endif
70 
71 #if FIBER_USE_NATIVE
72 #ifndef _WIN32
73 #include <unistd.h>
74 #include <sys/mman.h>
75 #include <ucontext.h>
76 #endif
77 #define RB_PAGE_SIZE (pagesize)
78 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
79 static long pagesize;
80 #endif /*FIBER_USE_NATIVE*/
81 
82 #define CAPTURE_JUST_VALID_VM_STACK 1
83 
88 };
89 
90 typedef struct rb_context_struct {
92  VALUE self;
93  int argc;
96 #ifdef CAPTURE_JUST_VALID_VM_STACK
97  size_t vm_stack_slen; /* length of stack (head of th->stack) */
98  size_t vm_stack_clen; /* length of control frames (tail of th->stack) */
99 #endif
102 #ifdef __ia64
103  VALUE *machine_register_stack;
104  VALUE *machine_register_stack_src;
105  int machine_register_stack_size;
106 #endif
110 } rb_context_t;
111 
116 };
117 
118 #if FIBER_USE_NATIVE && !defined(_WIN32)
119 #define MAX_MAHINE_STACK_CACHE 10
120 static int machine_stack_cache_index = 0;
121 typedef struct machine_stack_cache_struct {
122  void *ptr;
123  size_t size;
124 } machine_stack_cache_t;
125 static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE];
126 static machine_stack_cache_t terminated_machine_stack;
127 #endif
128 
129 typedef struct rb_fiber_struct {
135  /* If a fiber invokes "transfer",
136  * then this fiber can't "resume" any more after that.
137  * You shouldn't mix "transfer" and "resume".
138  */
140 
141 #if FIBER_USE_NATIVE
142 #ifdef _WIN32
143  void *fib_handle;
144 #else
145  ucontext_t context;
146 #endif
147 #endif
148 } rb_fiber_t;
149 
154 
155 #define GetContPtr(obj, ptr) \
156  TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))
157 
158 #define GetFiberPtr(obj, ptr) do {\
159  TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
160  if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
161 } while (0)
162 
163 NOINLINE(static VALUE cont_capture(volatile int *stat));
164 
165 #define THREAD_MUST_BE_RUNNING(th) do { \
166  if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
167  } while (0)
168 
/*
 * GC mark callback for a captured context (continuation or fiber).
 *
 * Marks the continuation's return value and the heap copies of the VM
 * stack and machine stack.  A fiber's machine stack is only marked while
 * the fiber is RUNNING and is not the thread's currently active fiber.
 *
 * NOTE(review): several lines are missing from this extraction — the
 * rb_gc_mark_locations(...) call-head lines before each
 * "cont->... + ...size);" argument line.  Remaining tokens kept as-is.
 */
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        rb_gc_mark(cont->value);
        /* NOTE(review): marks of further context fields appear to be
         * missing here in this extraction — confirm against upstream. */

        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            /* NOTE(review): missing "rb_gc_mark_locations(cont->vm_stack," line */
            cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
            /* NOTE(review): "rb_gc_mark_localtion" looks like a typo for
             * rb_gc_mark_locations; this #else branch is dead while
             * CAPTURE_JUST_VALID_VM_STACK is defined above. */
            rb_gc_mark_localtion(cont->vm_stack,
                                 cont->vm_stack, cont->saved_thread.stack_size);
#endif
        }

        if (cont->machine_stack) {
            if (cont->type == CONTINUATION_CONTEXT) {
                /* cont: always mark the saved machine stack copy */
                /* NOTE(review): missing call-head line here as well */
                cont->machine_stack + cont->machine_stack_size);
            }
            else {
                /* fiber: mark only a suspended, still-running fiber's stack */
                rb_thread_t *th;
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                GetThreadPtr(cont->saved_thread.self, th);
                if ((th->fiber != cont->self) && fib->status == RUNNING) {
                    cont->machine_stack + cont->machine_stack_size);
                }
            }
        }
#ifdef __ia64
        /* ia64 keeps a separate register backing store; mark its copy too */
        if (cont->machine_register_stack) {
            rb_gc_mark_locations(cont->machine_register_stack,
                                 cont->machine_register_stack + cont->machine_register_stack_size);
        }
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
215 
/*
 * GC free callback for a captured context (continuation or fiber).
 *
 * Releases the saved VM stack and, for native fibers, the machine stack
 * resources: DeleteFiber() on Windows, munmap() of the ucontext stack
 * elsewhere.  The root fiber's handle/stack is never released here, and
 * the currently running fiber's stack is left alone (see the TODO below).
 *
 * NOTE(review): some lines are missing from this extraction (the
 * CONTINUATION_CONTEXT branch body and the !FIBER_USE_NATIVE branch
 * body); remaining tokens kept as-is.
 */
static void
cont_free(void *ptr)
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        /* NOTE(review): fflush(stdout) here looks like leftover debug output */
        RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
#if FIBER_USE_NATIVE
        if (cont->type == CONTINUATION_CONTEXT) {
            /* cont */
            /* NOTE(review): branch body missing in this extraction —
             * presumably frees the continuation's machine-stack copy */
        }
        else {
            /* fiber */
#ifdef _WIN32
            if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
                /* don't delete root fiber handle */
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                if (fib->fib_handle) {
                    DeleteFiber(fib->fib_handle);
                }
            }
#else /* not WIN32 */
            if (GET_THREAD()->fiber != cont->self) {
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                if (fib->context.uc_stack.ss_sp) {
                    if (cont->type == ROOT_FIBER_CONTEXT) {
                        rb_bug("Illegal root fiber parameter");
                    }
                    munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
                }
            }
            else {
                /* It may reached here when finalize */
                /* TODO examine whether it is a bug */
                /* rb_bug("cont_free: release self"); */
            }
#endif
        }
#else /* not FIBER_USE_NATIVE */
        /* NOTE(review): non-native cleanup line missing in this extraction */
#endif
#ifdef __ia64
        RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
        /* NOTE(review): a line (likely freeing cont->machine_stack) is
         * missing here in this extraction */

        /* free rb_cont_t or rb_fiber_t */
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}
268 
269 static size_t
270 cont_memsize(const void *ptr)
271 {
272  const rb_context_t *cont = ptr;
273  size_t size = 0;
274  if (cont) {
275  size = sizeof(*cont);
276  if (cont->vm_stack) {
277 #ifdef CAPTURE_JUST_VALID_VM_STACK
278  size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
279 #else
280  size_t n = cont->saved_thread.stack_size;
281 #endif
282  size += n * sizeof(*cont->vm_stack);
283  }
284 
285  if (cont->machine_stack) {
286  size += cont->machine_stack_size * sizeof(*cont->machine_stack);
287  }
288 #ifdef __ia64
289  if (cont->machine_register_stack) {
290  size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
291  }
292 #endif
293  }
294  return size;
295 }
296 
297 static void
298 fiber_mark(void *ptr)
299 {
300  RUBY_MARK_ENTER("cont");
301  if (ptr) {
302  rb_fiber_t *fib = ptr;
303  rb_gc_mark(fib->prev);
304  cont_mark(&fib->cont);
305  }
306  RUBY_MARK_LEAVE("cont");
307 }
308 
/*
 * Insert +fib+ into the circular doubly-linked list of fibers,
 * immediately after the currently running fiber.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "fiber_link_join(rb_fiber_t *fib)" (it is called with a
 * single fiber argument from fiber_init below).
 */
static void
{
    VALUE current_fibval = rb_fiber_current();
    rb_fiber_t *current_fib;
    GetFiberPtr(current_fibval, current_fib);

    /* join fiber link */
    fib->next_fiber = current_fib->next_fiber;
    fib->prev_fiber = current_fib;
    current_fib->next_fiber->prev_fiber = fib;
    current_fib->next_fiber = fib;
}
322 
/*
 * Unlink +fib+ from the circular doubly-linked fiber list by splicing
 * its neighbours together.  The fiber's own link fields are left stale.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "fiber_link_remove(rb_fiber_t *fib)" (called with a single
 * fiber argument from fiber_free).
 */
static void
{
    fib->prev_fiber->next_fiber = fib->next_fiber;
    fib->next_fiber->prev_fiber = fib->prev_fiber;
}
329 
/*
 * GC free callback for a Fiber object: unlinks it from the fiber list
 * and releases the embedded context via cont_free().
 *
 * NOTE(review): the continuation of the non-root-fiber condition and
 * its body are missing from this extraction (presumably freeing the
 * fiber's thread-local storage table); remaining tokens kept as-is.
 */
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
346 
/*
 * GC memsize callback for a Fiber object: the fiber struct itself plus
 * whatever the embedded context owns (via cont_memsize).
 *
 * NOTE(review): the non-root-fiber branch body is missing from this
 * extraction — presumably it adds the size of the fiber's local
 * storage table; confirm against upstream.
 */
static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fib = ptr;
    size_t size = 0;
    if (ptr) {
        size = sizeof(*fib);
        if (fib->cont.type != ROOT_FIBER_CONTEXT) {
        }
        size += cont_memsize(&fib->cont);
    }
    return size;
}
361 
/*
 * Returns Qtrue when the object is a Fiber (i.e. wraps fiber_data_type
 * typed data), Qfalse otherwise.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "rb_obj_is_fiber(VALUE obj)" given the parameter name used
 * in the body.
 */
VALUE
{
    if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}
372 
373 static void
375 {
376  size_t size;
377 
379 #ifdef __ia64
380  th->machine_register_stack_end = rb_ia64_bsp();
381 #endif
382 
383  if (th->machine_stack_start > th->machine_stack_end) {
386  }
387  else {
390  }
391 
392  if (cont->machine_stack) {
393  REALLOC_N(cont->machine_stack, VALUE, size);
394  }
395  else {
396  cont->machine_stack = ALLOC_N(VALUE, size);
397  }
398 
400  MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);
401 
402 #ifdef __ia64
403  rb_ia64_flushrs();
404  size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
405  cont->machine_register_stack_src = th->machine_register_stack_start;
406  if (cont->machine_register_stack) {
407  REALLOC_N(cont->machine_register_stack, VALUE, size);
408  }
409  else {
410  cont->machine_register_stack = ALLOC_N(VALUE, size);
411  }
412 
413  MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
414 #endif
415 }
416 
417 static const rb_data_type_t cont_data_type = {
418  "continuation",
420 };
421 
422 static void
424 {
425  /* save thread context */
426  cont->saved_thread = *th;
427  /* saved_thread->machine_stack_(start|end) should be NULL */
428  /* because it may happen GC afterward */
431 #ifdef __ia64
432  cont->saved_thread.machine_register_stack_start = 0;
433  cont->saved_thread.machine_register_stack_end = 0;
434 #endif
435 }
436 
437 static void
439 {
440  /* save thread context */
441  cont_save_thread(cont, th);
442  cont->saved_thread.local_storage = 0;
443 }
444 
445 static rb_context_t *
447 {
449  volatile VALUE contval;
450  rb_thread_t *th = GET_THREAD();
451 
453  contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
454  cont->self = contval;
455  cont_init(cont, th);
456  return cont;
457 }
458 
/*
 * Capture the current execution state into a new Continuation object.
 *
 * Copies the thread's VM stack to the heap (either just the live head
 * and the control-frame tail, or the whole stack, depending on
 * CAPTURE_JUST_VALID_VM_STACK), saves the machine stack, then setjmps.
 *
 * Returns through two paths: on initial capture *stat is set to 0 and
 * the continuation object is returned; when the continuation is later
 * invoked, the longjmp lands back here, *stat is set to 1, and the
 * value passed to the continuation is returned instead (argc == -1
 * marks a stored exception, which is re-raised).
 *
 * NOTE(review): lines are missing from this extraction — the
 * declaration of the local "cont" pointer and, before cont_new, likely
 * the running-thread check / VM-stack preparation; tokens kept as-is.
 */
static VALUE
cont_capture(volatile int *stat)
{
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* slen = live portion at the head of th->stack; clen = control
     * frames at the tail, starting at th->cfp */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    /* the saved thread must not own the live stack pointer */
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* second return: the continuation was invoked */
        volatile VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* first return: hand back the freshly captured continuation */
        *stat = 0;
        return contval;
    }
}
500 
/*
 * Restore the saved thread state of a context into the current thread.
 *
 * For a continuation, the captured VM stack is copied back into the
 * (possibly fiber-owned) live stack; for a fiber, the thread simply
 * adopts the fiber's own stack, size, and local storage.  In both
 * cases the scalar thread fields (cfp, safe level, tags, errinfo, ...)
 * are restored from the saved copy.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "cont_restore_thread(rb_context_t *cont)" (it is called
 * with a single context argument from cont_restore_1 and
 * fiber_setcontext).
 */
static void
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            /* the stack to restore into belongs to the target fiber */
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        /* head of the stack, then the control frames at the tail */
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
    th->root_lep = sth->root_lep;
    th->root_svar = sth->root_svar;
}
548 
549 #if FIBER_USE_NATIVE
550 #ifdef _WIN32
/*
 * (Win32 native fibers) Record the current machine stack base for the
 * thread: takes the address of a local, rounds it to the page boundary
 * via RB_PAGE_MASK, and adjusts by one page depending on stack growth
 * direction (STACK_UPPER).
 */
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
560 
/*
 * (Win32 native fibers) Entry point handed to CreateFiberEx: fixes up
 * the thread's notion of the machine stack base for the new fiber, then
 * runs the fiber body.  rb_fiber_start() never returns normally.
 */
static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
567 #else /* _WIN32 */
568 
569 /*
570  * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
571  * if MAP_STACK is passed.
572  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
573  */
574 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
575 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
576 #else
577 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
578 #endif
579 
/*
 * Allocate a machine stack of +size+ bytes for a native (non-Win32)
 * fiber.  Reuses the most recent entry of the machine-stack cache when
 * its size matches; otherwise mmaps a fresh anonymous region and
 * protects one guard page at the stack-growth end.
 *
 * Raises FiberError when mmap/mprotect fail; rb_bug()s on a cache size
 * mismatch (only one canonical stack size is supported — see TODO).
 *
 * NOTE(review): one line is missing from this extraction right after
 * "void *page;" — possibly a stack-growth-direction detection
 * statement; confirm against upstream.
 */
static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
        /* cache stores its size in VALUE slots, not bytes */
        if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
            ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
            machine_stack_cache_index--;
            machine_stack_cache[machine_stack_cache_index].ptr = NULL;
            machine_stack_cache[machine_stack_cache_index].size = 0;
        }
        else{
            /* TODO handle multiple machine stack size */
            rb_bug("machine_stack_cache size is not canonicalized");
        }
    }
    else {
        void *page;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
        if (ptr == MAP_FAILED) {
            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
        }

        /* guard page setup: protect the page at the far (growth) end */
        page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            rb_raise(rb_eFiberError, "mprotect failed");
        }
    }

    return ptr;
}
615 #endif
616 
617 static void
618 fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
619 {
620  rb_thread_t *sth = &fib->cont.saved_thread;
621 
622 #ifdef _WIN32
623  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
624  if (!fib->fib_handle) {
625  /* try to release unnecessary fibers & retry to create */
626  rb_gc();
627  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
628  if (!fib->fib_handle) {
629  rb_raise(rb_eFiberError, "can't create fiber");
630  }
631  }
633 #else /* not WIN32 */
634  ucontext_t *context = &fib->context;
635  char *ptr;
637 
638  getcontext(context);
639  ptr = fiber_machine_stack_alloc(size);
640  context->uc_link = NULL;
641  context->uc_stack.ss_sp = ptr;
642  context->uc_stack.ss_size = size;
643  makecontext(context, rb_fiber_start, 0);
644  sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
645  sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
646 #endif
647 #ifdef __ia64
648  sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
649 #endif
650 }
651 
652 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
653 
654 static void
655 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
656 {
657  rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
658 
659  if (newfib->status != RUNNING) {
660  fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
661  }
662 
663  /* restore thread context */
664  cont_restore_thread(&newfib->cont);
666  if (sth->machine_stack_end && (newfib != oldfib)) {
667  rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
668  }
669 
670  /* save oldfib's machine stack */
671  if (oldfib->status != TERMINATED) {
674  if (STACK_DIR_UPPER(0, 1)) {
676  oldfib->cont.machine_stack = th->machine_stack_end;
677  }
678  else {
680  oldfib->cont.machine_stack = th->machine_stack_start;
681  }
682  }
683  /* exchange machine_stack_start between oldfib and newfib */
686  /* oldfib->machine_stack_end should be NULL */
687  oldfib->cont.saved_thread.machine_stack_end = 0;
688 #ifndef _WIN32
689  if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
690  rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
691  }
692 #endif
693 
694  /* swap machine context */
695 #ifdef _WIN32
696  SwitchToFiber(newfib->fib_handle);
697 #else
698  swapcontext(&oldfib->context, &newfib->context);
699 #endif
700 }
701 #endif
702 
704 
/*
 * Second half of continuation restoration: restore the saved thread
 * state, copy the saved machine stack back over the live C stack, and
 * longjmp into the captured jmpbuf (never returns).  Must only run once
 * cont_restore_0 has grown the stack clear of the target region.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "cont_restore_1(rb_context_t *cont)" — as is the head of
 * the machine-stack MEMCPY call inside the first if; tokens kept as-is.
 */
static void
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH: graft the current SEH frame pointer
         * into the saved jump buffer before jumping */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        /* NOTE(review): missing "MEMCPY(cont->machine_stack_src,
         * cont->machine_stack," call-head lines here */
        VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
735 
737 
738 #ifdef __ia64
739 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
740 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
741 static volatile int C(a), C(b), C(c), C(d), C(e);
742 static volatile int C(f), C(g), C(h), C(i), C(j);
743 static volatile int C(k), C(l), C(m), C(n), C(o);
744 static volatile int C(p), C(q), C(r), C(s), C(t);
745 #if 0
746 {/* the above lines make cc-mode.el confused so much */}
747 #endif
748 int rb_dummy_false = 0;
749 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
750 static void
751 register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
752 {
753  if (rb_dummy_false) {
754  /* use registers as much as possible */
755  E(a) = E(b) = E(c) = E(d) = E(e) =
756  E(f) = E(g) = E(h) = E(i) = E(j) =
757  E(k) = E(l) = E(m) = E(n) = E(o) =
758  E(p) = E(q) = E(r) = E(s) = E(t) = 0;
759  E(a) = E(b) = E(c) = E(d) = E(e) =
760  E(f) = E(g) = E(h) = E(i) = E(j) =
761  E(k) = E(l) = E(m) = E(n) = E(o) =
762  E(p) = E(q) = E(r) = E(s) = E(t) = 0;
763  }
764  if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
765  register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
766  }
767  cont_restore_0(cont, vp);
768 }
769 #undef C
770 #undef E
771 #endif
772 
/*
 * First half of continuation restoration: make sure the current C stack
 * frame lies outside the region that the saved machine stack will be
 * copied back into, then call cont_restore_1() (which does the copy and
 * the longjmp).
 *
 * If the current frame still overlaps the target region, the stack is
 * grown past it — with a single alloca() when available, otherwise by
 * recursing with a 1024-VALUE pad array per frame.  Both stack growth
 * directions are handled; when STACK_GROW_DIRECTION is unknown at
 * compile time, the direction is decided at runtime by comparing the
 * caller's frame address with the local array.
 *
 * The "space[0] = *sp" stores exist only to keep the alloca'd block
 * from being optimized away.
 */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                /* one alloca jumps the frame past the target region */
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                /* no alloca: recurse until the pad frames cover it */
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
821 #ifdef __ia64
822 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
823 #endif
824 
825 /*
826  * Document-class: Continuation
827  *
828  * Continuation objects are generated by Kernel#callcc,
829  * after having +require+d <i>continuation</i>. They hold
830  * a return address and execution context, allowing a nonlocal return
831  * to the end of the <code>callcc</code> block from anywhere within a
832  * program. Continuations are somewhat analogous to a structured
833  * version of C's <code>setjmp/longjmp</code> (although they contain
834  * more state, so you might consider them closer to threads).
835  *
836  * For instance:
837  *
838  * require "continuation"
839  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
840  * callcc{|cc| $cc = cc}
841  * puts(message = arr.shift)
842  * $cc.call unless message =~ /Max/
843  *
844  * <em>produces:</em>
845  *
846  * Freddie
847  * Herbie
848  * Ron
849  * Max
850  *
851  * This (somewhat contrived) example allows the inner loop to abandon
852  * processing early:
853  *
854  * require "continuation"
855  * callcc {|cont|
856  * for i in 0..4
857  * print "\n#{i}: "
858  * for j in i*5...(i+1)*5
859  * cont.call() if j == 17
860  * printf "%3d", j
861  * end
862  * end
863  * }
864  * puts
865  *
866  * <em>produces:</em>
867  *
868  * 0: 0 1 2 3 4
869  * 1: 5 6 7 8 9
870  * 2: 10 11 12 13 14
871  * 3: 15 16
872  */
873 
874 /*
875  * call-seq:
876  * callcc {|cont| block } -> obj
877  *
878  * Generates a Continuation object, which it passes to
879  * the associated block. You need to <code>require
880  * 'continuation'</code> before using this method. Performing a
881  * <em>cont</em><code>.call</code> will cause the #callcc
882  * to return (as will falling through the end of the block). The
883  * value returned by the #callcc is the value of the
884  * block, or the value passed to <em>cont</em><code>.call</code>. See
885  * class Continuation for more details. Also see
886  * Kernel#throw for an alternative mechanism for
887  * unwinding a call stack.
888  */
889 
890 static VALUE
892 {
893  volatile int called;
894  volatile VALUE val = cont_capture(&called);
895 
896  if (called) {
897  return val;
898  }
899  else {
900  return rb_yield(val);
901  }
902 }
903 
/*
 * Convert a C argc/argv pair into the single VALUE passed through a
 * continuation or fiber switch: nil for no arguments, the argument
 * itself for one, and a new Array for several.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "make_passing_arg(int argc, VALUE *argv)" (matching its
 * call sites below).
 */
static VALUE
{
    switch (argc) {
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}
916 
917 /*
918  * call-seq:
919  * cont.call(args, ...)
920  * cont[args, ...]
921  *
922  * Invokes the continuation. The program continues from the end of the
923  * <code>callcc</code> block. If no arguments are given, the original
924  * <code>callcc</code> returns <code>nil</code>. If one argument is
925  * given, <code>callcc</code> returns it. Otherwise, an array
926  * containing <i>args</i> is returned.
927  *
928  * callcc {|cont| cont.call } #=> nil
929  * callcc {|cont| cont.call 1 } #=> 1
930  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
931  */
932 
933 static VALUE
935 {
937  rb_thread_t *th = GET_THREAD();
938  GetContPtr(contval, cont);
939 
940  if (cont->saved_thread.self != th->self) {
941  rb_raise(rb_eRuntimeError, "continuation called across threads");
942  }
943  if (cont->saved_thread.protect_tag != th->protect_tag) {
944  rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
945  }
946  if (cont->saved_thread.fiber) {
947  rb_fiber_t *fcont;
948  GetFiberPtr(cont->saved_thread.fiber, fcont);
949 
950  if (th->fiber != cont->saved_thread.fiber) {
951  rb_raise(rb_eRuntimeError, "continuation called across fiber");
952  }
953  }
954 
955  cont->argc = argc;
956  cont->value = make_passing_arg(argc, argv);
957 
958  /* restore `tracing' context. see [Feature #4347] */
959  th->trace_arg = cont->saved_thread.trace_arg;
960 
961  cont_restore_0(cont, &contval);
962  return Qnil; /* unreachable */
963 }
964 
965 /*********/
966 /* fiber */
967 /*********/
968 
969 /*
970  * Document-class: Fiber
971  *
972  * Fibers are primitives for implementing light weight cooperative
973  * concurrency in Ruby. Basically they are a means of creating code blocks
974  * that can be paused and resumed, much like threads. The main difference
975  * is that they are never preempted and that the scheduling must be done by
976  * the programmer and not the VM.
977  *
978  * As opposed to other stackless light weight concurrency models, each fiber
979  * comes with a small 4KB stack. This enables the fiber to be paused from deeply
980  * nested function calls within the fiber block.
981  *
982  * When a fiber is created it will not run automatically. Rather it must be
983  * be explicitly asked to run using the <code>Fiber#resume</code> method.
984  * The code running inside the fiber can give up control by calling
985  * <code>Fiber.yield</code> in which case it yields control back to caller
986  * (the caller of the <code>Fiber#resume</code>).
987  *
988  * Upon yielding or termination the Fiber returns the value of the last
989  * executed expression
990  *
991  * For instance:
992  *
993  * fiber = Fiber.new do
994  * Fiber.yield 1
995  * 2
996  * end
997  *
998  * puts fiber.resume
999  * puts fiber.resume
1000  * puts fiber.resume
1001  *
1002  * <em>produces</em>
1003  *
1004  * 1
1005  * 2
1006  * FiberError: dead fiber called
1007  *
1008  * The <code>Fiber#resume</code> method accepts an arbitrary number of
1009  * parameters, if it is the first call to <code>resume</code> then they
1010  * will be passed as block arguments. Otherwise they will be the return
1011  * value of the call to <code>Fiber.yield</code>
1012  *
1013  * Example:
1014  *
1015  * fiber = Fiber.new do |first|
1016  * second = Fiber.yield first + 2
1017  * end
1018  *
1019  * puts fiber.resume 10
1020  * puts fiber.resume 14
1021  * puts fiber.resume 18
1022  *
1023  * <em>produces</em>
1024  *
1025  * 12
1026  * 14
1027  * FiberError: dead fiber called
1028  *
1029  */
1030 
1031 static const rb_data_type_t fiber_data_type = {
1032  "fiber",
1034 };
1035 
1036 static VALUE
1038 {
1039  return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1040 }
1041 
1042 static rb_fiber_t*
1044 {
1045  rb_fiber_t *fib;
1046  rb_thread_t *th = GET_THREAD();
1047 
1048  if (DATA_PTR(fibval) != 0) {
1049  rb_raise(rb_eRuntimeError, "cannot initialize twice");
1050  }
1051 
1053  fib = ALLOC(rb_fiber_t);
1054  memset(fib, 0, sizeof(rb_fiber_t));
1055  fib->cont.self = fibval;
1056  fib->cont.type = FIBER_CONTEXT;
1057  cont_init(&fib->cont, th);
1058  fib->prev = Qnil;
1059  fib->status = CREATED;
1060 
1061  DATA_PTR(fibval) = fib;
1062 
1063  return fib;
1064 }
1065 
1066 static VALUE
1067 fiber_init(VALUE fibval, VALUE proc)
1068 {
1069  rb_fiber_t *fib = fiber_t_alloc(fibval);
1070  rb_context_t *cont = &fib->cont;
1071  rb_thread_t *th = &cont->saved_thread;
1072 
1073  /* initialize cont */
1074  cont->vm_stack = 0;
1075 
1076  th->stack = 0;
1077  th->stack_size = 0;
1078 
1079  fiber_link_join(fib);
1080 
1081  th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
1082  th->stack = ALLOC_N(VALUE, th->stack_size);
1083 
1084  th->cfp = (void *)(th->stack + th->stack_size);
1085  th->cfp--;
1086  th->cfp->pc = 0;
1087  th->cfp->sp = th->stack + 1;
1088 #if VM_DEBUG_BP_CHECK
1089  th->cfp->bp_check = 0;
1090 #endif
1091  th->cfp->ep = th->stack;
1092  *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
1093  th->cfp->self = Qnil;
1094  th->cfp->klass = Qnil;
1095  th->cfp->flag = 0;
1096  th->cfp->iseq = 0;
1097  th->cfp->proc = 0;
1098  th->cfp->block_iseq = 0;
1099  th->cfp->me = 0;
1100  th->tag = 0;
1102 
1103  th->first_proc = proc;
1104 
1105 #if !FIBER_USE_NATIVE
1106  MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
1107 #endif
1108 
1109  return fibval;
1110 }
1111 
1112 /* :nodoc: */
1113 static VALUE
1115 {
1116  return fiber_init(fibval, rb_block_proc());
1117 }
1118 
1119 VALUE
1121 {
1123 }
1124 
/*
 * Pick the fiber control returns to when the current fiber yields or
 * terminates: the fiber that resumed it (clearing the link so it is
 * consumed once), or the thread's root fiber when there is none.
 * Raises FiberError when the current fiber *is* the root fiber, since
 * the root has nowhere to yield to.
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "return_fiber(void)" (called without arguments from
 * rb_fiber_terminate).
 */
static VALUE
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    VALUE prev;
    GetFiberPtr(curr, fib);

    prev = fib->prev;
    if (NIL_P(prev)) {
        const VALUE root_fiber = GET_THREAD()->root_fiber;

        if (root_fiber == curr) {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
        return root_fiber;
    }
    else {
        /* consume the resume link so it is used only once */
        fib->prev = Qnil;
        return prev;
    }
}
1147 
1149 
1150 static void
1152 {
1153  VALUE value = fib->cont.value;
1154  fib->status = TERMINATED;
1155 #if FIBER_USE_NATIVE && !defined(_WIN32)
1156  /* Ruby must not switch to other thread until storing terminated_machine_stack */
1157  terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
1158  terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
1159  fib->context.uc_stack.ss_sp = NULL;
1160  fib->cont.machine_stack = NULL;
1161  fib->cont.machine_stack_size = 0;
1162 #endif
1163  rb_fiber_transfer(return_fiber(), 1, &value);
1164 }
1165 
1166 void
1168 {
1169  rb_thread_t *th = GET_THREAD();
1170  rb_fiber_t *fib;
1171  rb_context_t *cont;
1172  rb_proc_t *proc;
1173  int state;
1174 
1175  GetFiberPtr(th->fiber, fib);
1176  cont = &fib->cont;
1177 
1178  TH_PUSH_TAG(th);
1179  if ((state = EXEC_TAG()) == 0) {
1180  int argc;
1181  VALUE *argv, args;
1182  GetProcPtr(cont->saved_thread.first_proc, proc);
1183  args = cont->value;
1184  argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
1185  cont->value = Qnil;
1186  th->errinfo = Qnil;
1187  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
1188  th->root_svar = Qnil;
1189 
1190  fib->status = RUNNING;
1191  cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
1192  }
1193  TH_POP_TAG();
1194 
1195  if (state) {
1196  if (state == TAG_RAISE || state == TAG_FATAL) {
1198  }
1199  else {
1201  if (!NIL_P(err))
1203  }
1205  }
1206 
1207  rb_fiber_terminate(fib);
1208  rb_bug("rb_fiber_start: unreachable");
1209 }
1210 
1211 static rb_fiber_t *
1213 {
1214  rb_fiber_t *fib;
1215  /* no need to allocate vm stack */
1217  fib->cont.type = ROOT_FIBER_CONTEXT;
1218 #if FIBER_USE_NATIVE
1219 #ifdef _WIN32
1220  fib->fib_handle = ConvertThreadToFiber(0);
1221 #endif
1222 #endif
1223  fib->status = RUNNING;
1224  fib->prev_fiber = fib->next_fiber = fib;
1225 
1226  return fib;
1227 }
1228 
/*
 * Return the currently running fiber of the current thread, lazily
 * creating and installing the root fiber on first use (a thread has no
 * fiber object until fibers are actually used).
 *
 * NOTE(review): the declarator line is missing from this extraction —
 * presumably "rb_fiber_current(void)" (called without arguments from
 * fiber_link_join and return_fiber above).
 */
VALUE
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}
1240 
1241 static VALUE
1243 {
1244  rb_thread_t *th = GET_THREAD();
1245  rb_fiber_t *fib;
1246 
1247  if (th->fiber) {
1248  GetFiberPtr(th->fiber, fib);
1249  cont_save_thread(&fib->cont, th);
1250  }
1251  else {
1252  /* create current fiber */
1253  fib = root_fiber_alloc(th);
1254  th->root_fiber = th->fiber = fib->cont.self;
1255  }
1256 
1257 #if !FIBER_USE_NATIVE
1258  cont_save_machine_stack(th, &fib->cont);
1259 #endif
1260 
1261  if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
1262 #if FIBER_USE_NATIVE
1263  fiber_setcontext(next_fib, fib);
1264 #ifndef _WIN32
1265  if (terminated_machine_stack.ptr) {
1266  if (machine_stack_cache_index < MAX_MAHINE_STACK_CACHE) {
1267  machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
1268  machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
1269  machine_stack_cache_index++;
1270  }
1271  else {
1272  if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
1273  munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
1274  }
1275  else {
1276  rb_bug("terminated fiber resumed");
1277  }
1278  }
1279  terminated_machine_stack.ptr = NULL;
1280  terminated_machine_stack.size = 0;
1281  }
1282 #endif
1283 #endif
1284  /* restored */
1285  GetFiberPtr(th->fiber, fib);
1286  if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
1287  return fib->cont.value;
1288  }
1289 #if !FIBER_USE_NATIVE
1290  else {
1291  return Qundef;
1292  }
1293 #endif
1294 }
1295 
1296 static inline VALUE
1297 fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
1298 {
1299  VALUE value;
1300  rb_fiber_t *fib;
1301  rb_context_t *cont;
1302  rb_thread_t *th = GET_THREAD();
1303 
1304  GetFiberPtr(fibval, fib);
1305  cont = &fib->cont;
1306 
1307  if (th->fiber == fibval) {
1308  /* ignore fiber context switch
1309  * because destination fiber is same as current fiber
1310  */
1311  return make_passing_arg(argc, argv);
1312  }
1313 
1314  if (cont->saved_thread.self != th->self) {
1315  rb_raise(rb_eFiberError, "fiber called across threads");
1316  }
1317  else if (cont->saved_thread.protect_tag != th->protect_tag) {
1318  rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
1319  }
1320  else if (fib->status == TERMINATED) {
1321  value = rb_exc_new2(rb_eFiberError, "dead fiber called");
1322  if (th->fiber != fibval) {
1323  GetFiberPtr(th->fiber, fib);
1324  if (fib->status != TERMINATED) rb_exc_raise(value);
1325  fibval = th->root_fiber;
1326  }
1327  else {
1328  fibval = fib->prev;
1329  if (NIL_P(fibval)) fibval = th->root_fiber;
1330  }
1331  GetFiberPtr(fibval, fib);
1332  cont = &fib->cont;
1333  cont->argc = -1;
1334  cont->value = value;
1335 #if FIBER_USE_NATIVE
1336  {
1337  VALUE oldfibval;
1338  rb_fiber_t *oldfib;
1339  oldfibval = rb_fiber_current();
1340  GetFiberPtr(oldfibval, oldfib);
1341  fiber_setcontext(fib, oldfib);
1342  }
1343 #else
1344  cont_restore_0(cont, &value);
1345 #endif
1346  }
1347 
1348  if (is_resume) {
1349  fib->prev = rb_fiber_current();
1350  }
1351  else {
1352  /* restore `tracing' context. see [Feature #4347] */
1353  th->trace_arg = cont->saved_thread.trace_arg;
1354  }
1355 
1356  cont->argc = argc;
1357  cont->value = make_passing_arg(argc, argv);
1358 
1359  value = fiber_store(fib);
1360 #if !FIBER_USE_NATIVE
1361  if (value == Qundef) {
1362  cont_restore_0(cont, &value);
1363  rb_bug("rb_fiber_resume: unreachable");
1364  }
1365 #endif
1366  RUBY_VM_CHECK_INTS(th);
1367 
1368  return value;
1369 }
1370 
1371 VALUE
1373 {
1374  return fiber_switch(fib, argc, argv, 0);
1375 }
1376 
1377 VALUE
1379 {
1380  rb_fiber_t *fib;
1381  GetFiberPtr(fibval, fib);
1382 
1383  if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
1384  rb_raise(rb_eFiberError, "double resume");
1385  }
1386  if (fib->transfered != 0) {
1387  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
1388  }
1389 
1390  return fiber_switch(fibval, argc, argv, 1);
1391 }
1392 
1393 VALUE
1395 {
1396  return rb_fiber_transfer(return_fiber(), argc, argv);
1397 }
1398 
1399 void
1401 {
1402  rb_thread_t *th;
1403  rb_fiber_t *fib;
1404 
1405  GetThreadPtr(thval, th);
1406  if (th->root_fiber && th->root_fiber != th->fiber) {
1407  GetFiberPtr(th->root_fiber, fib);
1409  }
1410 }
1411 
1412 /*
1413  * call-seq:
1414  * fiber.alive? -> true or false
1415  *
1416  * Returns true if the fiber can still be resumed (or transferred
1417  * to). After finishing execution of the fiber block this method will
1418  * always return false. You need to <code>require 'fiber'</code>
1419  * before using this method.
1420  */
1421 VALUE
1423 {
1424  rb_fiber_t *fib;
1425  GetFiberPtr(fibval, fib);
1426  return fib->status != TERMINATED ? Qtrue : Qfalse;
1427 }
1428 
1429 /*
1430  * call-seq:
1431  * fiber.resume(args, ...) -> obj
1432  *
1433  * Resumes the fiber from the point at which the last <code>Fiber.yield</code>
1434  * was called, or starts running it if it is the first call to
1435  * <code>resume</code>. Arguments passed to resume will be the value of
1436  * the <code>Fiber.yield</code> expression or will be passed as block
1437  * parameters to the fiber's block if this is the first <code>resume</code>.
1438  *
1439  * Alternatively, when resume is called it evaluates to the arguments passed
1440  * to the next <code>Fiber.yield</code> statement inside the fiber's block
1441  * or to the block value if it runs to completion without any
1442  * <code>Fiber.yield</code>
1443  */
1444 static VALUE
1446 {
1447  return rb_fiber_resume(fib, argc, argv);
1448 }
1449 
1450 /*
1451  * call-seq:
1452  * fiber.transfer(args, ...) -> obj
1453  *
1454  * Transfer control to another fiber, resuming it from where it last
1455  * stopped or starting it if it was not resumed before. The calling
1456  * fiber will be suspended much like in a call to
1457  * <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
1458  * before using this method.
1459  *
1460  * The fiber which receives the transfer call treats it much like
1461  * a resume call. Arguments passed to transfer are treated like those
1462  * passed to resume.
1463  *
1464  * You cannot resume a fiber that transferred control to another one.
1465  * This will cause a double resume error. You need to transfer control
1466  * back to this fiber before it can yield and resume.
1467  *
1468  * Example:
1469  *
1470  * fiber1 = Fiber.new do
1471  * puts "In Fiber 1"
1472  * Fiber.yield
1473  * end
1474  *
1475  * fiber2 = Fiber.new do
1476  * puts "In Fiber 2"
1477  * fiber1.transfer
1478  * puts "Never see this message"
1479  * end
1480  *
1481  * fiber3 = Fiber.new do
1482  * puts "In Fiber 3"
1483  * end
1484  *
1485  * fiber2.resume
1486  * fiber3.resume
1487  *
1488  * <em>produces</em>
1489  *
1490  * In Fiber 2
1491  * In Fiber 1
1492  * In Fiber 3
1493  *
1494  */
1495 static VALUE
1497 {
1498  rb_fiber_t *fib;
1499  GetFiberPtr(fibval, fib);
1500  fib->transfered = 1;
1501  return rb_fiber_transfer(fibval, argc, argv);
1502 }
1503 
1504 /*
1505  * call-seq:
1506  * Fiber.yield(args, ...) -> obj
1507  *
1508  * Yields control back to the context that resumed the fiber, passing
1509  * along any arguments that were passed to it. The fiber will resume
1510  * processing at this point when <code>resume</code> is called next.
1511  * Any arguments passed to the next <code>resume</code> will be the
1512  * value that this <code>Fiber.yield</code> expression evaluates to.
1513  */
1514 static VALUE
1516 {
1517  return rb_fiber_yield(argc, argv);
1518 }
1519 
1520 /*
1521  * call-seq:
1522  * Fiber.current() -> fiber
1523  *
1524  * Returns the current fiber. You need to <code>require 'fiber'</code>
1525  * before using this method. If you are not running in the context of
1526  * a fiber this method will return the root fiber.
1527  */
1528 static VALUE
1530 {
1531  return rb_fiber_current();
1532 }
1533 
1534 
1535 
1536 /*
1537  * Document-class: FiberError
1538  *
1539  * Raised when an invalid operation is attempted on a Fiber, in
1540  * particular when attempting to call/resume a dead fiber,
1541  * attempting to yield from the root fiber, or calling a fiber across
1542  * threads.
1543  *
1544  * fiber = Fiber.new{}
1545  * fiber.resume #=> nil
1546  * fiber.resume #=> FiberError: dead fiber called
1547  */
1548 
1549 void
1551 {
1552 #if FIBER_USE_NATIVE
1553  rb_thread_t *th = GET_THREAD();
1554 
1555 #ifdef _WIN32
1556  SYSTEM_INFO info;
1557  GetSystemInfo(&info);
1558  pagesize = info.dwPageSize;
1559 #else /* not WIN32 */
1560  pagesize = sysconf(_SC_PAGESIZE);
1561 #endif
1563 #endif
1564 
1565  rb_cFiber = rb_define_class("Fiber", rb_cObject);
1569  rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
1571 }
1572 
1573 #if defined __GNUC__ && __GNUC__ >= 4
1574 #pragma GCC visibility push(default)
1575 #endif
1576 
1577 void
1579 {
1580  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
1585  rb_define_global_function("callcc", rb_callcc, 0);
1586 }
1587 
1588 void
1590 {
1594 }
1595 
1596 #if defined __GNUC__ && __GNUC__ >= 4
1597 #pragma GCC visibility pop
1598 #endif
void rb_gc(void)
Definition: gc.c:3108
rb_control_frame_t * cfp
Definition: vm_core.h:500
size_t machine_stack_size
Definition: cont.c:109
VALUE * vm_stack
Definition: cont.c:95
VALUE rb_eStandardError
Definition: error.c:509
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:948
rb_vm_t * vm
Definition: vm_core.h:495
VALUE rb_ary_new4(long n, const VALUE *elts)
Definition: array.c:451
#define THREAD_MUST_BE_RUNNING(th)
Definition: cont.c:165
#define GetContPtr(obj, ptr)
Definition: cont.c:155
void rb_bug(const char *fmt,...)
Definition: error.c:290
VALUE * root_lep
Definition: vm_core.h:526
#define ruby_longjmp(env, val)
Definition: eval_intern.h:51
struct rb_vm_protect_tag * protect_tag
Definition: vm_core.h:562
#define rb_gc_mark_locations(start, end)
Definition: gc.c:2346
static VALUE rb_cContinuation
Definition: cont.c:151
int i
Definition: win32ole.c:784
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:916
st_table * local_storage
Definition: vm_core.h:579
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:482
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1497
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:665
VALUE self
Definition: cont.c:92
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:208
#define CLASS_OF(v)
Definition: ruby.h:448
static VALUE cont_capture(volatile int *stat)
Definition: cont.c:460
#define Qtrue
Definition: ruby.h:434
struct rb_vm_struct::@131 default_params
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1016
void rb_fiber_reset_root_local_storage(VALUE thval)
Definition: cont.c:1400
VALUE prev
Definition: cont.c:131
static VALUE rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
Definition: cont.c:1445
static const rb_data_type_t cont_data_type
Definition: cont.c:150
#define GetFiberPtr(obj, ptr)
Definition: cont.c:158
static rb_fiber_t * root_fiber_alloc(rb_thread_t *th)
Definition: cont.c:1212
enum context_type type
Definition: cont.c:91
static void rb_fiber_terminate(rb_fiber_t *fib)
Definition: cont.c:1151
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:108
void st_free_table(st_table *)
Definition: st.c:334
VALUE rb_fiber_yield(int argc, VALUE *argv)
Definition: cont.c:1394
size_t fiber_machine_stack_size
Definition: vm_core.h:403
static VALUE make_passing_arg(int argc, VALUE *argv)
Definition: cont.c:905
int transfered
Definition: cont.c:139
#define VM_ENVVAL_BLOCK_PTR(v)
Definition: vm_core.h:775
#define STACK_UPPER(x, a, b)
Definition: gc.h:74
VALUE rb_fiber_alive_p(VALUE fibval)
Definition: cont.c:1422
static VALUE fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
Definition: cont.c:1297
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1495
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1780
static void cont_save_thread(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:423
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:54
VALUE rb_fiber_current(void)
Definition: cont.c:1230
#define C
Definition: util.c:194
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
static VALUE fiber_init(VALUE fibval, VALUE proc)
Definition: cont.c:1067
#define DATA_PTR(dta)
Definition: ruby.h:985
void rb_gc_mark(VALUE ptr)
Definition: gc.c:2598
static void fiber_link_join(rb_fiber_t *fib)
Definition: cont.c:310
#define TAG_RAISE
Definition: eval_intern.h:140
rb_jmpbuf_t jmpbuf
Definition: cont.c:108
void rb_define_global_function(const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a global function.
Definition: class.c:1526
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, const rb_block_t *blockptr)
Definition: vm.c:712
static const rb_data_type_t fiber_data_type
Definition: cont.c:150
VALUE value
Definition: cont.c:94
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1362
static void cont_free(void *ptr)
Definition: cont.c:217
rb_thread_t saved_thread
Definition: cont.c:107
struct rb_context_struct rb_context_t
static VALUE rb_eFiberError
Definition: cont.c:153
#define TAG_FATAL
Definition: eval_intern.h:142
Win32OLEIDispatch * p
Definition: win32ole.c:786
void rb_exc_raise(VALUE mesg)
Definition: eval.c:527
size_t vm_stack_clen
Definition: cont.c:98
int args
Definition: win32ole.c:785
VALUE * stack
Definition: vm_core.h:498
static size_t fiber_memsize(const void *ptr)
Definition: cont.c:348
#define TH_POP_TAG()
Definition: eval_intern.h:101
enum fiber_status status
Definition: cont.c:132
VALUE rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
Definition: cont.c:1378
static VALUE return_fiber(void)
Definition: cont.c:1126
#define ALLOC_N(type, n)
Definition: ruby.h:1223
#define EXEC_TAG()
Definition: eval_intern.h:113
#define val
VALUE * rb_vm_ep_local_ep(VALUE *ep)
Definition: vm.c:36
rb_iseq_t * block_iseq
Definition: vm_core.h:433
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1426
VALUE rb_eRuntimeError
Definition: error.c:510
size_t fiber_vm_stack_size
Definition: vm_core.h:402
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:473
Definition: cont.c:113
static VALUE rb_fiber_init(VALUE fibval)
Definition: cont.c:1114
static void cont_restore_1(rb_context_t *cont)
Definition: cont.c:706
rb_iseq_t * iseq
Definition: vm_core.h:428
#define NIL_P(v)
Definition: ruby.h:446
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:499
static VALUE fiber_alloc(VALUE klass)
Definition: cont.c:1037
VALUE tag
Definition: vm_core.h:469
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:53
VALUE rb_fiber_new(VALUE(*func)(ANYARGS), VALUE obj)
Definition: cont.c:1120
int argc
Definition: ruby.c:130
#define Qfalse
Definition: ruby.h:433
void ruby_Init_Fiber_as_Coroutine(void)
Definition: cont.c:1589
void rb_fiber_start(void)
Definition: cont.c:1167
#define ALLOCA_N(type, n)
Definition: ruby.h:1227
static void cont_mark(void *ptr)
Definition: cont.c:170
static VALUE rb_callcc(VALUE self)
Definition: cont.c:891
#define MEMCPY(p1, p2, type, n)
Definition: ruby.h:1242
static VALUE rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
Definition: cont.c:1496
int err
Definition: win32.c:87
VALUE * machine_stack_start
Definition: vm_core.h:588
static VALUE rb_fiber_s_current(VALUE klass)
Definition: cont.c:1529
#define ALLOC(type)
Definition: ruby.h:1224
Definition: cont.c:114
size_t vm_stack_slen
Definition: cont.c:97
VALUE rb_yield(VALUE)
Definition: vm_eval.c:934
#define REALLOC_N(var, type, n)
Definition: ruby.h:1225
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:82
void rb_vm_stack_to_heap(rb_thread_t *th)
Definition: vm.c:553
static rb_context_t * cont_new(VALUE klass)
Definition: cont.c:446
void ruby_xfree(void *x)
Definition: gc.c:3651
VALUE * machine_stack
Definition: cont.c:100
static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval)
Definition: cont.c:934
void Init_Cont(void)
Definition: cont.c:1550
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
static VALUE rb_cFiber
Definition: cont.c:152
#define Qnil
Definition: ruby.h:435
VALUE rb_exc_new2(VALUE etype, const char *s)
Definition: error.c:542
unsigned long VALUE
Definition: ruby.h:104
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:81
rb_context_t cont
Definition: cont.c:130
RUBY_JMP_BUF rb_jmpbuf_t
Definition: vm_core.h:462
static VALUE fiber_store(rb_fiber_t *next_fib)
Definition: cont.c:1242
struct rb_fiber_struct rb_fiber_t
VALUE first_proc
Definition: vm_core.h:583
static void fiber_mark(void *ptr)
Definition: cont.c:298
void rb_thread_mark(void *th)
Definition: vm.c:1788
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:94
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:11
st_table * st_init_numtable(void)
Definition: st.c:272
static void cont_init(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:438
static VALUE rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
Definition: cont.c:1515
#define FIBER_USE_NATIVE
Definition: cont.c:68
#define ruby_setjmp(env)
Definition: eval_intern.h:50
enum rb_thread_status status
Definition: vm_core.h:531
#define RUBY_FREE_UNLESS_NULL(ptr)
Definition: gc.h:61
struct rb_fiber_struct * next_fiber
Definition: cont.c:134
VALUE * machine_stack_end
Definition: vm_core.h:589
static void cont_restore_thread(rb_context_t *cont)
Definition: cont.c:502
int size
Definition: encoding.c:52
#define f
int mark_stack_len
Definition: vm_core.h:597
VALUE root_svar
Definition: vm_core.h:527
rb_block_t block
Definition: vm_core.h:669
VALUE rb_block_proc(void)
Definition: proc.c:479
size_t st_memsize(const st_table *)
Definition: st.c:342
#define ANYARGS
Definition: defines.h:57
const rb_method_entry_t * me
Definition: vm_core.h:435
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:56
#define RARRAY_PTR(a)
Definition: ruby.h:904
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:55
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:363
#define STACK_PAD_SIZE
VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
Definition: cont.c:1372
VALUE rb_proc_new(VALUE(*)(ANYARGS), VALUE)
Definition: proc.c:2018
VALUE root_fiber
Definition: vm_core.h:608
static rb_fiber_t * fiber_t_alloc(VALUE fibval)
Definition: cont.c:1043
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1019
struct rb_fiber_struct * prev_fiber
Definition: cont.c:133
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:452
rb_jmpbuf_t root_jmpbuf
Definition: vm_core.h:609
static void cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
Definition: cont.c:774
void ruby_Init_Continuation_body(void)
Definition: cont.c:1578
size_t stack_size
Definition: vm_core.h:499
static void fiber_link_remove(rb_fiber_t *fib)
Definition: cont.c:324
struct rb_vm_tag * tag
Definition: vm_core.h:561
context_type
Definition: cont.c:84
static void fiber_free(void *ptr)
Definition: cont.c:331
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:907
static size_t cont_memsize(const void *ptr)
Definition: cont.c:270
#define stat(path, st)
Definition: win32.h:193
static void cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
Definition: cont.c:374
size_t machine_stack_maxsize
Definition: vm_core.h:590
fiber_status
Definition: cont.c:112
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)))
#define NULL
Definition: _sdbm.c:103
#define Qundef
Definition: ruby.h:436
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:883
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1344
VALUE * machine_stack_src
Definition: cont.c:101
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:604
char ** argv
Definition: ruby.c:131
VALUE * ep
Definition: vm_core.h:445
NOINLINE(static VALUE cont_capture(volatile int *stat))