
/cont.c

https://github.com/diabolo/ruby
Possible License(s): GPL-2.0, BSD-3-Clause
/**********************************************************************

  cont.c -

  $Author$
  created at: Thu May 23 09:03:43 2007

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/
#include "ruby/ruby.h"
#include "vm_core.h"
#include "gc.h"
#include "eval_intern.h"
#if ((defined(_WIN32) && _WIN32_WINNT >= 0x0400) || defined(HAVE_SETCONTEXT)) && !defined(__NetBSD__) && !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 1

/* FIBER_USE_NATIVE enables a Fiber performance improvement using system-
 * dependent methods such as make/setcontext on POSIX systems or the
 * CreateFiber() API on Windows.
 * This hack makes Fiber context switching faster (2x or more).
 * However, it decreases the maximum number of Fibers.  For example, on a
 * 32-bit POSIX OS, only ten or twenty thousand Fibers can be created.
 *
 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
 * in Proc. of the 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
 */

/* In our experience, NetBSD doesn't support using setcontext() and pthreads
 * simultaneously.  This is because pthread_self(), TLS and other information
 * are derived from the stack pointer (the higher bits of the stack pointer).
 * TODO: check this constraint in configure.
 */
#endif
#ifdef FIBER_USE_NATIVE
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#include <ucontext.h>
#endif

#define PAGE_SIZE (pagesize)
#define PAGE_MASK (~(PAGE_SIZE - 1))
static long pagesize;

#define FIBER_MACHINE_STACK_ALLOCATION_SIZE (0x10000 / sizeof(VALUE))
#endif

#define CAPTURE_JUST_VALID_VM_STACK 1
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};

typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;
    int argc;
    VALUE value;
    VALUE *vm_stack;
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;  /* length of stack (head of th->stack) */
    size_t vm_stack_clen;  /* length of control frames (tail of th->stack) */
#endif
    VALUE *machine_stack;
    VALUE *machine_stack_src;
#ifdef __ia64
    VALUE *machine_register_stack;
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;
    rb_jmpbuf_t jmpbuf;
    size_t machine_stack_size;
} rb_context_t;

enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};
#if defined(FIBER_USE_NATIVE) && !defined(_WIN32)
#define MAX_MACHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;
    size_t size;
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif
typedef struct rb_fiber_struct {
    rb_context_t cont;
    VALUE prev;
    enum fiber_status status;

    struct rb_fiber_struct *prev_fiber;
    struct rb_fiber_struct *next_fiber;

#ifdef FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;
#else
    ucontext_t context;
#endif
#endif
} rb_fiber_t;

static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

#define GetContPtr(obj, ptr) \
    TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, ptr)

#define GetFiberPtr(obj, ptr) do {\
    TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, ptr); \
    if (!ptr) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while(0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

void rb_thread_mark(rb_thread_t *th);

#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!th->tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        rb_gc_mark(cont->value);
        rb_thread_mark(&cont->saved_thread);

        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->saved_thread.stack_size);
#endif
        }

        if (cont->machine_stack) {
            if (cont->type == CONTINUATION_CONTEXT) {
                /* cont */
                rb_gc_mark_locations(cont->machine_stack,
                                     cont->machine_stack + cont->machine_stack_size);
            }
            else {
                /* fiber */
                rb_thread_t *th;
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                GetThreadPtr(cont->saved_thread.self, th);
                if ((th->fiber != cont->self) && fib->status == RUNNING) {
                    rb_gc_mark_locations(cont->machine_stack,
                                         cont->machine_stack + cont->machine_stack_size);
                }
            }
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            rb_gc_mark_locations(cont->machine_register_stack,
                                 cont->machine_register_stack + cont->machine_register_stack_size);
        }
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
static void
cont_free(void *ptr)
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
#ifdef FIBER_USE_NATIVE
        if (cont->type == CONTINUATION_CONTEXT) {
            /* cont */
            RUBY_FREE_UNLESS_NULL(cont->machine_stack);
        }
        else {
            /* fiber */
#ifdef _WIN32
            if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
                /* don't delete root fiber handle */
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                if (fib->fib_handle) {
                    DeleteFiber(fib->fib_handle);
                }
            }
#else /* not WIN32 */
            if (GET_THREAD()->fiber != cont->self) {
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                if (fib->context.uc_stack.ss_sp) {
                    if (cont->type == ROOT_FIBER_CONTEXT) {
                        rb_bug("Illegal root fiber parameter");
                    }
                    munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
                }
            }
            else {
                /* It may reach here during finalization. */
                /* TODO: examine whether this is a bug. */
                /* rb_bug("cont_free: release self"); */
            }
#endif
        }
#else /* not FIBER_USE_NATIVE */
        RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#endif
#ifdef __ia64
        RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
        RUBY_FREE_UNLESS_NULL(cont->vm_stack);

        /* free rb_cont_t or rb_fiber_t */
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}
static size_t
cont_memsize(const void *ptr)
{
    const rb_context_t *cont = ptr;
    size_t size = 0;
    if (cont) {
        size = sizeof(*cont);
        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
#else
            size_t n = cont->saved_thread.stack_size;
#endif
            size += n * sizeof(*cont->vm_stack);
        }

        if (cont->machine_stack) {
            size += cont->machine_stack_size * sizeof(*cont->machine_stack);
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
        }
#endif
    }
    return size;
}
static void
fiber_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        rb_gc_mark(fib->prev);
        cont_mark(&fib->cont);
    }
    RUBY_MARK_LEAVE("cont");
}
static void
fiber_link_join(rb_fiber_t *fib)
{
    VALUE current_fibval = rb_fiber_current();
    rb_fiber_t *current_fib;
    GetFiberPtr(current_fibval, current_fib);

    /* join fiber link */
    fib->next_fiber = current_fib->next_fiber;
    fib->prev_fiber = current_fib;
    current_fib->next_fiber->prev_fiber = fib;
    current_fib->next_fiber = fib;
}

static void
fiber_link_remove(rb_fiber_t *fib)
{
    fib->prev_fiber->next_fiber = fib->next_fiber;
    fib->next_fiber->prev_fiber = fib->prev_fiber;
}
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fib = ptr;
    size_t size = 0;
    if (ptr) {
        size = sizeof(*fib);
        if (fib->cont.type != ROOT_FIBER_CONTEXT) {
            size += st_memsize(fib->cont.saved_thread.local_storage);
        }
        size += cont_memsize(&fib->cont);
    }
    return size;
}
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;
    rb_thread_t *sth = &cont->saved_thread;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif

    sth->machine_stack_start = sth->machine_stack_end = 0;
#ifdef __ia64
    sth->machine_register_stack_start = sth->machine_register_stack_end = 0;
#endif
}
static const rb_data_type_t cont_data_type = {
    "continuation",
    cont_mark, cont_free, cont_memsize,
};

static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont->saved_thread = *th;
    cont->saved_thread.local_storage = 0;
}
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}

void rb_vm_stack_to_heap(rb_thread_t *th);
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        *stat = 0;
        return cont->self;
    }
}
static void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
}
#ifdef FIBER_USE_NATIVE
#ifdef _WIN32
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine_stack_start = (void*)(((VALUE)ptr & PAGE_MASK) + STACK_UPPER(&ptr, 0, PAGE_SIZE));
}

static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
#else
static VALUE*
fiber_machine_stack_alloc(size_t size)
{
    VALUE *ptr;

    if (machine_stack_cache_index > 0) {
        if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
            ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
            machine_stack_cache_index--;
            machine_stack_cache[machine_stack_cache_index].ptr = NULL;
            machine_stack_cache[machine_stack_cache_index].size = 0;
        }
        else {
            /* TODO handle multiple machine stack size */
            rb_bug("machine_stack_cache size is not canonicalized");
        }
    }
    else {
        STACK_GROW_DIR_DETECTION;
        ptr = (VALUE*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
        if (ptr == (VALUE*)(SIGNED_VALUE)-1) {
            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
        }
        if (mprotect(ptr + STACK_DIR_UPPER((size - PAGE_SIZE) / sizeof(VALUE), 0),
                     PAGE_SIZE, PROT_READ | PROT_WRITE) < 0) {
            rb_raise(rb_eFiberError, "mprotect failed");
        }
    }
    return ptr;
}
#endif
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
        /* try to release unnecessary fibers & retry to create */
        rb_gc();
        fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
        if (!fib->fib_handle) {
            rb_raise(rb_eFiberError, "can't create fiber");
        }
    }
#else /* not WIN32 */
    ucontext_t *context = &fib->context;
    VALUE *ptr;
    STACK_GROW_DIR_DETECTION;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    sth->machine_stack_start = ptr + STACK_DIR_UPPER(0, size / sizeof(VALUE));
#endif

    sth->machine_stack_maxsize = size;
#ifdef __ia64
    sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}
NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));

static void
fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
{
    rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;

    if (newfib->status != RUNNING) {
        fiber_initialize_machine_stack_context(newfib, FIBER_MACHINE_STACK_ALLOCATION_SIZE * sizeof(VALUE));
    }

    /* restore thread context */
    cont_restore_thread(&newfib->cont);
    th->machine_stack_maxsize = sth->machine_stack_maxsize;
    if (sth->machine_stack_end && (newfib != oldfib)) {
        rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
    }

    /* save oldfib's machine stack */
    if (oldfib->status != TERMINATED) {
        SET_MACHINE_STACK_END(&th->machine_stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end;
            oldfib->cont.machine_stack = th->machine_stack_end;
        }
        else {
            oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start;
            oldfib->cont.machine_stack = th->machine_stack_start;
        }
    }
    /* exchange machine_stack_start between oldfib and newfib */
    oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start;
    th->machine_stack_start = sth->machine_stack_start;
    /* oldfib->machine_stack_end should be NULL */
    oldfib->cont.saved_thread.machine_stack_end = 0;
#ifndef _WIN32
    if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
        rb_bug("non_root_fiber->context.uc_stack.ss_sp should not be NULL");
    }
#endif

    /* swap machine context */
#ifdef _WIN32
    SwitchToFiber(newfib->fib_handle);
#else
    swapcontext(&oldfib->context, &newfib->context);
#endif
}
#endif
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
               VALUE, cont->machine_stack_size);
    }
#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif
    ruby_longjmp(cont->jmpbuf, 1);
}
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

#ifdef __ia64
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{/* the above lines make cc-mode.el confused so much */}
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src + cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                (void)sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                (void)sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
#ifdef __ia64
#define cont_restore_0(cont, vp) register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
#endif
/*
 *  Document-class: Continuation
 *
 *  Continuation objects are generated by
 *  <code>Kernel#callcc</code>. They hold a return address and execution
 *  context, allowing a nonlocal return to the end of the
 *  <code>callcc</code> block from anywhere within a program.
 *  Continuations are somewhat analogous to a structured version of C's
 *  <code>setjmp/longjmp</code> (although they contain more state, so
 *  you might consider them closer to threads).
 *
 *  For instance:
 *
 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *     callcc{|$cc|}
 *     puts(message = arr.shift)
 *     $cc.call unless message =~ /Max/
 *
 *  <em>produces:</em>
 *
 *     Freddie
 *     Herbie
 *     Ron
 *     Max
 *
 *  This (somewhat contrived) example allows the inner loop to abandon
 *  processing early:
 *
 *     callcc {|cont|
 *       for i in 0..4
 *         print "\n#{i}: "
 *         for j in i*5...(i+1)*5
 *           cont.call() if j == 17
 *           printf "%3d", j
 *         end
 *       end
 *     }
 *     print "\n"
 *
 *  <em>produces:</em>
 *
 *     0:   0   1   2   3   4
 *     1:   5   6   7   8   9
 *     2:  10  11  12  13  14
 *     3:  15  16
 */
/*
 *  call-seq:
 *     callcc {|cont| block }   ->  obj
 *
 *  Generates a <code>Continuation</code> object, which it passes to the
 *  associated block. Performing a <em>cont</em><code>.call</code> will
 *  cause the <code>callcc</code> to return (as will falling through the
 *  end of the block). The value returned by the <code>callcc</code> is
 *  the value of the block, or the value passed to
 *  <em>cont</em><code>.call</code>. See class <code>Continuation</code>
 *  for more details. Also see <code>Kernel::throw</code> for
 *  an alternative mechanism for unwinding a call stack.
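 *
 *  For example, a minimal sketch of both return paths:
 *
 *     callcc {|cont| cont.call(123) }   #=> 123
 *     callcc {|cont| 456 }              #=> 456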
 */
static VALUE
rb_callcc(VALUE self)
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        return val;
    }
    else {
        return rb_yield(val);
    }
}
static VALUE
make_passing_arg(int argc, VALUE *argv)
{
    switch (argc) {
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}
/*
 *  call-seq:
 *     cont.call(args, ...)
 *     cont[args, ...]
 *
 *  Invokes the continuation. The program continues from the end of the
 *  <code>callcc</code> block. If no arguments are given, the original
 *  <code>callcc</code> returns <code>nil</code>. If one argument is
 *  given, <code>callcc</code> returns it. Otherwise, an array
 *  containing <i>args</i> is returned.
 *
 *     callcc {|cont| cont.call }           #=> nil
 *     callcc {|cont| cont.call 1 }         #=> 1
 *     callcc {|cont| cont.call 1, 2, 3 }   #=> [1, 2, 3]
 */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
        rb_fiber_t *fcont;
        GetFiberPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}
/*********/
/* fiber */
/*********/

/*
 *  Document-class: Fiber
 *
 *  Fibers are primitives for implementing lightweight cooperative
 *  concurrency in Ruby. Basically, they are a means of creating code blocks
 *  that can be paused and resumed, much like threads. The main difference
 *  is that they are never preempted and that the scheduling must be done by
 *  the programmer and not the VM.
 *
 *  As opposed to other stackless lightweight concurrency models, each fiber
 *  comes with a small 4KB stack. This enables the fiber to be paused from deeply
 *  nested function calls within the fiber block.
 *
 *  When a fiber is created it will not run automatically. Rather it must
 *  be explicitly asked to run using the <code>Fiber#resume</code> method.
 *  The code running inside the fiber can give up control by calling
 *  <code>Fiber.yield</code>, in which case it yields control back to the
 *  caller (the caller of the <code>Fiber#resume</code>).
 *
 *  Upon yielding or termination the Fiber returns the value of the last
 *  executed expression.
 *
 *  For instance:
 *
 *     fiber = Fiber.new do
 *       Fiber.yield 1
 *       2
 *     end
 *
 *     puts fiber.resume
 *     puts fiber.resume
 *     puts fiber.resume
 *
 *  <em>produces</em>
 *
 *     1
 *     2
 *     FiberError: dead fiber called
 *
 *  The <code>Fiber#resume</code> method accepts an arbitrary number of
 *  parameters. If it is the first call to <code>resume</code> then they
 *  will be passed as block arguments. Otherwise they will be the return
 *  value of the call to <code>Fiber.yield</code>.
 *
 *  Example:
 *
 *     fiber = Fiber.new do |first|
 *       second = Fiber.yield first + 2
 *     end
 *
 *     puts fiber.resume 10
 *     puts fiber.resume 14
 *     puts fiber.resume 18
 *
 *  <em>produces</em>
 *
 *     12
 *     14
 *     FiberError: dead fiber called
 *
 */
#define FIBER_VM_STACK_SIZE (4 * 1024)

static const rb_data_type_t fiber_data_type = {
    "fiber",
    fiber_mark, fiber_free, fiber_memsize,
};

static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    /* cont->machine_stack, th->machine_stack_start and th->machine_stack_end
     * should be NULL, because GC may run during th->stack allocation. */
    th->machine_stack_start = th->machine_stack_end = 0;

    th->stack_size = FIBER_VM_STACK_SIZE;
    th->stack = ALLOC_N(VALUE, th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

#ifndef FIBER_USE_NATIVE
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
/* :nodoc: */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}

VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}
static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    GetFiberPtr(curr, fib);

    if (fib->prev == Qnil) {
        rb_thread_t *th = GET_THREAD();

        if (th->root_fiber != curr) {
            return th->root_fiber;
        }
        else {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
    }
    else {
        VALUE prev = fib->prev;
        fib->prev = Qnil;
        return prev;
    }
}
VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);

static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if defined(FIBER_USE_NATIVE) && !defined(_WIN32)
    /* Ruby must not switch to another thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        VALUE *argv, args;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->local_lfp = proc->block.lfp;
        th->local_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        if (state == TAG_RAISE) {
            th->thrown_errinfo = th->errinfo;
        }
        else {
            th->thrown_errinfo =
                rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;

    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#ifdef FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
    fib->status = RUNNING;
#endif
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}
VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        fib->cont.saved_thread = *th;
    }
    else {
        /* create current fiber */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

#ifndef FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);

    if (ruby_setjmp(fib->cont.jmpbuf)) {
#else /* FIBER_USE_NATIVE */
    {
        fiber_setcontext(next_fib, fib);
#ifndef _WIN32
        if (terminated_machine_stack.ptr) {
            if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
                machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
                machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
                machine_stack_cache_index++;
            }
            else {
                if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
                    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
                }
                else {
                    rb_bug("terminated fiber resumed");
                }
            }
            terminated_machine_stack.ptr = NULL;
            terminated_machine_stack.size = 0;
        }
#endif
#endif
        /* restored */
        GetFiberPtr(th->fiber, fib);
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
#ifndef FIBER_USE_NATIVE
    else {
        return Qundef;
    }
#endif
}
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        if (th->fiber != fibval) {
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;
        cont->value = value;
#ifdef FIBER_USE_NATIVE
        {
            VALUE oldfibval;
            rb_fiber_t *oldfib;
            oldfibval = rb_fiber_current();
            GetFiberPtr(oldfibval, oldfib);
            fiber_setcontext(fib, oldfib);
        }
#else
        cont_restore_0(cont, &value);
#endif
    }

    if (is_resume) {
        fib->prev = rb_fiber_current();
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#ifndef FIBER_USE_NATIVE
    if (value == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS();

    return value;
}
VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
    return fiber_switch(fib, argc, argv, 0);
}

VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);

    if (fib->prev != Qnil) {
        rb_raise(rb_eFiberError, "double resume");
    }

    return fiber_switch(fibval, argc, argv, 1);
}

VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
    return rb_fiber_transfer(return_fiber(), argc, argv);
}
/*
 *  call-seq:
 *     fiber.alive? -> true or false
 *
 *  Returns true if the fiber can still be resumed (or transferred to).
 *  After finishing execution of the fiber block this method will always
 *  return false.
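 *
 *  For instance, one possible lifecycle:
 *
 *     fiber = Fiber.new { Fiber.yield }
 *     fiber.alive?   #=> true
 *     fiber.resume   # runs up to the Fiber.yield
 *     fiber.alive?   #=> true
 *     fiber.resume   # runs the rest of the block
 *     fiber.alive?   #=> false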
 */
VALUE
rb_fiber_alive_p(VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    return fib->status != TERMINATED ? Qtrue : Qfalse;
}
/*
 *  call-seq:
 *     fiber.resume(args, ...) -> obj
 *
 *  Resumes the fiber from the point at which the last <code>Fiber.yield</code>
 *  was called, or starts running it if it is the first call to
 *  <code>resume</code>. Arguments passed to resume will be the value of
 *  the <code>Fiber.yield</code> expression or will be passed as block
 *  parameters to the fiber's block if this is the first <code>resume</code>.
 *
 *  Alternatively, when resume is called it evaluates to the arguments passed
 *  to the next <code>Fiber.yield</code> statement inside the fiber's block
 *  or to the block value if it runs to completion without any
 *  <code>Fiber.yield</code>.
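 *
 *  For instance, a minimal sketch of both cases:
 *
 *     fiber = Fiber.new do |x|
 *       Fiber.yield x * 2
 *       :done
 *     end
 *
 *     fiber.resume(10)   #=> 20     (10 was passed as the block argument)
 *     fiber.resume       #=> :done  (the block ran to completion)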
 */
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}
/*
 *  call-seq:
 *     fiber.transfer(args, ...) -> obj
 *
 *  Transfer control to another fiber, resuming it from where it last
 *  stopped or starting it if it was not resumed before. The calling
 *  fiber will be suspended much like in a call to <code>Fiber.yield</code>.
 *
 *  The fiber which receives the transfer call treats it much like
 *  a resume call. Arguments passed to transfer are treated like those
 *  passed to resume.
 *
 *  You cannot resume a fiber that transferred control to another one.
 *  This will cause a double resume error. You need to transfer control
 *  back to this fiber before it can yield and resume.
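 *
 *  For instance, one way two fibers can hand control back and forth
 *  (when the last fiber terminates, control returns to the root fiber):
 *
 *     require 'fiber'
 *
 *     ping = pong = nil
 *     ping = Fiber.new { 3.times { print "ping "; pong.transfer } }
 *     pong = Fiber.new { 3.times { print "pong "; ping.transfer } }
 *     ping.transfer
 *
 *  <em>produces</em>
 *
 *     ping pong ping pong ping pong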
 */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_transfer(fib, argc, argv);
}
/*
 *  call-seq:
 *     Fiber.yield(args, ...) -> obj
 *
 *  Yields control back to the context that resumed the fiber, passing
 *  along any arguments that were passed to it. The fiber will resume
 *  processing at this point when <code>resume</code> is called next.
 *  Any arguments passed to the next <code>resume</code> will be the
 *  value that this <code>Fiber.yield</code> expression evaluates to.
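 *
 *  For instance, a minimal sketch of values flowing in both directions:
 *
 *     fiber = Fiber.new do |x|
 *       loop { x = Fiber.yield(x + 1) }
 *     end
 *
 *     fiber.resume(1)    #=> 2   (yield passes x + 1 out)
 *     fiber.resume(10)   #=> 11  (the yield evaluated to 10)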
 */
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}
/*
 *  call-seq:
 *     Fiber.current() -> fiber
 *
 *  Returns the current fiber. You need to <code>require 'fiber'</code>
 *  before using this method. If you are not running in the context of
 *  a fiber this method will return the root fiber.
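 *
 *  For instance:
 *
 *     require 'fiber'
 *
 *     fiber = Fiber.new { Fiber.current }
 *     fiber.resume.equal?(fiber)   #=> true
 *     Fiber.current                #=> the root fiber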
 */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
/*
 *  Document-class: FiberError
 *
 *  Raised when an invalid operation is attempted on a Fiber, in
 *  particular when attempting to call/resume a dead fiber,
 *  attempting to yield from the root fiber, or calling a fiber across
 *  threads.
 *
 *     fiber = Fiber.new{}
 *     fiber.resume #=> nil
 *     fiber.resume #=> FiberError: dead fiber called
 */
void
Init_Cont(void)
{
#ifdef FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}

void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}

void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}