Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Newer
Older
100644 787 lines (674 sloc) 18.771 kB
9c1d230 committing experimental branch content
Laurent Sansonetti authored
1 /**********************************************************************
2
3 cont.c -
4
5 $Author: matz $
6 created at: Thu May 23 09:03:43 2007
7
8 Copyright (C) 2007 Koichi Sasada
9
10 **********************************************************************/
11
d0898dd include/ruby/macruby.h -> macruby_internal.h
Laurent Sansonetti authored
12 #include "macruby_internal.h"
9c1d230 committing experimental branch content
Laurent Sansonetti authored
13
/* Discriminates the three kinds of saved execution context stored in an
 * rb_context_t: full continuations, ordinary fibers, and the implicitly
 * created root fiber of a thread. */
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2,
};
19
/* Saved execution context.  Used both for Continuation objects and for
 * Fibers; `type' tells them apart. */
typedef struct rb_context_struct {
    VALUE self;                 /* the wrapping Ruby object */
    VALUE value;                /* value passed through call/resume/yield */
    VALUE *vm_stack;            /* heap copy of the VM stack (continuations) */
    VALUE *machine_stack;       /* heap copy of the C (machine) stack */
    VALUE *machine_stack_src;   /* original C stack address to restore into */
#ifdef __ia64
    VALUE *machine_register_stack;      /* copy of the register backing store */
    VALUE *machine_register_stack_src;  /* its original address */
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;   /* full snapshot of the owning thread */
    rb_jmpbuf_t jmpbuf;         /* jump target used to re-enter this context */
    int machine_stack_size;     /* element count of machine_stack */
    VALUE prev;                 /* fiber that resumed us (Qnil if none) */
    int alive;                  /* Qfalse once a fiber's block has finished */
    enum context_type type;
} rb_context_t;
38
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Extract the rb_context_t pointer wrapped by a Continuation/Fiber object. */
#define GetContPtr(obj, ptr) \
  Data_Get_Struct(obj, rb_context_t, ptr)

/* NOINLINE: presumably so cont_capture gets its own stack frame, keeping the
 * setjmp'd state consistent with the machine stack it captures -- TODO confirm. */
NOINLINE(static VALUE cont_capture(volatile int *stat));
47
#if !WITH_OBJC
void rb_thread_mark(rb_thread_t *th);

/*
 * GC mark callback for a Continuation/Fiber object.  Marks the captured
 * value, the previous-fiber reference, the saved thread snapshot, and the
 * heap copies of the VM stack and machine stack (plus the ia64 register
 * backing store), so objects referenced only from a suspended context
 * stay alive.  Compiled out under WITH_OBJC (cont_mark becomes NULL).
 */
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        rb_gc_mark(cont->value);
        rb_gc_mark(cont->prev);
        rb_thread_mark(&cont->saved_thread);

        if (cont->vm_stack) {
            rb_gc_mark_locations(cont->vm_stack,
                cont->vm_stack + cont->saved_thread.stack_size);
        }

        if (cont->machine_stack) {
            rb_gc_mark_locations(cont->machine_stack,
                cont->machine_stack + cont->machine_stack_size);
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            rb_gc_mark_locations(cont->machine_register_stack,
                cont->machine_register_stack + cont->machine_register_stack_size);
        }
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
#else /* !WITH_OBJC */
# define cont_mark (NULL)
#endif
82
/*
 * GC free callback.  Releases every heap buffer owned by the context:
 * the saved thread's VM stack, the machine-stack copy (and, on ia64, the
 * register backing store) and the captured vm_stack.  A fiber context
 * additionally owns its thread-local storage table.
 */
static void
cont_free(void *ptr)
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
        RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#ifdef __ia64
        RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
        RUBY_FREE_UNLESS_NULL(cont->vm_stack);

        if (cont->type == FIBER_CONTEXT) {
            /* only fibers own their local_storage table (see fiber_new) */
            st_free_table(cont->saved_thread.local_storage);
        }

        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}
104
/*
 * Snapshots the live C (machine) stack of `th' into `cont'.
 *
 * The live extent is derived from machine_stack_start/end; which bound is
 * larger reveals the stack's growth direction.  The region is copied into
 * a heap buffer that is reallocated on subsequent captures.  On ia64 the
 * register backing store is saved the same way.  Finally the saved
 * thread's own stack bounds are zeroed so the snapshot is not mistaken
 * for a live stack.  Entire body compiled out under WITH_OBJC.
 */
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
#if !WITH_OBJC
    int size;
    rb_thread_t *sth = &cont->saved_thread;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
        /* stack grows downward: end is the low address */
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        /* stack grows upward: start is the low address */
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        GC_WB(&cont->machine_stack, ALLOC_N(VALUE, size));
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        GC_WB(&cont->machine_register_stack, ALLOC_N(VALUE, size));
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif

    /* clear the snapshot's own bounds; they describe the live thread only */
    sth->machine_stack_start = sth->machine_stack_end = 0;
#ifdef __ia64
    sth->machine_register_stack_start = sth->machine_register_stack_end = 0;
#endif
#endif
}
156
157 static rb_context_t *
158 cont_new(VALUE klass)
159 {
160 rb_context_t *cont;
161 volatile VALUE contval;
162 rb_thread_t *th = GET_THREAD();
163
164 contval = Data_Make_Struct(klass, rb_context_t,
165 cont_mark, cont_free, cont);
166
167 cont->self = contval;
168 cont->alive = Qtrue;
169
170 /* save thread context */
171 cont->saved_thread = *th;
172
173 return cont;
174 }
175
void vm_stack_to_heap(rb_thread_t *th);

/*
 * Captures the current execution state into a new Continuation object.
 *
 * Returns twice, setjmp-style:
 *   - on initial capture, *stat is set to 0 and the Continuation object
 *     itself is returned;
 *   - when the continuation is later invoked, control re-enters here via
 *     ruby_longjmp, *stat is set to 1, and the value that was passed to
 *     Continuation#call is returned (the stored value is reset to Qnil).
 */
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

    /* copy the VM stack into the context's own heap buffer */
    GC_WB(&cont->vm_stack, ALLOC_N(VALUE, th->stack_size));
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* re-entered via Continuation#call */
        VALUE value;

        value = cont->value;
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* initial capture */
        *stat = 0;
        return cont->self;
    }
}
209
NORETURN(static void cont_restore_1(rb_context_t *));

/*
 * Second half of a context restore: copies the saved thread state back
 * into the running thread, restores the saved machine stack, and jumps
 * into the saved jump buffer.  Must only run once the C stack has been
 * grown past the region about to be overwritten (see cont_restore_0).
 * Never returns.
 */
static void
cont_restore_1(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation: re-enter whichever fiber was current at capture
         * time and replay the captured VM stack over its stack */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_context_t *fcont;
            GetContPtr(fib, fcont);
            th->stack_size = fcont->saved_thread.stack_size;
            th->stack = fcont->saved_thread.stack;
        }
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
    }
    else {
        /* fiber: adopt the fiber's own VM stack and local storage */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->trap_tag = sth->trap_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH: refresh the SEH frame recorded in the
         * saved jump buffer with the current frame before jumping */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
                VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
276
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

#ifdef __ia64
/* ia64 maintains a second stack: the register backing store.  The C()/E()
 * macros declare banks of volatile ints whose (never-executed, guarded by
 * rb_dummy_false) assignments pressure the compiler into using many
 * registers.  register_stack_extend recurses until the current backing
 * store pointer (bsp) has advanced past the saved register-stack region --
 * mirroring what cont_restore_0 does for the ordinary C stack -- then
 * completes the restore.  Never returns. */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_1(cont);
}
#undef C
#undef E
#endif
310
/*
 * First half of a context restore.  Before the saved machine stack can be
 * copied back (cont_restore_1), the current C stack frame must lie outside
 * the region that will be overwritten; otherwise the copy would clobber
 * the very frame performing it.  So this function recurses, consuming
 * STACK_PAD_SIZE VALUEs of stack per call, until &space has moved past
 * the saved region (handling both growth directions), then finishes the
 * restore.  Never returns.
 */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#define STACK_PAD_SIZE 1024
        VALUE space[STACK_PAD_SIZE];

#if STACK_GROW_DIRECTION < 0 /* downward */
        if (addr_in_prev_frame > cont->machine_stack_src) {
            cont_restore_0(cont, &space[0]);
        }
#elif STACK_GROW_DIRECTION > 0 /* upward */
        if (addr_in_prev_frame < cont->machine_stack_src + cont->machine_stack_size) {
            cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
        }
#else
        /* growth direction unknown at compile time: detect it at runtime */
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
            if (addr_in_prev_frame > cont->machine_stack_src) {
                cont_restore_0(cont, &space[0]);
            }
        }
        else {
            /* Stack grows upward */
            if (addr_in_prev_frame < cont->machine_stack_src + cont->machine_stack_size) {
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
            }
        }
#endif
    }
#ifdef __ia64
    register_stack_extend(cont, (VALUE*)rb_ia64_bsp());
#else
    cont_restore_1(cont);
#endif
}
347
348 /*
349 * Document-class: Continuation
350 *
351 * Continuation objects are generated by
352 * <code>Kernel#callcc</code>. They hold a return address and execution
353 * context, allowing a nonlocal return to the end of the
354 * <code>callcc</code> block from anywhere within a program.
355 * Continuations are somewhat analogous to a structured version of C's
356 * <code>setjmp/longjmp</code> (although they contain more state, so
357 * you might consider them closer to threads).
358 *
359 * For instance:
360 *
361 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
362 * callcc{|$cc|}
363 * puts(message = arr.shift)
364 * $cc.call unless message =~ /Max/
365 *
366 * <em>produces:</em>
367 *
368 * Freddie
369 * Herbie
370 * Ron
371 * Max
372 *
373 * This (somewhat contrived) example allows the inner loop to abandon
374 * processing early:
375 *
376 * callcc {|cont|
377 * for i in 0..4
378 * print "\n#{i}: "
379 * for j in i*5...(i+1)*5
380 * cont.call() if j == 17
381 * printf "%3d", j
382 * end
383 * end
384 * }
385 * print "\n"
386 *
387 * <em>produces:</em>
388 *
389 * 0: 0 1 2 3 4
390 * 1: 5 6 7 8 9
391 * 2: 10 11 12 13 14
392 * 3: 15 16
393 */
394
395 /*
396 * call-seq:
397 * callcc {|cont| block } => obj
398 *
399 * Generates a <code>Continuation</code> object, which it passes to the
400 * associated block. Performing a <em>cont</em><code>.call</code> will
401 * cause the <code>callcc</code> to return (as will falling through the
402 * end of the block). The value returned by the <code>callcc</code> is
403 * the value of the block, or the value passed to
404 * <em>cont</em><code>.call</code>. See class <code>Continuation</code>
405 * for more details. Also see <code>Kernel::throw</code> for
406 * an alternative mechanism for unwinding a call stack.
407 */
408
409 static VALUE
410 rb_callcc(VALUE self)
411 {
412 volatile int called;
413 volatile VALUE val = cont_capture(&called);
414
415 if (called) {
416 return val;
417 }
418 else {
419 return rb_yield(val);
420 }
421 }
422
423 static VALUE
424 make_passing_arg(int argc, VALUE *argv)
425 {
426 switch(argc) {
427 case 0:
428 return Qnil;
429 case 1:
430 return argv[0];
431 default:
432 return rb_ary_new4(argc, argv);
433 }
434 }
435
436 /*
437 * call-seq:
438 * cont.call(args, ...)
439 * cont[args, ...]
440 *
441 * Invokes the continuation. The program continues from the end of the
442 * <code>callcc</code> block. If no arguments are given, the original
443 * <code>callcc</code> returns <code>nil</code>. If one argument is
444 * given, <code>callcc</code> returns it. Otherwise, an array
445 * containing <i>args</i> is returned.
446 *
447 * callcc {|cont| cont.call } #=> nil
448 * callcc {|cont| cont.call 1 } #=> 1
449 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
450 */
451
452 static VALUE
453 rb_cont_call(int argc, VALUE *argv, VALUE contval)
454 {
455 rb_context_t *cont;
456 rb_thread_t *th = GET_THREAD();
457 GetContPtr(contval, cont);
458
459 if (cont->saved_thread.self != th->self) {
460 rb_raise(rb_eRuntimeError, "continuation called across threads");
461 }
462 if (cont->saved_thread.trap_tag != th->trap_tag) {
463 rb_raise(rb_eRuntimeError, "continuation called across trap");
464 }
465 if (cont->saved_thread.fiber) {
466 rb_context_t *fcont;
467 GetContPtr(cont->saved_thread.fiber, fcont);
468
469 if (th->fiber != cont->saved_thread.fiber) {
470 rb_raise(rb_eRuntimeError, "continuation called across fiber");
471 }
472
473 if (!fcont->alive) {
474 rb_raise(rb_eRuntimeError, "continuation called dead fiber");
475 }
476 }
477
478 cont->value = make_passing_arg(argc, argv);
479
480 cont_restore_0(cont, &contval);
481 return Qnil; /* unreachable */
482 }
483
/*********/
/* fiber */
/*********/

/* Initial size, in VALUEs, of the private VM stack given to each fiber. */
#define FIBER_VM_STACK_SIZE (4 * 1024)
489
490 static rb_context_t *
491 fiber_alloc(VALUE klass)
492 {
493 rb_context_t *cont = cont_new(klass);
494
495 cont->type = FIBER_CONTEXT;
496 cont->prev = Qnil;
497
498 return cont;
499 }
500
/*
 * Builds a new, not-yet-started fiber wrapping `proc'.  Allocates a
 * private VM stack and fabricates an initial control frame at its top so
 * the VM has a valid frame to run from when the fiber is first resumed.
 * The jump buffer is seeded from the thread's root jmpbuf.
 */
static VALUE
fiber_new(VALUE klass, VALUE proc)
{
    rb_context_t *cont = fiber_alloc(klass);
    VALUE contval = cont->self;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = FIBER_VM_STACK_SIZE;
    GC_WB(&th->stack, ALLOC_N(VALUE, th->stack_size));

    /* fabricate the bottom-most control frame at the top of the stack */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->tag = 0;
    /* fibers own their local storage table (freed in cont_free) */
    GC_WB(&th->local_storage, st_init_numtable());

    GC_WB(&th->first_proc, proc);

    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);

    return contval;
}
537
538 VALUE
539 rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
540 {
541 return fiber_new(rb_cFiber, rb_proc_new(func, obj));
542 }
543
544 static VALUE
545 rb_fiber_s_new(VALUE self)
546 {
547 return fiber_new(self, rb_block_proc());
548 }
549
550 static VALUE
551 return_fiber(void)
552 {
553 rb_context_t *cont;
554 VALUE curr = rb_fiber_current();
555 GetContPtr(curr, cont);
556
557 if (cont->prev == Qnil) {
558 rb_thread_t *th = GET_THREAD();
559
560 if (th->root_fiber != curr) {
561 return th->root_fiber;
562 }
563 else {
564 rb_raise(rb_eFiberError, "can't yield from root fiber");
565 }
566 }
567 else {
568 VALUE prev = cont->prev;
569 cont->prev = Qnil;
570 return prev;
571 }
572 }
573
574 VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
575
576 static void
577 rb_fiber_terminate(rb_context_t *cont)
578 {
579 VALUE value = cont->value;
580 cont->alive = Qfalse;
581 rb_fiber_transfer(return_fiber(), 1, &value);
582 }
583
584 void
585 rb_fiber_start(void)
586 {
587 rb_thread_t *th = GET_THREAD();
588 rb_context_t *cont;
589 // rb_proc_t *proc;
590 // VALUE args;
591 int state;
592
593 GetContPtr(th->fiber, cont);
594 TH_PUSH_TAG(th);
595 if ((state = EXEC_TAG()) == 0) {
596 #if 0 // TODO
597 GetProcPtr(cont->saved_thread.first_proc, proc);
598 args = cont->value;
599 cont->value = Qnil;
600 th->errinfo = Qnil;
601 GC_WB(&th->local_lfp, proc->block.lfp);
602 th->local_svar = Qnil;
603
604 cont->value = vm_invoke_proc(th, proc, proc->block.self, 1, &args, 0);
605 #endif
606 }
607 TH_POP_TAG();
608
609 if (state) {
610 if (TAG_RAISE) {
611 th->thrown_errinfo = th->errinfo;
612 }
613 else {
614 th->thrown_errinfo =
615 vm_make_jump_tag_but_local_jump(state, th->errinfo);
616 }
617 RUBY_VM_SET_INTERRUPT(th);
618 }
619
620 rb_fiber_terminate(cont);
621 rb_bug("rb_fiber_start: unreachable");
622 }
623
624 VALUE
625 rb_fiber_current()
626 {
627 rb_thread_t *th = GET_THREAD();
628 if (th->fiber == 0) {
629 /* save root */
630 rb_context_t *cont = fiber_alloc(rb_cFiber);
631 cont->type = ROOT_FIBER_CONTEXT;
632 th->root_fiber = th->fiber = cont->self;
633 }
634 return th->fiber;
635 }
636
/*
 * Saves the state of the currently running fiber (lazily creating the
 * root-fiber context if this thread has none) and arms its jump buffer.
 *
 * Returns twice, setjmp-style:
 *   - Qundef on the initial call, telling the caller it must now actually
 *     jump into the target fiber (next_cont);
 *   - when this fiber is later resumed, the value that was passed to it.
 */
static VALUE
fiber_store(rb_context_t *next_cont)
{
    rb_thread_t *th = GET_THREAD();
    rb_context_t *cont;

    if (th->fiber) {
        GetContPtr(th->fiber, cont);
        cont->saved_thread = *th;
    }
    else {
        /* create current fiber */
        cont = fiber_alloc(rb_cFiber); /* no need to allocate vm stack */
        cont->type = ROOT_FIBER_CONTEXT;
        th->root_fiber = th->fiber = cont->self;
    }

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* restored: some fiber transferred control back to us */
        GetContPtr(th->fiber, cont);
        return cont->value;
    }
    else {
        /* initial return: caller must perform the switch */
        return Qundef;
    }
}
665
/*
 * Core of resume/transfer.  Validates that `fib' belongs to the current
 * thread and trap level and is still alive; when invoked via resume
 * (is_resume), records the current fiber as the resumer to return to on
 * yield.  Stores the argument value, saves the current fiber via
 * fiber_store, and jumps into the target with cont_restore_0.  Execution
 * returns here only when another fiber switches back to us; the value it
 * passed is returned after a pending-interrupt check.
 */
static inline VALUE
fiber_switch(VALUE fib, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetContPtr(fib, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.trap_tag != th->trap_tag) {
        rb_raise(rb_eFiberError, "fiber called across trap");
    }
    else if (!cont->alive) {
        rb_raise(rb_eFiberError, "dead fiber called");
    }

    if (is_resume) {
        /* remember who to hand control back to on Fiber.yield */
        cont->prev = rb_fiber_current();
    }

    cont->value = make_passing_arg(argc, argv);

    if ((value = fiber_store(cont)) == Qundef) {
        /* first return from fiber_store: perform the actual switch */
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }

    RUBY_VM_CHECK_INTS();

    return value;
}
700
701 VALUE
702 rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
703 {
704 return fiber_switch(fib, argc, argv, 0);
705 }
706
707 VALUE
708 rb_fiber_resume(VALUE fib, int argc, VALUE *argv)
709 {
710 rb_context_t *cont;
711 GetContPtr(fib, cont);
712
713 if (cont->prev != Qnil) {
714 rb_raise(rb_eFiberError, "double resume");
715 }
716
717 return fiber_switch(fib, argc, argv, 1);
718 }
719
720 VALUE
721 rb_fiber_yield(int argc, VALUE *argv)
722 {
723 return rb_fiber_transfer(return_fiber(), argc, argv);
724 }
725
726 VALUE
727 rb_fiber_alive_p(VALUE fib)
728 {
729 rb_context_t *cont;
730 GetContPtr(fib, cont);
731 return cont->alive;
732 }
733
734 static VALUE
735 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
736 {
737 return rb_fiber_resume(fib, argc, argv);
738 }
739
740 static VALUE
741 rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
742 {
743 return rb_fiber_transfer(fib, argc, argv);
744 }
745
746 static VALUE
747 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
748 {
749 return rb_fiber_yield(argc, argv);
750 }
751
752 static VALUE
753 rb_fiber_s_current(VALUE klass)
754 {
755 return rb_fiber_current();
756 }
757
/*
 * Registers the Fiber class, FiberError, and the basic fiber methods.
 * The coroutine-style extras ("transfer", "alive?", Fiber.current) are
 * added separately by Init_Fiber_as_Coroutine().
 */
void
Init_Cont(void)
{
    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_undef_alloc_func(rb_cFiber);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "new", rb_fiber_s_new, 0);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
768
/*
 * Registers the Continuation class (call/[] only; instances come solely
 * from Kernel#callcc, so `new' is removed) and the global callcc function.
 */
void
Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
779
/*
 * Adds the coroutine-style Fiber API (explicit transfer between fibers,
 * liveness query, and Fiber.current) on top of Init_Cont's basics.
 */
void
Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
Something went wrong with that request. Please try again.