/**********************************************************************

  cont.c -

  $Author$
  created at: Thu May 23 09:03:43 2007

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/ruby.h"
#include "vm_core.h"
#include "gc.h"
#include "eval_intern.h"
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};
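/*
 * rb_context_t captures everything needed to re-enter a saved execution
 * state: the Ruby VM stack (vm_stack), a heap copy of the C machine stack
 * (machine_stack, taken from machine_stack_src), a snapshot of the running
 * rb_thread_t, and the jmp_buf used to jump back into the saved frame.
 * The same structure backs both Continuation and Fiber objects; `type'
 * records which one, and `prev' links a resumed fiber back to its resumer.
 */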
typedef struct rb_context_struct {
    VALUE self;
    VALUE value;
    VALUE *vm_stack;
    VALUE *machine_stack;
    VALUE *machine_stack_src;
#ifdef __ia64
    VALUE *machine_register_stack;
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;
    rb_jmpbuf_t jmpbuf;
    int machine_stack_size;
    VALUE prev;
    int alive;
    enum context_type type;
} rb_context_t;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

#define GetContPtr(obj, ptr)  \
    Data_Get_Struct(obj, rb_context_t, ptr)

NOINLINE(static VALUE cont_capture(volatile int *stat));

void rb_thread_mark(rb_thread_t *th);
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        rb_gc_mark(cont->value);
        rb_gc_mark(cont->prev);
        rb_thread_mark(&cont->saved_thread);

        if (cont->vm_stack) {
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->saved_thread.stack_size);
        }

        if (cont->machine_stack) {
            rb_gc_mark_locations(cont->machine_stack,
                                 cont->machine_stack + cont->machine_stack_size);
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            rb_gc_mark_locations(cont->machine_register_stack,
                                 cont->machine_register_stack + cont->machine_register_stack_size);
        }
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
static void
cont_free(void *ptr)
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
        RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#ifdef __ia64
        RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
        RUBY_FREE_UNLESS_NULL(cont->vm_stack);

        if (cont->type == FIBER_CONTEXT) {
            st_free_table(cont->saved_thread.local_storage);
        }

        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}
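/*
 * Copy the live C stack into cont->machine_stack. The copied region runs
 * between machine_stack_start and machine_stack_end of the current thread;
 * which of the two is the lower address depends on the direction the stack
 * grows on this platform, hence the two branches below.
 */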
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    int size;
    rb_thread_t *sth = &cont->saved_thread;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif

    sth->machine_stack_start = sth->machine_stack_end = 0;
#ifdef __ia64
    sth->machine_register_stack_start = sth->machine_register_stack_end = 0;
#endif
}
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    contval = Data_Make_Struct(klass, rb_context_t, cont_mark, cont_free, cont);

    cont->self = contval;
    cont->alive = Qtrue;

    /* save thread context */
    cont->saved_thread = *th;

    return cont;
}
void vm_stack_to_heap(rb_thread_t *th);
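/*
 * Capture the current execution state into a new Continuation. The VM stack
 * and the machine stack are copied to the heap, then ruby_setjmp records the
 * jump target. On the initial capture *stat is set to 0 and the continuation
 * object is returned; when the continuation is later invoked, control comes
 * back here through ruby_longjmp and *stat is set to 1, with the value that
 * was passed to Continuation#call returned to the caller.
 */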
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        VALUE value;

        value = cont->value;
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        *stat = 0;
        return cont->self;
    }
}
NORETURN(static void cont_restore_1(rb_context_t *));

static void
cont_restore_1(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_context_t *fcont;
            GetContPtr(fib, fcont);
            th->stack_size = fcont->saved_thread.stack_size;
            th->stack = fcont->saved_thread.stack;
        }
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
    }
    else {
        /* fiber */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->trap_tag = sth->trap_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
               VALUE, cont->machine_stack_size);
    }
#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
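/*
 * On IA-64 the register backing store is a second stack that must also be
 * restored. register_stack_extend recurses until the current backing-store
 * pointer (bsp) has moved past the region that will be overwritten; the
 * block of volatile dummies only exists to force the compiler to use (and
 * therefore spill) registers, so it is guarded by rb_dummy_false and never
 * actually runs.
 */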
#ifdef __ia64
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_1(cont);
}
#undef C
#undef E
#endif
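/*
 * Before cont_restore_1 copies the saved machine stack back over the live
 * one, the currently executing frame must lie outside the region that will
 * be overwritten. cont_restore_0 guarantees this by recursing with a
 * STACK_PAD_SIZE-element local array until addr_in_prev_frame has moved past
 * the saved region, handling both stack growth directions.
 */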
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#define STACK_PAD_SIZE 1024
        VALUE space[STACK_PAD_SIZE];

#if STACK_GROW_DIRECTION < 0 /* downward */
        if (addr_in_prev_frame > cont->machine_stack_src) {
            cont_restore_0(cont, &space[0]);
        }
#elif STACK_GROW_DIRECTION > 0 /* upward */
        if (addr_in_prev_frame < cont->machine_stack_src + cont->machine_stack_size) {
            cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
        }
#else
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
            if (addr_in_prev_frame > cont->machine_stack_src) {
                cont_restore_0(cont, &space[0]);
            }
        }
        else {
            /* Stack grows upward */
            if (addr_in_prev_frame < cont->machine_stack_src + cont->machine_stack_size) {
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
            }
        }
#endif
    }
#ifdef __ia64
    register_stack_extend(cont, (VALUE*)rb_ia64_bsp());
#else
    cont_restore_1(cont);
#endif
}
/*
 *  Document-class: Continuation
 *
 *  Continuation objects are generated by
 *  <code>Kernel#callcc</code>. They hold a return address and execution
 *  context, allowing a nonlocal return to the end of the
 *  <code>callcc</code> block from anywhere within a program.
 *  Continuations are somewhat analogous to a structured version of C's
 *  <code>setjmp/longjmp</code> (although they contain more state, so
 *  you might consider them closer to threads).
 *
 *  For instance:
 *
 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *     callcc{|$cc|}
 *     puts(message = arr.shift)
 *     $cc.call unless message =~ /Max/
 *
 *  <em>produces:</em>
 *
 *     Freddie
 *     Herbie
 *     Ron
 *     Max
 *
 *  This (somewhat contrived) example allows the inner loop to abandon
 *  processing early:
 *
 *     callcc {|cont|
 *       for i in 0..4
 *         print "\n#{i}: "
 *         for j in i*5...(i+1)*5
 *           cont.call() if j == 17
 *           printf "%3d", j
 *         end
 *       end
 *     }
 *     print "\n"
 *
 *  <em>produces:</em>
 *
 *     0:   0   1   2   3   4
 *     1:   5   6   7   8   9
 *     2:  10  11  12  13  14
 *     3:  15  16
 */
/*
 *  call-seq:
 *     callcc {|cont| block }   =>  obj
 *
 *  Generates a <code>Continuation</code> object, which it passes to the
 *  associated block. Performing a <em>cont</em><code>.call</code> will
 *  cause the <code>callcc</code> to return (as will falling through the
 *  end of the block). The value returned by the <code>callcc</code> is
 *  the value of the block, or the value passed to
 *  <em>cont</em><code>.call</code>. See class <code>Continuation</code>
 *  for more details. Also see <code>Kernel::throw</code> for
 *  an alternative mechanism for unwinding a call stack.
 */
static VALUE
rb_callcc(VALUE self)
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        return val;
    }
    else {
        return rb_yield(val);
    }
}
static VALUE
make_passing_arg(int argc, VALUE *argv)
{
    switch (argc) {
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}
/*
 *  call-seq:
 *     cont.call(args, ...)
 *     cont[args, ...]
 *
 *  Invokes the continuation. The program continues from the end of the
 *  <code>callcc</code> block. If no arguments are given, the original
 *  <code>callcc</code> returns <code>nil</code>. If one argument is
 *  given, <code>callcc</code> returns it. Otherwise, an array
 *  containing <i>args</i> is returned.
 *
 *     callcc {|cont|  cont.call }           #=>  nil
 *     callcc {|cont|  cont.call 1 }         #=>  1
 *     callcc {|cont|  cont.call 1, 2, 3 }   #=>  [1, 2, 3]
 */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.trap_tag != th->trap_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across trap");
    }
    if (cont->saved_thread.fiber) {
        rb_context_t *fcont;
        GetContPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
        if (!fcont->alive) {
            rb_raise(rb_eRuntimeError, "continuation called dead fiber");
        }
    }

    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}
/*********/
/* fiber */
/*********/

#define FIBER_VM_STACK_SIZE (4 * 1024)
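/*
 * The functions below build Fiber on top of the same rb_context_t machinery
 * used for Continuation. A minimal Ruby-level sketch of the API registered
 * in Init_Cont / Init_Fiber_as_Coroutine (illustrative only, not taken from
 * this file's documentation):
 *
 *     f = Fiber.new do |x|
 *       Fiber.yield x + 1   # suspends; x + 1 becomes the resume value
 *       :done               # block result becomes the last resume value
 *     end
 *     f.resume(1)   #=> 2
 *     f.resume      #=> :done
 *     f.alive?      #=> false
 */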
static rb_context_t *
fiber_alloc(VALUE klass)
{
    rb_context_t *cont = cont_new(klass);

    cont->type = FIBER_CONTEXT;
    cont->prev = Qnil;

    return cont;
}
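/*
 * Set up a brand-new fiber: allocate a fresh VM stack of
 * FIBER_VM_STACK_SIZE VALUEs, push a single dummy control frame onto it,
 * and remember the block (proc) that rb_fiber_start will invoke on the
 * first resume.
 */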
static VALUE
fiber_new(VALUE klass, VALUE proc)
{
    rb_context_t *cont = fiber_alloc(klass);
    VALUE contval = cont->self;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = FIBER_VM_STACK_SIZE;
    th->stack = ALLOC_N(VALUE, th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);

    return contval;
}
VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_new(rb_cFiber, rb_proc_new(func, obj));
}
static VALUE
rb_fiber_s_new(VALUE self)
{
    return fiber_new(self, rb_block_proc());
}
static VALUE
return_fiber(void)
{
    rb_context_t *cont;
    VALUE curr = rb_fiber_current();
    GetContPtr(curr, cont);

    if (cont->prev == Qnil) {
        rb_thread_t *th = GET_THREAD();

        if (th->root_fiber != curr) {
            return th->root_fiber;
        }
        else {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
    }
    else {
        VALUE prev = cont->prev;
        cont->prev = Qnil;
        return prev;
    }
}
VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
static void
rb_fiber_terminate(rb_context_t *cont)
{
    VALUE value = cont->value;
    cont->alive = Qfalse;
    rb_fiber_transfer(return_fiber(), 1, &value);
}
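/*
 * Entry point executed when a fiber runs for the first time: invoke the
 * stored proc with the value passed to the first resume/transfer. When the
 * block returns (or raises), control is handed back to the previous fiber
 * via rb_fiber_terminate, so this function never returns.
 */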
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_context_t *cont;
    rb_proc_t *proc;
    VALUE args;
    int state;

    GetContPtr(th->fiber, cont);
    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->local_lfp = proc->block.lfp;
        th->local_svar = Qnil;

        cont->value = vm_invoke_proc(th, proc, proc->block.self, 1, &args, 0);
    }
    TH_POP_TAG();

    if (state) {
        /* the block raised or threw; propagate it to the resuming fiber */
        if (state == TAG_RAISE) {
            th->thrown_errinfo = th->errinfo;
        }
        else {
            th->thrown_errinfo =
                vm_make_jump_tag_but_local_jump(state, th->errinfo);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(cont);
    rb_bug("rb_fiber_start: unreachable");
}
VALUE
rb_fiber_current()
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_context_t *cont = fiber_alloc(rb_cFiber);
        cont->type = ROOT_FIBER_CONTEXT;
        th->root_fiber = th->fiber = cont->self;
    }
    return th->fiber;
}
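/*
 * Save the state of the currently running fiber (thread snapshot plus
 * machine stack) and mark the return point with ruby_setjmp. Returns Qundef
 * on the initial call; when another fiber later switches back, execution
 * resumes here and the value passed by that switch is returned instead.
 */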
static VALUE
fiber_store(rb_context_t *next_cont)
{
    rb_thread_t *th = GET_THREAD();
    rb_context_t *cont;

    if (th->fiber) {
        GetContPtr(th->fiber, cont);
        cont->saved_thread = *th;
    }
    else {
        /* create current fiber */
        cont = fiber_alloc(rb_cFiber); /* no need to allocate vm stack */
        cont->type = ROOT_FIBER_CONTEXT;
        th->root_fiber = th->fiber = cont->self;
    }

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* restored */
        GetContPtr(th->fiber, cont);
        return cont->value;
    }
    else {
        return Qundef;
    }
}
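/*
 * Switch execution to the fiber `fib'. After the cross-thread, cross-trap
 * and liveness checks, the current fiber is saved with fiber_store(); if
 * that returns Qundef we are still on the old fiber, so jump into the target
 * with cont_restore_0. When the old fiber is eventually switched back to,
 * fiber_store returns the transferred value. For resume (as opposed to
 * transfer), the target's `prev' is set so Fiber.yield knows where to
 * return.
 */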
static inline VALUE
fiber_switch(VALUE fib, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetContPtr(fib, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.trap_tag != th->trap_tag) {
        rb_raise(rb_eFiberError, "fiber called across trap");
    }
    else if (!cont->alive) {
        rb_raise(rb_eFiberError, "dead fiber called");
    }

    if (is_resume) {
        cont->prev = rb_fiber_current();
    }

    cont->value = make_passing_arg(argc, argv);

    if ((value = fiber_store(cont)) == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }

    RUBY_VM_CHECK_INTS();

    return value;
}
VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
    return fiber_switch(fib, argc, argv, 0);
}
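/*
 * resume vs. transfer: resume records the calling fiber in `prev', so a
 * later Fiber.yield (via return_fiber) comes back to the resumer, and
 * resuming a fiber that already has a `prev' raises "double resume".
 * transfer leaves `prev' untouched, so control only moves where it is
 * explicitly transferred. A rough sketch of resume (illustrative only):
 *
 *     a = Fiber.new { Fiber.yield :from_a; :done }
 *     a.resume   #=> :from_a   (Fiber.yield returns to the resumer)
 *     a.resume   #=> :done
 */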
VALUE
rb_fiber_resume(VALUE fib, int argc, VALUE *argv)
{
    rb_context_t *cont;
    GetContPtr(fib, cont);

    if (cont->prev != Qnil) {
        rb_raise(rb_eFiberError, "double resume");
    }

    return fiber_switch(fib, argc, argv, 1);
}
VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
    return rb_fiber_transfer(return_fiber(), argc, argv);
}
VALUE
rb_fiber_alive_p(VALUE fib)
{
    rb_context_t *cont;
    GetContPtr(fib, cont);
    return cont->alive;
}
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}

static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_transfer(fib, argc, argv);
}

static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}

static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
void
Init_Cont(void)
{
    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_undef_alloc_func(rb_cFiber);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "new", rb_fiber_s_new, 0);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
void
Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
void
Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}