/**********************************************************************

  vm.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
#include "ruby/ruby.h"
#include "ruby/node.h"
#include "ruby/st.h"
#include "ruby/encoding.h"
#include "gc.h"

#include "insnhelper.h"
#include "vm_insnhelper.c"
#include "vm_eval.c"
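/* Note: "vm_insnhelper.c" and "vm_eval.c" are #included as source rather than
 * compiled separately, presumably so that the helper functions they define can
 * be inlined into this translation unit. */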
#define BUFSIZE 0x100
#define PROCDEBUG 0

VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;
VALUE rb_mRubyVMFrozenCore;

VALUE ruby_vm_global_state_version = 1;
rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;

void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);
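/* finish_insn_seq is a one-instruction sequence holding the `finish` insn.
 * rb_vm_set_finish_env() below points a frame's pc at it; when the VM reaches
 * that frame and executes `finish`, control returns from the evaluation loop
 * back to the calling C code. */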
#if OPT_STACK_CACHING
static VALUE finish_insn_seq[1] = { BIN(finish_SC_ax_ax) };
#elif OPT_CALL_THREADED_CODE
static VALUE const finish_insn_seq[1] = { 0 };
#else
static VALUE finish_insn_seq[1] = { BIN(finish) };
#endif

void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}

/* control stack frame */

static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
63 static void
64 vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
66 rb_iseq_t *iseq;
67 GetISeqPtr(iseqval, iseq);
69 if (iseq->type != ISEQ_TYPE_TOP) {
70 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
73 /* for return */
74 rb_vm_set_finish_env(th);
76 vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
77 th->top_self, 0, iseq->iseq_encoded,
78 th->cfp->sp, 0, iseq->local_size);
81 static void
82 vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
84 rb_iseq_t *iseq;
85 rb_block_t * const block = th->base_block;
86 GetISeqPtr(iseqval, iseq);
88 /* for return */
89 rb_vm_set_finish_env(th);
90 vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
91 GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
92 th->cfp->sp, block->lfp, iseq->local_size);
94 if (cref) {
95 th->cfp->dfp[-1] = (VALUE)cref;
99 rb_control_frame_t *
100 vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
102 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
103 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
104 return cfp;
106 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
108 return 0;
111 rb_control_frame_t *
112 vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
114 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
115 return cfp;
118 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
120 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
121 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
122 return cfp;
125 if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
126 break;
128 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
130 return 0;
133 /* Env */
135 static void
136 env_free(void * const ptr)
138 RUBY_FREE_ENTER("env");
139 if (ptr) {
140 const rb_env_t * const env = ptr;
141 RUBY_FREE_UNLESS_NULL(env->env);
142 ruby_xfree(ptr);
144 RUBY_FREE_LEAVE("env");
147 static void
148 env_mark(void * const ptr)
150 RUBY_MARK_ENTER("env");
151 if (ptr) {
152 const rb_env_t * const env = ptr;
154 if (env->env) {
155 /* TODO: should mark more restricted range */
156 RUBY_GC_INFO("env->env\n");
157 rb_gc_mark_locations(env->env, env->env + env->env_size);
160 RUBY_GC_INFO("env->prev_envval\n");
161 RUBY_MARK_UNLESS_NULL(env->prev_envval);
162 RUBY_MARK_UNLESS_NULL(env->block.self);
163 RUBY_MARK_UNLESS_NULL(env->block.proc);
165 if (env->block.iseq) {
166 if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
167 RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
169 else {
170 RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
174 RUBY_MARK_LEAVE("env");
177 static VALUE
178 env_alloc(void)
180 VALUE obj;
181 rb_env_t *env;
182 obj = Data_Make_Struct(rb_cEnv, rb_env_t, env_mark, env_free, env);
183 env->env = 0;
184 env->prev_envval = 0;
185 env->block.iseq = 0;
186 return obj;
189 static VALUE check_env_value(VALUE envval);
191 static int
192 check_env(rb_env_t * const env)
194 printf("---\n");
195 printf("envptr: %p\n", &env->block.dfp[0]);
196 printf("orphan: %p\n", (void *)env->block.dfp[1]);
197 printf("inheap: %p\n", (void *)env->block.dfp[2]);
198 printf("envval: %10p ", (void *)env->block.dfp[3]);
199 dp(env->block.dfp[3]);
200 printf("penvv : %10p ", (void *)env->block.dfp[4]);
201 dp(env->block.dfp[4]);
202 printf("lfp: %10p\n", env->block.lfp);
203 printf("dfp: %10p\n", env->block.dfp);
204 if (env->block.dfp[4]) {
205 printf(">>\n");
206 check_env_value(env->block.dfp[4]);
207 printf("<<\n");
209 return 1;
212 static VALUE
213 check_env_value(VALUE envval)
215 rb_env_t *env;
216 GetEnvPtr(envval, env);
218 if (check_env(env)) {
219 return envval;
221 rb_bug("invalid env");
222 return Qnil; /* unreachable */
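/* vm_make_env_each(): copy a stack-allocated local environment (and,
 * recursively, its outer environments up to endptr) into heap-allocated
 * rb_env_t objects, so the environment can outlive the control frame that
 * created it (e.g. for procs and bindings).  The frame's lfp/dfp are
 * redirected to the heap copy. */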
225 static VALUE
226 vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
227 VALUE *envptr, VALUE * const endptr)
229 VALUE envval, penvval = 0;
230 rb_env_t *env;
231 VALUE *nenvptr;
232 int i, local_size;
234 if (ENV_IN_HEAP_P(th, envptr)) {
235 return ENV_VAL(envptr);
238 if (envptr != endptr) {
239 VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
240 rb_control_frame_t *pcfp = cfp;
242 if (ENV_IN_HEAP_P(th, penvptr)) {
243 penvval = ENV_VAL(penvptr);
245 else {
246 while (pcfp->dfp != penvptr) {
247 pcfp++;
248 if (pcfp->dfp == 0) {
249 SDR();
250 rb_bug("invalid dfp");
253 penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
254 cfp->lfp = pcfp->lfp;
255 *envptr = GC_GUARDED_PTR(pcfp->dfp);
259 /* allocate env */
260 envval = env_alloc();
261 GetEnvPtr(envval, env);
263 if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
264 local_size = 2;
266 else {
267 local_size = cfp->iseq->local_size;
270 env->env_size = local_size + 1 + 2;
271 env->local_size = local_size;
272 env->env = ALLOC_N(VALUE, env->env_size);
273 env->prev_envval = penvval;
275 for (i = 0; i <= local_size; i++) {
276 env->env[i] = envptr[-local_size + i];
277 #if 0
278 fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
279 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
280 /* clear value stack for GC */
281 envptr[-local_size + i] = 0;
283 #endif
286 *envptr = envval; /* GC mark */
287 nenvptr = &env->env[i - 1];
288 nenvptr[1] = envval; /* frame self */
289 nenvptr[2] = penvval; /* frame prev env object */
291 /* reset lfp/dfp in cfp */
292 cfp->dfp = nenvptr;
293 if (envptr == endptr) {
294 cfp->lfp = nenvptr;
297 /* as Binding */
298 env->block.self = cfp->self;
299 env->block.lfp = cfp->lfp;
300 env->block.dfp = cfp->dfp;
301 env->block.iseq = cfp->iseq;
303 if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
304 /* TODO */
305 env->block.iseq = 0;
307 return envval;
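/* Collect the local variable names of a heap environment (and its outer
 * environments) into `ary` as symbols; used when listing local variables for
 * an environment that has already been moved off the VM stack. */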
310 static int
311 collect_local_variables_in_env(rb_env_t * const env, const VALUE ary)
313 int i;
314 for (i = 0; i < env->block.iseq->local_table_size; i++) {
315 ID lid = env->block.iseq->local_table[i];
316 if (lid) {
317 rb_ary_push(ary, ID2SYM(lid));
320 if (env->prev_envval) {
321 rb_env_t *prevenv;
322 GetEnvPtr(env->prev_envval, prevenv);
323 collect_local_variables_in_env(prevenv, ary);
325 return 0;
329 vm_collect_local_variables_in_heap(rb_thread_t * const th,
330 VALUE * const dfp, const VALUE ary)
332 if (ENV_IN_HEAP_P(th, dfp)) {
333 rb_env_t *env;
334 GetEnvPtr(ENV_VAL(dfp), env);
335 collect_local_variables_in_env(env, ary);
336 return 1;
338 else {
339 return 0;
343 VALUE
344 vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
346 VALUE envval;
348 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
349 /* for method_missing */
350 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
353 envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);
355 if (PROCDEBUG) {
356 check_env_value(envval);
359 return envval;
362 void
363 vm_stack_to_heap(rb_thread_t * const th)
365 rb_control_frame_t *cfp = th->cfp;
366 while ((cfp = vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
367 vm_make_env_object(th, cfp);
368 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
372 /* Proc */
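/* vm_make_proc_from_block()/vm_make_proc(): wrap a block in a Proc object.
 * The block's environment is forced onto the heap first (vm_make_env_object),
 * and the resulting Proc is cached in block->proc so the same block is not
 * wrapped twice. */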
374 static VALUE
375 vm_make_proc_from_block(rb_thread_t *th, rb_control_frame_t *cfp,
376 rb_block_t *block)
378 VALUE procval;
379 rb_control_frame_t *bcfp;
380 VALUE *bdfp; /* to gc mark */
382 if (block->proc) {
383 return block->proc;
386 bcfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);
387 bdfp = bcfp->dfp;
388 block->proc = procval = vm_make_proc(th, bcfp, block);
389 return procval;
392 VALUE
393 vm_make_proc(rb_thread_t *th,
394 rb_control_frame_t *cfp, const rb_block_t *block)
396 VALUE procval, envval, blockprocval = 0;
397 rb_proc_t *proc;
399 if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
400 if (!RUBY_VM_CLASS_SPECIAL_P(cfp->lfp[0])) {
401 rb_proc_t *p;
403 blockprocval = vm_make_proc_from_block(
404 th, cfp, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));
406 GetProcPtr(blockprocval, p);
407 *cfp->lfp = GC_GUARDED_PTR(&p->block);
410 envval = vm_make_env_object(th, cfp);
412 if (PROCDEBUG) {
413 check_env_value(envval);
415 procval = rb_proc_alloc(rb_cProc);
416 GetProcPtr(procval, proc);
417 proc->blockprocval = blockprocval;
418 proc->block.self = block->self;
419 proc->block.lfp = block->lfp;
420 proc->block.dfp = block->dfp;
421 proc->block.iseq = block->iseq;
422 proc->block.proc = procval;
423 proc->envval = envval;
424 proc->safe_level = th->safe_level;
426 if (VMDEBUG) {
427 if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
428 rb_bug("invalid ptr: block->dfp");
430 if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
431 rb_bug("invalid ptr: block->lfp");
435 return procval;
438 /* C -> Ruby: block */
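/* invoke_block_from_c(): the C -> Ruby block invocation path.  For an
 * iseq-backed block it pushes a `finish` frame, copies the arguments onto the
 * VM stack, pushes a BLOCK (or LAMBDA) frame and runs vm_eval_body(); blocks
 * backed by a NODE (C-level ifunc) are dispatched to vm_yield_with_cfunc()
 * instead. */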
440 static inline VALUE
441 invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
442 VALUE self, int argc, const VALUE *argv,
443 const rb_block_t *blockptr, const NODE *cref)
445 if (BUILTIN_TYPE(block->iseq) != T_NODE) {
446 const rb_iseq_t *iseq = block->iseq;
447 const rb_control_frame_t *cfp = th->cfp;
448 int i, opt_pc, arg_size = iseq->arg_size;
449 int type = block_proc_is_lambda(block->proc) ?
450 VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
452 rb_vm_set_finish_env(th);
454 CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);
456 for (i=0; i<argc; i++) {
457 cfp->sp[i] = argv[i];
460 opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
461 type == VM_FRAME_MAGIC_LAMBDA);
463 vm_push_frame(th, iseq, type,
464 self, GC_GUARDED_PTR(block->dfp),
465 iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
466 iseq->local_size - arg_size);
468 if (cref) {
469 th->cfp->dfp[-1] = (VALUE)cref;
472 return vm_eval_body(th);
474 else {
475 return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
479 static inline const rb_block_t *
480 check_block(rb_thread_t *th)
482 const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);
484 if (blockptr == 0) {
485 vm_localjump_error("no block given", Qnil, 0);
488 return blockptr;
491 static inline VALUE
492 vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
494 const rb_block_t *blockptr = check_block(th);
495 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
498 static inline VALUE
499 vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
501 const rb_block_t *blockptr = check_block(th);
502 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
505 VALUE
506 vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
507 int argc, const VALUE *argv, rb_block_t * blockptr)
509 VALUE val = Qundef;
510 int state;
511 volatile int stored_safe = th->safe_level;
512 rb_control_frame_t * volatile cfp = th->cfp;
514 TH_PUSH_TAG(th);
515 if ((state = EXEC_TAG()) == 0) {
516 if (!proc->is_from_method) {
517 th->safe_level = proc->safe_level;
519 val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
521 TH_POP_TAG();
523 if (!proc->is_from_method) {
524 th->safe_level = stored_safe;
527 if (state) {
528 if (state == TAG_RETURN && proc->is_lambda) {
529 VALUE err = th->errinfo;
530 VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
532 if (escape_dfp == cfp->dfp) {
533 printf("ok\n");
534 state = 0;
535 th->errinfo = Qnil;
536 th->cfp = cfp;
537 val = GET_THROWOBJ_VAL(err);
542 if (state) {
543 JUMP_TAG(state);
545 return val;
548 /* special variable */
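/* "Special" variables ($_ and $~) live in a slot of the local frame (lfp):
 * key 0 selects the last-read line ($_) and key 1 the last match info ($~),
 * as used by rb_lastline_get()/rb_backref_get() below. */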
550 static rb_control_frame_t *
551 vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
553 while (cfp->pc == 0) {
554 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
555 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
556 return 0;
559 return cfp;
562 static VALUE
563 vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
565 cfp = vm_normal_frame(th, cfp);
566 return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
569 static void
570 vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
572 cfp = vm_normal_frame(th, cfp);
573 lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
576 static VALUE
577 vm_svar_get(VALUE key)
579 rb_thread_t *th = GET_THREAD();
580 return vm_cfp_svar_get(th, th->cfp, key);
583 static void
584 vm_svar_set(VALUE key, VALUE val)
586 rb_thread_t *th = GET_THREAD();
587 vm_cfp_svar_set(th, th->cfp, key, val);
590 VALUE
591 rb_backref_get(void)
593 return vm_svar_get(1);
596 void
597 rb_backref_set(VALUE val)
599 vm_svar_set(1, val);
602 VALUE
603 rb_lastline_get(void)
605 return vm_svar_get(0);
608 void
609 rb_lastline_set(VALUE val)
611 vm_svar_set(0, val);
614 /* backtrace */
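/* vm_get_sourceline(): translate the current pc offset of a Ruby-level frame
 * into a source line number by scanning the iseq's insn_info_table.
 * vm_backtrace_each()/vm_backtrace() below walk the control frame stack and
 * build the "file:line:in `name'" entries of a backtrace. */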
617 vm_get_sourceline(const rb_control_frame_t *cfp)
619 int line_no = 0;
620 const rb_iseq_t *iseq = cfp->iseq;
622 if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
623 int i;
624 int pos = cfp->pc - cfp->iseq->iseq_encoded;
626 for (i = 0; i < iseq->insn_info_size; i++) {
627 if (iseq->insn_info_table[i].position == pos) {
628 line_no = iseq->insn_info_table[i - 1].line_no;
629 goto found;
632 line_no = iseq->insn_info_table[i - 1].line_no;
634 found:
635 return line_no;
638 static VALUE
639 vm_backtrace_each(rb_thread_t *th,
640 const rb_control_frame_t *limit_cfp, const rb_control_frame_t *cfp,
641 const char * file, int line_no, VALUE ary)
643 VALUE str;
645 while (cfp > limit_cfp) {
646 str = 0;
647 if (cfp->iseq != 0) {
648 if (cfp->pc != 0) {
649 rb_iseq_t *iseq = cfp->iseq;
651 line_no = vm_get_sourceline(cfp);
652 file = RSTRING_PTR(iseq->filename);
653 str = rb_sprintf("%s:%d:in `%s'",
654 file, line_no, RSTRING_PTR(iseq->name));
655 rb_ary_push(ary, str);
658 else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
659 str = rb_sprintf("%s:%d:in `%s'",
660 file, line_no,
661 rb_id2name(cfp->method_id));
662 rb_ary_push(ary, str);
664 cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
666 return rb_ary_reverse(ary);
669 static inline VALUE
670 vm_backtrace(rb_thread_t *th, int lev)
672 VALUE ary;
673 const rb_control_frame_t *cfp = th->cfp;
674 const rb_control_frame_t *top_of_cfp = (void *)(th->stack + th->stack_size);
675 top_of_cfp -= 2;
677 if (lev < 0) {
678 /* TODO ?? */
679 ary = rb_ary_new();
681 else {
682 while (lev-- >= 0) {
683 cfp++;
684 if (cfp >= top_of_cfp) {
685 return Qnil;
688 ary = rb_ary_new();
691 ary = vm_backtrace_each(th, RUBY_VM_NEXT_CONTROL_FRAME(cfp),
692 top_of_cfp, "", 0, ary);
693 return ary;
696 const char *
697 rb_sourcefile(void)
699 rb_thread_t *th = GET_THREAD();
700 rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
702 if (cfp) {
703 return RSTRING_PTR(cfp->iseq->filename);
705 else {
706 return 0;
711 rb_sourceline(void)
713 rb_thread_t *th = GET_THREAD();
714 rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
716 if (cfp) {
717 return vm_get_sourceline(cfp);
719 else {
720 return 0;
724 NODE *
725 vm_cref(void)
727 rb_thread_t *th = GET_THREAD();
728 rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
729 return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
732 #if 0
733 void
734 debug_cref(NODE *cref)
736 while (cref) {
737 dp(cref->nd_clss);
738 printf("%ld\n", cref->nd_visi);
739 cref = cref->nd_next;
742 #endif
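/* vm_cref_push(): prepend a new cref entry (class `klass`, visibility `noex`)
 * to the cref chain of the current Ruby-level caller; the cref chain is the
 * lexical scope that constant and class-variable lookup walk. */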
744 static NODE *
745 vm_cref_push(rb_thread_t *th, VALUE klass, int noex)
747 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
748 NODE *cref = NEW_BLOCK(klass);
749 cref->nd_file = 0;
750 cref->nd_visi = noex;
752 if (cfp) {
753 cref->nd_next = vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
756 return cref;
759 static inline VALUE
760 vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
762 NODE *cref = vm_get_cref(iseq, lfp, dfp);
763 VALUE klass = Qundef;
765 while (cref) {
766 if ((klass = cref->nd_clss) != 0) {
767 break;
769 cref = cref->nd_next;
772 return klass;
775 VALUE
776 rb_vm_cbase(void)
778 rb_thread_t *th = GET_THREAD();
779 rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
781 return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
784 /* jump */
786 static VALUE
787 make_localjump_error(const char *mesg, VALUE value, int reason)
789 extern VALUE rb_eLocalJumpError;
790 VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
791 ID id;
793 switch (reason) {
794 case TAG_BREAK:
795 CONST_ID(id, "break");
796 break;
797 case TAG_REDO:
798 CONST_ID(id, "redo");
799 break;
800 case TAG_RETRY:
801 CONST_ID(id, "retry");
802 break;
803 case TAG_NEXT:
804 CONST_ID(id, "next");
805 break;
806 case TAG_RETURN:
807 CONST_ID(id, "return");
808 break;
809 default:
810 CONST_ID(id, "noreason");
811 break;
813 rb_iv_set(exc, "@exit_value", value);
814 rb_iv_set(exc, "@reason", ID2SYM(id));
815 return exc;
818 void
819 vm_localjump_error(const char *mesg, VALUE value, int reason)
821 VALUE exc = make_localjump_error(mesg, value, reason);
822 rb_exc_raise(exc);
825 VALUE
826 vm_make_jump_tag_but_local_jump(int state, VALUE val)
828 VALUE result = Qnil;
830 if (val == Qundef) {
831 val = GET_THREAD()->tag->retval;
833 switch (state) {
834 case 0:
835 break;
836 case TAG_RETURN:
837 result = make_localjump_error("unexpected return", val, state);
838 break;
839 case TAG_BREAK:
840 result = make_localjump_error("unexpected break", val, state);
841 break;
842 case TAG_NEXT:
843 result = make_localjump_error("unexpected next", val, state);
844 break;
845 case TAG_REDO:
846 result = make_localjump_error("unexpected redo", Qnil, state);
847 break;
848 case TAG_RETRY:
849 result = make_localjump_error("retry outside of rescue clause", Qnil, state);
850 break;
851 default:
852 break;
854 return result;
857 void
858 vm_jump_tag_but_local_jump(int state, VALUE val)
860 VALUE exc = vm_make_jump_tag_but_local_jump(state, val);
861 if (val != Qnil) {
862 rb_exc_raise(exc);
864 JUMP_TAG(state);
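/* vm_iter_break()/rb_iter_break(): throw TAG_BREAK targeted at the dfp of the
 * enclosing block, as if `break` had been executed inside the iterator. */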
867 NORETURN(static void vm_iter_break(rb_thread_t *th));
869 static void
870 vm_iter_break(rb_thread_t *th)
872 rb_control_frame_t *cfp = th->cfp;
873 VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);
875 th->state = TAG_BREAK;
876 th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
877 TH_JUMP_TAG(th, TAG_BREAK);
880 void
881 rb_iter_break(void)
883 vm_iter_break(GET_THREAD());
886 /* optimization: redefine management */
888 VALUE ruby_vm_redefined_flag = 0;
889 static st_table *vm_opt_method_table = 0;
891 static void
892 rb_vm_check_redefinition_opt_method(const NODE *node)
894 VALUE bop;
896 if (st_lookup(vm_opt_method_table, (st_data_t)node, &bop)) {
897 ruby_vm_redefined_flag |= bop;
901 static void
902 add_opt_method(VALUE klass, ID mid, VALUE bop)
904 NODE *node;
905 if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&node) &&
906 nd_type(node->nd_body->nd_body) == NODE_CFUNC) {
907 st_insert(vm_opt_method_table, (st_data_t)node, (st_data_t)bop);
909 else {
910 rb_bug("undefined optimized method: %s", rb_id2name(mid));
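/* vm_init_redefined_flag(): remember the C method bodies implementing basic
 * operations (Fixnum#+, Array#[], ...) in vm_opt_method_table.  If one of
 * them is later redefined, rb_vm_check_redefinition_opt_method() sets the
 * matching BOP_* bit in ruby_vm_redefined_flag so the specialized VM
 * instructions fall back to ordinary method dispatch.  The OP/C macros below
 * expand roughly as:
 *   OP(PLUS, PLUS), (C(Fixnum), C(Float));
 *     => (mid = idPLUS, bop = BOP_PLUS),
 *        (add_opt_method(rb_cFixnum, mid, bop),
 *         add_opt_method(rb_cFloat, mid, bop));
 */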
914 static void
915 vm_init_redefined_flag(void)
917 ID mid;
918 VALUE bop;
920 vm_opt_method_table = st_init_numtable();
922 #define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_)
923 #define C(k) add_opt_method(rb_c##k, mid, bop)
924 OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
925 OP(MINUS, MINUS), (C(Fixnum));
926 OP(MULT, MULT), (C(Fixnum), C(Float));
927 OP(DIV, DIV), (C(Fixnum), C(Float));
928 OP(MOD, MOD), (C(Fixnum), C(Float));
929 OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
930 OP(LT, LT), (C(Fixnum));
931 OP(LE, LE), (C(Fixnum));
932 OP(LTLT, LTLT), (C(String), C(Array));
933 OP(AREF, AREF), (C(Array), C(Hash));
934 OP(ASET, ASET), (C(Array), C(Hash));
935 OP(Length, LENGTH), (C(Array), C(String), C(Hash));
936 OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
937 OP(GT, GT), (C(Fixnum));
938 OP(GE, GE), (C(Fixnum));
939 #undef C
940 #undef OP
943 /* evaluator body */
945 #include "vm_evalbody.c"
/* finish
  VMe (h1) finish
    VM    finish F1 F2
      cfunc finish F1 F2 C1
        rb_funcall finish F1 F2 C1
      VMe   finish F1 F2 C1
        VM  finish F1 F2 C1 F3

  F1 - F3 : pushed by VM
  C1      : pushed by send insn (CFUNC)

  struct CONTROL_FRAME {
    VALUE *pc;                  // cfp[0], program counter
    VALUE *sp;                  // cfp[1], stack pointer
    VALUE *bp;                  // cfp[2], base pointer
    rb_iseq_t *iseq;            // cfp[3], iseq
    VALUE flag;                 // cfp[4], magic
    VALUE self;                 // cfp[5], self
    VALUE *lfp;                 // cfp[6], local frame pointer
    VALUE *dfp;                 // cfp[7], dynamic frame pointer
    rb_iseq_t * block_iseq;     // cfp[8], block iseq
    VALUE proc;                 // cfp[9], always 0
  };

  struct BLOCK {
    VALUE self;
    VALUE *lfp;
    VALUE *dfp;
    rb_iseq_t *block_iseq;
    VALUE proc;
  };

  struct METHOD_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct METHOD_FRAME {
    VALUE arg0;
    ...
    VALUE argM;
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE special;                         // lfp [1]
    struct block_object *block_ptr | 0x01; // lfp [0]
  };

  struct BLOCK_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct BLOCK_FRAME {
    VALUE arg0;
    ...
    VALUE argM;
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE *(prev_ptr | 0x01); // DFP[0]
  };

  struct CLASS_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct CLASS_FRAME {
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE prev_dfp; // for frame jump
  };

  struct C_METHOD_CONTROL_FRAME {
    VALUE *pc;                       // 0
    VALUE *sp;                       // stack pointer
    VALUE *bp;                       // base pointer (used in exception)
    rb_iseq_t *iseq;                 // cmi
    VALUE magic;                     // C_METHOD_FRAME
    VALUE self;                      // ?
    VALUE *lfp;                      // lfp
    VALUE *dfp;                      // == lfp
    rb_iseq_t * block_iseq;          //
    VALUE proc;                      // always 0
  };

  struct C_BLOCK_CONTROL_FRAME {
    VALUE *pc;                       // point only "finish" insn
    VALUE *sp;                       // sp
    rb_iseq_t *iseq;                 // ?
    VALUE magic;                     // C_METHOD_FRAME
    VALUE self;                      // needed?
    VALUE *lfp;                      // lfp
    VALUE *dfp;                      // lfp
    rb_iseq_t * block_iseq;          // 0
  };
 */
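/* vm_eval_body(): run the interpreter loop for the frames on this thread and
 * handle non-local transfers.  When a tag (raise/break/return/redo/next/retry)
 * escapes vm_eval(), the handler below searches each frame's catch table for a
 * matching rescue/ensure/retry/break/... entry, pushes a block frame for the
 * catch iseq (or adjusts pc/sp) and re-enters the loop; if no frame handles it
 * before the `finish` frame, the tag is re-thrown to the surrounding C code. */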
1048 static VALUE
1049 vm_eval_body(rb_thread_t *th)
1051 int state;
1052 VALUE result, err;
1053 VALUE initial = 0;
1054 VALUE *escape_dfp = NULL;
1056 TH_PUSH_TAG(th);
1057 _tag.retval = Qnil;
1058 if ((state = EXEC_TAG()) == 0) {
1059 vm_loop_start:
1060 result = vm_eval(th, initial);
1061 if ((state = th->state) != 0) {
1062 err = result;
1063 th->state = 0;
1064 goto exception_handler;
1067 else {
1068 int i;
1069 struct iseq_catch_table_entry *entry;
1070 unsigned long epc, cont_pc, cont_sp;
1071 VALUE catch_iseqval;
1072 rb_control_frame_t *cfp;
1073 VALUE type;
1075 err = th->errinfo;
1077 if (state == TAG_RAISE) {
1078 if (OBJ_FROZEN(err)) rb_exc_raise(err);
1079 rb_ivar_set(err, idThrowState, INT2FIX(state));
1082 exception_handler:
1083 cont_pc = cont_sp = catch_iseqval = 0;
1085 while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
1086 th->cfp++;
1089 cfp = th->cfp;
1090 epc = cfp->pc - cfp->iseq->iseq_encoded;
1092 if (state == TAG_BREAK || state == TAG_RETURN) {
1093 escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
1095 if (cfp->dfp == escape_dfp) {
1096 if (state == TAG_RETURN) {
1097 if ((cfp + 1)->pc != &finish_insn_seq[0]) {
1098 SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
1099 SET_THROWOBJ_STATE(err, state = TAG_BREAK);
1101 else {
1102 result = GET_THROWOBJ_VAL(err);
1103 th->errinfo = Qnil;
1104 th->cfp += 2;
1105 goto finish_vme;
1107 /* through */
1109 else {
1110 /* TAG_BREAK */
1111 #if OPT_STACK_CACHING
1112 initial = (GET_THROWOBJ_VAL(err));
1113 #else
1114 *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
1115 #endif
1116 th->errinfo = Qnil;
1117 goto vm_loop_start;
1122 if (state == TAG_RAISE) {
1123 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
1124 entry = &cfp->iseq->catch_table[i];
1125 if (entry->start < epc && entry->end >= epc) {
1127 if (entry->type == CATCH_TYPE_RESCUE ||
1128 entry->type == CATCH_TYPE_ENSURE) {
1129 catch_iseqval = entry->iseq;
1130 cont_pc = entry->cont;
1131 cont_sp = entry->sp;
1132 break;
1137 else if (state == TAG_RETRY) {
1138 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
1139 entry = &cfp->iseq->catch_table[i];
1140 if (entry->start < epc && entry->end >= epc) {
1142 if (entry->type == CATCH_TYPE_ENSURE) {
1143 catch_iseqval = entry->iseq;
1144 cont_pc = entry->cont;
1145 cont_sp = entry->sp;
1146 break;
1148 else if (entry->type == CATCH_TYPE_RETRY) {
1149 VALUE *escape_dfp;
1150 escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
1151 if (cfp->dfp == escape_dfp) {
1152 cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
1153 th->errinfo = Qnil;
1154 goto vm_loop_start;
1160 else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
1161 type = CATCH_TYPE_BREAK;
1163 search_restart_point:
1164 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
1165 entry = &cfp->iseq->catch_table[i];
1167 if (entry->start < epc && entry->end >= epc) {
1168 if (entry->type == CATCH_TYPE_ENSURE) {
1169 catch_iseqval = entry->iseq;
1170 cont_pc = entry->cont;
1171 cont_sp = entry->sp;
1172 break;
1174 else if (entry->type == type) {
1175 cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
1176 cfp->sp = cfp->bp + entry->sp;
1178 if (state != TAG_REDO) {
1179 #if OPT_STACK_CACHING
1180 initial = (GET_THROWOBJ_VAL(err));
1181 #else
1182 *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
1183 #endif
1185 th->errinfo = Qnil;
1186 goto vm_loop_start;
1191 else if (state == TAG_REDO) {
1192 type = CATCH_TYPE_REDO;
1193 goto search_restart_point;
1195 else if (state == TAG_NEXT) {
1196 type = CATCH_TYPE_NEXT;
1197 goto search_restart_point;
1199 else {
1200 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
1201 entry = &cfp->iseq->catch_table[i];
1202 if (entry->start < epc && entry->end >= epc) {
1204 if (entry->type == CATCH_TYPE_ENSURE) {
1205 catch_iseqval = entry->iseq;
1206 cont_pc = entry->cont;
1207 cont_sp = entry->sp;
1208 break;
1214 if (catch_iseqval != 0) {
1215 /* found catch table */
1216 rb_iseq_t *catch_iseq;
1218 /* enter catch scope */
1219 GetISeqPtr(catch_iseqval, catch_iseq);
1220 cfp->sp = cfp->bp + cont_sp;
1221 cfp->pc = cfp->iseq->iseq_encoded + cont_pc;
1223 /* push block frame */
1224 cfp->sp[0] = err;
1225 vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
1226 cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
1227 cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);
1229 state = 0;
1230 th->errinfo = Qnil;
1231 goto vm_loop_start;
1233 else {
1234 th->cfp++;
1235 if (th->cfp->pc != &finish_insn_seq[0]) {
1236 goto exception_handler;
1238 else {
1239 vm_pop_frame(th);
1240 th->errinfo = err;
1241 TH_POP_TAG2();
1242 JUMP_TAG(state);
1246 finish_vme:
1247 TH_POP_TAG();
1248 return result;
1251 /* misc */
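/* rb_iseq_eval(): push a TOP frame for `iseqval` on the current thread,
 * make sure TOPLEVEL_BINDING exists, and evaluate it via vm_eval_body(). */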
1253 VALUE
1254 rb_iseq_eval(VALUE iseqval)
1256 rb_thread_t *th = GET_THREAD();
1257 VALUE val;
1258 volatile VALUE tmp;
1260 vm_set_top_stack(th, iseqval);
1262 if (!rb_const_defined(rb_cObject, rb_intern("TOPLEVEL_BINDING"))) {
1263 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
1265 val = vm_eval_body(th);
1266 tmp = iseqval; /* prohibit tail call optimization */
1267 return val;
1271 rb_thread_method_id_and_class(rb_thread_t *th,
1272 ID *idp, VALUE *klassp)
1274 rb_control_frame_t *cfp = th->cfp;
1275 rb_iseq_t *iseq = cfp->iseq;
1276 if (!iseq) {
1277 if (idp) *idp = cfp->method_id;
1278 if (klassp) *klassp = cfp->method_class;
1279 return 1;
1281 while (iseq) {
1282 if (RUBY_VM_IFUNC_P(iseq)) {
1283 if (idp) CONST_ID(*idp, "<ifunc>");
1284 if (klassp) *klassp = 0;
1285 return 1;
1287 if (iseq->defined_method_id) {
1288 if (idp) *idp = iseq->defined_method_id;
1289 if (klassp) *klassp = iseq->klass;
1290 return 1;
1292 if (iseq->local_iseq == iseq) {
1293 break;
1295 iseq = iseq->parent_iseq;
1297 return 0;
1301 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
1303 return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
1306 VALUE
1307 rb_thread_current_status(const rb_thread_t *th)
1309 const rb_control_frame_t *cfp = th->cfp;
1310 VALUE str = Qnil;
1312 if (cfp->iseq != 0) {
1313 if (cfp->pc != 0) {
1314 rb_iseq_t *iseq = cfp->iseq;
1315 int line_no = vm_get_sourceline(cfp);
1316 char *file = RSTRING_PTR(iseq->filename);
1317 str = rb_sprintf("%s:%d:in `%s'",
1318 file, line_no, RSTRING_PTR(iseq->name));
1321 else if (cfp->method_id) {
1322 str = rb_sprintf("`%s#%s' (cfunc)",
1323 RSTRING_PTR(rb_class_name(cfp->method_class)),
1324 rb_id2name(cfp->method_id));
1327 return str;
1330 VALUE
1331 rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
1332 const rb_block_t *blockptr, VALUE filename)
1334 rb_thread_t *th = GET_THREAD();
1335 const rb_control_frame_t *reg_cfp = th->cfp;
1336 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, 0, ISEQ_TYPE_TOP);
1337 VALUE val;
1339 vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
1340 recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);
1342 val = (*func)(arg);
1344 vm_pop_frame(th);
1345 return val;
1348 /* vm */
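/* vm_free()/rb_vm_mark(): dfree/dmark functions for the VM wrapper object.
 * Marking walks every living thread plus the per-VM references (load path,
 * loaded features, top self, special exceptions, loading table, hooks). */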
1350 static void
1351 vm_free(void *ptr)
1353 RUBY_FREE_ENTER("vm");
1354 if (ptr) {
1355 rb_vm_t *vmobj = ptr;
1357 st_free_table(vmobj->living_threads);
1358 vmobj->living_threads = 0;
1359 /* TODO: MultiVM Instance */
1360 /* VM object should not be cleaned by GC */
1361 /* ruby_xfree(ptr); */
1362 /* ruby_current_vm = 0; */
1364 RUBY_FREE_LEAVE("vm");
1367 static int
1368 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
1370 VALUE thval = (VALUE)key;
1371 rb_gc_mark(thval);
1372 return ST_CONTINUE;
1375 static void
1376 mark_event_hooks(rb_event_hook_t *hook)
1378 while (hook) {
1379 rb_gc_mark(hook->data);
1380 hook = hook->next;
1384 void
1385 rb_vm_mark(void *ptr)
1387 RUBY_MARK_ENTER("vm");
1388 RUBY_GC_INFO("-------------------------------------------------\n");
1389 if (ptr) {
1390 rb_vm_t *vm = ptr;
1391 if (vm->living_threads) {
1392 st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
1394 RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
1395 RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
1396 RUBY_MARK_UNLESS_NULL(vm->load_path);
1397 RUBY_MARK_UNLESS_NULL(vm->loaded_features);
1398 RUBY_MARK_UNLESS_NULL(vm->top_self);
1399 RUBY_MARK_UNLESS_NULL(vm->coverages);
1400 rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);
1402 if (vm->loading_table) {
1403 rb_mark_tbl(vm->loading_table);
1406 mark_event_hooks(vm->event_hooks);
1409 RUBY_MARK_LEAVE("vm");
1412 static void
1413 vm_init2(rb_vm_t *vm)
1415 MEMZERO(vm, rb_vm_t, 1);
1416 vm->src_encoding_index = -1;
1419 /* Thread */
1421 #define USE_THREAD_DATA_RECYCLE 1
1423 #if USE_THREAD_DATA_RECYCLE
1424 #define RECYCLE_MAX 64
1425 VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
1426 int thread_recycle_stack_count = 0;
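/* Recycle up to RECYCLE_MAX freed VM stacks instead of returning them to the
 * allocator, to cheapen frequent thread creation.  The cache is not locked
 * here; presumably it relies on the global VM lock for exclusion. */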
1428 static VALUE *
1429 thread_recycle_stack(int size)
1431 if (thread_recycle_stack_count) {
1432 return thread_recycle_stack_slot[--thread_recycle_stack_count];
1434 else {
1435 return ALLOC_N(VALUE, size);
1439 #else
1440 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
1441 #endif
1443 void
1444 rb_thread_recycle_stack_release(VALUE *stack)
1446 #if USE_THREAD_DATA_RECYCLE
1447 if (thread_recycle_stack_count < RECYCLE_MAX) {
1448 thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
1449 return;
1451 #endif
1452 ruby_xfree(stack);
1455 #ifdef USE_THREAD_RECYCLE
1456 static rb_thread_t *
1457 thread_recycle_struct(void)
1459 void *p = ALLOC_N(rb_thread_t, 1);
1460 memset(p, 0, sizeof(rb_thread_t));
1461 return p;
1463 #endif
1465 static void
1466 thread_free(void *ptr)
1468 rb_thread_t *th;
1469 RUBY_FREE_ENTER("thread");
1471 if (ptr) {
1472 th = ptr;
1474 if (!th->root_fiber) {
1475 RUBY_FREE_UNLESS_NULL(th->stack);
1478 if (th->locking_mutex != Qfalse) {
1479 rb_bug("thread_free: locking_mutex must be NULL (%p:%ld)", th, th->locking_mutex);
1481 if (th->keeping_mutexes != NULL) {
1482 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%ld)", th, th->locking_mutex);
1485 if (th->local_storage) {
1486 st_free_table(th->local_storage);
1489 #if USE_VALUE_CACHE
1491 VALUE *ptr = th->value_cache_ptr;
1492 while (*ptr) {
1493 VALUE v = *ptr;
1494 RBASIC(v)->flags = 0;
1495 RBASIC(v)->klass = 0;
1496 ptr++;
1499 #endif
1501 if (th->vm->main_thread == th) {
1502 RUBY_GC_INFO("main thread\n");
1504 else {
1505 ruby_xfree(ptr);
1508 RUBY_FREE_LEAVE("thread");
1511 void rb_gc_mark_machine_stack(rb_thread_t *th);
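/* rb_thread_mark(): GC-mark everything a thread keeps alive: the live part of
 * its VM value stack, each control frame's proc, assorted per-thread VALUEs
 * (errinfo, fibers, last_status, ...), its local storage table and, for
 * threads other than the current one, the saved machine stack and registers. */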
1513 void
1514 rb_thread_mark(void *ptr)
1516 rb_thread_t *th = NULL;
1517 RUBY_MARK_ENTER("thread");
1518 if (ptr) {
1519 th = ptr;
1520 if (th->stack) {
1521 VALUE *p = th->stack;
1522 VALUE *sp = th->cfp->sp;
1523 rb_control_frame_t *cfp = th->cfp;
1524 rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);
1526 while (p < sp) {
1527 rb_gc_mark(*p++);
1529 rb_gc_mark_locations(p, p + th->mark_stack_len);
1531 while (cfp != limit_cfp) {
1532 rb_gc_mark(cfp->proc);
1533 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1537 /* mark ruby objects */
1538 RUBY_MARK_UNLESS_NULL(th->first_proc);
1539 if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);
1541 RUBY_MARK_UNLESS_NULL(th->thgroup);
1542 RUBY_MARK_UNLESS_NULL(th->value);
1543 RUBY_MARK_UNLESS_NULL(th->errinfo);
1544 RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
1545 RUBY_MARK_UNLESS_NULL(th->local_svar);
1546 RUBY_MARK_UNLESS_NULL(th->top_self);
1547 RUBY_MARK_UNLESS_NULL(th->top_wrapper);
1548 RUBY_MARK_UNLESS_NULL(th->fiber);
1549 RUBY_MARK_UNLESS_NULL(th->root_fiber);
1550 RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
1551 RUBY_MARK_UNLESS_NULL(th->last_status);
1553 RUBY_MARK_UNLESS_NULL(th->locking_mutex);
1555 rb_mark_tbl(th->local_storage);
1557 if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
1558 rb_gc_mark_machine_stack(th);
1559 rb_gc_mark_locations((VALUE *)&th->machine_regs,
1560 (VALUE *)(&th->machine_regs) +
1561 sizeof(th->machine_regs) / sizeof(VALUE));
1564 mark_event_hooks(th->event_hooks);
1567 RUBY_MARK_LEAVE("thread");
1570 static VALUE
1571 thread_alloc(VALUE klass)
1573 VALUE volatile obj;
1574 #ifdef USE_THREAD_RECYCLE
1575 rb_thread_t *th = thread_recycle_struct();
1576 obj = Data_Wrap_Struct(klass, rb_thread_mark, thread_free, th);
1577 #else
1578 rb_thread_t *th;
1579 obj = Data_Make_Struct(klass, rb_thread_t, rb_thread_mark, thread_free, th);
1580 #endif
1581 return obj;
1584 static void
1585 th_init2(rb_thread_t *th, VALUE self)
1587 th->self = self;
1589 /* allocate thread stack */
1590 th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
1591 th->stack = thread_recycle_stack(th->stack_size);
1593 th->cfp = (void *)(th->stack + th->stack_size);
1595 vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
1596 th->stack, 0, 1);
1598 th->status = THREAD_RUNNABLE;
1599 th->errinfo = Qnil;
1600 th->last_status = Qnil;
1602 #if USE_VALUE_CACHE
1603 th->value_cache_ptr = &th->value_cache[0];
1604 #endif
1607 static void
1608 th_init(rb_thread_t *th, VALUE self)
1610 th_init2(th, self);
1613 static VALUE
1614 ruby_thread_init(VALUE self)
1616 rb_thread_t *th;
1617 rb_vm_t *vm = GET_THREAD()->vm;
1618 GetThreadPtr(self, th);
1620 th_init(th, self);
1621 th->vm = vm;
1623 th->top_wrapper = 0;
1624 th->top_self = rb_vm_top_self();
1625 return self;
1628 VALUE
1629 rb_thread_alloc(VALUE klass)
1631 VALUE self = thread_alloc(klass);
1632 ruby_thread_init(self);
1633 return self;
1636 static void
1637 vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
1638 rb_num_t is_singleton, NODE *cref)
1640 NODE *newbody;
1641 VALUE klass = cref->nd_clss;
1642 int noex = cref->nd_visi;
1643 rb_iseq_t *miseq;
1644 GetISeqPtr(iseqval, miseq);
1646 if (NIL_P(klass)) {
1647 rb_raise(rb_eTypeError, "no class/module to add method");
1650 if (is_singleton) {
1651 if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
1652 rb_raise(rb_eTypeError,
1653 "can't define singleton method \"%s\" for %s",
1654 rb_id2name(id), rb_obj_classname(obj));
1657 if (OBJ_FROZEN(obj)) {
1658 rb_error_frozen("object");
1661 klass = rb_singleton_class(obj);
1662 noex = NOEX_PUBLIC;
1665 /* dup */
1666 COPY_CREF(miseq->cref_stack, cref);
1667 miseq->klass = klass;
1668 miseq->defined_method_id = id;
1669 newbody = NEW_NODE(RUBY_VM_METHOD_NODE, 0, miseq->self, 0);
1670 rb_add_method(klass, id, newbody, noex);
1672 if (!is_singleton && noex == NOEX_MODFUNC) {
1673 rb_add_method(rb_singleton_class(klass), id, newbody, NOEX_PUBLIC);
1675 INC_VM_STATE_VERSION();
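/* REWIND_CFP(): temporarily pop the current control frame (the one pushed for
 * the core method call itself) so that `expr` runs in the context of the
 * calling frame; used by the m_core_* primitives below. */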
1678 #define REWIND_CFP(expr) do { \
1679 rb_thread_t *th__ = GET_THREAD(); \
1680 th__->cfp++; expr; th__->cfp--; \
1681 } while (0)
1683 static VALUE
1684 m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
1686 REWIND_CFP({
1687 vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, vm_cref());
1689 return Qnil;
1692 static VALUE
1693 m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
1695 REWIND_CFP({
1696 vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, vm_cref());
1698 return Qnil;
1701 static VALUE
1702 m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
1704 REWIND_CFP({
1705 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
1707 return Qnil;
1710 static VALUE
1711 m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
1713 REWIND_CFP({
1714 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
1716 return Qnil;
1719 static VALUE
1720 m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
1722 REWIND_CFP({
1723 rb_undef(cbase, SYM2ID(sym));
1724 INC_VM_STATE_VERSION();
1726 return Qnil;
1729 static VALUE
1730 m_core_set_postexe(VALUE self, VALUE iseqval)
1732 REWIND_CFP({
1733 rb_iseq_t *blockiseq;
1734 rb_block_t *blockptr;
1735 rb_thread_t *th = GET_THREAD();
1736 rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
1737 VALUE proc;
1738 extern void rb_call_end_proc(VALUE data);
1740 GetISeqPtr(iseqval, blockiseq);
1742 blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
1743 blockptr->iseq = blockiseq;
1744 blockptr->proc = 0;
1746 proc = vm_make_proc(th, cfp, blockptr);
1747 rb_set_end_proc(rb_call_end_proc, proc);
1749 return Qnil;
1752 VALUE insns_name_array(void);
1753 extern VALUE *rb_gc_stack_start;
1754 extern size_t rb_gc_stack_maxsize;
1755 #ifdef __ia64
1756 extern VALUE *rb_gc_register_stack_start;
1757 #endif
1759 /* debug functions */
1761 static VALUE
1762 sdr(void)
1764 rb_vm_bugreport();
1765 return Qnil;
1768 static VALUE
1769 nsdr(void)
1771 VALUE ary = rb_ary_new();
1772 #if HAVE_BACKTRACE
1773 #include <execinfo.h>
1774 #define MAX_NATIVE_TRACE 1024
1775 static void *trace[MAX_NATIVE_TRACE];
1776 int n = backtrace(trace, MAX_NATIVE_TRACE);
1777 char **syms = backtrace_symbols(trace, n);
1778 int i;
1780 if (syms == 0) {
1781 rb_memerror();
1784 for (i=0; i<n; i++) {
1785 rb_ary_push(ary, rb_str_new2(syms[i]));
1787 free(syms); /* OK */
1788 #endif
1789 return ary;
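/* Init_VM(): define RubyVM, RubyVM::Env and Thread, set up the frozen core
 * object whose singleton methods back compiled define_method/alias/undef
 * constructs, expose the build-time OPTS/INSTRUCTION_NAMES constants, and
 * finish bootstrapping the main thread with a dummy toplevel iseq. */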
1792 void
1793 Init_VM(void)
1795 VALUE opts;
1796 VALUE klass;
1797 VALUE fcore;
1799 /* ::VM */
1800 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
1801 rb_undef_alloc_func(rb_cRubyVM);
1803 /* ::VM::FrozenCore */
1804 fcore = rb_class_new(rb_cBasicObject);
1805 RBASIC(fcore)->flags = T_ICLASS;
1806 klass = rb_singleton_class(fcore);
1807 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
1808 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
1809 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
1810 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
1811 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
1812 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
1813 rb_obj_freeze(fcore);
1814 rb_mRubyVMFrozenCore = fcore;
1816 /* ::VM::Env */
1817 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
1818 rb_undef_alloc_func(rb_cEnv);
1820 /* ::Thread */
1821 rb_cThread = rb_define_class("Thread", rb_cObject);
1822 rb_undef_alloc_func(rb_cThread);
1824 /* ::VM::USAGE_ANALYSIS_* */
1825 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
1826 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
1827 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
1828 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
1830 #if OPT_DIRECT_THREADED_CODE
1831 rb_ary_push(opts, rb_str_new2("direct threaded code"));
1832 #elif OPT_TOKEN_THREADED_CODE
1833 rb_ary_push(opts, rb_str_new2("token threaded code"));
1834 #elif OPT_CALL_THREADED_CODE
1835 rb_ary_push(opts, rb_str_new2("call threaded code"));
1836 #endif
1838 #if OPT_BASIC_OPERATIONS
1839 rb_ary_push(opts, rb_str_new2("optimize basic operation"));
1840 #endif
1842 #if OPT_STACK_CACHING
1843 rb_ary_push(opts, rb_str_new2("stack caching"));
1844 #endif
1845 #if OPT_OPERANDS_UNIFICATION
846 rb_ary_push(opts, rb_str_new2("operands unification"));
1847 #endif
1848 #if OPT_INSTRUCTIONS_UNIFICATION
1849 rb_ary_push(opts, rb_str_new2("instructions unification"));
1850 #endif
1851 #if OPT_INLINE_METHOD_CACHE
1852 rb_ary_push(opts, rb_str_new2("inline method cache"));
1853 #endif
1854 #if OPT_BLOCKINLINING
1855 rb_ary_push(opts, rb_str_new2("block inlining"));
1856 #endif
1858 /* ::VM::InsnNameArray */
1859 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", insns_name_array());
1861 /* debug functions ::VM::SDR(), ::VM::NSDR() */
1862 #if VMDEBUG
1863 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
1864 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
1865 #else
1866 (void)sdr;
1867 (void)nsdr;
1868 #endif
1870 /* VM bootstrap: phase 2 */
1872 rb_vm_t *vm = ruby_current_vm;
1873 rb_thread_t *th = GET_THREAD();
1874 VALUE filename = rb_str_new2("<dummy toplevel>");
1875 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, 0, ISEQ_TYPE_TOP);
1876 volatile VALUE th_self;
1877 rb_iseq_t *iseq;
1879 /* create vm object */
1880 vm->self = Data_Wrap_Struct(rb_cRubyVM, rb_vm_mark, vm_free, vm);
1882 /* create main thread */
1883 th_self = th->self = Data_Wrap_Struct(rb_cThread, rb_thread_mark, thread_free, th);
1884 vm->main_thread = th;
1885 vm->running_thread = th;
1886 th->vm = vm;
1887 th->top_wrapper = 0;
1888 th->top_self = rb_vm_top_self();
1889 rb_thread_set_current(th);
1891 vm->living_threads = st_init_numtable();
1892 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
1894 rb_register_mark_object(iseqval);
1895 GetISeqPtr(iseqval, iseq);
1896 th->cfp->iseq = iseq;
1897 th->cfp->pc = iseq->iseq_encoded;
1899 vm_init_redefined_flag();
1902 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
1903 struct rb_objspace *rb_objspace_alloc(void);
1904 #endif
1905 void ruby_thread_init_stack(rb_thread_t *th);
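/* Init_BareVM(): phase-1 bootstrap.  Allocates the rb_vm_t and the main
 * rb_thread_t with plain malloc (no Ruby objects exist yet), initializes them
 * and installs them as the current VM/thread; Init_VM() completes the job
 * later, once the object system is up. */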
1907 void
1908 Init_BareVM(void)
1910 /* VM bootstrap: phase 1 */
1911 rb_vm_t * vm = malloc(sizeof(*vm));
1912 rb_thread_t * th = malloc(sizeof(*th));
1913 if (!vm || !th) {
1914 fprintf(stderr, "[FATAL] failed to allocate memory\n");
1915 exit(EXIT_FAILURE);
1917 MEMZERO(th, rb_thread_t, 1);
1919 rb_thread_set_current_raw(th);
1921 vm_init2(vm);
1922 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
1923 vm->objspace = rb_objspace_alloc();
1924 #endif
1925 ruby_current_vm = vm;
1927 th_init2(th, 0);
1928 th->vm = vm;
1929 ruby_thread_init_stack(th);
1932 /* top self */
1934 static VALUE
1935 main_to_s(VALUE obj)
1937 return rb_str_new2("main");
1940 VALUE
1941 rb_vm_top_self(void)
1943 return GET_VM()->top_self;
1946 void
1947 Init_top_self(void)
1949 rb_vm_t *vm = GET_VM();
1951 vm->top_self = rb_obj_alloc(rb_cObject);
1952 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
1955 VALUE *
1956 ruby_vm_verbose_ptr(rb_vm_t *vm)
1958 return &vm->verbose;
1961 VALUE *
1962 ruby_vm_debug_ptr(rb_vm_t *vm)
1964 return &vm->debug;
1967 VALUE *
1968 rb_ruby_verbose_ptr(void)
1970 return ruby_vm_verbose_ptr(GET_VM());
1973 VALUE *
1974 rb_ruby_debug_ptr(void)
1976 return ruby_vm_debug_ptr(GET_VM());