/*
Copyright (C) 2001-2006, The Perl Foundation.
$Id$

=head1 NAME

src/interpreter.c - Parrot Interpreter

=head1 DESCRIPTION

The interpreter API handles running the operations.

The predereferenced code chunk is pre-initialized with the opcode
function pointers, addresses, or opnumbers of the C<prederef__>
opcode. This opcode then calls the C<do_prederef()> function, which
fills in the real function, address, or op number.

Since the C<prederef__> opcode returns the same C<pc_prederef> it was
passed, the runops loop re-executes the same location, which by then
holds the pointer to the real C<prederef> opfunc and C<prederef>
args.

Pointer arithmetic is used to determine the index into the bytecode
corresponding to the current opcode. The bytecode and prederef arrays
have the same number of elements because there is a one-to-one mapping.
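
The following self-contained sketch models that trampoline scheme. It is
illustrative only; the names and types here are not Parrot's, but the idea
is the same: every slot of the predereferenced array starts out holding a
stub which, when executed, patches its own slot with the real opfunc and
returns the same position, so the dispatch loop simply re-executes it.

    // Illustrative model only -- not Parrot's data structures or API.
    #include <stdio.h>

    typedef void **(*slot_fn)(void **slot);

    static void **op_print(void **slot)       // a "real" opfunc
    {
        printf("executing op at slot %p\n", (void *)slot);
        return slot + 1;                      // advance to the next op
    }

    static void **prederef_stub(void **slot)  // plays the prederef__ role
    {
        *slot = (void *)op_print;             // fill in the real opfunc
        return slot;                          // same slot: loop re-runs it
    }

    int main(void)
    {
        void  *code[3] = { (void *)prederef_stub, (void *)prederef_stub, NULL };
        void **pc      = code;
        while (*pc)                           // minimal runops loop
            pc = ((slot_fn)*pc)(pc);
        return 0;
    }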
=head2 Functions

=over 4

=cut

*/
#include <assert.h>
#include "parrot/parrot.h"
#include "interp_guts.h"
#include "parrot/oplib/core_ops.h"
#include "parrot/oplib/core_ops_switch.h"
#include "parrot/oplib/ops.h"
#include "runops_cores.h"
#if JIT_CAPABLE
#  include "parrot/exec.h"
#  include "jit.h"
#endif
#ifdef HAVE_COMPUTED_GOTO
#  include "parrot/oplib/core_ops_cg.h"
#  include "parrot/oplib/core_ops_cgp.h"
#endif
#include "parrot/dynext.h"

void Parrot_setup_event_func_ptrs(Parrot_Interp interpreter);
/*

=item C<static void
prederef_args(void **pc_prederef, Interp *interpreter,
        opcode_t *pc, op_info_t *opinfo)>

Called from C<do_prederef()> to deal with any arguments.

C<pc_prederef> is the current opcode.

=cut

*/
static void
prederef_args(void **pc_prederef, Interp *interpreter,
        opcode_t *pc, op_info_t *opinfo)
{
    struct PackFile_ConstTable * const_table
        = interpreter->code->const_table;
    int i, n, m, regs_n, regs_i, regs_p, regs_s;
    PMC *sig = NULL;

    regs_n = CONTEXT(interpreter->ctx)->n_regs_used[REGNO_NUM];
    regs_i = CONTEXT(interpreter->ctx)->n_regs_used[REGNO_INT];
    regs_p = CONTEXT(interpreter->ctx)->n_regs_used[REGNO_PMC];
    regs_s = CONTEXT(interpreter->ctx)->n_regs_used[REGNO_STR];

    /* prederef var part too */
    n = m = opinfo->op_count;
    ADD_OP_VAR_PART(interpreter, interpreter->code, pc, n);
    for (i = 1; i < n; i++) {
        opcode_t arg = pc[i];
        int type;
        if (i >= m) {
            sig = (PMC*) pc_prederef[1];
            type = SIG_ITEM(sig, i - m);
            type &= (PARROT_ARG_TYPE_MASK | PARROT_ARG_CONSTANT);
        }
        else
            type = opinfo->types[i - 1];

        switch (type) {

            case PARROT_ARG_KI:
            case PARROT_ARG_I:
                if (arg < 0 || arg >= regs_i)
                    internal_exception(INTERP_ERROR, "Illegal register number");
                pc_prederef[i] = (void *)REG_OFFS_INT(arg);
                break;

            case PARROT_ARG_N:
                if (arg < 0 || arg >= regs_n)
                    internal_exception(INTERP_ERROR, "Illegal register number");
                pc_prederef[i] = (void *)REG_OFFS_NUM(arg);
                break;

            case PARROT_ARG_K:
            case PARROT_ARG_P:
                if (arg < 0 || arg >= regs_p)
                    internal_exception(INTERP_ERROR, "Illegal register number");
                pc_prederef[i] = (void *)REG_OFFS_PMC(arg);
                break;

            case PARROT_ARG_S:
                if (arg < 0 || arg >= regs_s)
                    internal_exception(INTERP_ERROR, "Illegal register number");
                pc_prederef[i] = (void *)REG_OFFS_STR(arg);
                break;

            case PARROT_ARG_KIC:
            case PARROT_ARG_IC:
                pc_prederef[i] = (void *)pc[i];
                break;

            case PARROT_ARG_NC:
                if (arg < 0 || arg >= const_table->const_count)
                    internal_exception(INTERP_ERROR, "Illegal constant number");
                pc_prederef[i] = (void *) &const_table->constants[arg]->u.number;
                break;

            case PARROT_ARG_SC:
                if (arg < 0 || arg >= const_table->const_count)
                    internal_exception(INTERP_ERROR, "Illegal constant number");
                pc_prederef[i] = (void *)const_table->constants[arg]->u.string;
                break;

            case PARROT_ARG_PC:
            case PARROT_ARG_KC:
                if (arg < 0 || arg >= const_table->const_count)
                    internal_exception(INTERP_ERROR, "Illegal constant number");
                pc_prederef[i] = (void *)const_table->constants[arg]->u.key;
                break;

            default:
                internal_exception(ARG_OP_NOT_HANDLED,
                        "Unhandled argtype 0x%x\n", type);
                break;
        }
    }
}
/*

=item C<void
do_prederef(void **pc_prederef, Parrot_Interp interpreter, int type)>

This is called from within the run cores to predereference the current
opcode.

C<pc_prederef> is the current opcode, and C<type> is the run core type.
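
Recovering the bytecode position from the prederefed slot is plain pointer
arithmetic over the two parallel arrays, as in this fragment (mirroring the
first lines of the function below):

    size_t    offset = pc_prederef - interpreter->code->prederef.code;
    opcode_t *pc     = ((opcode_t *)interpreter->code->base.data) + offset;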
=cut

*/
void
do_prederef(void **pc_prederef, Parrot_Interp interpreter, int type)
{
    size_t offset = pc_prederef - interpreter->code->prederef.code;
    opcode_t *pc = ((opcode_t *)interpreter->code->base.data) + offset;
    op_func_t *prederef_op_func = interpreter->op_lib->op_func_table;
    op_info_t *opinfo;
    size_t n;

    if (*pc < 0 || *pc >= (opcode_t)interpreter->op_count)
        internal_exception(INTERP_ERROR, "Illegal opcode");
    opinfo = &interpreter->op_info_table[*pc];
    /* first arguments - PIC needs it */
    prederef_args(pc_prederef, interpreter, pc, opinfo);
    switch (type) {
        case PARROT_SWITCH_CORE:
        case PARROT_SWITCH_JIT_CORE:
        case PARROT_CGP_CORE:
        case PARROT_CGP_JIT_CORE:
            parrot_PIC_prederef(interpreter, *pc, pc_prederef, type);
            break;
        default:
            internal_exception(1, "Tried to prederef wrong core");
            break;
    }

    /*
     * now remember backward branches, invoke and similar opcodes
     */
    n = opinfo->op_count;
    if (((opinfo->jump & PARROT_JUMP_RELATIVE) &&
            opinfo->types[n - 2] == PARROT_ARG_IC &&
            pc[n - 1] < 0) ||               /* relative backward branch */
            (opinfo->jump & PARROT_JUMP_ADDRESS)) {
        Prederef *pi = &interpreter->code->prederef;
        /*
         * first time prederef.branches == NULL:
         * estimate size to 1/16th of opcodes
         */
        if (!pi->branches) {
            size_t nb = interpreter->code->base.size / 16;
            if (nb < 8)
                nb = (size_t)8;
            pi->branches = mem_sys_allocate(sizeof (Prederef_branch) * nb);
            pi->n_allocated = nb;
            pi->n_branches = 0;
        }
        else if (pi->n_branches >= pi->n_allocated) {
            pi->n_allocated = (size_t)(pi->n_allocated * 1.5);
            pi->branches = mem_sys_realloc(pi->branches,
                    sizeof (Prederef_branch) * pi->n_allocated);
        }
        pi->branches[pi->n_branches].offs = offset;
        pi->branches[pi->n_branches].op   = *pc_prederef;
        ++pi->n_branches;
    }
}
/*

=item C<static void
turn_ev_check(Parrot_Interp interpreter, int on)>

Turn event checking on or off for the prederefed cores.

Fills in the C<check_events__> opcode, or restores the original op, at all
branch locations of the opcode stream.

Note that when C<on> is true, this is being called from the event
handler thread.

=cut

*/
static void
turn_ev_check(Parrot_Interp interpreter, int on)
{
    Prederef *pi = &interpreter->code->prederef;
    size_t i, offs;

    if (!pi->branches)
        return;
    for (i = 0; i < pi->n_branches; ++i) {
        offs = pi->branches[i].offs;
        if (on) {
            interpreter->code->prederef.code[offs] =
                ((void **)interpreter->op_lib->op_func_table)
                    [CORE_OPS_check_events__];
        }
        else
            interpreter->code->prederef.code[offs] = pi->branches[i].op;
    }
}
/*

=item C<static oplib_init_f
get_op_lib_init(int core_op, int which, PMC *lib)>

Returns an opcode library's C<op_lib> init function.

C<core_op> indicates whether a core Parrot oplib is requested.

C<which> is the run core type.

For dynamic oplibs, C<core_op> is 0 and C<lib> is a
C<ParrotLibrary> PMC.
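
A typical use, as seen throughout this file: request the init function for
the current run core, then call it with 1 to obtain the C<op_lib_t>
descriptor.

    oplib_init_f init_func = get_op_lib_init(1, interpreter->run_core, NULL);
    op_lib_t    *lib       = init_func(1);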
=cut

*/
static oplib_init_f
get_op_lib_init(int core_op, int which, PMC *lib)
{
    oplib_init_f init_func = (oplib_init_f)NULL;

    if (core_op) {
        switch (which) {
            case PARROT_SWITCH_CORE:
            case PARROT_SWITCH_JIT_CORE:
                init_func = PARROT_CORE_SWITCH_OPLIB_INIT;
                break;
#ifdef HAVE_COMPUTED_GOTO
            case PARROT_CGP_CORE:
            case PARROT_CGP_JIT_CORE:
                init_func = PARROT_CORE_CGP_OPLIB_INIT;
                break;
            case PARROT_CGOTO_CORE:
                init_func = PARROT_CORE_CG_OPLIB_INIT;
                break;
#endif
            case PARROT_EXEC_CORE:      /* normal func core */
            case PARROT_JIT_CORE:       /* normal func core */
            case PARROT_SLOW_CORE:      /* normal func core */
            case PARROT_FAST_CORE:      /* normal func core */
                init_func = PARROT_CORE_OPLIB_INIT;
                break;
        }
        if (!init_func)
            internal_exception(1, "Couldn't find init_func for core %d", which);
        return init_func;
    }
    return (oplib_init_f) D2FPTR(PMC_struct_val(lib));
}
/*

=item C<static void
load_prederef(Interp *interpreter, int which)>

Sets C<< interpreter->op_lib >> to the prederefed oplib for run core
C<which>, preserving the C<op_code> lookup function.

=cut

*/
static void
load_prederef(Interp *interpreter, int which)
{
    oplib_init_f init_func = get_op_lib_init(1, which, NULL);
    int (*get_op)(const char *name, int full);

    get_op = interpreter->op_lib->op_code;
    interpreter->op_lib = init_func(1);
    /* preserve the get_op function */
    interpreter->op_lib->op_code = get_op;
    if (interpreter->op_lib->op_count != interpreter->op_count)
        internal_exception(PREDEREF_LOAD_ERROR,
                "Illegal op count (%d) in prederef oplib\n",
                (int)interpreter->op_lib->op_count);
}
/*

=item C<static void
init_prederef(Interp *interpreter, int which)>

Initialize predereferencing: load the prederefed C<func_table> and fill
C<prederef.code>.

=cut

*/
static void
init_prederef(Interp *interpreter, int which)
{
    load_prederef(interpreter, which);
    if (!interpreter->code->prederef.code) {
        size_t N = interpreter->code->base.size;
        opcode_t *pc = interpreter->code->base.data;
        size_t i, n, n_pics;
        void *pred_func;
        op_info_t *opinfo;

        /* Parrot_memalign_if_possible on OpenBSD allocates 256 if you ask
         * for 312 -- need to verify this; it may have been a bug elsewhere.
         * If it works now, we can remove the mem_sys_allocate_zeroed line
         * below. */
#if 0
        void **temp = (void **)mem_sys_allocate_zeroed(N * sizeof(void *));
#else
        void **temp = (void **)Parrot_memalign_if_possible(256,
                N * sizeof(void *));
#endif
        /*
         * calc and remember pred_offset
         */
        CONTEXT(interpreter->ctx)->pred_offset = pc - (opcode_t *)temp;

        /* fill with the prederef__ opcode function */
        if (which == PARROT_SWITCH_CORE || which == PARROT_SWITCH_JIT_CORE)
            pred_func = (void *)CORE_OPS_prederef__;
        else
            pred_func = ((void **)
                    interpreter->op_lib->op_func_table)[CORE_OPS_prederef__];
        for (i = n_pics = 0; i < N; ) {
            opinfo = &interpreter->op_info_table[*pc];
            temp[i] = pred_func;
            n = opinfo->op_count;
            ADD_OP_VAR_PART(interpreter, interpreter->code, pc, n);
            /* count ops that need a PIC */
            if (parrot_PIC_op_is_cached(interpreter, *pc))
                n_pics++;
            pc += n;
            i += n;
        }
        interpreter->code->prederef.code = temp;
        /* allocate pic store */
        if (n_pics) {
            /* pic_index is starting from 1 */
            parrot_PIC_alloc_store(interpreter, interpreter->code, n_pics + 1);
        }
    }
}
/*

=item C<static void
stop_prederef(Interp *interpreter)>

Restore the interpreter's op function tables to their initial state,
and re-create the event function pointers. This is only necessary
for run-core changes, but we don't know the old run core.

=cut

*/
static void
stop_prederef(Interp *interpreter)
{
    interpreter->op_func_table = PARROT_CORE_OPLIB_INIT(1)->op_func_table;
    if (interpreter->evc_func_table) {
        mem_sys_free(interpreter->evc_func_table);
        interpreter->evc_func_table = NULL;
    }
    Parrot_setup_event_func_ptrs(interpreter);
}
#if EXEC_CAPABLE

/*

=item C<void
exec_init_prederef(Interp *interpreter, void *prederef_arena)>

Sets C<< interpreter->op_lib >> to the prederefed oplib.

The "normal" C<op_lib> has a copy in the interpreter structure, but the
C<op_code> lookup function must be taken from the standard core, because
the prederefed oplib has no C<op_info_table>.

=cut

*/
void
exec_init_prederef(Interp *interpreter, void *prederef_arena)
{
    load_prederef(interpreter, PARROT_CGP_CORE);

    if (!interpreter->code->prederef.code) {
        size_t N = interpreter->code->base.size;
        void **temp = prederef_arena;
        opcode_t *pc = interpreter->code->base.data;

        interpreter->code->prederef.code = temp;
        /* TODO */
    }
}

#endif
/*

=item C<void *
init_jit(Interp *interpreter, opcode_t *pc)>

Initializes JIT code for the interpreter's code segment and returns the
start of the generated code; returns C<NULL> if the interpreter is not
JIT capable.

=cut

*/
void *
init_jit(Interp *interpreter, opcode_t *pc)
{
#if JIT_CAPABLE
    opcode_t *code_start;
    UINTVAL code_size;          /* in opcodes */
    opcode_t *code_end;
    Parrot_jit_info_t *jit_info;

    if (interpreter->code->jit_info)
        return ((Parrot_jit_info_t *)interpreter->code->jit_info)->arena.start;

    code_start = interpreter->code->base.data;
    code_size  = interpreter->code->base.size;
    code_end   = code_start + code_size;
#  if defined HAVE_COMPUTED_GOTO && PARROT_I386_JIT_CGP
#    ifdef __GNUC__
#      ifdef PARROT_I386
    init_prederef(interpreter, PARROT_CGP_CORE);
#      endif
#    endif
#  endif

    interpreter->code->jit_info =
        jit_info = parrot_build_asm(interpreter, code_start, code_end,
                NULL, JIT_CODE_FILE);
    return jit_info->arena.start;
#else
    return NULL;
#endif
}
/*

=item C<void
prepare_for_run(Parrot_Interp interpreter)>

Prepares to run the interpreter's run core.

=cut

*/
void
prepare_for_run(Parrot_Interp interpreter)
{
    switch (interpreter->run_core) {
        case PARROT_JIT_CORE:
            (void) init_jit(interpreter, interpreter->code->base.data);
            break;
        case PARROT_SWITCH_CORE:
        case PARROT_SWITCH_JIT_CORE:
        case PARROT_CGP_CORE:
        case PARROT_CGP_JIT_CORE:
            init_prederef(interpreter, interpreter->run_core);
            break;
        default:
            break;
    }
}

#ifdef PARROT_EXEC_OS_AIX
extern void *aix_get_toc( );
#endif
/*

=item C<static opcode_t *
runops_jit(Interp *interpreter, opcode_t *pc)>

Runs the JIT code for the specified opcode.

=cut

*/
static opcode_t *
runops_jit(Interp *interpreter, opcode_t *pc)
{
#if JIT_CAPABLE
#  ifdef PARROT_EXEC_OS_AIX
    /* AIX calling convention requires that function-call-by-ptr be made
     * through the following struct: */
    struct { jit_f functPtr; void *toc; void *env; } ptrgl_t;

    ptrgl_t.functPtr = (jit_f) D2FPTR(init_jit(interpreter, pc));
    ptrgl_t.env      = NULL;

    /* r2 (TOC) needs to point back here so we can return from non-JIT
     * functions */
    ptrgl_t.toc = aix_get_toc( );

    ((jit_f) D2FPTR(&ptrgl_t)) (interpreter, pc);
#  else
    jit_f jit_code = (jit_f) D2FPTR(init_jit(interpreter, pc));
    (jit_code) (interpreter, pc);
#  endif
#endif
    return NULL;
}
/*

=item C<static opcode_t *
runops_exec(Interp *interpreter, opcode_t *pc)>

Runs the native executable version of the specified opcode.

=cut

*/
static opcode_t *
runops_exec(Interp *interpreter, opcode_t *pc)
{
#if EXEC_CAPABLE
    opcode_t *code_start;
    UINTVAL code_size;          /* in opcodes */
    opcode_t *code_end;
    extern int Parrot_exec_run;

    code_start = interpreter->code->base.data;
    code_size  = interpreter->code->base.size;
    code_end   = code_start + code_size;
#  if defined HAVE_COMPUTED_GOTO && defined USE_CGP
#    ifdef __GNUC__
#      ifdef PARROT_I386
    init_prederef(interpreter, PARROT_CGP_CORE);
#      endif
#    endif
#  endif
    if (Parrot_exec_run == 2) {
        Parrot_exec_run = 0;
        Interp_core_SET(interpreter, PARROT_JIT_CORE);
        runops_jit(interpreter, pc);
        Interp_core_SET(interpreter, PARROT_EXEC_CORE);
    }
    else if (Parrot_exec_run == 1) {
        Parrot_exec(interpreter, pc, code_start, code_end);
    }
    else
        run_native(interpreter, pc, code_start);
#endif
    return NULL;
}
/*

=item C<static opcode_t *
runops_cgp(Interp *interpreter, opcode_t *pc)>

Runs the computed C<goto>, predereferenced (CGP) core.

=cut

*/
static opcode_t *
runops_cgp(Interp *interpreter, opcode_t *pc)
{
#ifdef HAVE_COMPUTED_GOTO
    opcode_t *code_start = (opcode_t *)interpreter->code->base.data;
    void **pc_prederef;

    init_prederef(interpreter, PARROT_CGP_CORE);
    pc_prederef = interpreter->code->prederef.code + (pc - code_start);
    pc = (opcode_t *) cgp_core(pc_prederef, interpreter);
    return pc;
#else
    PIO_eprintf(interpreter,
            "Computed goto unavailable in this configuration.\n");
    Parrot_exit(interpreter, 1);
    return NULL;
#endif
}
/*

=item C<static opcode_t *
runops_switch(Interp *interpreter, opcode_t *pc)>

Runs the C<switch> core.

=cut

*/
static opcode_t *
runops_switch(Interp *interpreter, opcode_t *pc)
{
    opcode_t *code_start = (opcode_t *)interpreter->code->base.data;
    void **pc_prederef;

    init_prederef(interpreter, PARROT_SWITCH_CORE);
    pc_prederef = interpreter->code->prederef.code + (pc - code_start);
    pc = (opcode_t *) switch_core(pc_prederef, interpreter);
    return pc;
}
/*

=item C<void
runops_int(Interp *interpreter, size_t offset)>

Runs the Parrot operations of the loaded code segment, starting at
C<offset>, until an C<end> opcode is reached. The run core is selected
according to the C<Interp_flags>; when a C<restart> opcode is encountered,
a different core may be selected and evaluation of opcodes continues.
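
A minimal calling sketch, assuming a code segment has already been loaded
into C<< interpreter->code >> (interpreter creation and teardown elided):

    prepare_for_run(interpreter);   // JIT or prederef setup for the chosen core
    runops_int(interpreter, 0);     // run from offset 0 until an end opcode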
=cut

*/
void
runops_int(Interp *interpreter, size_t offset)
{
    int lo_var_ptr;
    opcode_t *(*core) (Interp *, opcode_t *) =
        (opcode_t *(*) (Interp *, opcode_t *)) 0;

    if (!interpreter->lo_var_ptr) {
        /*
         * if we are entering the run loop the first time
         */
        interpreter->lo_var_ptr = (void *)&lo_var_ptr;
    }
    /*
     * setup event function ptrs
     */
    if (!interpreter->save_func_table) {
        Parrot_setup_event_func_ptrs(interpreter);
    }

    interpreter->resume_offset = offset;
    interpreter->resume_flag  |= RESUME_RESTART;

    while (interpreter->resume_flag & RESUME_RESTART) {
        opcode_t *pc = (opcode_t *)
            interpreter->code->base.data + interpreter->resume_offset;

        interpreter->resume_offset = 0;
        interpreter->resume_flag  &= ~(RESUME_RESTART | RESUME_INITIAL);

        switch (interpreter->run_core) {
            case PARROT_SLOW_CORE:

                core = runops_slow_core;

                if (Interp_flags_TEST(interpreter, PARROT_PROFILE_FLAG)) {
                    core = runops_profile_core;
                    if (interpreter->profile == NULL) {
                        interpreter->profile = (RunProfile *)
                            mem_sys_allocate_zeroed(sizeof(RunProfile));
                        interpreter->profile->data = (ProfData *)
                            mem_sys_allocate_zeroed((interpreter->op_count +
                                        PARROT_PROF_EXTRA) * sizeof(ProfData));
                    }
                }
                break;
            case PARROT_FAST_CORE:
                core = runops_fast_core;
                break;
            case PARROT_CGOTO_CORE:
#ifdef HAVE_COMPUTED_GOTO
                core = runops_cgoto_core;
#else
                internal_exception(1, "Error: PARROT_CGOTO_CORE not available");
#endif
                break;
            case PARROT_CGP_CORE:
            case PARROT_CGP_JIT_CORE:
#ifdef HAVE_COMPUTED_GOTO
                core = runops_cgp;
#else
                internal_exception(1, "Error: PARROT_CGP_CORE not available");
#endif
                break;
            case PARROT_SWITCH_CORE:
            case PARROT_SWITCH_JIT_CORE:
                core = runops_switch;
                break;
            case PARROT_JIT_CORE:
#if !JIT_CAPABLE
                internal_exception(JIT_UNAVAILABLE,
                        "Error: PARROT_JIT_FLAG is set, "
                        "but interpreter is not JIT_CAPABLE!\n");
#endif
                core = runops_jit;
                break;
            case PARROT_EXEC_CORE:
#if !EXEC_CAPABLE
                internal_exception(EXEC_UNAVAILABLE,
                        "Error: PARROT_EXEC_FLAG is set, "
                        "but interpreter is not EXEC_CAPABLE!\n");
#endif
                core = runops_exec;
                break;
            default:
                internal_exception(UNIMPLEMENTED,
                        "ambiguous runcore switch used");
                break;
        }

        /* run it finally */
        core(interpreter, pc);

        /* if we have fallen out with resume and we were running CGOTO, set
         * the stacktop again to a sane value, so that restarting the runloop
         * is ok.
         */
        if (interpreter->resume_flag & RESUME_RESTART) {
            if ((int)interpreter->resume_offset < 0)
                internal_exception(1, "branch_cs: illegal resume offset");
            stop_prederef(interpreter);
        }
    }
}
/*

=item C<void
Parrot_setup_event_func_ptrs(Parrot_Interp interpreter)>

Sets up a C<func_table> in which every entry points at (or holds the
address of) the C<check_events__> opcode.

TODO: Free it at destroy. Handle run-core changes.
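
The table built here backs the event-checking switch implemented by
C<enable_event_checking()> and C<disable_event_checking()> near the end of
this file; the intended sequence is roughly (illustrative only):

    enable_event_checking(interpreter);   // async: every op slot now
                                          // dispatches to check_events__
    // ... pending events are drained by the check_events__ op ...
    disable_event_checking(interpreter);  // restore the saved func table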
=cut

*/
void
Parrot_setup_event_func_ptrs(Parrot_Interp interpreter)
{
    size_t i, n = interpreter->op_count;
    oplib_init_f init_func = get_op_lib_init(1, interpreter->run_core, NULL);
    op_lib_t *lib = init_func(1);

    /*
     * remember op_func_table
     */
    interpreter->save_func_table = lib->op_func_table;
    if (!lib->op_func_table)
        return;
    /* function or CG core - prepare func_table */
    if (!interpreter->evc_func_table) {
        interpreter->evc_func_table = mem_sys_allocate(sizeof(void *) * n);
        for (i = 0; i < n; ++i)
            interpreter->evc_func_table[i] = (op_func_t)
                D2FPTR(((void **)lib->op_func_table)[CORE_OPS_check_events__]);
    }
}
/*

=back

=head2 Dynamic Loading Functions

=over 4

=cut

*/
static void dynop_register_xx(Parrot_Interp, PMC *, size_t, size_t,
        oplib_init_f init_func);
static void dynop_register_switch(Parrot_Interp, PMC *, size_t, size_t);
/*

=item C<void
dynop_register(Parrot_Interp interpreter, PMC* lib_pmc)>

Register a dynamic oplib: its ops are appended to the core op tables, and
the other run cores are updated accordingly.
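
In outline, registration grows the core tables by the dynamic lib's op
count; indices C<0 .. n_old-1> keep their meaning and the new ops occupy
C<n_old .. n_tot-1>, as the function below computes:

    n_old = interpreter->op_count;   // ops known before this load
    n_new = lib->op_count;           // ops provided by the dynoplib
    n_tot = n_old + n_new;           // size of the merged tables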
=cut

*/
void
dynop_register(Parrot_Interp interpreter, PMC *lib_pmc)
{
    op_lib_t *lib, *core;
    oplib_init_f init_func;
    op_func_t *new_func_table, *new_evc_func_table;
    op_info_t *new_info_table;
    size_t i, n_old, n_new, n_tot;

    if (n_interpreters > 1) {
        /* This is not supported because oplibs are always shared.
         * If we mem_sys_reallocate() the op_func_table while another
         * interpreter is running using that exact op_func_table,
         * this will cause problems.
         * Also, the mapping from op name to op number is global even for
         * dynops (!). The mapping is done by get_op in core_ops.c (even for
         * dynops) and uses a global hash as a cache and relies on
         * modifications to the static-scoped core_op_lib data structure to
         * see dynops.
         */
        internal_exception(1, "loading a new dynoplib while more than "
                "one thread is running is not supported.");
    }

    if (!interpreter->all_op_libs)
        interpreter->all_op_libs = mem_sys_allocate(
                sizeof (op_lib_t *) * (interpreter->n_libs + 1));
    else
        interpreter->all_op_libs = mem_sys_realloc(interpreter->all_op_libs,
                sizeof (op_lib_t *) * (interpreter->n_libs + 1));

    init_func = get_op_lib_init(0, 0, lib_pmc);
    lib = init_func(1);

    interpreter->all_op_libs[interpreter->n_libs++] = lib;
    /*
     * if we are registering an op_lib variant, called from below,
     * the base names of this lib and the previous one are the same
     */
    if (interpreter->n_libs >= 2 &&
            !strcmp(interpreter->all_op_libs[interpreter->n_libs-2]->name,
                lib->name)) {
        /* registering is handled below */
        return;
    }
    /*
     * when called from yyparse, we have to set up the evc_func_table
     */
    Parrot_setup_event_func_ptrs(interpreter);

    n_old = interpreter->op_count;
    n_new = lib->op_count;
    n_tot = n_old + n_new;
    core  = PARROT_CORE_OPLIB_INIT(1);

    assert(interpreter->op_count == core->op_count);

    new_evc_func_table = mem_sys_realloc(interpreter->evc_func_table,
            sizeof (void *) * n_tot);
    if (core->flags & OP_FUNC_IS_ALLOCATED) {
        new_func_table = mem_sys_realloc(core->op_func_table,
                sizeof (void *) * n_tot);
        new_info_table = mem_sys_realloc(core->op_info_table,
                sizeof (op_info_t) * n_tot);
    }
    else {
        /*
         * allocate new op_func and info tables
         */
        new_func_table = mem_sys_allocate(sizeof (void *) * n_tot);
        new_info_table = mem_sys_allocate(sizeof (op_info_t) * n_tot);
        /* copy old */
        for (i = 0; i < n_old; ++i) {
            new_func_table[i] = interpreter->op_func_table[i];
            new_info_table[i] = interpreter->op_info_table[i];
        }
    }
    /* add new */
    for (i = n_old; i < n_tot; ++i) {
        new_func_table[i] = ((op_func_t *)lib->op_func_table)[i - n_old];
        new_info_table[i] = lib->op_info_table[i - n_old];
        /*
         * fill new ops of the event checker func table;
         * if we are running a different core, entries are
         * changed below
         */
        new_evc_func_table[i] =
            interpreter->op_func_table[CORE_OPS_check_events__];
    }
    interpreter->evc_func_table  = new_evc_func_table;
    interpreter->save_func_table = new_func_table;
    /*
     * deinit core, so that it gets rehashed
     */
    (void) PARROT_CORE_OPLIB_INIT(0);
    /* set table */
    core->op_func_table = interpreter->op_func_table = new_func_table;
    core->op_info_table = interpreter->op_info_table = new_info_table;
    core->op_count      = interpreter->op_count      = n_tot;
    core->flags = OP_FUNC_IS_ALLOCATED | OP_INFO_IS_ALLOCATED;
    /* done for plain core */
#if defined HAVE_COMPUTED_GOTO
    dynop_register_xx(interpreter, lib_pmc, n_old, n_new,
            PARROT_CORE_CGP_OPLIB_INIT);
    dynop_register_xx(interpreter, lib_pmc, n_old, n_new,
            PARROT_CORE_CG_OPLIB_INIT);
#endif
    dynop_register_switch(interpreter, lib_pmc, n_old, n_new);
}
/*

=item C<static void
dynop_register_xx(Parrot_Interp interpreter, PMC* lib_pmc,
            size_t n_old, size_t n_new, oplib_init_f init_func)>

Register the C<op_lib> with the other (CG and CGP) run cores.

=cut

*/
static void
dynop_register_xx(Parrot_Interp interpreter, PMC *lib_pmc,
        size_t n_old, size_t n_new, oplib_init_f init_func)
{
    op_lib_t *cg_lib, *new_lib;
    void **ops_addr = NULL;
    size_t i, n_tot;
#if 0
    /* related to CG and CGP ops issue below */
    STRING *op_variant;
#endif
    oplib_init_f new_init_func;
    PMC *lib_variant;

    n_tot = n_old + n_new;
    cg_lib = init_func(1);

    if (cg_lib->flags & OP_FUNC_IS_ALLOCATED) {
        ops_addr = mem_sys_realloc(cg_lib->op_func_table,
                n_tot * sizeof(void *));
    }
    else {
        ops_addr = mem_sys_allocate(n_tot * sizeof(void *));
        cg_lib->flags = OP_FUNC_IS_ALLOCATED;
        for (i = 0; i < n_old; ++i)
            ops_addr[i] = ((void **)cg_lib->op_func_table)[i];
    }
    /*
     * XXX running CG and CGP ops currently works only via the wrapper
     *
     * the problem is:
     * The actual runcores cg_core and cgp_core are very big functions.
     * The C compiler usually addresses "spilled" registers in the C stack.
     * The loaded opcode lib is another possibly big function, but with
     * a likely different stack layout. Directly jumping around between
     * code locations in these two opcode functions works, but access
     * to stack-ed (or spilled) variables fails badly.
     *
     * We would need to prepare the assembly source of the opcode
     * lib so that all variable access on the stack has the same
     * layout and compile the prepared assembly to ops_cgp?.o
     *
     * The switched core is different anyway, as we can't extend the
     * compiled big switch statement with the new cases. We have
     * always to use the wrapper__ opcode called from the default case.
     */
#if 0
    /* check if the lib_pmc exists with a _xx flavor */
    new_init_func = get_op_lib_init(0, 0, lib_pmc);
    new_lib = new_init_func(1);
    op_variant = Parrot_sprintf_c(interpreter, "%s_ops%s",
            new_lib->name, cg_lib->suffix);
    lib_variant = Parrot_load_lib(interpreter, op_variant, NULL);
#endif
    /*
     * XXX running CG and CGP ops currently works only via the wrapper
     */
    if (0 /*lib_variant */) {
        new_init_func = get_op_lib_init(0, 0, lib_variant);
        new_lib = new_init_func(1);
        for (i = n_old; i < n_tot; ++i)
            ops_addr[i] = ((void **)new_lib->op_func_table)[i - n_old];
        new_lib->op_func_table = (void *) ops_addr;
        new_lib->op_count = n_tot;
        new_init_func((long) ops_addr);
    }
    else {
        /* if not install wrappers */
        /* fill new entries with the wrapper op */
        for (i = n_old; i < n_tot; ++i)
            ops_addr[i] = ((void **)cg_lib->op_func_table)[CORE_OPS_wrapper__];
    }
    /*
     * if we are running this core, update event check ops
     */
    if ((int)interpreter->run_core == cg_lib->core_type) {
        for (i = n_old; i < n_tot; ++i)
            interpreter->evc_func_table[i] =
                (op_func_t)D2FPTR(ops_addr[CORE_OPS_check_events__]);
        interpreter->save_func_table = (void *) ops_addr;
    }
    /*
     * tell the cg_core about the new jump table
     */
    cg_lib->op_func_table = (void *) ops_addr;
    cg_lib->op_count = n_tot;
    init_func((long) ops_addr);
}
/*

=item C<static void
dynop_register_switch(Parrot_Interp interpreter, PMC* lib_pmc,
            size_t n_old, size_t n_new)>

Records the new total op count in the switched core's C<op_lib>. The
dynamic ops themselves are dispatched through the C<wrapper__> op from the
switch's default case (see the comment in C<dynop_register_xx>).

=cut

*/
static void
dynop_register_switch(Parrot_Interp interpreter, PMC *lib_pmc,
        size_t n_old, size_t n_new)
{
    op_lib_t *lib = PARROT_CORE_SWITCH_OPLIB_INIT(1);
    lib->op_count = n_old + n_new;
}
/*

=item C<static void
notify_func_table(Parrot_Interp interpreter, void* table, int on)>

Tell the interpreter's running core about the new function table.

=cut

*/
static void
notify_func_table(Parrot_Interp interpreter, void *table, int on)
{
    oplib_init_f init_func = get_op_lib_init(1, interpreter->run_core, NULL);
    op_lib_t *lib = init_func(1);

    init_func((long) table);
    switch (interpreter->run_core) {
        case PARROT_SLOW_CORE:      /* normal func core */
        case PARROT_FAST_CORE:      /* normal func core */
        case PARROT_CGOTO_CORE:     /* cgoto address list */
            assert(table);
            interpreter->op_func_table = table;
            break;
        case PARROT_CGP_CORE:
        case PARROT_CGP_JIT_CORE:
            turn_ev_check(interpreter, on);
            break;
        default:
            break;
    }
}
/*

=item C<void
disable_event_checking(Parrot_Interp interpreter)>

Restore the old function table.

XXX This is only implemented for the function core at present.

=cut

*/
void
disable_event_checking(Parrot_Interp interpreter)
{
    /*
     * restore func table
     */
    assert(interpreter->save_func_table);
    notify_func_table(interpreter, interpreter->save_func_table, 0);
}
/*

=item C<void
enable_event_checking(Parrot_Interp interpreter)>

Replace the function table with one that does event checking for all
opcodes.

NOTE: C<enable_event_checking()> is called asynchronously by the event
handler thread. All actions done from here have to be async safe.

XXX This is only implemented for the function core at present.

=cut

*/
void
enable_event_checking(Parrot_Interp interpreter)
{
    /*
     * put table in place
     */
    notify_func_table(interpreter, interpreter->evc_func_table, 1);
}
/*

=back

=head1 SEE ALSO

F<include/parrot/interpreter.h>, F<src/inter_cb.c>, F<src/inter_create.c>,
F<src/inter_misc.c>, F<src/inter_run.c>.

=cut

*/
/*
 * Local variables:
 *   c-file-style: "parrot"
 * End:
 * vim: expandtab shiftwidth=4:
 */