3 /**********************************************************************
8 created at: 04/01/01 23:36:57 JST
10 Copyright (C) 2004-2008 Koichi Sasada
12 **********************************************************************/
13 #include "internal/gc.h"
16 RUBY_EXTERN
const int ruby_api_version
[];
17 #define ISEQ_MAJOR_VERSION ((unsigned int)ruby_api_version[0])
18 #define ISEQ_MINOR_VERSION ((unsigned int)ruby_api_version[1])
20 #ifndef USE_ISEQ_NODE_ID
21 #define USE_ISEQ_NODE_ID 1
/* Opaque instruction-sequence handle; the struct body is declared
 * elsewhere (not visible in this header). */
typedef struct rb_iseq_struct rb_iseq_t;
/* Self-referential marker so other headers can test (with #ifdef) that
 * the typedef exists. */
#define rb_iseq_t rb_iseq_t
29 extern const ID rb_iseq_shared_exc_local_tbl
[];
31 #define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
32 #define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
33 #define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
34 #define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)
36 #define ISEQ_PC2BRANCHINDEX(iseq) iseq->body->variable.pc2branchindex
37 #define ISEQ_PC2BRANCHINDEX_SET(iseq, h) RB_OBJ_WRITE(iseq, &iseq->body->variable.pc2branchindex, h)
39 #define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
41 static inline rb_snum_t
42 ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t
*iseq
)
44 rb_snum_t cnt
= iseq
->body
->variable
.flip_count
;
45 iseq
->body
->variable
.flip_count
+= 1;
50 ISEQ_ORIGINAL_ISEQ(const rb_iseq_t
*iseq
)
52 return iseq
->body
->variable
.original_iseq
;
56 ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t
*iseq
)
58 void *ptr
= iseq
->body
->variable
.original_iseq
;
59 iseq
->body
->variable
.original_iseq
= NULL
;
66 ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t
*iseq
, long size
)
68 return iseq
->body
->variable
.original_iseq
=
/* Events that require re-instrumenting an iseq with trace_ instruction
 * variants.  NOTE(review): several continuation lines of this macro
 * were dropped by the extraction (the dangling `| \` fragments prove
 * it); the CLASS/END/CALL/RETURN/C_CALL/B_CALL members are
 * reconstructed — confirm against the upstream header. */
#define ISEQ_TRACE_EVENTS (RUBY_EVENT_LINE  | \
                           RUBY_EVENT_CLASS | \
                           RUBY_EVENT_END   | \
                           RUBY_EVENT_CALL  | \
                           RUBY_EVENT_RETURN| \
                           RUBY_EVENT_C_CALL| \
                           RUBY_EVENT_C_RETURN| \
                           RUBY_EVENT_B_CALL| \
                           RUBY_EVENT_B_RETURN| \
                           RUBY_EVENT_COVERAGE_LINE| \
                           RUBY_EVENT_COVERAGE_BRANCH)
/* Per-iseq state flags stored in the imemo's user-flag bits. */
#define ISEQ_NOT_LOADED_YET   IMEMO_FL_USER1 /* lazily-loaded ibf iseq, body not materialized */
#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2 /* aux union currently holds compile_data */
#define ISEQ_TRANSLATED       IMEMO_FL_USER3
#define ISEQ_MARKABLE_ISEQ    IMEMO_FL_USER4

/* An iseq is executable once it is fully loaded and no longer carries
 * compile-time scratch data. */
#define ISEQ_EXECUTABLE_P(iseq) (FL_TEST_RAW(((VALUE)iseq), ISEQ_NOT_LOADED_YET | ISEQ_USE_COMPILE_DATA) == 0)
91 struct iseq_compile_data
{
94 const VALUE catch_table_ary
; /* Array */
96 /* GC is not needed */
97 struct iseq_label_data
*start_label
;
98 struct iseq_label_data
*end_label
;
99 struct iseq_label_data
*redo_label
;
100 const rb_iseq_t
*current_block
;
101 struct iseq_compile_data_ensure_node_stack
*ensure_node_stack
;
103 struct iseq_compile_data_storage
*storage_head
;
104 struct iseq_compile_data_storage
*storage_current
;
107 struct iseq_compile_data_storage
*storage_head
;
108 struct iseq_compile_data_storage
*storage_current
;
111 int loopval_popped
; /* used by NODE_BREAK */
116 unsigned int ci_index
;
117 const rb_compile_option_t
*option
;
118 struct rb_id_table
*ivar_cache_table
;
119 const struct rb_builtin_function
*builtin_function_table
;
120 const NODE
*root_node
;
122 st_table
*labels_table
;
126 static inline struct iseq_compile_data
*
127 ISEQ_COMPILE_DATA(const rb_iseq_t
*iseq
)
129 if (iseq
->flags
& ISEQ_USE_COMPILE_DATA
) {
130 return iseq
->aux
.compile_data
;
138 ISEQ_COMPILE_DATA_ALLOC(rb_iseq_t
*iseq
)
140 iseq
->aux
.compile_data
= ZALLOC(struct iseq_compile_data
);
141 iseq
->flags
|= ISEQ_USE_COMPILE_DATA
;
145 ISEQ_COMPILE_DATA_CLEAR(rb_iseq_t
*iseq
)
147 iseq
->flags
&= ~ISEQ_USE_COMPILE_DATA
;
148 iseq
->aux
.compile_data
= NULL
;
151 static inline rb_iseq_t
*
152 iseq_imemo_alloc(void)
154 return (rb_iseq_t
*)rb_imemo_new(imemo_iseq
, 0, 0, 0, 0);
157 VALUE
rb_iseq_ibf_dump(const rb_iseq_t
*iseq
, VALUE opt
);
158 void rb_ibf_load_iseq_complete(rb_iseq_t
*iseq
);
159 const rb_iseq_t
*rb_iseq_ibf_load(VALUE str
);
160 const rb_iseq_t
*rb_iseq_ibf_load_bytes(const char *cstr
, size_t);
161 VALUE
rb_iseq_ibf_load_extra_data(VALUE str
);
162 void rb_iseq_init_trace(rb_iseq_t
*iseq
);
163 int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t
*iseq
, rb_event_flag_t turnon_events
, VALUE tpval
, unsigned int target_line
, bool target_bmethod
);
164 int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t
*iseq
, VALUE tpval
);
165 const rb_iseq_t
*rb_iseq_load_iseq(VALUE fname
);
#if VM_INSN_INFO_TABLE_IMPL == 2
/* Decode the succinct position table into a plain unsigned-int array.
 * NOTE(review): the matching #endif was lost in extraction and is
 * restored here; without it the conditional swallows the rest of the
 * header. */
unsigned int *rb_iseq_insns_info_decode_positions(const struct rb_iseq_constant_body *body);
#endif

/* Map a (direct-threaded) instruction address back to its opcode. */
int rb_vm_insn_addr2opcode(const void *addr);
173 RUBY_SYMBOL_EXPORT_BEGIN
176 VALUE
rb_iseq_compile_node(rb_iseq_t
*iseq
, const NODE
*node
);
177 VALUE
rb_iseq_compile_callback(rb_iseq_t
*iseq
, const struct rb_iseq_new_with_callback_callback_func
* ifunc
);
178 VALUE
*rb_iseq_original_iseq(const rb_iseq_t
*iseq
);
179 void rb_iseq_build_from_ary(rb_iseq_t
*iseq
, VALUE misc
,
180 VALUE locals
, VALUE args
,
181 VALUE exception
, VALUE body
);
182 void rb_iseq_mark_insn_storage(struct iseq_compile_data_storage
*arena
);
185 VALUE
rb_iseq_load(VALUE data
, VALUE parent
, VALUE opt
);
186 VALUE
rb_iseq_parameters(const rb_iseq_t
*iseq
, int is_proc
);
187 unsigned int rb_iseq_line_no(const rb_iseq_t
*iseq
, size_t pos
);
#ifdef USE_ISEQ_NODE_ID
/* AST node id at instruction position `pos`.
 * NOTE(review): the matching #endif was lost in extraction and is
 * restored here. */
int rb_iseq_node_id(const rb_iseq_t *iseq, size_t pos);
#endif
191 void rb_iseq_trace_set(const rb_iseq_t
*iseq
, rb_event_flag_t turnon_events
);
192 void rb_iseq_trace_set_all(rb_event_flag_t turnon_events
);
193 void rb_iseq_insns_info_encode_positions(const rb_iseq_t
*iseq
);
195 struct rb_iseq_constant_body
*rb_iseq_constant_body_alloc(void);
196 VALUE
rb_iseqw_new(const rb_iseq_t
*iseq
);
197 const rb_iseq_t
*rb_iseqw_to_iseq(VALUE iseqw
);
199 VALUE
rb_iseq_absolute_path(const rb_iseq_t
*iseq
); /* obsolete */
200 int rb_iseq_from_eval_p(const rb_iseq_t
*iseq
);
201 VALUE
rb_iseq_type(const rb_iseq_t
*iseq
);
202 VALUE
rb_iseq_label(const rb_iseq_t
*iseq
);
203 VALUE
rb_iseq_base_label(const rb_iseq_t
*iseq
);
204 VALUE
rb_iseq_first_lineno(const rb_iseq_t
*iseq
);
205 VALUE
rb_iseq_method_name(const rb_iseq_t
*iseq
);
206 void rb_iseq_code_location(const rb_iseq_t
*iseq
, int *first_lineno
, int *first_column
, int *last_lineno
, int *last_column
);
208 void rb_iseq_remove_coverage_all(void);
211 const rb_iseq_t
*rb_method_iseq(VALUE body
);
212 const rb_iseq_t
*rb_proc_get_iseq(VALUE proc
, int *is_proc
);
/* Per-compilation tunables (optimizations, coverage, frozen-string
 * literal behavior).  NOTE(review): the closing `};` was lost in
 * extraction and is restored; upstream also carries a trailing
 * `int debug_level;` member, reconstructed here — confirm against the
 * original header. */
struct rb_compile_option_struct
{
    unsigned int inline_const_cache: 1;
    unsigned int peephole_optimization: 1;
    unsigned int tailcall_optimization: 1;
    unsigned int specialized_instruction: 1;
    unsigned int operands_unification: 1;
    unsigned int instructions_unification: 1;
    unsigned int stack_caching: 1;
    unsigned int frozen_string_literal: 1;
    unsigned int debug_frozen_string_literal: 1;
    unsigned int coverage_enabled: 1;
    int debug_level;
};
228 struct iseq_insn_info_entry
{
230 #ifdef USE_ISEQ_NODE_ID
233 rb_event_flag_t events
;
236 struct iseq_catch_table_entry
{
238 CATCH_TYPE_RESCUE
= INT2FIX(1),
239 CATCH_TYPE_ENSURE
= INT2FIX(2),
240 CATCH_TYPE_RETRY
= INT2FIX(3),
241 CATCH_TYPE_BREAK
= INT2FIX(4),
242 CATCH_TYPE_REDO
= INT2FIX(5),
243 CATCH_TYPE_NEXT
= INT2FIX(6)
248 * CATCH_TYPE_RESCUE, CATCH_TYPE_ENSURE:
249 * use iseq as continuation.
251 * CATCH_TYPE_BREAK (iter):
254 * CATCH_TYPE_BREAK (while), CATCH_TYPE_RETRY,
255 * CATCH_TYPE_REDO, CATCH_TYPE_NEXT:
266 PACKED_STRUCT_UNALIGNED(struct iseq_catch_table
{
268 struct iseq_catch_table_entry entries
[FLEX_ARY_LEN
];
272 iseq_catch_table_bytes(int n
)
275 catch_table_entry_size
= sizeof(struct iseq_catch_table_entry
),
276 catch_table_entries_max
= (INT_MAX
- offsetof(struct iseq_catch_table
, entries
)) / catch_table_entry_size
278 if (n
> catch_table_entries_max
) rb_fatal("too large iseq_catch_table - %d", n
);
279 return (int)(offsetof(struct iseq_catch_table
, entries
) +
280 n
* catch_table_entry_size
);
283 #define INITIAL_ISEQ_COMPILE_DATA_STORAGE_BUFF_SIZE (512)
285 struct iseq_compile_data_storage
{
286 struct iseq_compile_data_storage
*next
;
289 char buff
[FLEX_ARY_LEN
];
315 VALUE
rb_iseq_defined_string(enum defined_type type
);
318 VALUE
rb_iseq_local_variables(const rb_iseq_t
*iseq
);
320 RUBY_SYMBOL_EXPORT_END
322 #endif /* RUBY_ISEQ_H */