1 #ifndef RUBY_INSNHELPER_H
2 #define RUBY_INSNHELPER_H
/**********************************************************************

  insnhelper.h - helper macros to implement each instruction

  created at: 04/01/01 15:50:34 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
/* Interpreter-wide counters and serials shared with the VM core.
 * NOTE(review): these are only declared here (RUBY_EXTERN); the defining
 * translation unit and update sites are outside this header -- confirm
 * exact semantics against vm.c / vm_insnhelper.c. */
RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_invalidations;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_misses;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
#if USE_YJIT || USE_RJIT // We want vm_insns_count on any JIT-enabled build.
// Increment vm_insns_count for --yjit-stats. We increment this even when
// --yjit or --yjit-stats is not used because branching to skip it is slower.
// We also don't use ATOMIC_INC for performance, allowing inaccuracy on Ractors.
#define JIT_COLLECT_USAGE_INSN(insn) rb_vm_insns_count++
#else
#define JIT_COLLECT_USAGE_INSN(insn) // none
#endif
/* Instruction/operand/register usage instrumentation.  When
 * VM_COLLECT_USAGE_DETAILS is off, these collapse to the JIT counter
 * (for insns) or to nothing, so the fast path pays no cost. */
#if VM_COLLECT_USAGE_DETAILS
#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))
#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#else
#define COLLECT_USAGE_INSN(insn) JIT_COLLECT_USAGE_INSN(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) // none
#define COLLECT_USAGE_REGISTER(reg, s) // none
#endif
/**********************************************************/
/* deal with stack                                        */
/**********************************************************/

/* These assume the register macros below (GET_SP/SET_SV/INC_SP/DEC_SP)
 * are in scope inside an instruction body. */
#define PUSH(x) (SET_SV(x), INC_SP(1))         /* store x at SP, then grow stack by 1 */
#define TOPN(n) (*(GET_SP()-(n)-1))            /* peek n-th value from the top (0 = top) */
#define POPN(n) (DEC_SP(n))                    /* drop n values */
#define POP() (DEC_SP(1))                      /* drop the top value */
#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n))  /* address of the n-th slot from the top */
/**********************************************************/
/* deal with registers */
/**********************************************************/

/* Shorthands over the current control-frame pointer (reg_cfp), which
 * every instruction handler has in scope. */
#define VM_REG_CFP (reg_cfp)
#define VM_REG_PC (VM_REG_CFP->pc)
#define VM_REG_SP (VM_REG_CFP->sp)
#define VM_REG_EP (VM_REG_CFP->ep)
/* Reload the cached control-frame pointer from the execution context,
 * after anything that may have pushed or popped frames. */
#define RESTORE_REGS() do { \
    VM_REG_CFP = ec->cfp; \
} while (0)
/* How a method is being called (affects visibility checks).
 * NOTE(review): enumerators reconstructed from upstream ruby/ruby
 * vm_insnhelper.h -- confirm against the canonical source. */
typedef enum call_type {
    CALL_PUBLIC,
    CALL_FCALL,
    CALL_VCALL,
    CALL_PUBLIC_KW,
    CALL_FCALL_KW
} call_type;
#if VM_COLLECT_USAGE_DETAILS
/* Which VM register an access refers to, reported to
 * vm_collect_usage_register().
 * NOTE(review): enumerator values reconstructed from upstream
 * ruby/ruby vm_insnhelper.h -- confirm against the canonical source. */
enum vm_regan_regtype {
    VM_REGAN_PC = 0,
    VM_REGAN_SP = 1,
    VM_REGAN_EP = 2,
    VM_REGAN_CFP = 3,
    VM_REGAN_SELF = 4,
    VM_REGAN_ISEQ = 5
};
/* Whether the access is a read or a write. */
enum vm_regan_acttype {
    VM_REGAN_ACT_GET = 0,
    VM_REGAN_ACT_SET = 1
};

/* Record the register access, then yield the value v unchanged. */
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \
  (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v))
#else
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) (v)
#endif
/* program counter -- every access goes through the usage-collection
 * helper, which is the identity when collection is disabled. */
#define GET_PC() (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC))
#define SET_PC(x) (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x))))
#define GET_CURRENT_INSN() (*GET_PC())
#define GET_OPERAND(n) (GET_PC()[(n)])       /* n-th operand of the current insn */
#define ADD_PC(n) (SET_PC(VM_REG_PC + (n)))
#define JUMP(dst) (SET_PC(VM_REG_PC + (dst)))  /* dst is a relative offset */

/* frame pointer, environment pointer */
#define GET_CFP() (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP))
#define GET_EP() (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP))
#define SET_EP(x) (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x))))
#define GET_LEP() (VM_EP_LEP(GET_EP()))      /* local (method-level) EP */

/* stack pointer */
#define GET_SP() (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP))
#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x) (*GET_SP() = rb_ractor_confirm_belonging(x))
/* set current stack value as x */

/* instruction sequence C struct */
#define GET_ISEQ() (GET_CFP()->iseq)
/**********************************************************/
/* deal with variables */
/**********************************************************/

/* Follow an env's special-value slot to the previous (outer) EP;
 * the low two tag bits are masked off. */
#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))

/**********************************************************/
/* deal with values */
/**********************************************************/

#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self))
126 /**********************************************************/
127 /* deal with control flow 2: method/iterator */
128 /**********************************************************/
130 /* set fastpath when cached method is *NOT* protected
131 * because inline method cache does not care about receiver.
135 CC_SET_FASTPATH(const struct rb_callcache
*cc
, vm_call_handler func
, bool enabled
)
137 if (LIKELY(enabled
)) {
138 vm_cc_call_set(cc
, func
);
/* Block handler of the current method frame (stored in the local EP's
 * special-value slot). */
#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
144 /**********************************************************/
145 /* deal with control flow 3: exception */
146 /**********************************************************/
149 /**********************************************************/
150 /* deal with stack canary */
151 /**********************************************************/
#if VM_CHECK_MODE > 0
/* Debug-build stack canary: SETUP_CANARY plants vm_stack_canary at the
 * current SP; CHECK_CANARY verifies it is still intact and reports a
 * dead canary via rb_vm_canary_is_found_dead().
 * NOTE(review): macro bodies reconstructed from upstream ruby/ruby
 * vm_insnhelper.h -- confirm against the canonical source. */
#define SETUP_CANARY(cond) \
    VALUE *canary = 0; \
    if (cond) { \
        canary = GET_SP(); \
        SET_SV(vm_stack_canary); \
    } \
    else { \
        SET_SV(Qfalse); /* cleanup */ \
    }
#define CHECK_CANARY(cond, insn) \
    if (cond) { \
        if (*canary == vm_stack_canary) { \
            *canary = Qfalse; /* cleanup */ \
        } \
        else { \
            rb_vm_canary_is_found_dead(insn, *canary); \
        } \
    }
#else
#define SETUP_CANARY(cond) if (cond) {} else {}
#define CHECK_CANARY(cond, insn) if (cond) {(void)(insn);}
#endif
177 /**********************************************************/
179 /**********************************************************/
/* Fall back from a specialized instruction to the generic
 * opt_send_without_block: rewind the PC by that instruction's width,
 * then dispatch its original (non-specialized) handler. */
#define CALL_SIMPLE_METHOD() do { \
    rb_snum_t insn_width = attr_width_opt_send_without_block(0); \
    ADD_PC(-insn_width); \
    DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)
/* Global serial read/bumped around class-variable access.
 * NOTE(review): presumably incrementing invalidates cvar caches --
 * confirm against the users of ruby_vm_global_cvar_state. */
#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)
190 static inline struct vm_throw_data
*
191 THROW_DATA_NEW(VALUE val
, const rb_control_frame_t
*cf
, int st
)
193 struct vm_throw_data
*obj
= IMEMO_NEW(struct vm_throw_data
, imemo_throw_data
, 0);
194 *((VALUE
*)&obj
->throw_obj
) = val
;
195 *((struct rb_control_frame_struct
**)&obj
->catch_frame
) = (struct rb_control_frame_struct
*)cf
;
196 obj
->throw_state
= st
;
202 THROW_DATA_VAL(const struct vm_throw_data
*obj
)
204 VM_ASSERT(THROW_DATA_P(obj
));
205 return obj
->throw_obj
;
208 static inline const rb_control_frame_t
*
209 THROW_DATA_CATCH_FRAME(const struct vm_throw_data
*obj
)
211 VM_ASSERT(THROW_DATA_P(obj
));
212 return obj
->catch_frame
;
216 THROW_DATA_STATE(const struct vm_throw_data
*obj
)
218 VM_ASSERT(THROW_DATA_P(obj
));
219 return obj
->throw_state
;
223 THROW_DATA_CONSUMED_P(const struct vm_throw_data
*obj
)
225 VM_ASSERT(THROW_DATA_P(obj
));
226 return obj
->flags
& THROW_DATA_CONSUMED
;
230 THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data
*obj
, const rb_control_frame_t
*cfp
)
232 VM_ASSERT(THROW_DATA_P(obj
));
233 obj
->catch_frame
= cfp
;
237 THROW_DATA_STATE_SET(struct vm_throw_data
*obj
, int st
)
239 VM_ASSERT(THROW_DATA_P(obj
));
240 obj
->throw_state
= st
;
244 THROW_DATA_CONSUMED_SET(struct vm_throw_data
*obj
)
246 if (THROW_DATA_P(obj
) &&
247 THROW_DATA_STATE(obj
) == TAG_BREAK
) {
248 obj
->flags
|= THROW_DATA_CONSUMED
;
/* Call-info flag tests for the argument-passing style of a call site. */
#define IS_ARGS_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci) (vm_ci_flag(ci) & VM_CALL_KWARG)
#define IS_ARGS_KW_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
#define IS_ARGS_KW_SPLAT_MUT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)
259 vm_call_cacheable(const struct rb_callinfo
*ci
, const struct rb_callcache
*cc
)
261 return (vm_ci_flag(ci
) & VM_CALL_FCALL
) ||
262 METHOD_ENTRY_VISI(vm_cc_cme(cc
)) != METHOD_VISI_PROTECTED
;
264 /* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
265 can be used as a fastpath. */
267 vm_call_iseq_optimizable_p(const struct rb_callinfo
*ci
, const struct rb_callcache
*cc
)
269 return !IS_ARGS_SPLAT(ci
) && !IS_ARGS_KEYWORD(ci
) && vm_call_cacheable(ci
, cc
);
272 #endif /* RUBY_INSNHELPER_H */