/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#pragma once

#include "hphp/runtime/base/execution-context.h"
#include "hphp/runtime/base/rds-header.h"
#include "hphp/runtime/base/typed-value.h"
#include "hphp/runtime/vm/bytecode.h"
#include "hphp/runtime/vm/call-flags.h"
/*
 * This file contains accessors for the three primary VM registers:
 *
 * vmpc(): PC pointing to the currently executing bytecode instruction
 * vmfp(): ActRec* pointing to the current frame
 * vmsp(): TypedValue* pointing to the top of the eval stack
 *
 * The registers are physically located in the RDS header struct (defined in
 * runtime/base/rds-header.h), allowing efficient access from translated code
 * when needed. They are generally not kept up-to-date in translated code,
 * though, so there are times when it is not safe to use them. This is tracked
 * in the tl_regState variable, which is automatically checked in the accessors
 * defined here. Certain parts of the runtime do need access to the registers
 * while their state is expected to be dirty; the vmRegsUnsafe() function is
 * provided for these rare cases.
 *
 * In a C++ function potentially called from translated code, VMRegAnchor
 * should be used before accessing any of these registers. jit::FixupMap is
 * currently responsible for doing the work required to sync the VM registers,
 * though this is an implementation detail and should not matter to users of
 * VMRegAnchor.
 */
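/*
 * Illustrative sketch only (not part of this header): a native helper that
 * can be reached from translated code anchors the registers before reading
 * them. logCurrentPosition() is a hypothetical name.
 *
 *   void logCurrentPosition() {
 *     VMRegAnchor _;                // sync vmfp()/vmpc()/vmsp() before use
 *     auto const func = vmfp()->func();
 *     auto const off  = pcOff();
 *     // ... report func->fullName()->data() and off somewhere ...
 *   }
 */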
namespace HPHP {

/*
 * The current sync-state of the RDS vmRegs().
 *
 * CLEAN means that the RDS vmRegs are sync'd. DIRTY means we need to sync
 * them (by traversing the stack and looking up fixups)---this is what the
 * value of tl_regState should be whenever we enter native code from translated
 * PHP code.
 *
 * Values above GUARDED_THRESHOLD are a special case of dirty which indicates
 * that the state will be reset to DIRTY (via a scope guard) when returning to
 * PHP code, and that the actual value can be used as a starting point for
 * following the C++ call chain back into the VM. This makes it suitable for
 * guarding callbacks through code compiled without frame pointers, and in
 * places where we may end up needing to clean the registers multiple times.
 */
enum VMRegState : uintptr_t {
  CLEAN,
  DIRTY,
  GUARDED_THRESHOLD
};
extern __thread VMRegState tl_regState;
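/*
 * Illustrative sketch only: how the three kinds of tl_regState values are
 * usually distinguished. The helper names are hypothetical; the comparisons
 * mirror the checks in the accessors and guards below.
 *
 *   bool regsAreClean()   { return tl_regState == VMRegState::CLEAN; }
 *   bool regsAreGuarded() { return tl_regState >= VMRegState::GUARDED_THRESHOLD; }
 *   bool regsNeedSync()   { return !regsAreClean(); }   // DIRTY or guarded
 */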
inline void checkVMRegState() {
  assertx(tl_regState == VMRegState::CLEAN);
}

inline void checkVMRegStateGuarded() {
  assertx(tl_regState != VMRegState::DIRTY);
}

inline VMRegs& vmRegsUnsafe() {
  return rds::header()->vmRegs;
}

inline VMRegs& vmRegs() {
  checkVMRegState();
  return vmRegsUnsafe();
}

inline Stack& vmStack() {
  return vmRegs().stack;
}

inline bool isValidVMStackAddress(const void* addr) {
  return vmRegsUnsafe().stack.isValidAddress(uintptr_t(addr));
}

inline TypedValue*& vmsp() {
  return vmRegs().stack.top();
}

inline ActRec*& vmfp() {
  return vmRegs().fp;
}

inline const unsigned char*& vmpc() {
  return vmRegs().pc;
}

inline Offset pcOff() {
  return vmfp()->func()->offsetOf(vmpc());
}
inline ActRec*& vmFirstAR() {
  // This is safe because firstAR is always updated directly.
  return vmRegsUnsafe().firstAR;
}

inline MInstrState& vmMInstrState() {
  // This is safe because mInstrState is always updated directly.
  return vmRegsUnsafe().mInstrState;
}

inline ActRec*& vmJitCalledFrame() {
  return vmRegsUnsafe().jitCalledFrame;
}

inline jit::TCA& vmJitReturnAddr() {
  return vmRegsUnsafe().jitReturnAddr;
}

inline void assert_native_stack_aligned() {
#ifndef _MSC_VER
  assertx(reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) % 16 == 0);
#endif
}
inline void interp_set_regs(ActRec* ar, TypedValue* sp, Offset pcOff) {
  assertx(tl_regState == VMRegState::DIRTY);
  tl_regState = VMRegState::CLEAN;
  vmfp() = ar;
  vmsp() = sp;
  vmpc() = ar->func()->at(pcOff);
  vmJitReturnAddr() = nullptr; // We never elide frames around an interpOne
}
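/*
 * Illustrative sketch only: how an interpreter entry path might establish
 * clean registers before dispatching. enterInterp() and dispatchBytecode()
 * are hypothetical names, not part of this header.
 *
 *   void enterInterp(ActRec* ar, TypedValue* sp, Offset off) {
 *     interp_set_regs(ar, sp, off);   // requires tl_regState == DIRTY
 *     dispatchBytecode();             // vmfp()/vmsp()/vmpc() now usable
 *   }
 */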
///////////////////////////////////////////////////////////////////////////////

/*
 * This class is used as a scoped guard around code that is called from the JIT
 * which needs the VM to be in a consistent state. JIT helpers use it to guard
 * calls into HHVM's runtime. It is used like this:
 *
 *   void helperFunction() {
 *     VMRegAnchor _;
 *     runtimeCall();
 *   }
 *
 * VMRegAnchor should also be used before entering a C library compiled with
 * -fomit-frame-pointer which will call back into HHVM. If VMRegAnchor is not
 * used, HHVM's runtime will attempt to traverse the native stack, and will
 * assert or crash if it attempts to parse a part of the stack with no frame
 * pointers. VMRegAnchor forces the stack traversal to be done when it is
 * constructed.
 *
 * A VMRegAnchor in "soft" mode will look for stashed VM metadata and only sync
 * VM state if it finds it. (The default behavior is "hard" mode, where we
 * assert if we don't find the fixup state.)
 */
struct VMRegAnchor {
  enum Mode { Hard, Soft };

  explicit VMRegAnchor(Mode mode = Hard);

  ~VMRegAnchor() {
    if (m_old < VMRegState::GUARDED_THRESHOLD) {
      tl_regState = m_old;
    }
  }

  VMRegAnchor(const VMRegAnchor&) = delete;
  VMRegAnchor& operator=(const VMRegAnchor&) = delete;

  VMRegState m_old;
};
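/*
 * Illustrative sketch only: a helper that may be reached both from the JIT
 * and from plain native code can use Soft mode, which syncs only if fixup
 * metadata is found. maybeReportPosition() is a hypothetical name.
 *
 *   void maybeReportPosition() {
 *     VMRegAnchor _(VMRegAnchor::Soft);
 *     if (tl_regState == VMRegState::CLEAN) {
 *       // Registers were synced and are safe to read here.
 *     }
 *   }
 */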
/*
 * This class is used as an invocation guard equivalent to VMRegAnchor, except
 * the sync is assumed to have already been done. This was part of a project
 * aimed at improving performance by doing the fixup in advance, i.e. eagerly
 * -- the benefits turned out to be marginal or negative in most cases.
 */
struct EagerVMRegAnchor {
  EagerVMRegAnchor() {
    if (debug) {
      auto& regs = vmRegsUnsafe();
      DEBUG_ONLY auto const fp = regs.fp;
      DEBUG_ONLY auto const sp = regs.stack.top();
      DEBUG_ONLY auto const pc = regs.pc;
      VMRegAnchor _;
      assertx(regs.fp == fp);
      assertx(regs.stack.top() == sp);
      assertx(regs.pc == pc);
    }
    assertx(tl_regState < VMRegState::GUARDED_THRESHOLD);
    m_old = tl_regState;
    tl_regState = VMRegState::CLEAN;
  }

  ~EagerVMRegAnchor() {
    tl_regState = m_old;
  }

  VMRegState m_old;
};
/*
 * A scoped guard used around native code that is called from the JIT and which
 * /may conditionally/ need the VM to be in a consistent state.
 *
 * Using VMRegAnchor by itself would mean that in some cases---where we perform
 * many independent operations which only conditionally require syncing---we'd
 * have to choose between always (and sometimes spuriously) syncing, or syncing
 * multiple times when we could have synced just once.
 *
 * VMRegGuard is intended to be used around these conditional syncs (i.e.,
 * conditional instantiations of VMRegAnchor). It changes tl_regState to a
 * guarded value (above GUARDED_THRESHOLD), which tells sub-scoped VMRegAnchors
 * that they may keep it set to CLEAN after they finish syncing.
 *
 * VMRegGuard also saves the current fp, making it suitable for guarding
 * callbacks through library code that was compiled without frame pointers.
 */
struct VMRegGuard {
  /*
   * If we know the frame pointer returned by DECLARE_FRAME_POINTER is
   * accurate, we can use ALWAYS_INLINE and grab the frame pointer here.
   * If not, we have to use NEVER_INLINE to ensure we're one level in from the
   * guard... but that's not quite enough, because VMRegGuard::VMRegGuard is a
   * leaf function and so might not have a frame of its own.
   */
#ifdef FRAME_POINTER_IS_ACCURATE
  ALWAYS_INLINE VMRegGuard() : m_old(tl_regState) {
    if (tl_regState == VMRegState::DIRTY) {
      DECLARE_FRAME_POINTER(framePtr);
      tl_regState = (VMRegState)(uintptr_t)framePtr;
    }
  }
#else
  NEVER_INLINE VMRegGuard() : m_old(tl_regState) {
    if (tl_regState == VMRegState::DIRTY) {
      DECLARE_FRAME_POINTER(framePtr);
      auto const fp = isVMFrame(framePtr->m_sfp) ? framePtr : framePtr->m_sfp;
      tl_regState = (VMRegState)(uintptr_t)fp;
    }
  }
#endif
  ~VMRegGuard() { tl_regState = m_old; }

  VMRegGuard(const VMRegGuard&) = delete;
  VMRegGuard& operator=(const VMRegGuard&) = delete;

  VMRegState m_old;
};
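/*
 * Illustrative sketch only: guarding a callback that is reached through
 * library code compiled without frame pointers. The nested VMRegAnchor syncs
 * from the frame pointer stashed by the guard, and may leave the registers
 * CLEAN for any later syncs in the same scope. libraryInvoke() is a
 * hypothetical name.
 *
 *   void callThroughLibrary() {
 *     VMRegGuard guard;            // stash the current native frame pointer
 *     libraryInvoke([] {
 *       VMRegAnchor _;             // syncs via the stashed pointer if needed
 *       // ... use vmfp()/vmsp()/vmpc() ...
 *     });
 *   }
 */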
///////////////////////////////////////////////////////////////////////////////

#define SYNC_VM_REGS_SCOPED() \
  HPHP::VMRegAnchor _anchorUnused
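/*
 * Illustrative sketch only: the macro simply declares a named VMRegAnchor.
 * nativeEntryPoint() is a hypothetical name.
 *
 *   void nativeEntryPoint() {
 *     SYNC_VM_REGS_SCOPED();
 *     // vmfp()/vmsp()/vmpc() are now safe to read.
 *   }
 */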
///////////////////////////////////////////////////////////////////////////////

}