Free generator locals at return / frame unwind.
[hiphop-php.git] / hphp / runtime / vm / unwind.cpp
blob03e1c4590447a0f55cffcc80a752f55a1d77a2e4
1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
16 #include "hphp/runtime/vm/unwind.h"
18 #include <boost/implicit_cast.hpp>
20 #include "folly/ScopeGuard.h"
22 #include "hphp/util/trace.h"
23 #include "hphp/runtime/base/complex-types.h"
24 #include "hphp/runtime/ext/ext_continuation.h"
25 #include "hphp/runtime/ext/asio/static_exception_wait_handle.h"
26 #include "hphp/runtime/vm/bytecode.h"
27 #include "hphp/runtime/vm/func.h"
28 #include "hphp/runtime/vm/unit.h"
29 #include "hphp/runtime/vm/runtime.h"
30 #include "hphp/runtime/vm/debugger-hook.h"
32 namespace HPHP {
34 TRACE_SET_MOD(unwind);
35 using boost::implicit_cast;
37 namespace {
39 //////////////////////////////////////////////////////////////////////
// Render a one-line human-readable description of a Fault for trace
// output: the fault kind plus the raw address of the underlying
// exception object.  Compiled only into debug / tracing builds.
40 #if (defined(DEBUG) || defined(USE_TRACE))
41 std::string describeFault(const Fault& f) {
42 switch (f.m_faultType) {
43 case Fault::Type::UserException:
44 return folly::format("[user exception] {}",
45 implicit_cast<void*>(f.m_userException)).str();
46 case Fault::Type::CppException:
47 return folly::format("[cpp exception] {}",
48 implicit_cast<void*>(f.m_cppException)).str();
// Both Fault::Type values return above, so falling out of the switch
// is impossible.
50 not_reached();
52 #endif
// Pop every evaluation-stack temporary of frame `fp` that is live at
// bytecode offset `bcOffset`: plain TypedValue temporaries as well as
// pre-live ActRecs.  For an ActRec pushed by FPushCtor, the
// half-constructed $this is marked no-destruct so its destructor will
// not run as part of unwinding.
54 void discardStackTemps(const ActRec* const fp,
55 Stack& stack,
56 Offset const bcOffset) {
57 FTRACE(2, "discardStackTemps with fp {} sp {} pc {}\n",
58 implicit_cast<const void*>(fp),
59 implicit_cast<void*>(stack.top()),
60 bcOffset);
// visitStackElems walks the eval stack from the top down, calling the
// first lambda for activation records and the second for TypedValues.
62 visitStackElems(
63 fp, stack.top(), bcOffset,
64 [&] (ActRec* ar) {
65 assert(ar == reinterpret_cast<ActRec*>(stack.top()));
66 if (ar->isFromFPushCtor()) {
67 assert(ar->hasThis());
68 ar->getThis()->setNoDestruct();
70 FTRACE(2, " unwind pop AR : {}\n",
71 implicit_cast<void*>(stack.top()));
72 stack.popAR();
74 [&] (TypedValue* tv) {
75 assert(tv == stack.top());
76 FTRACE(2, " unwind pop TV : {}\n",
77 implicit_cast<void*>(stack.top()));
78 stack.popTV();
82 FTRACE(2, "discardStackTemps ends with sp = {}\n",
83 implicit_cast<void*>(stack.top()));
// Search the EH-table entry `eh` and its parent chain for a handler
// that can take over `fault` in frame `fp`.  The first
// `fault.m_handledCount` entries along the chain were already tried on
// a previous pass and are skipped.  On success, `pc` is repositioned
// to the fault funclet or matching catch handler and ResumeVM is
// returned; otherwise Propagate is returned so unwinding continues.
86 UnwindAction checkHandlers(const EHEnt* eh,
87 const ActRec* const fp,
88 PC& pc,
89 Fault& fault) {
90 auto const func = fp->m_func;
91 FTRACE(1, "checkHandlers: func {} ({})\n",
92 func->fullName()->data(),
93 func->unit()->filepath()->data());
95 // Always blindly propagate on fatal exception since those are
96 // unrecoverable anyway.
97 if (fault.m_faultType == Fault::Type::CppException) {
98 return UnwindAction::Propagate;
101 for (int i = 0;; ++i) {
102 // Skip the initial m_handledCount - 1 handlers that were
103 // considered before.
104 if (fault.m_handledCount <= i) {
105 fault.m_handledCount++;
106 switch (eh->m_type) {
107 case EHEnt::Type::Fault:
// A fault funclet always accepts: jump to its entry point.
108 FTRACE(1, "checkHandlers: entering fault at {}: save {}\n",
109 eh->m_fault,
110 func->unit()->offsetOf(pc));
111 pc = func->unit()->entry() + eh->m_fault;
112 DEBUGGER_ATTACHED_ONLY(phpDebuggerExceptionHandlerHook());
113 return UnwindAction::ResumeVM;
114 case EHEnt::Type::Catch:
115 // Note: we skip catch clauses if we have a pending C++ exception
116 // as part of our efforts to avoid running more PHP code in the
117 // face of such exceptions.
118 if (fault.m_faultType == Fault::Type::UserException &&
119 ThreadInfo::s_threadInfo->m_pendingException == nullptr) {
120 auto const obj = fault.m_userException;
// Try each catch clause; enter the first one whose class
// matches the thrown object's type.
121 for (auto& idOff : eh->m_catches) {
122 FTRACE(1, "checkHandlers: catch candidate {}\n", idOff.second);
123 auto handler = func->unit()->at(idOff.second);
124 auto const cls = Unit::lookupClass(
125 func->unit()->lookupNamedEntityId(idOff.first)
127 if (!cls || !obj->instanceof(cls)) continue;
129 FTRACE(1, "checkHandlers: entering catch at {}\n", idOff.second);
130 pc = handler;
131 DEBUGGER_ATTACHED_ONLY(phpDebuggerExceptionHandlerHook());
132 return UnwindAction::ResumeVM;
135 break;
// Walk outward to the enclosing protected region, if any.
138 if (eh->m_parentIndex != -1) {
139 eh = &func->ehtab()[eh->m_parentIndex];
140 } else {
141 break;
144 return UnwindAction::Propagate;
// Pop the frame `fp` during unwinding: free its locals (unless the
// frame was already executing a returning opcode, in which case they
// were freed by the return sequence), release the frame's stack space
// (or mark a generator finished), and reposition `fp`/`pc` at the
// caller.  If `fp` is the last frame of this nesting level, fp/pc are
// left untouched (the caller is about to re-throw).
147 void tearDownFrame(ActRec*& fp, Stack& stack, PC& pc) {
148 auto const func = fp->m_func;
149 auto const curOp = *reinterpret_cast<const Op*>(pc);
150 auto const unwindingReturningFrame =
151 curOp == OpRetC || curOp == OpRetV ||
152 curOp == OpCreateCont || curOp == OpAsyncSuspend;
153 auto const prevFp = fp->arGetSfp();
154 auto const soff = fp->m_soff;
156 FTRACE(1, "tearDownFrame: {} ({})\n fp {} prevFp {}\n",
157 func->fullName()->data(),
158 func->unit()->filepath()->data(),
159 implicit_cast<void*>(fp),
160 implicit_cast<void*>(prevFp));
162 // When throwing from a constructor, we normally want to avoid running the
163 // destructor on an object that hasn't been fully constructed yet. But if
164 // we're unwinding through the constructor's RetC, the constructor has
165 // logically finished and we're unwinding for some internal reason (timeout
166 // or user profiler, most likely). More importantly, fp->m_this may have
167 // already been destructed and/or overwritten due to sharing space with
168 // fp->m_r.
169 if (!unwindingReturningFrame && fp->isFromFPushCtor() && fp->hasThis()) {
170 fp->getThis()->setNoDestruct();
174 * If we're unwinding through a frame that's returning, it's only
175 * possible that its locals have already been decref'd.
177 * Here's why:
179 * - If a destructor for any of these things throws a php
180 * exception, it's swallowed at the dtor boundary and we keep
181 * running php.
183 * - If the destructor for any of these things throws a fatal,
184 * it's swallowed, and we set surprise flags to throw a fatal
185 * from now on.
187 * - If the second case happened and we have to run another
188 * destructor, its enter hook will throw, but it will be
189 * swallowed again.
191 * - Finally, the exit hook for the returning function can
192 * throw, but this happens last so everything is destructed.
195 if (!unwindingReturningFrame) {
196 try {
197 // Note that we must convert locals and the $this to
198 // uninit/zero during unwind. This is because a backtrace
199 // from another destructing object during this unwind may try
200 // to read them.
201 frame_free_locals_unwind(fp, func->numLocals());
202 } catch (...) {}
205 if (LIKELY(!fp->inGenerator())) {
206 // Free ActRec.
207 stack.ndiscard(func->numSlotsInFrame());
208 stack.discardAR();
209 } else {
210 // Mark the generator as finished and clear its m_value.
211 auto cont = frame_continuation(fp);
212 cont->setDone();
213 cellSet(make_tv<KindOfNull>(), cont->m_value);
217 * At the final ActRec in this nesting level. We don't need to set
218 * pc and fp since we're about to re-throw the exception. And we
219 * don't want to dereference prevFp since we just popped it.
221 if (prevFp == fp) return;
223 assert(stack.isValidAddress(reinterpret_cast<uintptr_t>(prevFp)) ||
224 prevFp->inGenerator());
// Resume the caller at its saved return offset.
225 auto const prevOff = soff + prevFp->m_func->base();
226 pc = prevFp->m_func->unit()->at(prevOff);
227 fp = prevFp;
// Pop the frame of an eagerly-executed async function that was exited
// by user exception `e`: free the locals, wrap `e` into a
// StaticExceptionWaitHandle left in the frame's return slot (fp->m_r),
// and reposition `fp`/`pc` at the caller.  If this was the last frame
// of the nesting level, `pc` is set to 0 to signal "return from VM".
// Takes over the fault's reference to `e` (decref'd after the wait
// handle takes its own reference).
230 void tearDownEagerAsyncFrame(ActRec*& fp, Stack& stack, PC& pc, ObjectData* e) {
231 auto const func = fp->m_func;
232 auto const prevFp = fp->arGetSfp();
233 auto const soff = fp->m_soff;
234 assert(!fp->inGenerator());
235 assert(func->isAsync());
236 assert(*reinterpret_cast<const Op*>(pc) != OpRetC);
238 FTRACE(1, "tearDownAsyncFrame: {} ({})\n fp {} prevFp {}\n",
239 func->fullName()->data(),
240 func->unit()->filepath()->data(),
241 implicit_cast<void*>(fp),
242 implicit_cast<void*>(prevFp));
// Destructors may throw during unwinding; swallow, as elsewhere in
// this file (see tearDownFrame).
244 try {
245 frame_free_locals_unwind(fp, func->numLocals());
246 } catch (...) {}
248 stack.ndiscard(func->numSlotsInFrame());
249 stack.ret();
250 assert(stack.topTV() == &fp->m_r);
251 tvWriteObject(c_StaticExceptionWaitHandle::Create(e), &fp->m_r);
252 e->decRefCount();
254 if (UNLIKELY(prevFp == fp)) {
255 pc = 0;
256 return;
259 assert(stack.isValidAddress(reinterpret_cast<uintptr_t>(prevFp)) ||
260 prevFp->inGenerator());
261 auto const prevOff = soff + prevFp->m_func->base();
262 pc = prevFp->m_func->unit()->at(prevOff);
263 fp = prevFp;
// Attach `prev` to the end of `top`'s exception chain: walk the
// "previous" property links of `top` until a slot that does not hold
// an Exception object, then store `prev` there (decref'ing whatever
// value occupied the slot, incref'ing `prev`).
266 void chainFaultObjects(ObjectData* top, ObjectData* prev) {
267 static const StaticString nProp("previous");
268 bool visible, accessible, unset;
269 while (true) {
270 TypedValue* top_tv = top->getProp(
271 SystemLib::s_ExceptionClass,
272 nProp.get(),
273 visible, accessible, unset
275 assert(visible && accessible && !unset);
276 if (top_tv->m_type != KindOfObject ||
277 !top_tv->m_data.pobj->instanceof(
278 SystemLib::s_ExceptionClass)) {
279 // Since we are overwriting, decref.
280 tvRefcountedDecRef(top_tv);
281 // Objects held in m_faults are not refcounted, therefore
282 // we need to increase the ref count here.
283 top_tv->m_type = KindOfObject;
284 top_tv->m_data.pobj = prev;
285 prev->incRefCount();
286 break;
// Slot already holds an Exception: follow the chain one level down.
288 top = top_tv->m_data.pobj;
// Try to merge `fault` (the fault on top of g_context->m_faults) with
// the fault underneath it.  If both were raised at the same VM nesting
// level and frame, the older fault's raiseOffset/handledCount are
// adopted (and, for user exceptions, the older exception object is
// linked in as "previous") and the two entries collapse into one.
// Returns true if faults were chained — the unwinder should then
// re-check handlers at the inherited offset — and false otherwise.
// In every case, `fault` ends up back on top of the fault stack.
292 bool chainFaults(Fault& fault) {
293 always_assert(!g_context->m_faults.empty());
294 auto& faults = g_context->m_faults;
295 faults.pop_back();
296 if (faults.empty()) {
// No older fault to chain with; restore and report "not chained".
297 faults.push_back(fault);
298 return false;
300 auto prev = faults.back();
301 if (fault.m_faultType == Fault::Type::CppException &&
302 fault.m_raiseNesting == prev.m_raiseNesting &&
303 fault.m_raiseFrame == prev.m_raiseFrame) {
304 fault.m_raiseOffset = prev.m_raiseOffset;
305 fault.m_handledCount = prev.m_handledCount;
306 faults.pop_back();
307 faults.push_back(fault);
308 return true;
310 if (fault.m_faultType == Fault::Type::UserException &&
311 fault.m_raiseNesting == prev.m_raiseNesting &&
312 fault.m_raiseFrame == prev.m_raiseFrame) {
313 assert(prev.m_faultType == Fault::Type::UserException);
314 fault.m_raiseOffset = prev.m_raiseOffset;
315 fault.m_handledCount = prev.m_handledCount;
// Link the older user exception as the new one's "previous".
316 chainFaultObjects(fault.m_userException, prev.m_userException);
317 faults.pop_back();
318 faults.push_back(fault);
319 return true;
321 faults.push_back(fault);
322 return false;
326 * Unwinding proceeds as follows:
328 * - Discard all evaluation stack temporaries (including pre-live
329 * activation records).
331 * - Check if the faultOffset that raised the exception is inside a
332 * protected region, if so, if it can handle the Fault resume the
333 * VM at the handler.
335 * - Check if we are handling user exception in an eagerly executed
336 * async function. If so, pop its frame, wrap the exception into
337 * StaticExceptionWaitHandle object, leave it on the stack as
338 * a return value from the async function and resume VM.
340 * - Failing any of the above, pop the frame for the current
341 * function. If the current function was the last frame in the
342 * current VM nesting level, return UnwindAction::Propagate,
343 * otherwise go to the first step and repeat this process in the
344 * caller's frame.
346 * Note: it's important that the unwinder makes a copy of the Fault
347 * it's currently operating on, as the underlying faults vector may
348 * reallocate due to nested exception handling.
350 UnwindAction unwind(ActRec*& fp,
351 Stack& stack,
352 PC& pc,
353 Fault fault) {
354 FTRACE(1, "entering unwinder for fault: {}\n", describeFault(fault));
355 SCOPE_EXIT {
356 FTRACE(1, "leaving unwinder for fault: {}\n", describeFault(fault));
359 for (;;) {
360 bool discard = false;
361 if (fault.m_raiseOffset == kInvalidOffset) {
363 * This block executes whenever we want to treat the fault as if
364 * it was freshly thrown. Freshly thrown faults either were never
365 * previously seen by the unwinder OR were propagated from the
366 * previous frame. In such a case, we fill in the fields with
367 * the information from the current frame.
369 always_assert(fault.m_raiseNesting == kInvalidNesting);
370 // Nesting is set to the current VM nesting.
371 fault.m_raiseNesting = g_context->m_nestedVMs.size();
372 // Raise frame is set to the current frame
373 fault.m_raiseFrame = fp;
374 // Raise offset is set to the offset of the current PC.
375 fault.m_raiseOffset = fp->m_func->unit()->offsetOf(pc);
376 // No handlers were yet examined for this fault.
377 fault.m_handledCount = 0;
378 // We will be also discarding stack temps.
379 discard = true;
382 FTRACE(1, "unwind: func {}, raiseOffset {} fp {}\n",
383 fp->m_func->name()->data(),
384 fault.m_raiseOffset,
385 implicit_cast<void*>(fp));
387 assert(fault.m_raiseNesting != kInvalidNesting);
388 assert(fault.m_raiseFrame != nullptr);
389 assert(fault.m_raiseOffset != kInvalidOffset);
392 * If the handledCount is non-zero, we've already seen this fault once
393 * while unwinding this frame, and popped all eval stack
394 * temporaries the first time it was thrown (before entering a
395 * fault funclet). When the Unwind instruction was executed in
396 * the funclet, the eval stack must have been left empty again.
398 * (We have to skip discardStackTemps in this case because it will
399 * look for FPI regions and assume the stack offsets correspond to
400 * what the FPI table expects.)
402 if (discard) {
403 discardStackTemps(fp, stack, fault.m_raiseOffset);
406 do {
407 const EHEnt* eh = fp->m_func->findEH(fault.m_raiseOffset);
408 if (eh != nullptr) {
409 switch (checkHandlers(eh, fp, pc, fault)) {
410 case UnwindAction::ResumeVM:
411 // We've kept our own copy of the Fault, because m_faults may
412 // change if we have a reentry during unwinding. When we're
413 // ready to resume, we need to replace the fault to reflect
414 // any state changes we've made (handledCount, etc).
415 g_context->m_faults.back() = fault;
416 return UnwindAction::ResumeVM;
417 case UnwindAction::Propagate:
418 break;
419 case UnwindAction::Return:
420 not_reached();
423 // If we came here, it means that no further EHs were found for
424 // the current fault offset and handledCount. This means we are
425 // allowed to chain the current exception with the previous
426 // one (if it exists). This is because the current exception
427 // escapes the exception handler where it was thrown.
428 } while (chainFaults(fault));
430 // If in an eagerly executed async function, wrap the user exception
431 // into a StaticExceptionWaitHandle and return it to the caller.
432 if (fp->m_func->isAsync() && !fp->inGenerator() &&
433 fault.m_faultType == Fault::Type::UserException) {
434 tearDownEagerAsyncFrame(fp, stack, pc, fault.m_userException);
435 g_context->m_faults.pop_back();
// pc == 0 means tearDownEagerAsyncFrame hit the last frame of
// this nesting level, so leave the VM instead of resuming.
436 return pc ? UnwindAction::ResumeVM : UnwindAction::Return;
439 // We found no more handlers in this frame, so the nested fault
440 // count starts over for the caller frame.
441 auto const lastFrameForNesting = fp == fp->arGetSfp();
442 tearDownFrame(fp, stack, pc);
444 // Once we are done with EHs for the current frame we restore
445 // default values for the fields inside Fault. This makes sure
446 // that on another loop pass we will treat the fault just
447 // as if it was freshly thrown.
448 fault.m_raiseNesting = kInvalidNesting;
449 fault.m_raiseFrame = nullptr;
450 fault.m_raiseOffset = kInvalidOffset;
451 fault.m_handledCount = 0;
452 g_context->m_faults.back() = fault;
454 if (lastFrameForNesting) {
455 FTRACE(1, "unwind: reached the end of this nesting's ActRec chain\n");
456 break;
460 return UnwindAction::Propagate;
463 const StaticString s_hphpd_break("hphpd_break");
464 const StaticString s_fb_enable_code_coverage("fb_enable_code_coverage");
466 // Unwind the frame for a builtin. Currently only used when switching
467 // modes for hphpd_break and fb_enable_code_coverage.
// Pops any eval-stack values above the builtin's frame, frees the
// locals/VarEnv, and restores g_context's fp/pc to the previous VM
// state.  The asserts pin this to exactly the two builtins above.
468 void unwindBuiltinFrame() {
469 auto& stack = g_context->getStack();
470 auto& fp = g_context->m_fp;
472 assert(fp->m_func->methInfo());
473 assert(fp->m_func->name()->isame(s_hphpd_break.get()) ||
474 fp->m_func->name()->isame(s_fb_enable_code_coverage.get()));
476 // Free any values that may be on the eval stack. We know there
477 // can't be FPI regions and it can't be a generator body because
478 // it's a builtin frame.
479 auto const evalTop = reinterpret_cast<TypedValue*>(g_context->getFP());
480 while (stack.topTV() < evalTop) {
481 stack.popTV();
484 // Free the locals and VarEnv if there is one
485 frame_free_locals_inl(fp, fp->m_func->numLocals(), nullptr);
487 // Tear down the frame
488 Offset pc = -1;
489 ActRec* sfp = g_context->getPrevVMState(fp, &pc);
490 assert(pc != -1);
491 fp = sfp;
492 g_context->m_pc = fp->m_func->unit()->at(pc);
493 stack.discardAR();
// Record a C++ exception as a new Fault on the context's fault stack.
// Takes ownership of `e` (stored as a raw pointer in the Fault).
496 void pushFault(Exception* e) {
497 Fault f;
498 f.m_faultType = Fault::Type::CppException;
499 f.m_cppException = e;
500 g_context->m_faults.push_back(f);
501 FTRACE(1, "pushing new fault: {}\n", describeFault(f));
// Record a PHP user exception as a new Fault on the context's fault
// stack.  The Fault holds a manual (non-smart-pointer) reference, so
// the object's refcount is bumped here explicitly.
504 void pushFault(const Object& o) {
505 Fault f;
506 f.m_faultType = Fault::Type::UserException;
507 f.m_userException = o.get();
508 f.m_userException->incRefCount();
509 g_context->m_faults.push_back(f);
510 FTRACE(1, "pushing new fault: {}\n", describeFault(f));
// Start unwinding for the most recently pushed fault.  A copy of the
// Fault is passed by value because the underlying faults vector may
// reallocate during nested exception handling (see unwind()'s note).
513 UnwindAction enterUnwinder() {
514 auto fault = g_context->m_faults.back();
515 return unwind(
516 g_context->m_fp, // by ref
517 g_context->getStack(),// by ref
518 g_context->m_pc, // by ref
519 fault
523 //////////////////////////////////////////////////////////////////////
// Top-level VM exception dispatcher.  Rethrows the in-flight C++
// exception to classify it, converts it into a Fault where
// appropriate, and runs (or resumes) the unwinder.  Returns the action
// the interpreter loop should take; noexcept because every incoming
// exception type is handled by one of the catch clauses below.
527 UnwindAction exception_handler() noexcept {
528 FTRACE(1, "unwind exception_handler\n");
530 g_context->checkRegState();
532 try { throw; }
535 * Unwind (repropagating from a fault funclet) is slightly different
536 * from the throw cases, because we need to re-raise the exception
537 * as if it came from the same offset to handle nested fault
538 * handlers correctly, and we continue propagating the current Fault
539 * instead of pushing a new one.
541 catch (const VMPrepareUnwind&) {
542 Fault fault = g_context->m_faults.back();
543 FTRACE(1, "unwind: restoring offset {}\n", g_context->m_pc);
544 return unwind(
545 g_context->m_fp,
546 g_context->getStack(),
547 g_context->m_pc,
548 fault
// A thrown PHP object becomes a user-exception Fault.
552 catch (const Object& o) {
553 pushFault(o);
554 return enterUnwinder();
557 catch (VMSwitchMode&) {
558 return UnwindAction::ResumeVM;
561 catch (VMSwitchModeBuiltin&) {
562 unwindBuiltinFrame();
563 g_context->getStack().pushNull(); // return value
564 return UnwindAction::ResumeVM;
567 catch (VMReenterStackOverflow&) {
568 pushFault(new FatalErrorException("Stack overflow"));
569 return UnwindAction::Propagate;
572 catch (Exception& e) {
// clone() because `e` is unwound away once this handler returns.
// (Fixed: dropped a stray empty statement after this call.)
573 pushFault(e.clone());
574 return enterUnwinder();
577 catch (std::exception& e) {
578 pushFault(new Exception("unexpected %s: %s", typeid(e).name(), e.what()));
579 return enterUnwinder();
582 catch (...) {
583 pushFault(new Exception("unknown exception"));
584 return enterUnwinder();
587 not_reached();
590 //////////////////////////////////////////////////////////////////////