Replace Catch opcode with implicitly pushed Throwables
[hiphop-php.git] / hphp / runtime / vm / verifier / cfg.h
blobf1d2419ebf809b7a3ed31c72b402b8763cec8d17
1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #ifndef incl_HPHP_VM_VERIFIER_CFG_H_
18 #define incl_HPHP_VM_VERIFIER_CFG_H_
20 #include "hphp/runtime/vm/hhbc-codec.h"
21 #include "hphp/runtime/vm/repo.h"
22 #include "hphp/runtime/vm/verifier/util.h"
23 #include "hphp/util/arena.h"
24 #include <boost/dynamic_bitset.hpp>
26 namespace HPHP {
27 namespace Verifier {
29 /**
30 * Block is a standard basic block. The # of ordinary successors
31 * is determined by the last instruction and the # of exception
32 * edges is determined by the Graph that owns this Block.
34 * id and rpo_id are the block id and reverse-postorder-number,
35 * respectively. Both start from 0. Pass-specific data about
36 * blocks should be stored elsewhere and indexed by one of these ids,
37 * or in a Map<Block*> if you're a sadist.
39 struct Block {
40 explicit Block(PC start) :
41 start(start), last(0), end(0) , id(-1), rpo_id(-1), next_linear(0),
42 next_rpo(0), succs(0), exn(0) {
45 // Never copy Blocks.
46 explicit Block(const Block&) = delete;
47 Block& operator=(const Block&) = delete;
49 static bool reachable(Block* from, Block* to,
50 boost::dynamic_bitset<>& visited);
52 PC start; // first instruction
53 PC last; // last instruction (inclusive)
54 PC end; // points past last instruction (exclusive)
55 int id; // plain block id (creation order)
56 int rpo_id; // reverse postorder number. 0 = entry.
57 Block* next_linear; // next block in linear order
58 Block* next_rpo; // next block in reverse postorder
59 Block** succs; // array of succesors (can have nulls)
60 Block* exn; // exception edge (can be null)
63 /**
64 * Graph is a control-flow-graph container of Blocks. The idea is
65 * to Arena-allocate these (and Blocks) and only store pass-agnostic
66 * stuff about the function.
68 * Iteration over blocks can be done directly if necessary but when
69 * you can, use a predefined Range, like LinearBlocks or RpoBlocks.
71 struct Graph {
72 Graph() : first_linear(0), first_rpo(0), entries(0), block_count(0) {
75 explicit Graph(const Graph&) = delete;
76 Graph& operator=(const Graph&) = delete;
78 Block* first_linear; // blocks in linear order
79 Block* first_rpo; // blocks in reverse postorder
80 Block** entries; // entry points indexed by arg count [0:param_count]
81 int param_count;
82 int block_count;
85 inline bool isTF(PC pc) {
86 return (instrFlags(peek_op(pc)) & TF) != 0;
89 inline bool isCF(PC pc) {
90 return instrIsNonCallControlFlow(peek_op(pc));
93 inline bool isRet(PC pc) {
94 auto const op = peek_op(pc);
95 return op == Op::RetC || op == Op::RetCSuspended || op == Op::RetM;
98 // Return true if pc points to an Iter instruction whose first immedate
99 // argument is an iterator id.
100 inline bool isIter(PC pc) {
101 // IterBreak is not included, because it has a variable-length list of
102 // iterartor ids, rather than a single iterator id.
103 switch (peek_op(pc)) {
104 case Op::IterInit:
105 case Op::LIterInit:
106 case Op::IterInitK:
107 case Op::LIterInitK:
108 case Op::IterNext:
109 case Op::LIterNext:
110 case Op::IterNextK:
111 case Op::LIterNextK:
112 case Op::IterFree:
113 case Op::LIterFree:
114 return true;
115 default:
116 break;
118 return false;
121 inline int getImmIva(PC pc) {
122 return getImm(pc, 0).u_IVA;
125 inline int numSuccBlocks(const Block* b) {
126 return numSuccs(b->last);
// Dispatch on an Either<L*, R*>: return expression `l` evaluated on the
// left alternative, or `r` on the right. Exactly one side is non-null.
#define APPLY(d, l, r)                            \
  if (auto left = d.left()) return left->l;       \
  if (auto right = d.right()) return right->r;    \
  not_reached()
134 struct SomeUnit {
135 /* implicit */ SomeUnit(const Unit* u) : m_unit(u) {}
136 /* implicit */ SomeUnit(const UnitEmitter* u) : m_unit(u) {}
138 SomeUnit& operator=(const Unit* u) { m_unit = u; return *this; }
139 SomeUnit& operator=(const UnitEmitter* u) { m_unit = u; return *this; }
141 PC entry() const { APPLY(m_unit, entry(), bc()); }
142 PC at(Offset o) const { return entry() + o; }
143 Offset offsetOf(PC addr) const { return static_cast<Offset>(addr - entry()); }
145 private:
146 Either<const Unit*, const UnitEmitter*> m_unit;
149 struct SomeFunc {
150 /* implicit */ SomeFunc(const Func* f) : m_func(f) {}
151 /* implicit */ SomeFunc(const FuncEmitter* f) : m_func(f) {}
153 SomeFunc& operator=(const Func* f) { m_func = f; return *this; }
154 SomeFunc& operator=(const FuncEmitter* f) { m_func = f; return *this; }
156 SomeUnit unit() const {
157 return m_func.match(
158 [&] (const Func* f) -> SomeUnit { return f->unit(); },
159 [&] (const FuncEmitter* f) -> SomeUnit { return &f->ue(); }
162 Offset base() const { APPLY(m_func, base(), base); }
163 Offset past() const { APPLY(m_func, past(), past); }
165 size_t numParams() const { APPLY(m_func, params().size(), params.size()); }
166 const Func::ParamInfo& param(size_t idx) const {
167 APPLY(m_func, params()[idx], params[idx]);
170 size_t numEHEnts() const { APPLY(m_func, ehtab().size(), ehtab.size()); }
171 const EHEnt& ehent(size_t i) const { APPLY(m_func, ehtab()[i], ehtab[i]); }
173 const EHEnt* findEH(Offset off) const {
174 return m_func.match(
175 [&] (const Func* f) { return Func::findEH(f->ehtab(), off); },
176 [&] (const FuncEmitter* f) { return Func::findEH(f->ehtab, off); }
180 private:
181 Either<const Func*, const FuncEmitter*> m_func;
184 #undef APPLY
187 * A GraphBuilder holds the temporary state required for building
188 * a graph and is typically not needed once a Graph is built.
190 struct GraphBuilder {
191 private:
192 typedef hphp_hash_map<PC, Block*> BlockMap;
193 enum EdgeKind { FallThrough, Taken };
194 public:
195 template<class F>
196 GraphBuilder(Arena& arena, const F* func)
197 : m_arena(arena), m_func(func),
198 m_unit(m_func.unit()), m_graph(0) {
200 Graph* build();
201 Block* at(Offset off) const { return at(m_unit.at(off)); }
202 private:
203 void createBlocks();
204 void createExBlocks();
205 void linkBlocks();
206 void linkExBlocks();
207 Block* createBlock(PC pc);
208 Block* createBlock(Offset off) { return createBlock(m_unit.at(off)); }
209 Block* at(PC addr) const;
210 Offset offset(PC addr) const {
211 return m_unit.offsetOf(addr);
213 Block** succs(Block* b);
214 private:
215 BlockMap m_blocks;
216 Arena& m_arena;
217 const SomeFunc m_func;
218 const SomeUnit m_unit;
219 Graph* m_graph;
223 * Range over blocks in linear order
225 struct LinearBlocks {
226 LinearBlocks(Block* first, Block* end) : b(first), end(end) {
228 bool empty() const { return b == end; }
229 Block* front() const { assertx(!empty()); return b; }
230 Block* popFront() { Block* f = front(); b = b->next_linear; return f; }
231 private:
232 Block *b; // The current block.
233 Block *end; // End marker; empty when b==end.
237 * Range over the non-null Block* pointers in array.
239 struct BlockPtrRange {
240 BlockPtrRange(Block** a, int cap) : i(a), end(&a[cap]) {
241 trimFront();
242 trimBack();
244 bool empty() const { return i >= end; }
245 Block* front() const { assertx(!empty()); return i[0]; }
246 Block* back() const { assertx(!empty()); return end[-1]; }
247 Block* popFront() {
248 Block* b = front();
249 ++i; trimFront();
250 return b;
252 Block* popBack() {
253 Block* b = back();
254 --end; trimBack();
255 return b;
257 private:
258 void trimFront() { while (!empty() && !i[0]) ++i; }
259 void trimBack() { while (!empty() && !end[-1]) --end; }
260 private:
261 Block **i; // current
262 Block **end; // points past end (exclusive)
266 * Iterate through a contiguous range of instructions
268 struct InstrRange {
269 InstrRange(PC pc, PC end) : pc(pc), end(end) {
271 bool empty() const {
272 return pc >= end;
274 PC front() const {
275 assertx(!empty());
276 return pc;
278 PC popFront() {
279 PC i = front();
280 pc += instrLen(i);
281 return i;
283 private:
284 PC pc, end;
288 * Sort blocks in reverse postorder and reset each blocks's next_rpo and
289 * and rpo_id fields
291 void sortRpo(Graph* g);
293 inline InstrRange funcInstrs(SomeFunc func) {
294 return InstrRange(func.unit().at(func.base()),
295 func.unit().at(func.past()));
298 inline InstrRange blockInstrs(const Block* b) {
299 return InstrRange(b->start, b->end);
302 inline BlockPtrRange succBlocks(const Block* b) {
303 return BlockPtrRange(b->succs, numSuccBlocks(b));
306 inline BlockPtrRange entryBlocks(const Graph* g) {
307 return BlockPtrRange(g->entries, g->param_count + 1);
310 inline LinearBlocks linearBlocks(const Graph* g) {
311 return LinearBlocks(g->first_linear, 0);
314 // A callsite pushes 0 or more arguments on the stack, followed by a FPush* and
315 // FCall opcodes. The FPI Region protects the range of instructions that execute
316 // with the partial activation on the stack, which is the instruction after
317 // FPush* up to and including FCall. FPush* is not in the protected region. In
318 // other words, only FCall is part of the FPI region. This mechanism is being
319 // actively dismantled.
321 inline Offset fpiBase(const FPIEnt& fpi, PC bc) {
322 PC fpush = bc + fpi.m_fpushOff;
323 return fpush + instrLen(fpush) - bc;
326 inline Offset fpiPast(const FPIEnt& fpi, PC bc) {
327 PC endFpiOp = bc + fpi.m_fpiEndOff;
328 return endFpiOp + instrLen(endFpiOp) - bc;
331 }} // HPHP::Verifier
333 #endif // incl_HPHP_VM_VERIFIER_CFG_H_