2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #ifndef incl_HPHP_JIT_VASM_X64_H_
18 #define incl_HPHP_JIT_VASM_X64_H_
20 #include "hphp/runtime/base/stats.h"
21 #include "hphp/runtime/base/types.h"
22 #include "hphp/runtime/vm/jit/abi.h"
23 #include "hphp/runtime/vm/jit/containers.h"
24 #include "hphp/runtime/vm/jit/cpp-call.h"
25 #include "hphp/runtime/vm/jit/fixup.h"
26 #include "hphp/runtime/vm/jit/phys-reg.h"
27 #include "hphp/runtime/vm/jit/service-requests.h"
28 #include "hphp/runtime/vm/jit/target-cache.h"
29 #include "hphp/runtime/vm/jit/vasm.h"
30 #include "hphp/runtime/vm/srckey.h"
31 #include "hphp/util/asm-x64.h"
34 #include <boost/dynamic_bitset.hpp>
35 #include <folly/Range.h>
37 namespace HPHP
{ namespace jit
{
42 namespace HPHP
{ namespace jit
{
// XXX: This should go back to arg-group.h as part of work on t5297892
// Describes how a native helper returns its result to the JIT.
enum class DestType : uint8_t {
  None, // return void (no valid registers)
  SSA,  // return a single-register value
  TV,   // return a TypedValue packed in two registers
  Dbl,  // return scalar double in a single FP register
  SIMD, // return a TypedValue in one SIMD register
};

// Printable name for a DestType, for tracing/debugging.
const char* destTypeName(DestType);
57 // Vreg is like physreg, but numbers go beyond the physical register names.
58 // Since it is unconstrained, it has predicates to test whether rn is
59 // a gpr, xmm, or virtual register.
61 static constexpr auto kind
= VregKind::Any
;
62 static const unsigned kNumGP
{PhysReg::kSIMDOffset
}; // 33
63 static const unsigned kNumXMM
{30};
64 static const unsigned kNumSF
{1};
65 static const unsigned G0
{0};
66 static const unsigned X0
{kNumGP
};
67 static const unsigned S0
{X0
+kNumXMM
};
68 static const unsigned V0
{S0
+kNumSF
};
69 static const unsigned kInvalidReg
{0xffffffffU
};
70 Vreg() : rn(kInvalidReg
) {}
71 explicit Vreg(size_t r
) : rn(r
) {}
72 /* implicit */ Vreg(Reg64 r
) : rn(int(r
)) {}
73 /* implicit */ Vreg(Reg32 r
) : rn(int(r
)) {}
74 /* implicit */ Vreg(Reg8 r
) : rn(int(r
)) {}
75 /* implicit */ Vreg(RegXMM r
) : rn(X0
+int(r
)) {}
76 /* implicit */ Vreg(RegSF r
) : rn(S0
+int(r
)) {}
77 /* implicit */ Vreg(PhysReg r
) {
78 rn
= (r
== InvalidReg
) ? kInvalidReg
:
79 r
.isGP() ? G0
+int(Reg64(r
)) :
80 r
.isSIMD() ? X0
+int(RegXMM(r
)) :
81 /* r.isSF() ? */ S0
+int(RegSF(r
));
83 /* implicit */ operator size_t() const { return rn
; }
84 /* implicit */ operator Reg64() const {
86 return Reg64(rn
- G0
);
88 /* implicit */ operator RegXMM() const {
90 return RegXMM(rn
- X0
);
92 /* implicit */ operator RegSF() const {
94 return RegSF(rn
- S0
);
96 /* implicit */ operator PhysReg() const { return physReg(); }
98 static_assert(G0
< V0
&& X0
< V0
&& S0
< V0
&& V0
< kInvalidReg
, "");
101 bool isGP() const { return /* rn >= G0 && */ rn
< G0
+kNumGP
; }
102 bool isSIMD() const { return rn
>= X0
&& rn
< X0
+kNumXMM
; }
103 bool isSF() const { return rn
>= S0
&& rn
< S0
+kNumSF
; }
104 bool isVirt() const { return rn
>= V0
&& isValid(); }
105 bool isValid() const { return rn
!= kInvalidReg
; }
106 bool operator==(Vreg r
) const { return rn
== r
.rn
; }
107 bool operator!=(Vreg r
) const { return rn
!= r
.rn
; }
108 PhysReg
physReg() const {
109 assert(!isValid() || isPhys());
110 return !isValid() ? InvalidReg
:
111 isGP() ? PhysReg(/* implicit */operator Reg64()) :
112 isSIMD() ? PhysReg(/* implicit */operator RegXMM()) :
113 /* isSF() ? */ PhysReg(/* implicit */operator RegSF());
115 Vptr
operator[](int disp
) const;
116 Vptr
operator[](ScaledIndex
) const;
117 Vptr
operator[](ScaledIndexDisp
) const;
118 Vptr
operator[](Vptr
) const;
119 Vptr
operator[](DispReg
) const;
120 Vptr
operator*() const;
121 Vptr
operator[](Vscaled
) const;
122 Vscaled
operator*(int scale
) const;
123 Vptr
operator[](Vreg
) const;
124 Vptr
operator+(size_t d
) const;
126 unsigned rn
{kInvalidReg
};
129 // instantiations of this wrap virtual register numbers in in a strongly
130 // typed wrapper that conveys physical constraints, similar to Reg64,
131 // Reg32, RegXMM, etc.
132 template<class Reg
, VregKind Kind
, int Bits
> struct Vr
{
133 static constexpr auto bits
= Bits
;
134 static constexpr auto kind
= Kind
;
135 explicit Vr(size_t rn
) : rn(rn
) {}
136 /* implicit */ Vr(Vreg r
) : rn(size_t(r
)) {
137 if (kind
== VregKind::Gpr
) {
138 assert(!r
.isValid() || r
.isVirt() || r
.isGP());
139 } else if (kind
== VregKind::Simd
) {
140 assert(!r
.isValid() || r
.isVirt() || r
.isSIMD());
141 } else if (kind
== VregKind::Sf
) {
142 assert(!r
.isValid() || r
.isVirt() || r
.isSF());
145 /* implicit */ Vr(Reg r
) : Vr
{Vreg(r
)} {}
146 /* implicit */ Vr(PhysReg pr
) : Vr
{Vreg(pr
)} {}
149 return isGP() ? Reg(rn
) :
150 isSIMD() ? Reg(rn
-Vreg::X0
) :
151 /* isSF() ? */ Reg(rn
-Vreg::S0
);
153 /* implicit */ operator Reg() const { return asReg(); }
154 /* implicit */ operator Vreg() const { return Vreg(rn
); }
155 /* implicit */ operator size_t() const { return rn
; }
156 bool isPhys() const {
157 static_assert(Vreg::G0
== 0, "");
158 return rn
< Vreg::V0
;
160 bool isGP() const { return rn
>=Vreg::G0
&& rn
<Vreg::G0
+Vreg::kNumGP
; }
161 bool isSIMD() const { return rn
>=Vreg::X0
&& rn
<Vreg::X0
+Vreg::kNumXMM
; }
162 bool isSF() const { return rn
>=Vreg::S0
&& rn
<Vreg::S0
+Vreg::kNumSF
; }
163 bool isVirt() const { return rn
>= Vreg::V0
&& isValid(); }
164 bool operator==(Vr
<Reg
,Kind
,Bits
> r
) const { return rn
== r
.rn
; }
165 bool operator!=(Vr
<Reg
,Kind
,Bits
> r
) const { return rn
!= r
.rn
; }
166 bool isValid() const { return rn
!= Vreg::kInvalidReg
; }
167 Vptr
operator[](int disp
) const;
168 Vptr
operator[](ScaledIndex si
) const;
169 Vptr
operator[](ScaledIndexDisp sid
) const;
170 Vptr
operator[](Vptr
) const;
171 Vptr
operator[](DispReg
) const;
172 Vptr
operator*() const;
173 Vptr
operator+(size_t d
) const;
174 explicit operator PhysReg() const { return asReg(); }
178 typedef Vr
<Reg64
,VregKind::Gpr
,64> Vreg64
;
179 typedef Vr
<Reg32
,VregKind::Gpr
,32> Vreg32
;
180 typedef Vr
<Reg16
,VregKind::Gpr
,16> Vreg16
;
181 typedef Vr
<Reg8
,VregKind::Gpr
,8> Vreg8
;
182 typedef Vr
<RegXMM
,VregKind::Simd
,64> VregDbl
;
183 typedef Vr
<RegXMM
,VregKind::Simd
,128> Vreg128
;
184 typedef Vr
<RegSF
,VregKind::Sf
,4> VregSF
;
186 inline Reg64
r64(Vreg64 r
) { return r
; }
188 // base + index*scale + disp.
189 // base is optional, implying baseless address
192 enum Segment
: uint8_t { DS
, FS
};
193 template<class Base
> Vptr(Base b
, int d
)
194 : base(b
), index(0xffffffff), scale(1), disp(d
)
196 template<class Base
, class Index
> Vptr(Base b
, Index i
, int s
, int d
)
197 : base(b
), index(i
), scale(s
), disp(d
)
199 /* implicit */ Vptr(MemoryRef m
, Segment s
= DS
)
200 : base(m
.r
.base
), index(m
.r
.index
), scale(m
.r
.scale
)
201 , seg(s
), disp(m
.r
.disp
)
203 MemoryRef
mr() const {
204 if (index
.isValid()) {
205 return base
.isValid() ? r64(base
)[r64(index
) * scale
+ disp
] :
206 *(IndexedDispReg
{r64(index
) * scale
+ disp
});
208 return base
.isValid() ? r64(base
)[disp
] :
212 /* implicit */ operator MemoryRef() const {
217 Vreg64 base
; // optional, for baseless mode
218 Vreg64 index
; // optional
219 uint8_t scale
; // 1,2,4,8
220 Segment seg
{DS
}; // DS or FS
229 inline Vptr
operator+(Vptr lhs
, int32_t d
) {
230 return Vptr(lhs
.base
, lhs
.index
, lhs
.scale
, lhs
.disp
+ d
);
233 inline Vptr
operator+(Vptr lhs
, intptr_t d
) {
234 return Vptr(lhs
.base
, lhs
.index
, lhs
.scale
,
235 safe_cast
<int32_t>(lhs
.disp
+ d
));
238 inline Vptr
operator+(Vreg64 base
, int32_t d
) {
239 return Vptr(base
, d
);
242 // A Vloc is either a single or pair of vregs, for keeping track
243 // of where we have stored an SSATmp.
245 enum Kind
{ kPair
, kWide
};
247 explicit Vloc(Vreg r
) { m_regs
[0] = r
; }
248 Vloc(Vreg r0
, Vreg r1
) { m_regs
[0] = r0
; m_regs
[1] = r1
; }
249 Vloc(Kind kind
, Vreg r
) : m_kind(kind
) { m_regs
[0] = r
; }
250 bool hasReg(int i
= 0) const {
251 return m_regs
[i
].isValid();
253 Vreg
reg(int i
= 0) const {
256 int numAllocated() const {
257 return int(m_regs
[0].isValid()) + int(m_regs
[1].isValid());
259 int numWords() const {
260 return m_kind
== kWide
? 2 : numAllocated();
262 bool isFullSIMD() const {
263 return m_kind
== kWide
;
266 bool operator==(Vloc other
) const {
267 return m_kind
== other
.m_kind
&&
268 m_regs
[0] == other
.m_regs
[0] &&
269 m_regs
[1] == other
.m_regs
[1];
271 bool operator!=(Vloc other
) const {
272 return !(*this == other
);
280 inline Vscaled
Vreg::operator*(int scale
) const {
281 return Vscaled
{*this, scale
};
284 inline Vptr
Vreg::operator[](Vscaled si
) const {
285 return Vptr(*this, si
.index
, si
.scale
, 0);
288 inline Vptr
Vreg::operator*() const { return Vptr(*this, 0); }
289 inline Vptr
Vreg::operator[](int disp
) const { return Vptr(*this, disp
); }
290 inline Vptr
Vreg::operator[](ScaledIndex si
) const {
291 return Vptr(*this, si
.index
, si
.scale
, 0);
293 inline Vptr
Vreg::operator[](ScaledIndexDisp sid
) const {
294 return Vptr(*this, sid
.si
.index
, sid
.si
.scale
, sid
.disp
);
296 inline Vptr
Vreg::operator[](Vptr p
) const {
297 return Vptr(*this, p
.base
, 1, p
.disp
);
299 inline Vptr
Vreg::operator[](DispReg rd
) const {
300 return Vptr(*this, rd
.base
, 1, rd
.disp
);
302 inline Vptr
Vreg::operator[](Vreg index
) const {
303 return Vptr(*this, index
, 1, 0);
306 template<class Reg
, VregKind Kind
, int Bits
>
307 Vptr Vr
<Reg
,Kind
,Bits
>::operator*() const {
308 return Vptr(*this, 0);
311 template<class Reg
, VregKind Kind
, int Bits
>
312 Vptr Vr
<Reg
,Kind
,Bits
>::operator[](int disp
) const {
313 return Vptr(*this, disp
);
316 inline Vptr
operator+(Vreg base
, int32_t d
) {
317 return Vptr(base
, safe_cast
<int32_t>(d
));
320 inline Vptr
operator+(Vreg base
, intptr_t d
) {
321 return Vptr(base
, safe_cast
<int32_t>(d
));
324 inline Vptr
Vreg::operator+(size_t d
) const {
325 return Vptr(*this, safe_cast
<int32_t>(d
));
328 inline Vptr
operator+(Vreg64 base
, intptr_t d
) {
329 return Vptr(base
, safe_cast
<int32_t>(d
));
332 template<class Reg
, VregKind Kind
, int Bits
>
333 Vptr Vr
<Reg
,Kind
,Bits
>::operator[](ScaledIndex si
) const {
334 return Vptr(*this, si
.index
, si
.scale
, 0);
337 template<class Reg
, VregKind Kind
, int Bits
>
338 Vptr Vr
<Reg
,Kind
,Bits
>::operator[](ScaledIndexDisp sid
) const {
339 return Vptr(*this, sid
.si
.index
, sid
.si
.scale
, sid
.disp
);
342 template<class Reg
, VregKind Kind
, int Bits
>
343 Vptr Vr
<Reg
,Kind
,Bits
>::operator[](Vptr p
) const {
344 return Vptr(*this, p
.base
, 1, p
.disp
);
347 template<class Reg
, VregKind Kind
, int Bits
>
348 Vptr Vr
<Reg
,Kind
,Bits
>::operator[](DispReg rd
) const {
349 return Vptr(*this, rd
.base
, 1, rd
.disp
);
352 template<class Reg
, VregKind Kind
, int Bits
>
353 inline Vptr Vr
<Reg
,Kind
,Bits
>::operator+(size_t d
) const {
354 return Vptr(*this, safe_cast
<int32_t>(d
));
359 // Inone no immediates
361 // UA(s) use s, but s lifetime extends across the instruction
362 // UH(s,h) use s, try assigning same register as h
364 // DH(d,h) define d, try assigning same register as h
365 // Un,Dn no uses, defs
367 #define VASM_OPCODES\
368 /* service requests, PHP-level function calls */\
369 O(bindaddr, I(dest) I(sk), Un, Dn)\
370 O(bindcall, I(stub), U(args), Dn)\
371 O(bindexit, I(cc) I(target), U(sf) U(args), Dn)\
372 O(bindjcc1st, I(cc) I(targets[0]) I(targets[1]), U(sf) U(args), Dn)\
373 O(bindjcc2nd, I(cc) I(target), U(sf) U(args), Dn)\
374 O(bindjmp, I(target) I(trflags), U(args), Dn)\
375 O(callstub, I(target) I(kills) I(fix), U(args), Dn)\
376 O(contenter, Inone, U(fp) U(target) U(args), Dn)\
377 /* vasm intrinsics */\
378 O(copy, Inone, UH(s,d), DH(d,s))\
379 O(copy2, Inone, UH(s0,d0) UH(s1,d1), DH(d0,s0) DH(d1,s1))\
380 O(copyargs, Inone, UH(s,d), DH(d,s))\
381 O(debugtrap, Inone, Un, Dn)\
382 O(fallthru, Inone, Un, Dn)\
383 O(ldimm, I(s) I(saveflags), Un, D(d))\
384 O(fallback, I(dest), U(args), Dn)\
385 O(fallbackcc, I(cc) I(dest), U(sf) U(args), Dn)\
386 O(kpcall, I(target) I(callee) I(prologIndex), U(args), Dn)\
387 O(ldpoint, I(s), Un, D(d))\
388 O(load, Inone, U(s), D(d))\
389 O(mccall, I(target), U(args), Dn)\
390 O(mcprep, Inone, Un, D(d))\
391 O(nothrow, Inone, Un, Dn)\
392 O(phidef, Inone, Un, D(defs))\
393 O(phijmp, Inone, U(uses), Dn)\
394 O(phijcc, I(cc), U(uses) U(sf), Dn)\
395 O(point, I(p), Un, Dn)\
396 O(store, Inone, U(s) U(d), Dn)\
397 O(svcreq, I(req) I(stub_block), U(args), Dn)\
398 O(syncpoint, I(fix), Un, Dn)\
399 O(unwind, Inone, Un, Dn)\
400 O(vcall, I(call) I(destType) I(fixup), U(args), D(d))\
401 O(vinvoke, I(call) I(destType) I(fixup), U(args), D(d))\
402 O(landingpad, Inone, Un, Dn)\
403 O(defvmsp, Inone, Un, D(d))\
404 O(syncvmsp, Inone, U(s), Dn)\
405 O(syncvmfp, Inone, U(s), Dn)\
406 O(srem, Inone, U(s0) U(s1), D(d))\
407 O(sar, Inone, U(s0) U(s1), D(d) D(sf))\
408 O(shl, Inone, U(s0) U(s1), D(d) D(sf))\
409 O(ldretaddr, Inone, U(s), D(d))\
410 O(retctrl, Inone, U(s), Dn)\
411 O(absdbl, Inone, U(s), D(d))\
412 /* arm instructions */\
413 O(asrv, Inone, U(sl) U(sr), D(d))\
414 O(brk, I(code), Un, Dn)\
415 O(cbcc, I(cc), U(s), Dn)\
416 O(hcsync, I(fix) I(call), Un, Dn)\
417 O(hcnocatch, I(call), Un, Dn)\
418 O(hcunwind, I(call), Un, Dn)\
419 O(hostcall, I(argc) I(syncpoint), U(args), Dn)\
420 O(lslv, Inone, U(sl) U(sr), D(d))\
421 O(tbcc, I(cc) I(bit), U(s), Dn)\
422 /* x64 instructions */\
423 O(addli, I(s0), UH(s1,d), DH(d,s1) D(sf)) \
424 O(addlm, Inone, U(s0) U(m), D(sf)) \
425 O(addq, Inone, U(s0) U(s1), D(d) D(sf)) \
426 O(addqi, I(s0), UH(s1,d), DH(d,s1) D(sf)) \
427 O(addqim, I(s0), U(m), D(sf)) \
428 O(addsd, Inone, U(s0) U(s1), D(d))\
429 O(andb, Inone, U(s0) U(s1), D(d) D(sf)) \
430 O(andbi, I(s0), UH(s1,d), DH(d,s1) D(sf)) \
431 O(andbim, I(s), U(m), D(sf)) \
432 O(andl, Inone, U(s0) U(s1), D(d) D(sf)) \
433 O(andli, I(s0), UH(s1,d), DH(d,s1) D(sf)) \
434 O(andq, Inone, U(s0) U(s1), D(d) D(sf)) \
435 O(andqi, I(s0), UH(s1,d), DH(d,s1) D(sf)) \
436 O(call, I(target), U(args), Dn)\
437 O(callm, Inone, U(target) U(args), Dn)\
438 O(callr, Inone, U(target) U(args), Dn)\
439 O(cloadq, I(cc), U(sf) U(f) U(t), D(d))\
440 O(cmovq, I(cc), U(sf) U(f) U(t), D(d))\
441 O(cmpb, Inone, U(s0) U(s1), D(sf))\
442 O(cmpbi, I(s0), U(s1), D(sf))\
443 O(cmpbim, I(s0), U(s1), D(sf))\
444 O(cmpl, Inone, U(s0) U(s1), D(sf))\
445 O(cmpli, I(s0), U(s1), D(sf))\
446 O(cmplim, I(s0), U(s1), D(sf))\
447 O(cmplm, Inone, U(s0) U(s1), D(sf))\
448 O(cmpq, Inone, U(s0) U(s1), D(sf))\
449 O(cmpqi, I(s0), U(s1), D(sf))\
450 O(cmpqim, I(s0), U(s1), D(sf))\
451 O(cmpqm, Inone, U(s0) U(s1), D(sf))\
452 O(cmpsd, I(pred), UA(s0) U(s1), D(d))\
453 O(cqo, Inone, Un, Dn)\
454 O(cvttsd2siq, Inone, U(s), D(d))\
455 O(cvtsi2sd, Inone, U(s), D(d))\
456 O(cvtsi2sdm, Inone, U(s), D(d))\
457 O(decl, Inone, UH(s,d), DH(d,s) D(sf))\
458 O(declm, Inone, U(m), D(sf))\
459 O(decq, Inone, UH(s,d), DH(d,s) D(sf))\
460 O(decqm, Inone, U(m), D(sf))\
461 O(divsd, Inone, UA(s0) U(s1), D(d))\
462 O(idiv, Inone, U(s), D(sf))\
463 O(imul, Inone, U(s0) U(s1), D(d) D(sf))\
464 O(incwm, Inone, U(m), D(sf))\
465 O(incl, Inone, UH(s,d), DH(d,s) D(sf))\
466 O(inclm, Inone, U(m), D(sf))\
467 O(incq, Inone, UH(s,d), DH(d,s) D(sf))\
468 O(incqm, Inone, U(m), D(sf))\
469 O(incqmlock, Inone, U(m), D(sf))\
470 O(jcc, I(cc), U(sf), Dn)\
471 O(jmp, Inone, Un, Dn)\
472 O(jmpr, Inone, U(target) U(args), Dn)\
473 O(jmpm, Inone, U(target) U(args), Dn)\
474 O(lea, Inone, U(s), D(d))\
475 O(leap, I(s), Un, D(d))\
476 O(loaddqu, Inone, U(s), D(d))\
477 O(loadl, Inone, U(s), D(d))\
478 O(loadqp, I(s), Un, D(d))\
479 O(loadsd, Inone, U(s), D(d))\
480 O(loadzbl, Inone, U(s), D(d))\
481 O(movb, Inone, UH(s,d), DH(d,s))\
482 O(movl, Inone, UH(s,d), DH(d,s))\
483 O(movzbl, Inone, UH(s,d), DH(d,s))\
484 O(mulsd, Inone, U(s0) U(s1), D(d))\
485 O(mul, Inone, U(s0) U(s1), D(d))\
486 O(neg, Inone, UH(s,d), DH(d,s) D(sf))\
487 O(nop, Inone, Un, Dn)\
488 O(not, Inone, UH(s,d), DH(d,s))\
489 O(orq, Inone, U(s0) U(s1), D(d) D(sf))\
490 O(orqi, I(s0), UH(s1,d), DH(d,s1) D(sf)) \
491 O(orqim, I(s0), U(m), D(sf))\
492 O(pop, Inone, Un, D(d))\
493 O(popm, Inone, U(m), Dn)\
494 O(psllq, I(s0), UH(s1,d), DH(d,s1))\
495 O(psrlq, I(s0), UH(s1,d), DH(d,s1))\
496 O(push, Inone, U(s), Dn)\
497 O(pushm, Inone, U(s), Dn)\
498 O(ret, Inone, U(args), Dn)\
499 O(roundsd, I(dir), U(s), D(d))\
500 O(sarq, Inone, UH(s,d), DH(d,s) D(sf))\
501 O(sarqi, I(s0), UH(s1,d), DH(d,s1) D(sf))\
502 O(sbbl, Inone, U(sfu) UA(s0) U(s1), D(d) D(sfd))\
503 O(setcc, I(cc), U(sf), D(d))\
504 O(shlli, I(s0), UH(s1,d), DH(d,s1) D(sf))\
505 O(shlq, Inone, UH(s,d), DH(d,s) D(sf))\
506 O(shlqi, I(s0), UH(s1,d), DH(d,s1) D(sf))\
507 O(shrli, I(s0), UH(s1,d), DH(d,s1) D(sf))\
508 O(shrqi, I(s0), UH(s1,d), DH(d,s1) D(sf))\
509 O(sqrtsd, Inone, U(s), D(d))\
510 O(storeb, Inone, U(s) U(m), Dn)\
511 O(storebi, I(s), U(m), Dn)\
512 O(storedqu, Inone, U(s) U(m), Dn)\
513 O(storel, Inone, U(s) U(m), Dn)\
514 O(storeli, I(s), U(m), Dn)\
515 O(storeqi, I(s), U(m), Dn)\
516 O(storesd, Inone, U(s) U(m), Dn)\
517 O(storew, Inone, U(s) U(m), Dn)\
518 O(storewi, I(s), U(m), Dn)\
519 O(subl, Inone, UA(s0) U(s1), D(d) D(sf))\
520 O(subli, I(s0), UH(s1,d), DH(d,s1) D(sf))\
521 O(subq, Inone, UA(s0) U(s1), D(d) D(sf))\
522 O(subqi, I(s0), UH(s1,d), DH(d,s1) D(sf))\
523 O(subsd, Inone, UA(s0) U(s1), D(d))\
524 O(testb, Inone, U(s0) U(s1), D(sf))\
525 O(testbi, I(s0), U(s1), D(sf))\
526 O(testbim, I(s0), U(s1), D(sf))\
527 O(testl, Inone, U(s0) U(s1), D(sf))\
528 O(testli, I(s0), U(s1), D(sf))\
529 O(testlim, I(s0), U(s1), D(sf))\
530 O(testq, Inone, U(s0) U(s1), D(sf))\
531 O(testqm, Inone, U(s0) U(s1), D(sf))\
532 O(testqim, I(s0), U(s1), D(sf))\
533 O(ucomisd, Inone, U(s0) U(s1), D(sf))\
534 O(ud2, Inone, Un, Dn)\
535 O(unpcklpd, Inone, UA(s0) U(s1), D(d))\
536 O(xorb, Inone, U(s0) U(s1), D(d) D(sf))\
537 O(xorbi, I(s0), UH(s1,d), DH(d,s1) D(sf))\
538 O(xorq, Inone, U(s0) U(s1), D(d) D(sf))\
539 O(xorqi, I(s0), UH(s1,d), DH(d,s1) D(sf))\
542 struct bindaddr
{ TCA
* dest
; SrcKey sk
; };
543 struct bindcall
{ TCA stub
; RegSet args
; };
544 struct bindexit
{ ConditionCode cc
; VregSF sf
; SrcKey target
;
545 TransFlags trflags
; RegSet args
; };
546 struct bindjcc1st
{ ConditionCode cc
; VregSF sf
; Offset targets
[2];
548 struct bindjcc2nd
{ ConditionCode cc
; VregSF sf
; Offset target
; RegSet args
; };
549 struct bindjmp
{ SrcKey target
; TransFlags trflags
; RegSet args
; };
550 struct callstub
{ CodeAddress target
; RegSet args
, kills
; Fixup fix
; };
551 struct contenter
{ Vreg64 fp
, target
; RegSet args
; };
552 struct vcall
{ CppCall call
; VcallArgsId args
; Vtuple d
;
553 Fixup fixup
; DestType destType
; bool nothrow
; };
554 struct vinvoke
{ CppCall call
; VcallArgsId args
; Vtuple d
; Vlabel targets
[2];
555 Fixup fixup
; DestType destType
; bool smashable
; };
556 struct copy
{ Vreg s
, d
; };
557 struct copy2
{ Vreg64 s0
, s1
, d0
, d1
; };
558 struct copyargs
{ Vtuple s
, d
; };
560 struct ldretaddr
{ Vptr s
; Vreg d
; };
561 struct retctrl
{ Vreg s
; };
562 struct absdbl
{ Vreg s
, d
; };
564 // No-op, used for marking the end of a block that is intentionally going to
565 // fall-through. Only for use with Vauto.
568 struct ldimm
{ Immed64 s
; Vreg d
; bool saveflags
; };
569 struct fallback
{ SrcKey dest
; TransFlags trflags
; RegSet args
; };
570 struct fallbackcc
{ ConditionCode cc
; VregSF sf
; SrcKey dest
;
571 TransFlags trflags
; RegSet args
; };
572 struct kpcall
{ CodeAddress target
; const Func
* callee
; unsigned prologIndex
;
574 struct ldpoint
{ Vpoint s
; Vreg64 d
; };
575 struct load
{ Vptr s
; Vreg d
; };
576 struct mccall
{ CodeAddress target
; RegSet args
; };
577 struct mcprep
{ Vreg64 d
; };
579 struct phidef
{ Vtuple defs
; };
580 struct phijmp
{ Vlabel target
; Vtuple uses
; };
581 struct phijcc
{ ConditionCode cc
; VregSF sf
; Vlabel targets
[2]; Vtuple uses
; };
582 struct point
{ Vpoint p
; };
583 struct store
{ Vreg s
; Vptr d
; };
584 struct svcreq
{ ServiceRequest req
; Vtuple args
; TCA stub_block
; };
585 struct syncpoint
{ Fixup fix
; };
586 struct unwind
{ Vlabel targets
[2]; };
587 struct landingpad
{};
588 struct defvmsp
{ Vreg d
; };
589 struct syncvmsp
{ Vreg s
; };
590 struct syncvmfp
{ Vreg s
; };
591 struct srem
{ Vreg s0
, s1
, d
; };
592 struct sar
{ Vreg s0
, s1
, d
; VregSF sf
; };
593 struct shl
{ Vreg s0
, s1
, d
; VregSF sf
; };
595 // arm-specific intrinsics
596 struct hcsync
{ Fixup fix
; Vpoint call
; };
597 struct hcnocatch
{ Vpoint call
; };
598 struct hcunwind
{ Vpoint call
; Vlabel targets
[2]; };
600 // arm specific instructions
601 struct brk
{ uint16_t code
; };
602 struct hostcall
{ RegSet args
; uint8_t argc
; Vpoint syncpoint
; };
603 struct cbcc
{ vixl::Condition cc
; Vreg64 s
; Vlabel targets
[2]; };
604 struct tbcc
{ vixl::Condition cc
; unsigned bit
; Vreg64 s
; Vlabel targets
[2]; };
605 struct lslv
{ Vreg64 sl
, sr
, d
; };
606 struct asrv
{ Vreg64 sl
, sr
, d
; };
607 struct mul
{ Vreg64 s0
, s1
, d
; };
609 // ATT style operand order. for binary ops:
610 // op s0 s1 d: d = s1 op s0 => d=s1; d op= s0
611 // op imm s1 d: d = s1 op imm => d=s1; d op= imm
612 // cmp s0 s1: s1 cmp s0
614 // suffix conventions:
624 struct addli
{ Immed s0
; Vreg32 s1
, d
; VregSF sf
; };
625 struct addlm
{ Vreg32 s0
; Vptr m
; VregSF sf
; };
626 struct addq
{ Vreg64 s0
, s1
, d
; VregSF sf
; };
627 struct addqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
628 struct addqim
{ Immed s0
; Vptr m
; VregSF sf
; };
629 struct addsd
{ VregDbl s0
, s1
, d
; };
630 struct andb
{ Vreg8 s0
, s1
, d
; VregSF sf
; };
631 struct andbi
{ Immed s0
; Vreg8 s1
, d
; VregSF sf
; };
632 struct andbim
{ Immed s
; Vptr m
; VregSF sf
; };
633 struct andl
{ Vreg32 s0
, s1
, d
; VregSF sf
; };
634 struct andli
{ Immed s0
; Vreg32 s1
, d
; VregSF sf
; };
635 struct andq
{ Vreg64 s0
, s1
, d
; VregSF sf
; };
636 struct andqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
637 struct call
{ CodeAddress target
; RegSet args
; };
638 struct callm
{ Vptr target
; RegSet args
; };
639 struct callr
{ Vreg64 target
; RegSet args
; };
640 struct cloadq
{ ConditionCode cc
; VregSF sf
; Vreg64 f
; Vptr t
; Vreg64 d
; };
641 struct cmovq
{ ConditionCode cc
; VregSF sf
; Vreg64 f
, t
, d
; };
642 struct cmpb
{ Vreg8 s0
; Vreg8 s1
; VregSF sf
; };
643 struct cmpbi
{ Immed s0
; Vreg8 s1
; VregSF sf
; };
644 struct cmpbim
{ Immed s0
; Vptr s1
; VregSF sf
; };
645 struct cmpl
{ Vreg32 s0
; Vreg32 s1
; VregSF sf
; };
646 struct cmpli
{ Immed s0
; Vreg32 s1
; VregSF sf
; };
647 struct cmplim
{ Immed s0
; Vptr s1
; VregSF sf
; };
648 struct cmplm
{ Vreg32 s0
; Vptr s1
; VregSF sf
; };
649 struct cmpq
{ Vreg64 s0
; Vreg64 s1
; VregSF sf
; };
650 struct cmpqi
{ Immed s0
; Vreg64 s1
; VregSF sf
; };
651 struct cmpqim
{ Immed s0
; Vptr s1
; VregSF sf
; };
652 struct cmpqm
{ Vreg64 s0
; Vptr s1
; VregSF sf
; };
653 struct cmpsd
{ ComparisonPred pred
; VregDbl s0
, s1
, d
; };
655 struct cvttsd2siq
{ VregDbl s
; Vreg64 d
; };
656 struct cvtsi2sd
{ Vreg64 s
; VregDbl d
; };
657 struct cvtsi2sdm
{ Vptr s
; VregDbl d
; };
658 struct decl
{ Vreg32 s
, d
; VregSF sf
; };
659 struct declm
{ Vptr m
; VregSF sf
; };
660 struct decq
{ Vreg64 s
, d
; VregSF sf
; };
661 struct decqm
{ Vptr m
; VregSF sf
; };
662 struct divsd
{ VregDbl s0
, s1
, d
; };
663 struct idiv
{ Vreg64 s
; VregSF sf
; };
664 struct imul
{ Vreg64 s0
, s1
, d
; VregSF sf
; };
665 struct incl
{ Vreg32 s
, d
; VregSF sf
; };
666 struct inclm
{ Vptr m
; VregSF sf
; };
667 struct incq
{ Vreg64 s
, d
; VregSF sf
; };
668 struct incqm
{ Vptr m
; VregSF sf
; };
669 struct incqmlock
{ Vptr m
; VregSF sf
; };
670 struct incwm
{ Vptr m
; VregSF sf
; };
671 struct jcc
{ ConditionCode cc
; VregSF sf
; Vlabel targets
[2]; };
672 struct jmp
{ Vlabel target
; };
673 struct jmpr
{ Vreg64 target
; RegSet args
; };
674 struct jmpm
{ Vptr target
; RegSet args
; };
675 struct lea
{ Vptr s
; Vreg64 d
; };
676 struct leap
{ RIPRelativeRef s
; Vreg64 d
; };
677 struct loaddqu
{ Vptr s
; Vreg128 d
; };
678 struct loadl
{ Vptr s
; Vreg32 d
; };
679 struct loadqp
{ RIPRelativeRef s
; Vreg64 d
; };
680 struct loadsd
{ Vptr s
; VregDbl d
; };
681 struct loadzbl
{ Vptr s
; Vreg32 d
; };
682 struct movb
{ Vreg8 s
, d
; };
683 struct movl
{ Vreg32 s
, d
; };
684 struct movzbl
{ Vreg8 s
; Vreg32 d
; };
685 struct mulsd
{ VregDbl s0
, s1
, d
; };
686 struct neg
{ Vreg64 s
, d
; VregSF sf
; };
688 struct not { Vreg64 s
, d
; };
689 struct orq
{ Vreg64 s0
, s1
, d
; VregSF sf
; };
690 struct orqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
691 struct orqim
{ Immed s0
; Vptr m
; VregSF sf
; };
692 struct pop
{ Vreg64 d
; };
693 struct popm
{ Vptr m
; };
694 struct psllq
{ Immed s0
; VregDbl s1
, d
; };
695 struct psrlq
{ Immed s0
; VregDbl s1
, d
; };
696 struct push
{ Vreg64 s
; };
697 struct pushm
{ Vptr s
; };
698 struct ret
{ RegSet args
; };
699 struct roundsd
{ RoundDirection dir
; VregDbl s
, d
; };
700 struct sarq
{ Vreg64 s
, d
; VregSF sf
; }; // uses rcx
701 struct sarqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
702 struct sbbl
{ VregSF sfu
; Vreg32 s0
, s1
, d
; VregSF sfd
; };
703 struct setcc
{ ConditionCode cc
; VregSF sf
; Vreg8 d
; };
704 struct shlli
{ Immed s0
; Vreg32 s1
, d
; VregSF sf
; };
705 struct shlq
{ Vreg64 s
, d
; VregSF sf
; }; // uses rcx
706 struct shlqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
707 struct shrli
{ Immed s0
; Vreg32 s1
, d
; VregSF sf
; };
708 struct shrqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
709 struct sqrtsd
{ VregDbl s
, d
; };
710 struct storeb
{ Vreg8 s
; Vptr m
; };
711 struct storebi
{ Immed s
; Vptr m
; };
712 struct storedqu
{ Vreg128 s
; Vptr m
; };
713 struct storel
{ Vreg32 s
; Vptr m
; };
714 struct storeli
{ Immed s
; Vptr m
; };
715 struct storeqi
{ Immed s
; Vptr m
; };
716 struct storesd
{ VregDbl s
; Vptr m
; };
717 struct storew
{ Vreg16 s
; Vptr m
; };
718 struct storewi
{ Immed s
; Vptr m
; };
719 struct subl
{ Vreg32 s0
, s1
, d
; VregSF sf
; };
720 struct subli
{ Immed s0
; Vreg32 s1
, d
; VregSF sf
; };
721 struct subq
{ Vreg64 s0
, s1
, d
; VregSF sf
; };
722 struct subqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
723 struct subsd
{ VregDbl s0
, s1
, d
; };
724 struct testb
{ Vreg8 s0
, s1
; VregSF sf
; };
725 struct testbi
{ Immed s0
; Vreg8 s1
; VregSF sf
; };
726 struct testbim
{ Immed s0
; Vptr s1
; VregSF sf
; };
727 struct testl
{ Vreg32 s0
, s1
; VregSF sf
; };
728 struct testli
{ Immed s0
; Vreg32 s1
; VregSF sf
; };
729 struct testlim
{ Immed s0
; Vptr s1
; VregSF sf
; };
730 struct testq
{ Vreg64 s0
, s1
; VregSF sf
; };
731 struct testqm
{ Vreg64 s0
; Vptr s1
; VregSF sf
; };
732 struct testqim
{ Immed s0
; Vptr s1
; VregSF sf
; };
733 struct ucomisd
{ VregDbl s0
, s1
; VregSF sf
; };
735 struct unpcklpd
{ VregDbl s0
, s1
; Vreg128 d
; };
736 struct xorb
{ Vreg8 s0
, s1
, d
; VregSF sf
; };
737 struct xorbi
{ Immed s0
; Vreg8 s1
, d
; VregSF sf
; };
738 struct xorq
{ Vreg64 s0
, s1
, d
; VregSF sf
; };
739 struct xorqi
{ Immed s0
; Vreg64 s1
, d
; VregSF sf
; };
742 #define O(name, imms, uses, defs) name,
743 enum Opcode
: uint8_t { VASM_OPCODES
};
750 #define O(name, imms, uses, defs) \
751 /* implicit */ Vinstr(jit::name i) : op(name), name##_(i) {}
756 * Define an operator= for all instructions to preserve origin and pos.
758 #define O(name, ...) \
759 Vinstr& operator=(const jit::name& i) { \
767 template<typename Op
>
771 * Templated accessors for the union members.
773 template<typename Op
>
774 typename matcher
<Op
>::type
& get() {
775 return matcher
<Op
>::get(*this);
777 template<typename Op
>
778 const typename matcher
<Op
>::type
& get() const {
779 return matcher
<Op
>::get(*this);
785 * Instruction position, currently used only in vasm-xls.
790 * If present, the IRInstruction this Vinstr was originally created from.
792 const IRInstruction
* origin
{nullptr};
795 * A union of all possible instructions, descriminated by the op field.
797 #define O(name, imms, uses, defs) jit::name name##_;
798 union { VASM_OPCODES
};
802 #define O(name, ...) \
803 template<> struct Vinstr::matcher<name> { \
804 using type = jit::name; \
805 static type& get(Vinstr& inst) { \
806 assert(inst.op == name); \
807 return inst.name##_; \
809 static const type& get(const Vinstr& inst) { \
810 assert(inst.op == name); \
811 return inst.name##_; \
818 explicit Vblock(AreaIndex area
) : area(area
) {}
820 jit::vector
<Vinstr
> code
;
823 typedef jit::vector
<Vreg
> VregList
;
826 * Source operands for vcall/vinvoke instructions, packed into a struct for
827 * convenience and to keep the instructions compact.
830 VregList args
, simdArgs
, stkArgs
;
834 * A Vunit contains all the assets that make up a vasm compilation unit. It is
835 * responsible for allocating new blocks, Vregs, and tuples.
839 * Create a new block in the given area, returning its id.
841 Vlabel
makeBlock(AreaIndex area
);
844 * Create a block intended to be used temporarily, as part of modifying
845 * existing code. Although not necessary for correctness, the block may be
846 * freed with freeScratchBlock when finished.
848 Vlabel
makeScratchBlock();
851 * Free a scratch block when finished with it. There must be no references to
852 * this block in reachable code.
854 void freeScratchBlock(Vlabel
);
856 Vreg
makeReg() { return Vreg
{next_vr
++}; }
857 Vtuple
makeTuple(VregList
&& regs
);
858 Vtuple
makeTuple(const VregList
& regs
);
859 VcallArgsId
makeVcallArgs(VcallArgs
&& args
);
861 Vreg
makeConst(uint64_t);
862 Vreg
makeConst(double);
863 Vreg
makeConst(const void* p
) { return makeConst(uint64_t(p
)); }
864 Vreg
makeConst(uint32_t v
) { return makeConst(uint64_t(v
)); }
865 Vreg
makeConst(int64_t v
) { return makeConst(uint64_t(v
)); }
866 Vreg
makeConst(int32_t v
) { return makeConst(int64_t(v
)); }
867 Vreg
makeConst(DataType t
) { return makeConst(uint64_t(t
)); }
868 Vreg
makeConst(Immed64 v
) { return makeConst(uint64_t(v
.q())); }
871 typename
std::enable_if
<std::is_integral
<T
>::value
, Vreg
>::type
872 makeConst(T l
) { return makeConst(uint64_t(l
)); }
875 * Returns true iff this Vunit needs register allocation before it can be
876 * emitted, either because it uses virtual registers or contains instructions
877 * that must be lowered by xls.
879 bool needsRegAlloc() const;
881 unsigned next_vr
{Vreg::V0
};
882 unsigned next_point
{0};
884 jit::vector
<Vblock
> blocks
;
885 jit::hash_map
<uint64_t,Vreg
> cpool
;
886 jit::vector
<VregList
> tuples
;
887 jit::vector
<VcallArgs
> vcallArgs
;
890 // writer stream to add instructions to a block
892 Vout(Vunit
& u
, Vlabel b
, const IRInstruction
* origin
= nullptr)
893 : m_unit(u
), m_block(b
), m_origin(origin
)
896 Vout
& operator=(const Vout
& v
) {
897 assert(&v
.m_unit
== &m_unit
);
899 m_origin
= v
.m_origin
;
903 // implicit cast to label for initializing branch instructions
904 /* implicit */ operator Vlabel() const;
908 Vout
makeBlock(); // create a stream connected to a new empty block
910 // instruction emitter
911 Vout
& operator<<(const Vinstr
& inst
);
913 Vpoint
makePoint() { return Vpoint
{m_unit
.next_point
++}; }
// The Vunit this stream writes into.
Vunit& unit() { return m_unit; }
// Materialize constant v as a Vreg via the unit's constant pool.
template<class T> Vreg cns(T v) { return m_unit.makeConst(v); }
// Redirect subsequent emissions to block b.
void use(Vlabel b) { m_block = b; }
// Record the IR instruction subsequently-emitted vasm originates from.
void setOrigin(const IRInstruction* i) { m_origin = i; }
// Allocate a fresh virtual register in the unit.
Vreg makeReg() { return m_unit.makeReg(); }
919 AreaIndex
area() const { return m_unit
.blocks
[m_block
].area
; }
920 Vtuple
makeTuple(const VregList
& regs
) const {
921 return m_unit
.makeTuple(regs
);
923 Vtuple
makeTuple(VregList
&& regs
) const {
924 return m_unit
.makeTuple(std::move(regs
));
926 VcallArgsId
makeVcallArgs(VcallArgs
&& args
) const {
927 return m_unit
.makeVcallArgs(std::move(args
));
// IR instruction currently being lowered; set by the constructor and
// setOrigin(), copied by operator=.
const IRInstruction* m_origin;
// Similar to X64Assembler, but buffers instructions as they are written, then
// generates code all at once at the end. Areas represent the separate code
// sections (Main, Cold, Frozen) that code is generated into.
945 typedef jit::vector
<Area
> AreaList
;
// Pre-reserve one slot per possible area (up to AreaIndex::Max) —
// presumably so later area additions don't reallocate; confirm.
m_areas.reserve(size_t(AreaIndex::Max));
// Finalize the buffered unit and emit machine code for the given target;
// asmInfo, when non-null, collects disassembly/annotation output.
void finishX64(const Abi&, AsmInfo* asmInfo);
void finishARM(const Abi&, AsmInfo* asmInfo);
// Get the output stream of an already-created area.
Vout& main() { return area(AreaIndex::Main).out; }
Vout& cold() { return area(AreaIndex::Cold).out; }
Vout& frozen() { return area(AreaIndex::Frozen).out; }
// Create the named area backed by the given CodeBlock, returning its stream.
Vout& main(CodeBlock& cb) { return add(cb, AreaIndex::Main); }
Vout& cold(CodeBlock& cb) { return add(cb, AreaIndex::Cold); }
Vout& frozen(CodeBlock& cb) { return add(cb, AreaIndex::Frozen); }
// Convenience overloads: take the CodeBlock out of an existing assembler.
Vout& main(X64Assembler& a) { return main(a.code()); }
Vout& cold(X64Assembler& a) { return cold(a.code()); }
Vout& frozen(X64Assembler& a) { return frozen(a.code()); }
// The unit being built.
Vunit& unit() { return m_unit; }
// All areas created so far (indexed by AreaIndex).
jit::vector<Area>& areas() { return m_areas; }
// Create a new area writing into cb at the given index; returns its stream.
Vout& add(CodeBlock &cb, AreaIndex area);
971 Area
& area(AreaIndex i
) {
972 assert((unsigned)i
< m_areas
.size());
973 return m_areas
[(unsigned)i
];
// Created areas, indexed by AreaIndex; capacity reserved in the constructor.
jit::vector<Area> m_areas;
982 * Vauto is a convenience helper for emitting small amounts of machine code
983 * using vasm. It always has a main code block; cold and frozen blocks may be
984 * added using the normal Vasm API after creation. When the Vauto goes out of
985 * scope, it will finalize and emit any code it contains.
struct Vauto : Vasm {
  // Bind the main area to `code` and make its block the unit's entry point.
  explicit Vauto(CodeBlock& code) {
    unit().entry = Vlabel(main(code));
994 template<class F
> void visit(const Vunit
&, Vreg v
, F f
) {
997 template<class F
> void visit(const Vunit
&, Vptr p
, F f
) {
998 if (p
.base
.isValid()) f(p
.base
);
999 if (p
.index
.isValid()) f(p
.index
);
1001 template<class F
> void visit(const Vunit
& unit
, Vtuple t
, F f
) {
1002 for (auto r
: unit
.tuples
[t
]) f(r
);
1004 template<class F
> void visit(const Vunit
& unit
, VcallArgsId a
, F f
) {
1005 auto& args
= unit
.vcallArgs
[a
];
1006 for (auto r
: args
.args
) f(r
);
1007 for (auto r
: args
.simdArgs
) f(r
);
1008 for (auto r
: args
.stkArgs
) f(r
);
1010 template<class F
> void visit(const Vunit
& unit
, RegSet regs
, F f
) {
1011 regs
.forEach([&](Vreg r
) { f(r
); });
// Invoke `use` (a callable) on every register operand that `inst` reads.
// The O macro expands one switch case per opcode from the VASM_OPCODES
// table; U/UA/UH expand each declared use into a visit() call — presumably
// followed by the VASM_OPCODES expansion and matching #undefs; confirm
// against the full file.
void visitUses(const Vunit& unit, Vinstr& inst, Use use) {
#define O(name, imms, uses, defs) \
  case Vinstr::name: { \
    auto& i = inst.name##_; (void)i; \
#define U(s) visit(unit, i.s, use);
#define UA(s) visit(unit, i.s, use);
#define UH(s,h) visit(unit, i.s, use);
// Invoke `def` (a callable) on every register operand that `inst` writes.
// Mirrors visitUses above: O opens one case per opcode, D/DH expand each
// declared def into a visit() call — presumably followed by VASM_OPCODES
// and the matching #undefs; confirm against the full file.
void visitDefs(const Vunit& unit, const Vinstr& inst, Def def) {
#define O(name, imms, uses, defs) \
  case Vinstr::name: { \
    auto& i = inst.name##_; (void)i; \
#define D(d) visit(unit, i.d, def);
#define DH(d,h) visit(unit, i.d, def);
1057 * visitOperands visits all operands of the given instruction, calling
1058 * visitor.imm(), visitor.use(), visitor.across(), and visitor.def() as defined
1059 * in the VASM_OPCODES macro.
1061 * The template spew is necessary to support callers that only have a const
1062 * Vinstr& as well as callers with a Vinstr& that wish to mutate the
1063 * instruction in the visitor.
// Visit all operands of `inst`, dispatching to visitor.imm/use/across/
// useHint/def/defHint per the VASM_OPCODES operand declarations. The
// enable_if restricts MaybeConstVinstr to exactly Vinstr or const Vinstr so
// both mutating and read-only visitors share one definition.
template<class MaybeConstVinstr, class Visitor>
typename std::enable_if<
  std::is_same<MaybeConstVinstr, Vinstr>::value ||
  std::is_same<MaybeConstVinstr, const Vinstr>::value
visitOperands(MaybeConstVinstr& inst, Visitor& visitor) {
#define O(name, imms, uses, defs) \
  case Vinstr::name: { \
    auto& i = inst.name##_; (void)i; \
#define I(f) visitor.imm(i.f);
#define U(s) visitor.use(i.s);
#define UA(s) visitor.across(i.s);
#define UH(s,h) visitor.useHint(i.s, i.h);
#define D(d) visitor.def(i.d);
#define DH(d,h) visitor.defHint(i.d, i.h);
1103 // visit reachable blocks in postorder, calling fn on each one.
1104 struct PostorderWalker
{
1105 template<class Fn
> void dfs(Vlabel b
, Fn fn
) {
1106 if (visited
.test(b
)) return;
1108 for (auto s
: succs(unit
.blocks
[b
])) {
1113 template<class Fn
> void dfs(Fn fn
) {
1114 dfs(unit
.entry
, fn
);
1116 explicit PostorderWalker(const Vunit
& u
)
1118 , visited(u
.blocks
.size())
1121 boost::dynamic_bitset
<> visited
;
// Printable name for each Vinstr opcode, indexed by Vinstr::Opcode.
extern const char* vinst_names[];
// True if inst is a control-flow instruction that must terminate a block.
bool isBlockEnd(Vinstr& inst);
// Render a Vreg as a human-readable string.
std::string format(Vreg);
// Debug check: verify block b of v ends with a valid block-end instruction.
bool checkBlockEnd(Vunit& v, Vlabel b);
// Search for the phidef in block b, then return its dest tuple.
Vtuple findDefs(const Vunit& unit, Vlabel b);
1133 typedef jit::vector
<jit::vector
<Vlabel
>> PredVector
;
// Compute the predecessor list of every block in unit.
PredVector computePreds(const Vunit& unit);