/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/vm/jit/code-gen-helpers.h"

#include "hphp/runtime/base/countable.h"
#include "hphp/runtime/base/datatype.h"
#include "hphp/runtime/base/header-kind.h"
#include "hphp/runtime/base/rds-header.h"
#include "hphp/runtime/base/runtime-option.h"
#include "hphp/runtime/base/tv-helpers.h"
#include "hphp/runtime/vm/class.h"

#include "hphp/runtime/vm/jit/abi.h"
#include "hphp/runtime/vm/jit/call-spec.h"
#include "hphp/runtime/vm/jit/code-gen-cf.h"
#include "hphp/runtime/vm/jit/ssa-tmp.h"
#include "hphp/runtime/vm/jit/trans-db.h"
#include "hphp/runtime/vm/jit/type.h"
#include "hphp/runtime/vm/jit/vasm-gen.h"
#include "hphp/runtime/vm/jit/vasm-instr.h"
#include "hphp/runtime/vm/jit/vasm-reg.h"

#include "hphp/util/asm-x64.h"
#include "hphp/util/abi-cxx.h"
#include "hphp/util/immed.h"
#include "hphp/util/low-ptr.h"
#include "hphp/util/ringbuffer.h"
#include "hphp/util/thread-local.h"
#include "hphp/util/trace.h"

namespace HPHP { namespace jit {

///////////////////////////////////////////////////////////////////////////////

TRACE_SET_MOD(hhir);

namespace {

///////////////////////////////////////////////////////////////////////////////

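/*
 * If EvalHHIRGenerateAsserts is set, emit a check that the status flags in
 * `sf` indicate a non-negative result, trapping via ud2 otherwise.
 */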
void assertSFNonNegative(Vout& v, Vreg sf) {
  if (!RuntimeOption::EvalHHIRGenerateAsserts) return;
  ifThen(v, CC_NGE, sf, [&] (Vout& v) { v << ud2{}; });
}

///////////////////////////////////////////////////////////////////////////////

}

///////////////////////////////////////////////////////////////////////////////

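/*
 * Store the 64-bit immediate `imm` to `ref`.
 *
 * x64 has no store of a full 64-bit immediate, so values that don't fit in a
 * sign-extended dword are materialized in a constant register first. (See
 * emitEagerSyncPoint below for a typical caller.)
 */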
void emitImmStoreq(Vout& v, Immed64 imm, Vptr ref) {
  if (imm.fits(sz::dword)) {
    v << storeqi{imm.l(), ref};
  } else {
    v << store{v.cns(imm.q()), ref};
  }
}

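/*
 * Load a pointer field that may be stored compressed: a plain 8-byte load,
 * or a 4-byte load zero-extended into the 64-bit destination. (Used by
 * emitLdObjClass below to load a LowPtr<Class>.)
 */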
void emitLdLowPtr(Vout& v, Vptr mem, Vreg reg, size_t size) {
  if (size == 8) {
    v << load{mem, reg};
  } else if (size == 4) {
    v << loadzlq{mem, reg};
  } else {
    not_implemented();
  }
}

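/*
 * Pack the 64-bit values in `s0` and `s1` into the low and high lanes of the
 * SIMD register `d0`.
 */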
void pack2(Vout& v, Vreg s0, Vreg s1, Vreg d0) {
  auto prep = [&] (Vreg r) {
    if (VregDbl::allowable(r)) return r;
    auto t = v.makeReg();
    v << copy{r, t};
    return t;
  };
  // s0 and s1 must be valid VregDbl registers; prep() takes care of it.
  v << unpcklpd{prep(s1), prep(s0), d0}; // s0,s1 -> d0[0],d0[1]
}

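/*
 * If `ty` is Bool, widen the single-byte value in `reg` to a full quadword;
 * otherwise return `reg` unchanged. (storeTV and copyTV use this before
 * full-width stores and copies.)
 */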
Vreg zeroExtendIfBool(Vout& v, Type ty, Vreg reg) {
  if (!(ty <= TBool)) return reg;

  // Zero-extend the bool from a byte to a quad.
  auto extended = v.makeReg();
  v << movzbq{reg, extended};
  return extended;
}

///////////////////////////////////////////////////////////////////////////////

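/*
 * Store the SSATmp `src`, whose physical location is described by `srcLoc`,
 * to the TypedValue at `dst`.
 */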
void storeTV(Vout& v, Vptr dst, Vloc srcLoc, const SSATmp* src) {
  auto const type = src->type();

  if (srcLoc.isFullSIMD()) {
    // The whole TV is stored in a single SIMD reg.
    assertx(RuntimeOption::EvalHHIRAllocSIMDRegs);
    v << storeups{srcLoc.reg(), dst};
    return;
  }

  if (type.needsReg()) {
    assertx(srcLoc.hasReg(1));
    v << storeb{srcLoc.reg(1), dst + TVOFF(m_type)};
  } else {
    v << storeb{v.cns(type.toDataType()), dst + TVOFF(m_type)};
  }

  // We ignore the values of statically nullish types.
  if (src->isA(TNull) || src->isA(TNullptr)) return;

  // Store the value.
  if (src->hasConstVal()) {
    // Skip potential zero-extend if we know the value.
    v << store{v.cns(src->rawVal()), dst + TVOFF(m_data)};
  } else {
    assertx(srcLoc.hasReg(0));
    auto const extended = zeroExtendIfBool(v, src->type(), srcLoc.reg(0));
    v << store{extended, dst + TVOFF(m_data)};
  }
}

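/*
 * Load the TypedValue at `src` into the location(s) for `dst`. When `aux` is
 * true, the full 8-byte type word (including the aux union) is loaded rather
 * than just the DataType byte.
 */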
void loadTV(Vout& v, const SSATmp* dst, Vloc dstLoc, Vptr src,
            bool aux /* = false */) {
  auto const type = dst->type();

  if (dstLoc.isFullSIMD()) {
    // The whole TV is loaded into a single SIMD reg.
    assertx(RuntimeOption::EvalHHIRAllocSIMDRegs);
    v << loadups{src, dstLoc.reg()};
    return;
  }

  if (type.needsReg()) {
    assertx(dstLoc.hasReg(1));
    if (aux) {
      v << load{src + TVOFF(m_type), dstLoc.reg(1)};
    } else {
      v << loadb{src + TVOFF(m_type), dstLoc.reg(1)};
    }
  }

  if (type <= TBool) {
    v << loadtqb{src + TVOFF(m_data), dstLoc.reg(0)};
  } else {
    v << load{src + TVOFF(m_data), dstLoc.reg(0)};
  }
}

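/*
 * Copy the TV described by `srcLoc`/`src` into separate `data` and `type`
 * registers.
 */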
void copyTV(Vout& v, Vreg data, Vreg type, Vloc srcLoc, const SSATmp* src) {
  // SIMD registers are not supported here.
  assertx(!srcLoc.isFullSIMD());

  if (src->type().needsReg()) {
    assertx(srcLoc.hasReg(1));
    v << copy{srcLoc.reg(1), type};
  } else {
    v << copy{v.cns(src->type().toDataType()), type};
  }

  // Ignore the values for nulls.
  if (src->isA(TNull)) return;

  if (src->hasConstVal()) {
    // Skip potential zero-extend if we know the value.
    v << copy{v.cns(src->rawVal()), data};
  } else {
    assertx(srcLoc.hasReg(0));
    auto const extended = zeroExtendIfBool(v, src->type(), srcLoc.reg(0));
    v << copy{extended, data};
  }
}

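/*
 * Copy a TV from one register location to another, reconciling the arities
 * of the two locations: a pair copies to a pair in parallel, a pair headed
 * for a full-SIMD location gets packed, and a quad copied to a Bool is
 * truncated to a byte.
 */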
void copyTV(Vout& v, Vloc src, Vloc dst, Type destType) {
  auto src_arity = src.numAllocated();
  auto dst_arity = dst.numAllocated();

  if (dst_arity == 2) {
    always_assert(src_arity == 2);
    v << copy2{src.reg(0), src.reg(1), dst.reg(0), dst.reg(1)};
    return;
  }
  always_assert(dst_arity == 1);

  if (src_arity == 2 && dst.isFullSIMD()) {
    pack2(v, src.reg(0), src.reg(1), dst.reg(0));
    return;
  }
  always_assert(src_arity >= 1);

  if (src_arity == 2 && destType <= TBool) {
    v << movtqb{src.reg(0), dst.reg(0)};
  } else {
    v << copy{src.reg(0), dst.reg(0)};
  }
}

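/*
 * Overwrite the 16-byte TypedValue at ptr + offset with copies of `byte`,
 * presumably as a debugging aid for catching uses of dead values.
 */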
void trashTV(Vout& v, Vreg ptr, int32_t offset, char byte) {
  int32_t trash32;
  memset(&trash32, byte, sizeof(trash32));
  static_assert(sizeof(TypedValue) == 16, "");
  v << storeli{trash32, ptr[offset + 0x0]};
  v << storeli{trash32, ptr[offset + 0x4]};
  v << storeli{trash32, ptr[offset + 0x8]};
  v << storeli{trash32, ptr[offset + 0xc]};
}

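/*
 * Assert that the refcount at `data` is plausible: if it isn't a
 * static/uncounted sentinel, it must not exceed RefCountMaxRealistic.
 */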
void emitAssertRefCount(Vout& v, Vreg data) {
  auto const sf = v.makeReg();
  v << cmplim{StaticValue, data[FAST_REFCOUNT_OFFSET], sf};

  ifThen(v, CC_NLE, sf, [&] (Vout& v) {
    auto const sf = v.makeReg();
    v << cmplim{RefCountMaxRealistic, data[FAST_REFCOUNT_OFFSET], sf};

    ifThen(v, CC_NBE, sf, [&] (Vout& v) { v << ud2{}; });
  });
}

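/*
 * Unconditionally increment the refcount at base[FAST_REFCOUNT_OFFSET];
 * callers must ensure the value is countable.
 */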
void emitIncRef(Vout& v, Vreg base) {
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    emitAssertRefCount(v, base);
  }
  auto const sf = v.makeReg();
  v << inclm{base[FAST_REFCOUNT_OFFSET], sf};
  assertSFNonNegative(v, sf);
}

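/*
 * Decrement the refcount at base[FAST_REFCOUNT_OFFSET] and return the status
 * flags register, so callers can test the result (e.g., for zero).
 */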
Vreg emitDecRef(Vout& v, Vreg base) {
  auto const sf = v.makeReg();
  v << declm{base[FAST_REFCOUNT_OFFSET], sf};
  assertSFNonNegative(v, sf);

  return sf;
}

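/*
 * IncRef a TV held in `data`/`type` registers: do nothing for non-refcounted
 * types, and skip static values (whose counts compare below zero).
 */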
void emitIncRefWork(Vout& v, Vreg data, Vreg type) {
  auto const sf = v.makeReg();
  emitCmpTVType(v, sf, KindOfRefCountThreshold, type);
  // ifRefCountType
  ifThen(v, CC_G, sf, [&] (Vout& v) {
    auto const sf2 = v.makeReg();
    // ifNonStatic
    v << cmplim{0, data[FAST_REFCOUNT_OFFSET], sf2};
    ifThen(v, CC_GE, sf2, [&] (Vout& v) { emitIncRef(v, data); });
  });
}

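/*
 * DecRef `obj`, a known-countable ObjectData: release it when the count
 * would drop to zero, otherwise just decrement.
 */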
void emitDecRefWorkObj(Vout& v, Vreg obj) {
  auto const shouldRelease = v.makeReg();
  v << cmplim{1, obj[FAST_REFCOUNT_OFFSET], shouldRelease};
  ifThenElse(
    v, CC_E, shouldRelease,
    [&] (Vout& v) {
      // Putting fn directly inside the vcall{} triggers an internal compiler
      // error (gcc 4.4.7).
      auto const fn = CallSpec::method(&ObjectData::release);
      v << vcall{fn, v.makeVcallArgs({{obj}}), v.makeTuple({})};
    },
    [&] (Vout& v) {
      emitDecRef(v, obj);
    }
  );
}

///////////////////////////////////////////////////////////////////////////////

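/*
 * Emit a call to `target`, selecting the call mechanism from the CallSpec
 * kind: a direct or smashable call, a dispatch through the array-virtual or
 * destructor tables, or a call to a stub.
 */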
void emitCall(Vout& v, CallSpec target, RegSet args) {
  using K = CallSpec::Kind;

  switch (target.kind()) {
    case K::Direct:
      v << call{static_cast<TCA>(target.address()), args};
      return;

    case K::Smashable:
      v << calls{static_cast<TCA>(target.address()), args};
      return;

    case K::ArrayVirt: {
      auto const addr = reinterpret_cast<intptr_t>(target.arrayTable());

      auto const arrkind = v.makeReg();
      v << loadzbl{rarg(0)[HeaderKindOffset], arrkind};

      if (deltaFits(addr, sz::dword)) {
        v << callm{baseless(arrkind * 8 + addr), args};
      } else {
        auto const base = v.makeReg();
        v << ldimmq{addr, base};
        v << callm{base[arrkind * 8], args};
      }
      static_assert(sizeof(HeaderKind) == 1, "");
    } return;

    case K::Destructor: {
      auto dtor = lookupDestructor(v, target.reg());
      v << callm{dtor, args};
    } return;

    case K::Stub:
      v << callstub{target.stubAddr(), args};
      return;
  }
  not_reached();
}

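/*
 * Return a Vptr to the destructor function for the DataType in `type`,
 * computed as an index into the g_destructors table.
 */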
Vptr lookupDestructor(Vout& v, Vreg type) {
  auto const table = reinterpret_cast<intptr_t>(g_destructors);

  auto const typel = v.makeReg();
  auto const index = v.makeReg();
  auto const indexl = v.makeReg();

  // This movzbl is only needed because callers aren't required to zero-extend
  // the type.
  v << movzbl{type, typel};
  v << shrli{kShiftDataTypeToDestrIndex, typel, indexl, v.makeReg()};
  v << movzlq{indexl, index};

  // The baseless form is more compact, but isn't supported for 64-bit
  // displacements.
  if (table <= std::numeric_limits<int>::max()) {
    return baseless(index * 8 + safe_cast<int>(table));
  }
  return v.cns(table)[index * 8];
}

///////////////////////////////////////////////////////////////////////////////

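/*
 * Load the Class pointer (possibly stored as a LowPtr) out of the ObjectData
 * pointed to by `obj`, into `d`.
 */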
Vreg emitLdObjClass(Vout& v, Vreg obj, Vreg d) {
  emitLdLowPtr(v, obj[ObjectData::getVMClassOffset()], d,
               sizeof(LowPtr<Class>));
  return d;
}

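/*
 * Convert the class context in `src` (a Class* with the low bit set, per
 * ActRec::kHasClassBit) into a plain Class* in `dst`.
 */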
Vreg emitLdClsCctx(Vout& v, Vreg src, Vreg dst) {
  static_assert(ActRec::kHasClassBit == 1,
                "Fix the decq if you change kHasClassBit");
  v << decq{src, dst, v.makeReg()};
  return dst;
}

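/*
 * Compare two possibly-compressed pointers of width `size`, setting `sf`.
 * The three overloads cover immediate-vs-memory, register-vs-memory, and
 * register-vs-register comparisons.
 */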
void cmpLowPtrImpl(Vout& v, Vreg sf, const void* ptr, Vptr mem, size_t size) {
  if (size == 8) {
    v << cmpqm{v.cns(ptr), mem, sf};
  } else if (size == 4) {
    auto const ptrImm = safe_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr));
    v << cmplm{v.cns(ptrImm), mem, sf};
  } else {
    not_implemented();
  }
}

void cmpLowPtrImpl(Vout& v, Vreg sf, Vreg reg, Vptr mem, size_t size) {
  if (size == 8) {
    v << cmpqm{reg, mem, sf};
  } else if (size == 4) {
    auto low = v.makeReg();
    v << movtql{reg, low};
    v << cmplm{low, mem, sf};
  } else {
    not_implemented();
  }
}

void cmpLowPtrImpl(Vout& v, Vreg sf, Vreg reg1, Vreg reg2, size_t size) {
  if (size == 8) {
    v << cmpq{reg1, reg2, sf};
  } else if (size == 4) {
    auto const l1 = v.makeReg();
    auto const l2 = v.makeReg();
    v << movtql{reg1, l1};
    v << movtql{reg2, l2};
    v << cmpl{l1, l2, sf};
  } else {
    not_implemented();
  }
}

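/*
 * Compare `val` against a Class vec-length field at `mem`, using the operand
 * width dictated by Class::veclen_t.
 */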
void emitCmpVecLen(Vout& v, Vreg sf, Immed val, Vptr mem) {
  auto const size = sizeof(Class::veclen_t);
  if (size == 2) {
    v << cmpwim{val, mem, sf};
  } else if (size == 4) {
    v << cmplim{val, mem, sf};
  } else {
    not_implemented();
  }
}

///////////////////////////////////////////////////////////////////////////////

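/*
 * Write `vmfp`, `vmsp`, and `pc` back to their slots in the RDS header.
 */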
void emitEagerSyncPoint(Vout& v, PC pc, Vreg rds, Vreg vmfp, Vreg vmsp) {
  v << store{vmfp, rds[rds::kVmfpOff]};
  v << store{vmsp, rds[rds::kVmspOff]};
  emitImmStoreq(v, intptr_t(pc), rds[rds::kVmpcOff]);
}

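/*
 * Write `msg` to the ring buffer of type `t`, if ringbuffer tracing is
 * enabled.
 */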
void emitRB(Vout& v, Trace::RingBufferType t, const char* msg) {
  if (!Trace::moduleEnabled(Trace::ringbuffer, 1)) return;
  v << vcall{CallSpec::direct(Trace::ringbufferMsg),
             v.makeVcallArgs({{v.cns(msg), v.cns(strlen(msg)), v.cns(t)}}),
             v.makeTuple({})};
}

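/*
 * Increment the thread-local stats counter `stat` by `n`, addressing it
 * relative to the FS segment base.
 */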
void emitIncStat(Vout& v, Stats::StatCounter stat, int n, bool force) {
  if (!force && !Stats::enabled()) return;
  intptr_t disp = uintptr_t(&Stats::tl_counters[stat]) - tlsBase();
  v << addqim{n, Vptr{baseless(disp), Vptr::FS}, v.makeReg()};
}

///////////////////////////////////////////////////////////////////////////////

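/*
 * Compare the generation number of the normal RDS handle `ch` against the
 * current generation, returning the status flags; equality means the handle
 * is initialized.
 */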
Vreg checkRDSHandleInitialized(Vout& v, rds::Handle ch) {
  assertx(rds::isNormalHandle(ch));
  auto const gen = v.makeReg();
  auto const sf = v.makeReg();
  v << loadb{rvmtl()[rds::genNumberHandleFrom(ch)], gen};
  v << cmpbm{gen, rvmtl()[rds::currentGenNumberHandle()], sf};
  return sf;
}

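/*
 * Mark the normal RDS handle `ch` as initialized by copying the current
 * generation number into its generation slot.
 */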
void markRDSHandleInitialized(Vout& v, rds::Handle ch) {
  assertx(rds::isNormalHandle(ch));
  auto const gen = v.makeReg();
  v << loadb{rvmtl()[rds::currentGenNumberHandle()], gen};
  v << storeb{gen, rvmtl()[rds::genNumberHandleFrom(ch)]};
}

////////////////////////////////////////////////////////////////////////////////

}}