// hphp/runtime/vm/jit/code-gen-helpers.cpp
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/code-gen-helpers.h"
#include "hphp/runtime/vm/jit/code-gen-tls.h"

#include "hphp/runtime/base/countable.h"
#include "hphp/runtime/base/datatype.h"
#include "hphp/runtime/base/header-kind.h"
#include "hphp/runtime/base/rds-header.h"
#include "hphp/runtime/base/runtime-option.h"
#include "hphp/runtime/base/tv-mutate.h"
#include "hphp/runtime/base/tv-variant.h"
#include "hphp/runtime/vm/class.h"

#include "hphp/runtime/vm/jit/abi.h"
#include "hphp/runtime/vm/jit/call-spec.h"
#include "hphp/runtime/vm/jit/code-gen-cf.h"
#include "hphp/runtime/vm/jit/ssa-tmp.h"
#include "hphp/runtime/vm/jit/translator-inline.h"
#include "hphp/runtime/vm/jit/trans-db.h"
#include "hphp/runtime/vm/jit/type.h"
#include "hphp/runtime/vm/jit/vasm-gen.h"
#include "hphp/runtime/vm/jit/vasm-instr.h"
#include "hphp/runtime/vm/jit/vasm-reg.h"

#include "hphp/util/asm-x64.h"
#include "hphp/util/abi-cxx.h"
#include "hphp/util/configs/hhir.h"
#include "hphp/util/immed.h"
#include "hphp/util/low-ptr.h"
#include "hphp/util/ringbuffer.h"
#include "hphp/util/thread-local.h"
#include "hphp/util/trace.h"
namespace HPHP::jit {

///////////////////////////////////////////////////////////////////////////////

TRACE_SET_MOD(hhir);

namespace {

///////////////////////////////////////////////////////////////////////////////
void assertSFNonNegative(Vout& v, Vreg sf, Reason reason) {
  if (!Cfg::HHIR::GenerateAsserts) return;
  ifThen(v, CC_NGE, sf, [&] (Vout& v) { v << trap{reason, Fixup::none()}; });
}
///////////////////////////////////////////////////////////////////////////////

}

///////////////////////////////////////////////////////////////////////////////
Vreg emitMovtql(Vout& v, Vreg reg) {
  auto it = v.unit().regToConst.find(reg);
  if (it != v.unit().regToConst.end() && !it->second.isUndef) {
    switch (it->second.kind) {
      case Vconst::Double:
        always_assert(false);
      case Vconst::Quad:
        return v.unit().makeConst(uint32_t(it->second.val));
      case Vconst::Long:
      case Vconst::Byte:
        return reg;
    }
  }
  auto const r = v.makeReg();
  v << movtql{reg, r};
  return r;
}
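// emitMovtql() narrows a 64-bit virtual register to 32 bits. When the source
// is a known constant, it re-materializes the truncated constant instead of
// emitting a movtql, so callers may get back the original register, a fresh
// constant, or a fresh narrowed register. A minimal usage sketch
// (illustrative names, not from this file):
//
//   auto const low = emitMovtql(v, someQuadReg);
//   v << storel{low, mem};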
void emitImmStoreq(Vout& v, Immed64 imm, Vptr ref) {
  if (imm.fits(sz::dword)) {
    v << storeqi{imm.l(), ref};
  } else {
    v << store{v.cns(imm.q()), ref};
  }
}
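// storeqi can only encode a 32-bit immediate, which the hardware sign-extends
// to 64 bits; it's usable only when the value round-trips through that
// extension (imm.fits(sz::dword)). Otherwise the full 64-bit constant is
// materialized into a register and stored from there.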
void emitLdLowPtr(Vout& v, Vptr mem, Vreg reg, size_t size) {
  if (size == 8) {
    v << load{mem, reg};
  } else if (size == 4) {
    v << loadzlq{mem, reg};
  } else {
    not_implemented();
  }
}
void emitStLowPtr(Vout& v, Vreg reg, Vptr mem, size_t size) {
  if (size == 8) {
    v << store{reg, mem};
  } else if (size == 4) {
    auto const temp = emitMovtql(v, reg);
    v << storel{temp, mem};
  } else {
    not_implemented();
  }
}
void pack2(Vout& v, Vreg s0, Vreg s1, Vreg d0) {
  auto prep = [&] (Vreg r) {
    if (VregDbl::allowable(r)) return r;
    auto t = v.makeReg();
    v << copy{r, t};
    return t;
  };
  // s0 and s1 must be valid VregDbl registers; prep() takes care of it.
  v << unpcklpd{prep(s1), prep(s0), d0}; // s0,s1 -> d0[0],d0[1]
}
Vreg zeroExtendIfBool(Vout& v, Type ty, Vreg reg) {
  if (!(ty <= TBool)) return reg;

  // Zero-extend the bool from a byte to a quad.
  auto extended = v.makeReg();
  v << movzbq{reg, extended};
  return extended;
}
///////////////////////////////////////////////////////////////////////////////
Vreg materializeConstVal(Vout& v, Type ty) {
  if (ty <= TNull) return v.cns(Vconst::Quad);
  if (ty <= TNullptr) return v.cns(0);
  if (!ty.hasConstVal()) return InvalidReg;
  if (ty <= TBool) return v.cns(ty.boolVal());
  if (ty <= TDbl) return v.cns(ty.dblVal());
  return v.cns(ty.rawVal());
}
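// The payload of a null is never read, so TNull gets a placeholder quad
// constant (Vconst::Quad with no particular value) rather than a specific
// bit pattern, while TNullptr must be a real 0. Types without a known
// constant value yield InvalidReg, which callers are expected to check.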
///////////////////////////////////////////////////////////////////////////////
void storeTVVal(Vout& v, Type type, Vloc srcLoc, Vptr valPtr) {
  // We ignore the values of statically nullish types.
  if (type <= TNull || type <= TNullptr) return;

  // Store the value.
  if (type.hasConstVal()) {
    // Skip potential zero-extend if we know the value.
    v << store{v.cns(type.rawVal()), valPtr};
  } else {
    assertx(srcLoc.hasReg(0));
    auto const extended = zeroExtendIfBool(v, type, srcLoc.reg(0));
    v << store{extended, valPtr};
  }
}
void storeTVType(Vout& v, Type type, Vloc srcLoc, Vptr typePtr, bool aux) {
  if (type.needsReg()) {
    assertx(srcLoc.hasReg(1));
    if (aux) {
      v << store{srcLoc.reg(1), typePtr};
    } else {
      v << storeb{srcLoc.reg(1), typePtr};
    }
  } else {
    if (aux) {
      v << store{v.cns(type.toDataType()), typePtr};
    } else {
      v << storeb{v.cns(type.toDataType()), typePtr};
    }
  }
}
void storeTV(Vout& v, Vptr dst, Vloc srcLoc, const SSATmp* src,
             Type ty, bool aux) {
  if (ty == TBottom) ty = src->type();
  storeTV(v, ty, srcLoc, dst + TVOFF(m_type), dst + TVOFF(m_data), aux);
}
void storeTV(Vout& v, Type type, Vloc srcLoc,
             Vptr typePtr, Vptr valPtr, bool aux) {
  if (srcLoc.isFullSIMD()) {
    // The whole TV is stored in a single SIMD reg.
    assertx(Cfg::HHIR::AllocSIMDRegs);
    always_assert(typePtr == valPtr + (TVOFF(m_type) - TVOFF(m_data)));
    v << storeups{srcLoc.reg(), valPtr};
    return;
  }

  storeTVType(v, type, srcLoc, typePtr, aux);
  storeTVVal(v, type, srcLoc, valPtr);
}
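// When the register allocator has placed the whole TypedValue in one SIMD
// register, a single unaligned 16-byte store (storeups) writes value and type
// at once; the always_assert above checks that the two pointers really do
// address one contiguous TypedValue. Otherwise type and value are stored
// separately via storeTVType/storeTVVal (the value store is skipped for
// nulls).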
void storeTVWithAux(Vout& v,
                    Vptr dst,
                    Vloc srcLoc,
                    const SSATmp* src,
                    AuxUnion aux) {
  static_assert(TVOFF(m_type) == 8, "");
  static_assert(TVOFF(m_aux) == 12, "");
  assertx(!srcLoc.isFullSIMD());

  auto const type = src->type();
  auto const auxMask = auxToMask(aux);

  if (type.needsReg()) {
    assertx(srcLoc.hasReg(1));

    // DataType is signed. We're using movzbq here to clear out the upper 7
    // bytes of the register, not to actually extend the type value.
    auto const typeReg = srcLoc.reg(1);
    auto const extended = v.makeReg();
    auto const result = v.makeReg();
    v << movzbq{typeReg, extended};
    v << orq{extended, v.cns(auxMask), result, v.makeReg()};
    v << store{result, dst + TVOFF(m_type)};
  } else {
    auto const dt = static_cast<std::make_unsigned<data_type_t>::type>(
      type.toDataType()
    );
    static_assert(std::numeric_limits<decltype(dt)>::digits <= 32, "");
    v << store{v.cns(dt | auxMask), dst + TVOFF(m_type)};
  }

  storeTVVal(v, type, srcLoc, dst + TVOFF(m_data));
}
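// TypedValue layout assumed by the static_asserts above:
//
//   bytes 0..7    m_data (the value)
//   byte  8       m_type (DataType, a signed byte)
//   bytes 9..11   padding
//   bytes 12..15  m_aux
//
// A single quad store at dst + TVOFF(m_type) therefore writes the type byte
// and the aux union together, which is why the type is zero-extended and
// OR-ed with auxToMask(aux) first.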
void loadTV(Vout& v, const SSATmp* dst, Vloc dstLoc, Vptr src,
            bool aux /* = false */) {
  loadTV(v, dst->type(), dstLoc, src + TVOFF(m_type), src + TVOFF(m_data), aux);
}
void loadTV(Vout& v, Type type, Vloc dstLoc, Vptr typePtr, Vptr valPtr,
            bool aux) {
  if (dstLoc.isFullSIMD()) {
    // The whole TV is loaded into a single SIMD reg.
    assertx(Cfg::HHIR::AllocSIMDRegs);
    always_assert(typePtr == valPtr + (TVOFF(m_type) - TVOFF(m_data)));
    v << loadups{valPtr, dstLoc.reg()};
    return;
  }

  if (type.needsReg() || aux) {
    assertx(dstLoc.hasReg(1));
    if (aux) {
      v << load{typePtr, dstLoc.reg(1)};
    } else {
      v << loadb{typePtr, dstLoc.reg(1)};
    }
  }

  if (type <= TBool) {
    v << loadtqb{valPtr, dstLoc.reg(0)};
  } else {
    v << load{valPtr, dstLoc.reg(0)};
  }
}
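// When aux is requested, the type register is filled with a full quad load so
// it picks up the aux bytes as well; a plain type only needs the one-byte
// loadb. Bool payloads use loadtqb, which truncates the quad in memory down
// to the byte-sized bool register.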
void copyTV(Vout& v, Vreg data, Vreg type, Vloc srcLoc, const SSATmp* src) {
  // SIMD registers are not supported here.
  assertx(!srcLoc.isFullSIMD());

  if (src->type().needsReg()) {
    assertx(srcLoc.hasReg(1));
    v << copy{srcLoc.reg(1), type};
  } else {
    v << copy{v.cns(src->type().toDataType()), type};
  }

  // Ignore the values for nulls.
  if (src->isA(TNull)) return;

  if (src->hasConstVal()) {
    // Skip potential zero-extend if we know the value.
    v << copy{v.cns(src->rawVal()), data};
  } else {
    assertx(srcLoc.hasReg(0));
    auto const extended = zeroExtendIfBool(v, src->type(), srcLoc.reg(0));
    v << copy{extended, data};
  }
}
void copyTV(Vout& v, Vloc src, Vloc dst, Type destType) {
  auto src_arity = src.numAllocated();
  auto dst_arity = dst.numAllocated();

  if (dst_arity == 2) {
    always_assert(src_arity == 2);
    v << copyargs{
      v.makeTuple({src.reg(0), src.reg(1)}),
      v.makeTuple({dst.reg(0), dst.reg(1)})
    };
    return;
  }
  always_assert(dst_arity == 1);

  if (src_arity == 2 && dst.isFullSIMD()) {
    pack2(v, src.reg(0), src.reg(1), dst.reg(0));
    return;
  }
  always_assert(src_arity >= 1);

  if (src_arity == 2 && destType <= TBool) {
    v << movtqb{src.reg(0), dst.reg(0)};
  } else {
    v << copy{src.reg(0), dst.reg(0)};
  }
}
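// The arity pairs handled above, for reference:
//   2 -> 2:        parallel copy of data and type via copyargs
//   2 -> 1 (SIMD): pack data and type into one SIMD register (pack2)
//   2 -> 1 (Bool): the quad payload is narrowed with movtqb
//   otherwise:     a plain copy of the data register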
void trashFullTV(Vout& v, Vptr ptr, char byte) {
  int32_t trash32;
  memset(&trash32, byte, sizeof(trash32));
  static_assert(sizeof(TypedValue) % sizeof(trash32) == 0, "");

  for (int offset = 0; offset < sizeof(TypedValue);
       offset += sizeof(trash32)) {
    v << storeli{trash32, ptr + offset};
  }
}
void trashTV(Vout& v, Vptr typePtr, Vptr valPtr, char byte) {
  int32_t trash32;
  memset(&trash32, byte, sizeof(trash32));
  static_assert(sizeof(Value) == 8, "");
  v << storeli{trash32, valPtr};
  v << storeli{trash32, valPtr + 4};

  static_assert(sizeof(DataType) == 1, "");
  v << storebi{byte, typePtr};
}
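// These helpers poison a (possibly dead) TypedValue with a recognizable fill
// byte, as a debugging aid: a later use of the trashed value fails loudly
// instead of silently reading stale data. A hypothetical call site (names
// and fill byte are illustrative):
//
//   trashFullTV(v, fpReg[offsetToLocalData(0)], 0x6b);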
void emitAssertRefCount(Vout& v, Vreg data, Reason reason) {
  auto const sf = emitCmpRefCount(v, StaticValue, data);

  ifThen(v, CC_NLE, sf, [&] (Vout& v) {
    auto const sf = emitCmpRefCount(v, RefCountMaxRealistic, data);
    ifThen(v, CC_NBE, sf, [&] (Vout& v) { v << trap{reason, Fixup::none()}; });
  });
}
void emitIncRef(Vout& v, Vreg base, Reason reason) {
  if (Cfg::HHIR::GenerateAsserts) {
    emitAssertRefCount(v, base, reason);
  }

  auto const sf = v.makeReg();
  v << inclm{base[FAST_REFCOUNT_OFFSET], sf};
  assertSFNonNegative(v, sf, reason);
}
Vreg emitDecRef(Vout& v, Vreg base, Reason reason) {
  auto const sf = emitDecRefCount(v, base);
  assertSFNonNegative(v, sf, reason);
  return sf;
}
void emitIncRefWork(Vout& v, Vreg data, Vreg type, Reason reason) {
  auto const sf = v.makeReg();
  auto const cc = emitIsTVTypeRefCounted(v, sf, type);
  // ifRefCountedType
  ifThen(v, cc, sf, [&] (Vout& v) {
    // One-bit mode: do the IncRef if m_count == OneReference (0). Normal mode:
    // do the IncRef if m_count >= 0.
    auto const sf2 = emitCmpRefCount(v, 0, data);
    auto const cc = CC_GE;
    ifThen(v, cc, sf2, [&] (Vout& v) { emitIncRef(v, data, reason); });
  });
}
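// Refcounts in HHVM are signed: persistent (static/uncounted) values carry a
// negative count and must never be mutated. Checking m_count >= 0 before the
// increment is what lets the JIT IncRef a value without first proving it
// isn't persistent.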
void emitIncRefWork(Vout& v, Vloc loc, Type type, Reason reason) {
  // If it's definitely not ref-counted, there's nothing to do.
  if (!type.maybe(TCounted)) return;

  if (type <= TCounted) {
    // Definitely ref-counted.
    emitIncRef(v, loc.reg(), reason);
    return;
  }

  // It might be ref-counted; we need to check at runtime.

  if (loc.hasReg(1)) {
    // We don't know the type, so check it at runtime.
    emitIncRefWork(v, loc.reg(0), loc.reg(1), reason);
    return;
  }

  // We do know the type, but it might be persistent or counted. Check the
  // ref-count.
  auto const sf = emitCmpRefCount(v, 0, loc.reg());
  auto const cc = CC_GE;
  ifThen(v, cc, sf, [&] (Vout& v) { emitIncRef(v, loc.reg(), reason); });
}
void emitDecRefWorkObj(Vout& v, Vreg obj, Reason reason) {
  auto const shouldRelease = emitCmpRefCount(v, OneReference, obj);
  ifThenElse(
    v, CC_E, shouldRelease,
    [&] (Vout& v) {
      // Putting fn inline in the vcall{} triggers an internal compiler error
      // (gcc 4.4.7).
      auto const cls = emitLdObjClass(v, obj, v.makeReg());
      auto const fn = CallSpec::objDestruct(cls);
      v << vcall{fn, v.makeVcallArgs({{obj, cls}}), v.makeTuple({}),
                 Fixup::none()};
    },
    [&] (Vout& v) {
      emitDecRef(v, obj, reason);
    }
  );
}
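// Decrementing an object's refcount splits into two paths: if the count is
// exactly OneReference, the object is about to die, so its class's release
// function is invoked via CallSpec::objDestruct; otherwise a plain DecRef
// suffices.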
///////////////////////////////////////////////////////////////////////////////
void emitCall(Vout& v, CallSpec target, RegSet args) {
  using K = CallSpec::Kind;

  switch (target.kind()) {
    case K::Direct:
      v << call{static_cast<TCA>(target.address()), args};
      return;

    case K::Smashable:
      v << calls{static_cast<TCA>(target.address()), args};
      return;

    case K::Destructor: {
      auto dtor = lookupDestructor(v, target.reg());
      v << callm{dtor, args};
    } return;

    case K::ObjDestructor: {
      auto const func = v.makeReg();
      emitLdLowPtr(
        v,
        target.reg()[Class::releaseFuncOff()],
        func,
        sizeof(ObjReleaseFunc)
      );
      v << callr{func, args};
    } return;

    case K::Stub:
      v << callstub{target.stubAddr(), args};
      return;
  }
  not_reached();
}
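// Each CallSpec kind lowers to a different vasm call form: Direct to a plain
// call, Smashable to calls (a call whose target can later be smashed, i.e.
// repatched), Destructor to an indirect callm through the destructor table,
// ObjDestructor to callr through the class's release-function pointer, and
// Stub to callstub.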
Vptr lookupDestructor(Vout& v, Vreg type, bool typeIsQuad) {
  auto const elem_sz = static_cast<int>(sizeof(g_destructors[0]) / 2);
  auto const table = reinterpret_cast<intptr_t>(&g_destructors[0]) -
    kMinRefCountedDataType * elem_sz;

  auto const index = [&] {
    if (typeIsQuad) return type;
    auto const r = v.makeReg();
    v << movsbq{type, r};
    return r;
  }();

  // The baseless form is more compact, but isn't supported for 64-bit
  // displacements.
  if (table <= std::numeric_limits<int>::max()) {
    return baseless(index * elem_sz + safe_cast<int>(table));
  }
  return v.cns(table)[index * elem_sz];
}
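// The table pointer is pre-biased by kMinRefCountedDataType so the raw
// (sign-extended) DataType can be used directly as the index. Halving the
// element size appears to compensate for refcounted DataType values being
// spaced two apart (an assumption inferred from the arithmetic here, not
// stated in this file), so consecutive refcounted types land on consecutive
// table entries.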
///////////////////////////////////////////////////////////////////////////////
Vreg emitLdObjClass(Vout& v, Vreg obj, Vreg d) {
  emitLdLowPtr(v, obj[ObjectData::getVMClassOffset()], d,
               sizeof(LowPtr<Class>));
  return d;
}
void cmpLowPtrImpl(Vout& v, Vreg sf, const void* ptr, Vptr mem, size_t size) {
  if (size == 8) {
    v << cmpqm{v.cns(ptr), mem, sf};
  } else if (size == 4) {
    auto const ptrImm = safe_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr));
    v << cmplm{v.cns(ptrImm), mem, sf};
  } else {
    not_implemented();
  }
}
void cmpLowPtrImpl(Vout& v, Vreg sf, const void* ptr, Vreg reg, size_t size) {
  if (size == 8) {
    v << cmpq{v.cns(ptr), reg, sf};
  } else if (size == 4) {
    auto const ptrImm = safe_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr));
    v << cmpl{v.cns(ptrImm), reg, sf};
  } else {
    not_implemented();
  }
}
void cmpLowPtrImpl(Vout& v, Vreg sf, Vreg reg, Vptr mem, size_t size) {
  if (size == 8) {
    v << cmpqm{reg, mem, sf};
  } else if (size == 4) {
    auto low = emitMovtql(v, reg);
    v << cmplm{low, mem, sf};
  } else {
    not_implemented();
  }
}
void cmpLowPtrImpl(Vout& v, Vreg sf, Vreg reg1, Vreg reg2, size_t size) {
  if (size == 8) {
    v << cmpq{reg1, reg2, sf};
  } else if (size == 4) {
    auto const l1 = emitMovtql(v, reg1);
    auto const l2 = emitMovtql(v, reg2);
    v << cmpl{l1, l2, sf};
  } else {
    not_implemented();
  }
}
/*
 * Generate a range check for isCollection:
 *   set CC_BE if obj->m_kind - HeaderKind::Vector <=
 *                HeaderKind::ImmSet - HeaderKind::Vector
 */
Vreg emitIsCollection(Vout& v, Vreg obj) {
  auto const sf = v.makeReg();
  auto const mincol = static_cast<int>(HeaderKind::Vector);
  auto const maxcol = static_cast<int>(HeaderKind::ImmSet);
  auto const kind = v.makeReg();
  auto const col_kind = v.makeReg();
  v << loadzbl{obj[HeaderKindOffset], kind};
  v << subli{mincol, kind, col_kind, v.makeReg()};
  v << cmpli{maxcol - mincol, col_kind, sf};
  return sf;
}
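// This is the classic unsigned range-check trick: after subtracting mincol,
// one unsigned below-or-equal comparison implements mincol <= kind <= maxcol,
// relying on the collection HeaderKinds being a contiguous run from Vector
// to ImmSet. A minimal usage sketch (objReg is illustrative):
//
//   auto const sf = emitIsCollection(v, objReg);
//   ifThen(v, CC_BE, sf, [&] (Vout& v) { /* obj is a collection */ });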
///////////////////////////////////////////////////////////////////////////////
void emitSetVMRegState(Vout& v, VMRegState state) {
  auto const regstate = rvmtl()[rds::kVmRegStateOff];
  v << storeqi{static_cast<int32_t>(state), regstate};
}
void emitRB(Vout& v, Trace::RingBufferType t, const char* msg) {
  if (!Trace::moduleEnabled(Trace::ringbuffer, 1)) return;
  v << vcall{CallSpec::direct(Trace::ringbufferMsg),
             v.makeVcallArgs({{v.cns(msg), v.cns(strlen(msg)), v.cns(t)}}),
             v.makeTuple({}),
             Fixup::none()};
}
void emitIncStat(Vout& v, Stats::StatCounter stat) {
  if (!Stats::enabled()) return;
  auto rdslocalBase = v.makeReg();
  auto datum = tls_datum(rds::local::detail::rl_hotSection.rdslocal_base);
  auto offset = Stats::rl_counters.getRawOffset() +
                offsetof(Stats::StatCounters, counters) +
                sizeof(decltype(stat)) * stat;
  v << load{emitTLSAddr(v, datum), rdslocalBase};
  v << incqm{rdslocalBase[offset], v.makeReg()};
}

///////////////////////////////////////////////////////////////////////////////
static Vptr getRDSHandleGenNumberAddr(rds::Handle handle) {
  return rvmtl()[rds::genNumberHandleFrom(handle)];
}

static Vptr getRDSHandleGenNumberAddr(Vreg handle) {
  return handle[DispReg(rvmtl(), -sizeof(rds::GenNumber))];
}
template<typename HandleT>
Vreg doCheckRDSHandleInitialized(Vout& v, HandleT ch) {
  markRDSAccess(v, ch);
  auto const gen = v.makeReg();
  auto const sf = v.makeReg();
  v << loadb{getRDSHandleGenNumberAddr(ch), gen};
  v << cmpbm{gen, rvmtl()[rds::currentGenNumberHandle()], sf};
  return sf;
}
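// Normal RDS handles are lazily initialized per request: a handle counts as
// initialized iff the generation number stored alongside it matches the
// current global generation. Bumping the generation at a request boundary
// invalidates every normal handle at once, with no per-handle work.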
Vreg checkRDSHandleInitialized(Vout& v, rds::Handle ch) {
  assertx(rds::isNormalHandle(ch));
  return doCheckRDSHandleInitialized(v, ch);
}

Vreg checkRDSHandleInitialized(Vout& v, Vreg ch) {
  return doCheckRDSHandleInitialized(v, ch);
}
template<typename HandleT>
void doMarkRDSHandleInitialized(Vout& v, HandleT ch) {
  markRDSAccess(v, ch);
  auto const gen = v.makeReg();
  v << loadb{rvmtl()[rds::currentGenNumberHandle()], gen};
  v << storeb{gen, getRDSHandleGenNumberAddr(ch)};
}

void markRDSHandleInitialized(Vout& v, rds::Handle ch) {
  assertx(rds::isNormalHandle(ch));
  doMarkRDSHandleInitialized(v, ch);
}

void markRDSHandleInitialized(Vout& v, Vreg ch) {
  doMarkRDSHandleInitialized(v, ch);
}
void markRDSAccess(Vout& v, rds::Handle ch) {
  if (!rds::shouldProfileAccesses()) return;
  auto const& vunit = v.unit();
  if (vunit.context && !isProfiling(vunit.context->kind)) return;
  auto const profile = rds::profileForHandle(ch);
  if (profile == rds::kUninitHandle) return;
  v << incqm{rvmtl()[profile], v.makeReg()};
}

void markRDSAccess(Vout& v, Vreg ch) {
  if (!rds::shouldProfileAccesses()) return;
  auto const& vunit = v.unit();
  if (vunit.context && !isProfiling(vunit.context->kind)) return;
  v << vcall{
    CallSpec::direct(rds::markAccess),
    v.makeVcallArgs({{ch}}),
    v.makeTuple({}),
    Fixup::none()
  };
}

////////////////////////////////////////////////////////////////////////////////
int offsetToLocalType(int id) {
  return TVOFF(m_type) - cellsToBytes(id + 1);
}

int offsetToLocalData(int id) {
  return TVOFF(m_data) - cellsToBytes(id + 1);
}

Vptr ptrToLocalType(Vreg fp, int id) {
  return fp[offsetToLocalType(id)];
}

Vptr ptrToLocalData(Vreg fp, int id) {
  return fp[offsetToLocalData(id)];
}
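// Locals are laid out downward from the frame pointer: local `id` occupies
// the TypedValue at fp - (id + 1) * sizeof(TypedValue), which is exactly the
// cellsToBytes(id + 1) bias above. Local 0 sits immediately below the frame,
// local 1 below that, and so on; hence nextLocal() subtracts and prevLocal()
// adds.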
void nextLocal(Vout& v,
               Vreg typeIn,
               Vreg dataIn,
               Vreg typeOut,
               Vreg dataOut,
               unsigned distance) {
  v << subqi{(int32_t)(sizeof(TypedValue) * distance), typeIn, typeOut,
             v.makeReg()};
  v << subqi{(int32_t)(sizeof(TypedValue) * distance), dataIn, dataOut,
             v.makeReg()};
}

void prevLocal(Vout& v,
               Vreg typeIn,
               Vreg dataIn,
               Vreg typeOut,
               Vreg dataOut) {
  v << addqi{(int32_t)sizeof(TypedValue), typeIn, typeOut, v.makeReg()};
  v << addqi{(int32_t)sizeof(TypedValue), dataIn, dataOut, v.makeReg()};
}

////////////////////////////////////////////////////////////////////////////////
uint64_t auxToMask(AuxUnion aux) {
  if (!aux.u_raw) return 0;
  if (aux.u_raw == static_cast<uint32_t>(-1)) {
    return static_cast<uint64_t>(-1) <<
      std::numeric_limits<
        std::make_unsigned<data_type_t>::type
      >::digits;
  }
  return uint64_t{aux.u_raw} << 32;
}
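// auxToMask() turns an AuxUnion into a 64-bit mask suitable for OR-ing over
// the quad at TVOFF(m_type): zero stays zero, the sentinel value -1 sets
// every bit above the 8-bit type byte, and any other value is shifted into
// the aux bytes at bit 32 (see the TypedValue layout notes in storeTVWithAux
// above).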
////////////////////////////////////////////////////////////////////////////////

}