/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/code-gen-arm.h"

#include "folly/Optional.h"

#include "hphp/runtime/ext/ext_collections.h"
#include "hphp/runtime/ext/ext_generator.h"

#include "hphp/runtime/vm/jit/abi-arm.h"
#include "hphp/runtime/vm/jit/arg-group.h"
#include "hphp/runtime/vm/jit/code-gen-helpers-arm.h"
#include "hphp/runtime/vm/jit/back-end-arm.h"
#include "hphp/runtime/vm/jit/native-calls.h"
#include "hphp/runtime/vm/jit/reg-algorithms.h"
#include "hphp/runtime/vm/jit/service-requests-arm.h"
#include "hphp/runtime/vm/jit/service-requests-inline.h"
#include "hphp/runtime/vm/jit/translator-inline.h"

namespace HPHP { namespace jit { namespace arm {
//////////////////////////////////////////////////////////////////////

struct RegSaver {
  explicit RegSaver(RegSet regs)
    : m_gprs(CPURegister::kRegister, kXRegSize, 0)
    , m_simds(CPURegister::kFPRegister, kDRegSize, 0)
    , m_maybeOddGPR(folly::none)
    , m_maybeOddSIMD(folly::none) {
    regs.forEach([&] (PhysReg r) {
      // Sort each register into the GPR or SIMD list. (Assumed body; the
      // original lambda body was elided.)
      if (r.isGP()) {
        m_gprs.Combine(x2a(r));
      } else {
        m_simds.Combine(x2simd(r));
      }
    });

    // The vixl helper requires you to pass it an even number of registers. If
    // we have an odd number of regs to save, remove one from the list we pass,
    // and save it ourselves.
    if (m_gprs.Count() % 2 == 1) {
      m_maybeOddGPR = m_gprs.PopHighestIndex();
    }
    if (m_simds.Count() % 2 == 1) {
      m_maybeOddSIMD = m_simds.PopHighestIndex();
    }
  }
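  // Illustrative example (hypothetical register choices): for an odd GPR set
  // like {x0, x1, x2}, m_gprs keeps the even pair {x0, x1} while m_maybeOddGPR
  // takes x2, so the paired push/pop helpers below always see an even count.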
  void emitPushes(MacroAssembler& as) {
    assert(m_gprs.Count() % 2 == 0);
    assert(m_simds.Count() % 2 == 0);
    as.  PushCPURegList(m_gprs);
    as.  PushCPURegList(m_simds);

    if (m_maybeOddGPR.hasValue()) {
      // We're only storing a single reg, but the stack pointer must always be
      // 16-byte aligned. This instruction subtracts 16 from the stack pointer,
      // then writes the value.
      as.  Str  (m_maybeOddGPR.value(), MemOperand(sp, -16, PreIndex));
    }
    if (m_maybeOddSIMD.hasValue()) {
      as.  Str  (m_maybeOddSIMD.value(), MemOperand(sp, -16, PreIndex));
    }
  }
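  // The PreIndex store above behaves like:
  //   sub sp, sp, #16
  //   str <reg>, [sp]
  // A full 16-byte slot is spent on a single 8-byte register so that sp keeps
  // the 16-byte alignment AArch64 requires.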
  void emitPops(MacroAssembler& as) {
    assert(m_gprs.Count() % 2 == 0);
    assert(m_simds.Count() % 2 == 0);

    if (m_maybeOddSIMD.hasValue()) {
      // Read the value, then add 16 to the stack pointer.
      as.  Ldr  (m_maybeOddSIMD.value(), MemOperand(sp, 16, PostIndex));
    }
    if (m_maybeOddGPR.hasValue()) {
      // Read the value, then add 16 to the stack pointer.
      as.  Ldr  (m_maybeOddGPR.value(), MemOperand(sp, 16, PostIndex));
    }

    as.  PopCPURegList(m_simds);
    as.  PopCPURegList(m_gprs);
  }
 private:
  CPURegList m_gprs;
  CPURegList m_simds;
  folly::Optional<CPURegister> m_maybeOddGPR;
  folly::Optional<CPURegister> m_maybeOddSIMD;
};
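// Typical use, as in cgCallHelper below: bracket a native call with pushes,
// and let scope exit restore everything on every path out:
//
//   RegSaver saver{toSave};
//   saver.emitPushes(a);
//   SCOPE_EXIT { saver.emitPops(a); };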
//////////////////////////////////////////////////////////////////////

#define NOOP_OPCODE(name) void CodeGenerator::cg##name(IRInstruction*) {}

NOOP_OPCODE(DefConst)
NOOP_OPCODE(TrackLoc)
NOOP_OPCODE(AssertLoc)
NOOP_OPCODE(AssertStk)
NOOP_OPCODE(DefLabel)
NOOP_OPCODE(ExceptionBarrier)
NOOP_OPCODE(TakeStack)
NOOP_OPCODE(EndGuards)
NOOP_OPCODE(DbgAssertPtr)

// When implemented this shouldn't be a nop, but there's no reason to make us
// punt on everything until then.
NOOP_OPCODE(DbgAssertRetAddr)
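// For reference, NOOP_OPCODE(DefConst) expands to:
//   void CodeGenerator::cgDefConst(IRInstruction*) {}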
//////////////////////////////////////////////////////////////////////

#define CALL_OPCODE(name) \
  void CodeGenerator::cg##name(IRInstruction* i) { cgCallNative(m_as, i); }

CALL_OPCODE(ConvIntToStr)

CALL_OPCODE(AllocObj)

CALL_OPCODE(ConcatStrStr)
CALL_OPCODE(ConcatIntStr)
CALL_OPCODE(ConcatStrInt)
CALL_OPCODE(ConcatStr3)
CALL_OPCODE(ConcatStr4)

CALL_OPCODE(PrintStr)
CALL_OPCODE(PrintInt)
CALL_OPCODE(PrintBool)

CALL_OPCODE(AddElemStrKey)

CALL_OPCODE(ConvBoolToArr)
CALL_OPCODE(ConvDblToArr)
CALL_OPCODE(ConvIntToArr)
CALL_OPCODE(ConvObjToArr)
CALL_OPCODE(ConvStrToArr)
CALL_OPCODE(ConvCellToArr)

CALL_OPCODE(ConvStrToBool)
CALL_OPCODE(ConvCellToBool)
CALL_OPCODE(ConvArrToDbl)
CALL_OPCODE(ConvObjToDbl)
CALL_OPCODE(ConvStrToDbl)
CALL_OPCODE(ConvCellToDbl)

CALL_OPCODE(ConvObjToInt)
CALL_OPCODE(ConvArrToInt)
CALL_OPCODE(ConvStrToInt)

CALL_OPCODE(RaiseWarning)
CALL_OPCODE(RaiseError)
CALL_OPCODE(ConvCellToObj)
CALL_OPCODE(LookupClsMethod)
CALL_OPCODE(RaiseNotice)
CALL_OPCODE(LookupClsRDSHandle)
CALL_OPCODE(LdSwitchStrIndex)
CALL_OPCODE(LdSwitchDblIndex)
CALL_OPCODE(LdSwitchObjIndex)
CALL_OPCODE(CustomInstanceInit)
CALL_OPCODE(LdClsCtor)

CALL_OPCODE(LdArrFuncCtx)
CALL_OPCODE(LdArrFPushCuf)
CALL_OPCODE(LdStrFPushCuf)
CALL_OPCODE(NewArray)
CALL_OPCODE(NewMixedArray)
CALL_OPCODE(NewVArray)
CALL_OPCODE(NewMIArray)
CALL_OPCODE(NewMSArray)
CALL_OPCODE(NewLikeArray)
CALL_OPCODE(NewPackedArray)

CALL_OPCODE(ClosureStaticLocInit)
CALL_OPCODE(VerifyParamCallable)
CALL_OPCODE(VerifyParamFail)
CALL_OPCODE(WarnNonObjProp)
CALL_OPCODE(ThrowNonObjProp)
CALL_OPCODE(RaiseUndefProp)
CALL_OPCODE(AddNewElem)
CALL_OPCODE(ColAddElemC)
CALL_OPCODE(ColAddNewElemC)
CALL_OPCODE(ArrayAdd)
CALL_OPCODE(CreateCont)
CALL_OPCODE(CreateAFWH)
CALL_OPCODE(CreateSSWH)
CALL_OPCODE(AFWHPrepareChild)
CALL_OPCODE(ABCUnblock)
CALL_OPCODE(TypeProfileFunc)
CALL_OPCODE(IncStatGrouped)
CALL_OPCODE(ZeroErrorLevel)
CALL_OPCODE(RestoreErrorLevel)

CALL_OPCODE(CountArray)
//////////////////////////////////////////////////////////////////////

void cgPunt(const char* file, int line, const char* func, uint32_t bcOff,
            const Func* vmFunc, bool resumed, TransID profTransId) {
  FTRACE(1, "punting: {}\n", func);
  throw FailedCodeGen(file, line, func, bcOff, vmFunc, resumed, profTransId);
}

#define PUNT_OPCODE(name)                                               \
  void CodeGenerator::cg##name(IRInstruction* inst) {                   \
    cgPunt(__FILE__, __LINE__, #name, m_curInst->marker().bcOff(),      \
           curFunc(), resumed(), m_curInst->marker().profTransId());    \
  }

#define CG_PUNT(instr)                                                  \
  cgPunt(__FILE__, __LINE__, #instr, m_curInst->marker().bcOff(),       \
         curFunc(), resumed(), m_curInst->marker().profTransId())
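// For reference, PUNT_OPCODE(ArrayIdx) expands to a cgArrayIdx whose body
// just calls cgPunt, which throws FailedCodeGen and abandons native codegen
// for the current translation.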
//////////////////////////////////////////////////////////////////////
// TODO t3702757: Convert to CALL_OPCODE; the following set works on
// x86 but needs a closer look on arm.
PUNT_OPCODE(AddElemIntKey)
PUNT_OPCODE(ConvCellToInt)
PUNT_OPCODE(ArrayIdx)
PUNT_OPCODE(RaiseArrayIndexNotice)
PUNT_OPCODE(RaiseUninitLoc)
PUNT_OPCODE(VerifyRetCallable)
PUNT_OPCODE(VerifyRetFail)
PUNT_OPCODE(GenericIdx)
// End of failing set
//////////////////////////////////////////////////////////////////////

PUNT_OPCODE(ProfileStr)
PUNT_OPCODE(ConvArrToBool)
PUNT_OPCODE(ConvDblToBool)
PUNT_OPCODE(ConvIntToBool)
PUNT_OPCODE(ConvObjToBool)
PUNT_OPCODE(ConvBoolToDbl)
PUNT_OPCODE(ConvIntToDbl)

PUNT_OPCODE(ConvBoolToInt)
PUNT_OPCODE(ConvDblToInt)

PUNT_OPCODE(ConvBoolToStr)
PUNT_OPCODE(ConvDblToStr)
PUNT_OPCODE(ConvObjToStr)
PUNT_OPCODE(ConvResToStr)
PUNT_OPCODE(ConvCellToStr)

PUNT_OPCODE(ProfileArray)
PUNT_OPCODE(CheckTypeMem)
PUNT_OPCODE(CheckLoc)

PUNT_OPCODE(CastStkIntToDbl)
PUNT_OPCODE(CoerceStk)
PUNT_OPCODE(CheckDefinedClsEq)
PUNT_OPCODE(TryEndCatch)
PUNT_OPCODE(LdUnwinderValue)
PUNT_OPCODE(DeleteUnwinderException)

PUNT_OPCODE(ExtendsClass)
PUNT_OPCODE(IsWaitHandle)
PUNT_OPCODE(InstanceOf)
PUNT_OPCODE(InstanceOfIface)
PUNT_OPCODE(InterfaceSupportsArr)
PUNT_OPCODE(InterfaceSupportsStr)
PUNT_OPCODE(InterfaceSupportsInt)
PUNT_OPCODE(InterfaceSupportsDbl)
PUNT_OPCODE(IsTypeMem)
PUNT_OPCODE(IsNTypeMem)

PUNT_OPCODE(InstanceOfBitmask)
PUNT_OPCODE(NInstanceOfBitmask)

PUNT_OPCODE(IsScalarType)

PUNT_OPCODE(JmpGtInt)
PUNT_OPCODE(JmpGteInt)
PUNT_OPCODE(JmpLtInt)
PUNT_OPCODE(JmpLteInt)
PUNT_OPCODE(JmpEqInt)
PUNT_OPCODE(JmpNeqInt)

PUNT_OPCODE(JmpNSame)
PUNT_OPCODE(JmpInstanceOfBitmask)
PUNT_OPCODE(JmpNInstanceOfBitmask)

PUNT_OPCODE(JmpNZero)
PUNT_OPCODE(ReqBindJmpGt)
PUNT_OPCODE(ReqBindJmpGte)
PUNT_OPCODE(ReqBindJmpLt)
PUNT_OPCODE(ReqBindJmpLte)
PUNT_OPCODE(ReqBindJmpEq)
PUNT_OPCODE(ReqBindJmpNeq)
PUNT_OPCODE(ReqBindJmpGtInt)
PUNT_OPCODE(ReqBindJmpGteInt)
PUNT_OPCODE(ReqBindJmpLtInt)
PUNT_OPCODE(ReqBindJmpLteInt)
PUNT_OPCODE(ReqBindJmpEqInt)
PUNT_OPCODE(ReqBindJmpNeqInt)
PUNT_OPCODE(ReqBindJmpSame)
PUNT_OPCODE(ReqBindJmpNSame)
PUNT_OPCODE(ReqBindJmpInstanceOfBitmask)
PUNT_OPCODE(ReqBindJmpNInstanceOfBitmask)
PUNT_OPCODE(ReqBindJmpZero)
PUNT_OPCODE(ReqBindJmpNZero)
PUNT_OPCODE(SideExitJmpGt)
PUNT_OPCODE(SideExitJmpGte)
PUNT_OPCODE(SideExitJmpLt)
PUNT_OPCODE(SideExitJmpLte)
PUNT_OPCODE(SideExitJmpEq)
PUNT_OPCODE(SideExitJmpNeq)
PUNT_OPCODE(SideExitJmpGtInt)
PUNT_OPCODE(SideExitJmpGteInt)
PUNT_OPCODE(SideExitJmpLtInt)
PUNT_OPCODE(SideExitJmpLteInt)
PUNT_OPCODE(SideExitJmpEqInt)
PUNT_OPCODE(SideExitJmpNeqInt)
PUNT_OPCODE(SideExitJmpSame)
PUNT_OPCODE(SideExitJmpNSame)
PUNT_OPCODE(SideExitJmpInstanceOfBitmask)
PUNT_OPCODE(SideExitJmpNInstanceOfBitmask)
PUNT_OPCODE(SideExitJmpZero)
PUNT_OPCODE(SideExitJmpNZero)
PUNT_OPCODE(SideExitGuardLoc)
PUNT_OPCODE(JmpIndirect)
PUNT_OPCODE(CheckSurpriseFlags)
PUNT_OPCODE(SurpriseHook)
PUNT_OPCODE(FunctionSuspendHook)
PUNT_OPCODE(FunctionReturnHook)
PUNT_OPCODE(ReleaseVVOrExit)
PUNT_OPCODE(CheckInit)
PUNT_OPCODE(CheckInitMem)
PUNT_OPCODE(CheckCold)
PUNT_OPCODE(CheckNullptr)
PUNT_OPCODE(CheckBounds)
PUNT_OPCODE(LdVectorSize)
PUNT_OPCODE(CheckPackedArrayBounds)
PUNT_OPCODE(CheckPackedArrayElemNull)
PUNT_OPCODE(VectorHasImmCopy)
PUNT_OPCODE(VectorDoCow)
PUNT_OPCODE(CheckNonNull)
PUNT_OPCODE(AssertNonNull)
PUNT_OPCODE(UnboxPtr)

PUNT_OPCODE(LdVectorBase)
PUNT_OPCODE(LdPairBase)
PUNT_OPCODE(LdLocAddr)
PUNT_OPCODE(LdPackedArrayElem)
PUNT_OPCODE(LdRetAddr)
PUNT_OPCODE(ConvClsToCctx)

PUNT_OPCODE(LdClsCached)
PUNT_OPCODE(LdClsCachedSafe)
PUNT_OPCODE(LdClsCtx)
PUNT_OPCODE(LdClsCctx)
PUNT_OPCODE(LdClsCns)
PUNT_OPCODE(LookupClsCns)

PUNT_OPCODE(LdClsInitData)
PUNT_OPCODE(LookupCns)
PUNT_OPCODE(LookupCnsE)
PUNT_OPCODE(LookupCnsU)
PUNT_OPCODE(DerefClsRDSHandle)
PUNT_OPCODE(LookupClsMethodCache)
PUNT_OPCODE(LdClsMethodCacheFunc)
PUNT_OPCODE(LdClsMethodCacheCls)
PUNT_OPCODE(LdClsMethodFCacheFunc)
PUNT_OPCODE(LookupClsMethodFCache)
PUNT_OPCODE(GetCtxFwdCallDyn)
PUNT_OPCODE(GetCtxFwdCall)
PUNT_OPCODE(LdClsMethod)
PUNT_OPCODE(LdPropAddr)
PUNT_OPCODE(LdClsPropAddrKnown)
PUNT_OPCODE(LdClsPropAddrOrNull)
PUNT_OPCODE(LdClsPropAddrOrRaise)
PUNT_OPCODE(LdObjMethod)
PUNT_OPCODE(LdObjInvoke)
PUNT_OPCODE(LdGblAddrDef)
PUNT_OPCODE(LdGblAddr)
PUNT_OPCODE(LdObjClass)

PUNT_OPCODE(LdFuncCachedU)
PUNT_OPCODE(LdFuncCachedSafe)
PUNT_OPCODE(LdBindAddr)
PUNT_OPCODE(LdSSwitchDestFast)
PUNT_OPCODE(LdSSwitchDestSlow)
PUNT_OPCODE(JmpSwitchDest)
PUNT_OPCODE(ConstructInstance)
PUNT_OPCODE(CheckInitProps)
PUNT_OPCODE(InitProps)
PUNT_OPCODE(CheckInitSProps)
PUNT_OPCODE(InitSProps)
PUNT_OPCODE(RegisterLiveObj)
PUNT_OPCODE(NewInstanceRaw)
PUNT_OPCODE(InitObjProps)
PUNT_OPCODE(StClosureFunc)
PUNT_OPCODE(StClosureArg)
PUNT_OPCODE(StClosureCtx)
PUNT_OPCODE(NewStructArray)
PUNT_OPCODE(FreeActRec)
PUNT_OPCODE(CallArray)
PUNT_OPCODE(NativeImpl)

PUNT_OPCODE(StRetVal)
PUNT_OPCODE(RetAdjustStack)

PUNT_OPCODE(LdStaticLocCached)
PUNT_OPCODE(CheckStaticLocInit)
PUNT_OPCODE(StaticLocInitCached)
PUNT_OPCODE(CufIterSpillFrame)
PUNT_OPCODE(ReqRetranslateOpt)

PUNT_OPCODE(LdMIStateAddr)
PUNT_OPCODE(IncRefCtx)
PUNT_OPCODE(DecRefThis)
PUNT_OPCODE(GenericRetDecRefs)

PUNT_OPCODE(DecRefNZ)
PUNT_OPCODE(DefInlineFP)
PUNT_OPCODE(InlineReturn)

PUNT_OPCODE(OODeclExists)
PUNT_OPCODE(VerifyParamCls)
PUNT_OPCODE(VerifyRetCls)
PUNT_OPCODE(ConcatCellCell)
PUNT_OPCODE(AKExists)
PUNT_OPCODE(ContEnter)
PUNT_OPCODE(ContPreNext)
PUNT_OPCODE(ContStartedCheck)
PUNT_OPCODE(ContValid)
PUNT_OPCODE(ContArIncKey)
PUNT_OPCODE(ContArUpdateIdx)
PUNT_OPCODE(LdContActRec)
PUNT_OPCODE(StContArRaw)
PUNT_OPCODE(LdContArValue)
PUNT_OPCODE(StContArValue)
PUNT_OPCODE(LdContArKey)
PUNT_OPCODE(StContArKey)
PUNT_OPCODE(StAsyncArRaw)
PUNT_OPCODE(StAsyncArResult)
PUNT_OPCODE(LdAsyncArParentChain)
PUNT_OPCODE(AFWHBlockOn)
PUNT_OPCODE(LdWHState)
PUNT_OPCODE(LdWHResult)
PUNT_OPCODE(LdAFWHActRec)
PUNT_OPCODE(LdResumableArObj)
PUNT_OPCODE(IterInit)
PUNT_OPCODE(IterInitK)
PUNT_OPCODE(IterNext)
PUNT_OPCODE(IterNextK)
PUNT_OPCODE(WIterInit)
PUNT_OPCODE(WIterInitK)
PUNT_OPCODE(WIterNext)
PUNT_OPCODE(WIterNextK)
PUNT_OPCODE(MIterInit)
PUNT_OPCODE(MIterInitK)
PUNT_OPCODE(MIterNext)
PUNT_OPCODE(MIterNextK)
PUNT_OPCODE(IterFree)
PUNT_OPCODE(MIterFree)
PUNT_OPCODE(DecodeCufIter)
PUNT_OPCODE(CIterFree)
PUNT_OPCODE(DefMIStateBase)

PUNT_OPCODE(PropDXStk)
PUNT_OPCODE(CGetProp)
PUNT_OPCODE(VGetProp)
PUNT_OPCODE(VGetPropStk)
PUNT_OPCODE(BindProp)
PUNT_OPCODE(BindPropStk)
PUNT_OPCODE(SetPropStk)
PUNT_OPCODE(UnsetProp)
PUNT_OPCODE(SetOpProp)
PUNT_OPCODE(SetOpPropStk)
PUNT_OPCODE(IncDecProp)
PUNT_OPCODE(IncDecPropStk)
PUNT_OPCODE(EmptyProp)
PUNT_OPCODE(IssetProp)

PUNT_OPCODE(ElemArray)
PUNT_OPCODE(ElemDXStk)
PUNT_OPCODE(ElemUXStk)
PUNT_OPCODE(ArrayGet)
PUNT_OPCODE(StringGet)

PUNT_OPCODE(CGetElem)
PUNT_OPCODE(VGetElem)
PUNT_OPCODE(VGetElemStk)
PUNT_OPCODE(BindElem)
PUNT_OPCODE(BindElemStk)
PUNT_OPCODE(ArraySet)
PUNT_OPCODE(ArraySetRef)
PUNT_OPCODE(SetElemStk)
PUNT_OPCODE(SetWithRefElem)
PUNT_OPCODE(SetWithRefElemStk)
PUNT_OPCODE(UnsetElem)
PUNT_OPCODE(UnsetElemStk)
PUNT_OPCODE(SetOpElem)
PUNT_OPCODE(SetOpElemStk)
PUNT_OPCODE(IncDecElem)
PUNT_OPCODE(IncDecElemStk)
PUNT_OPCODE(SetNewElem)
PUNT_OPCODE(SetNewElemStk)
PUNT_OPCODE(SetNewElemArray)
PUNT_OPCODE(SetNewElemArrayStk)
PUNT_OPCODE(SetWithRefNewElem)
PUNT_OPCODE(SetWithRefNewElemStk)
PUNT_OPCODE(BindNewElem)
PUNT_OPCODE(BindNewElemStk)
PUNT_OPCODE(ArrayIsset)
PUNT_OPCODE(StringIsset)
PUNT_OPCODE(VectorIsset)
PUNT_OPCODE(PairIsset)
PUNT_OPCODE(MapIsset)
PUNT_OPCODE(IssetElem)
PUNT_OPCODE(EmptyElem)

PUNT_OPCODE(IncTransCounter)
PUNT_OPCODE(IncProfCounter)
PUNT_OPCODE(DbgAssertType)

PUNT_OPCODE(EagerSyncVMRegs)
PUNT_OPCODE(ColIsEmpty)
PUNT_OPCODE(ColIsNEmpty)
//////////////////////////////////////////////////////////////////////

void emitJumpToBlock(CodeBlock& cb, Block* target, ConditionCode cc,
                     CodegenState& state) {
  vixl::MacroAssembler as { cb };

  if (state.addresses[target]) {
    // (Assumed branch body) The target block is already emitted; jump
    // straight to its known address.
    mcg->backEnd().emitSmashableJump(cb, state.addresses[target], cc);
    return;
  }

  // The block hasn't been emitted yet. Record the location in CodegenState.
  // CodegenState holds a map from Block* to the head of a linked list, where
  // the jump instructions themselves are the list nodes.
  auto next = reinterpret_cast<TCA>(state.patches[target]);
  auto here = cb.frontier();

  // To avoid encoding 0x0 as the jump target. That would conflict with the use
  // of nullptr as a sentinel return value from jmpTarget() and jccTarget().
  // Consider switching those to use folly::Optional or something?
  if (!next) next = kEndOfTargetChain;

  // This will never actually be executed as a jump to "next". It's just a
  // pointer to the next jump instruction to retarget.
  mcg->backEnd().emitSmashableJump(cb, next, cc);
  state.patches[target] = here;
}
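// Sketch of the patch chain built above: each not-yet-emitted Block
// accumulates a singly linked list of smashable jumps, threaded through the
// jumps' own target fields:
//
//   state.patches[B] -> jmp#3 -> jmp#2 -> jmp#1 -> kEndOfTargetChain
//
// Once B is emitted, the list is walked and every jump is smashed to point at
// B's real address.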
//////////////////////////////////////////////////////////////////////

void CodeGenerator::recordHostCallSyncPoint(vixl::MacroAssembler& as,
                                            TCA tca) {
  auto stackOff = m_curInst->marker().spOff();
  auto pcOff = m_curInst->marker().bcOff() - m_curInst->marker().func()->base();
  mcg->recordSyncPoint(tca, pcOff, stackOff);
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgConjure(IRInstruction* inst) {
  always_assert(false);
}

void CodeGenerator::cgHalt(IRInstruction* inst) {
  always_assert(false);
}

//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgJmp(IRInstruction* inst) {
  emitJumpToBlock(m_mainCode, inst->taken(), CC_None, m_state);
}
void CodeGenerator::cgDbgAssertRefCount(IRInstruction* inst) {
  vixl::Label done;
  auto base = x2a(srcLoc(0).reg());
  auto rCount = rAsm;
  m_as.  Ldr  (rCount.W(), base[FAST_REFCOUNT_OFFSET]);
  m_as.  Tbnz (rCount, UncountedBitPos, &done);
  m_as.  Cmp  (rCount, RefCountMaxRealistic);
  m_as.  B    (&done, vixl::ls);
  m_as.  Brk  (0);
  m_as.  bind (&done);
}
void CodeGenerator::cgIncRef(IRInstruction* inst) {
  SSATmp* src = inst->src(0);
  auto loc = srcLoc(0);
  Type type = src->type();

  if (type.notCounted()) return;

  auto increfMaybeStatic = [&] {
    auto base = x2a(loc.reg(0));
    auto rCount = rAsm.W();
    m_as.  Ldr  (rCount, base[FAST_REFCOUNT_OFFSET]);
    if (!type.needsStaticBitCheck()) {
      m_as.  Add  (rCount, rAsm.W(), 1);
      m_as.  Str  (rCount, base[FAST_REFCOUNT_OFFSET]);
    } else {
      m_as.  Cmp  (rCount, 0);
      static_assert(UncountedValue < 0 && StaticValue < 0, "");
      ifThen(m_as, vixl::ge, [&] {
        m_as.Add(rCount, rCount, 1);
        m_as.Str(rCount, base[FAST_REFCOUNT_OFFSET]);
      });
    }
  };

  if (type.isKnownDataType()) {
    assert(IS_REFCOUNTED_TYPE(type.toDataType()));
    increfMaybeStatic();
  } else {
    m_as.  Cmp  (x2a(loc.reg(1)).W(), KindOfRefCountThreshold);
    ifThen(m_as, vixl::gt, [&] { increfMaybeStatic(); });
  }
}
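// Note on the guard in increfMaybeStatic: UncountedValue and StaticValue are
// negative sentinels, so a single signed "count >= 0" test (vixl::ge) filters
// out both uncounted and static values before the increment.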
void CodeGenerator::cgAssertType(IRInstruction* inst) {
  auto const srcRegs = srcLoc(0);
  auto const dstRegs = dstLoc(0);

  PhysReg::Map<PhysReg> moves;
  if (dstRegs.reg(0) != InvalidReg)
    moves[dstRegs.reg(0)] = srcRegs.reg(0);
  if (dstRegs.reg(1) != InvalidReg)
    moves[dstRegs.reg(1)] = srcRegs.reg(1);

  auto howTo = doRegMoves(moves, rAsm);
  for (auto& how : howTo) {
    if (how.m_kind == MoveInfo::Kind::Move) {
      m_as.  Mov  (x2a(how.m_dst), x2a(how.m_src));
    } else {
      emitXorSwap(m_as, x2a(how.m_dst), x2a(how.m_src));
    }
  }
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::emitDecRefStaticType(Type type,
                                         vixl::Register dataReg) {
  assert(type.isKnownDataType());
  assert(!dataReg.Is(rAsm2));

  vixl::Label allDone;

  m_as.  Ldr  (rAsm2.W(), dataReg[FAST_REFCOUNT_OFFSET]);

  if (type.needsStaticBitCheck()) {
    m_as.  Tbnz (rAsm2, UncountedBitPos, &allDone);
  }

  m_as.  Sub  (rAsm2.W(), rAsm2.W(), 1, vixl::SetFlags);
  m_as.  Str  (rAsm2.W(), dataReg[FAST_REFCOUNT_OFFSET]);

  m_as.  B    (&allDone, vixl::ne);
  cgCallHelper(m_as,
               MCGenerator::getDtorCall(type.toDataType()),
               kVoidDest,
               SyncOptions::kSyncPoint,
               argGroup().reg(dataReg));

  m_as.  bind (&allDone);
}
void CodeGenerator::emitDecRefDynamicType(vixl::Register baseReg,
                                          int offset) {
  // Make sure both temp registers are still available
  assert(!baseReg.Is(rAsm));
  assert(!baseReg.Is(rAsm2));

  vixl::Label allDone;

  // Check the type: uncounted kinds need no decref at all.
  m_as.  Ldrb (rAsm.W(), baseReg[offset + TVOFF(m_type)]);
  m_as.  Cmp  (rAsm.W(), KindOfRefCountThreshold);
  m_as.  B    (&allDone, vixl::le);

  // Type is refcounted. Load the refcount.
  m_as.  Ldr  (rAsm, baseReg[offset + TVOFF(m_data)]);
  m_as.  Ldr  (rAsm2.W(), rAsm[FAST_REFCOUNT_OFFSET]);

  // Is it static? Note that only the lower 32 bits of rAsm2 are valid right
  // now, but tbnz is only looking at a single one of them, so this is OK.
  m_as.  Tbnz (rAsm2, UncountedBitPos, &allDone);

  // Not static. Decrement and write back.
  m_as.  Sub  (rAsm2.W(), rAsm2.W(), 1, vixl::SetFlags);
  m_as.  Str  (rAsm2.W(), rAsm[FAST_REFCOUNT_OFFSET]);

  // Did it go to zero?
  m_as.  B    (&allDone, vixl::ne);

  // Went to zero. Have to destruct.
  cgCallHelper(m_as,
               CppCall::direct(tv_release_generic),
               kVoidDest,
               SyncOptions::kSyncPoint,
               argGroup().addr(baseReg, offset));

  m_as.  bind (&allDone);
}
void CodeGenerator::emitDecRefMem(Type type,
                                  vixl::Register baseReg,
                                  int offset) {
  if (type.needsReg()) {
    emitDecRefDynamicType(baseReg, offset);
  } else if (type.maybeCounted()) {
    m_as.  Ldr  (rAsm, baseReg[offset + TVOFF(m_data)]);
    emitDecRefStaticType(type, rAsm);
  }
}

void CodeGenerator::cgDecRefStack(IRInstruction* inst) {
  emitDecRefMem(inst->typeParam(),
                x2a(srcLoc(0).reg()),
                cellsToBytes(inst->extra<DecRefStack>()->offset));
}

void CodeGenerator::cgDecRefLoc(IRInstruction* inst) {
  emitDecRefMem(inst->typeParam(),
                x2a(srcLoc(0).reg()),
                localOffset(inst->extra<DecRefLoc>()->locId));
}

void CodeGenerator::cgDecRefMem(IRInstruction* inst) {
  emitDecRefMem(inst->typeParam(),
                x2a(srcLoc(0).reg()),
                inst->src(1)->intVal());
}
//////////////////////////////////////////////////////////////////////
// Arithmetic Instructions

void CodeGenerator::cgAddInt(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  if (srcRegR != InvalidReg) {
    m_as. Add(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
  } else {
    m_as. Add(x2a(dstReg), x2a(srcRegL), inst->src(1)->intVal());
  }
}

void CodeGenerator::cgSubInt(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  if (srcRegR != InvalidReg) {
    m_as. Sub(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
  } else {
    m_as. Sub(x2a(dstReg), x2a(srcRegL), inst->src(1)->intVal());
  }
}

void CodeGenerator::cgMulInt(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  m_as. Mul(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
}
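// Example of the register-vs-immediate split above: AddInt with a constant
// right operand can emit e.g. "add x0, x1, #8" (register names hypothetical),
// while MulInt always needs both operands in registers because AArch64 has no
// multiply-with-immediate form.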
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgAndInt(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  if (srcRegR != InvalidReg) {
    m_as. And(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
  } else {
    m_as. And(x2a(dstReg), x2a(srcRegL), inst->src(1)->intVal());
  }
}

void CodeGenerator::cgOrInt(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  if (srcRegR != InvalidReg) {
    m_as. Orr(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
  } else {
    m_as. Orr(x2a(dstReg), x2a(srcRegL), inst->src(1)->intVal());
  }
}

void CodeGenerator::cgXorInt(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  if (srcRegR != InvalidReg) {
    m_as. Eor(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
  } else {
    m_as. Eor(x2a(dstReg), x2a(srcRegL), inst->src(1)->intVal());
  }
}
void CodeGenerator::cgShl(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  // TODO: t3870154 add shift-by-immediate support to vixl
  m_as. lslv(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
}

void CodeGenerator::cgShr(IRInstruction* inst) {
  auto dstReg  = dstLoc(0).reg();
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  // TODO: t3870154 add shift-by-immediate support to vixl
  m_as. asrv(x2a(dstReg), x2a(srcRegL), x2a(srcRegR));
}
//////////////////////////////////////////////////////////////////////
// Comparison Operations

void CodeGenerator::emitCompareIntAndSet(IRInstruction* inst,
                                         vixl::Condition cond) {
  auto dstReg = dstLoc(0).reg();
  emitCompareInt(inst);
  m_as. Cset(x2a(dstReg), cond);
}

void CodeGenerator::emitCompareInt(IRInstruction* inst) {
  auto srcRegL = srcLoc(0).reg();
  auto srcRegR = srcLoc(1).reg();

  if (srcRegR != InvalidReg) {
    m_as. Cmp(x2a(srcRegL), x2a(srcRegR));
  } else {
    m_as. Cmp(x2a(srcRegL), inst->src(1)->intVal());
  }
}

void CodeGenerator::cgLtInt(IRInstruction* inst) {
  emitCompareIntAndSet(inst, vixl::Condition::lt);
}

void CodeGenerator::cgGtInt(IRInstruction* inst) {
  emitCompareIntAndSet(inst, vixl::Condition::gt);
}

void CodeGenerator::cgGteInt(IRInstruction* inst) {
  emitCompareIntAndSet(inst, vixl::Condition::ge);
}

void CodeGenerator::cgLteInt(IRInstruction* inst) {
  emitCompareIntAndSet(inst, vixl::Condition::le);
}

void CodeGenerator::cgEqInt(IRInstruction* inst) {
  emitCompareIntAndSet(inst, vixl::Condition::eq);
}

void CodeGenerator::cgNeqInt(IRInstruction* inst) {
  emitCompareIntAndSet(inst, vixl::Condition::ne);
}
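// The pattern above compiles e.g. an integer "a < b" to (register names
// hypothetical):
//   cmp  x0, x1      // emitCompareInt sets the flags
//   cset x2, lt      // emitCompareIntAndSet: x2 = (a < b) ? 1 : 0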
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgShuffle(IRInstruction* inst) {
  PhysReg::Map<PhysReg> moves;

  // Put required reg-reg moves in the map, and do spills at the same time.
  for (auto i = 0; i < inst->numSrcs(); ++i) {
    auto& rd = inst->extra<Shuffle>()->dests[i];
    auto& rs = srcLoc(i);

    if (rd.numAllocated() == 0) continue;
    if (rd.spilled()) {
      for (auto j = 0; j < rd.numAllocated(); ++j) {
        m_as.  Str  (x2a(rs.reg(j)), vixl::MemOperand(vixl::sp, rd.offset(j)));
      }
    } else if (!rs.spilled()) {
      if (rs.reg(0) != InvalidReg) moves[rd.reg(0)] = rs.reg(0);
      if (rs.reg(1) != InvalidReg) moves[rd.reg(1)] = rs.reg(1);
    }
  }

  auto howTo = doRegMoves(moves, rAsm);
  for (auto& how : howTo) {
    if (how.m_kind == MoveInfo::Kind::Move) {
      emitRegGetsRegPlusImm(m_as, x2a(how.m_dst), x2a(how.m_src), 0);
    } else {
      emitXorSwap(m_as, x2a(how.m_src), x2a(how.m_dst));
    }
  }

  // Now do reloads and reg<-imm.
  for (auto i = 0; i < inst->numSrcs(); ++i) {
    auto src = inst->src(i);
    auto& rd = inst->extra<Shuffle>()->dests[i];
    auto& rs = srcLoc(i);
    if (rd.numAllocated() == 0) continue;
    if (rd.spilled()) continue;
    if (rs.spilled()) {
      for (auto j = 0; j < rd.numAllocated(); ++j) {
        m_as.  Ldr  (x2a(rd.reg(j)), vixl::MemOperand(vixl::sp, rs.offset(j)));
      }
      continue;
    }
    if (rs.numAllocated() == 0) {
      assert(src->inst()->op() == DefConst);
      auto rDst = rd.reg(0);
      if (rDst.isGP()) {
        m_as.  Mov  (x2a(rDst), src->rawVal());
      } else {
        // Assembler::fmov (which you'd think shouldn't be a macro instruction)
        // will emit a ldr from a literal pool if IsImmFP64 is false. vixl's
        // literal pools don't work well with our codegen pattern, so if that
        // would happen, emit the raw bits into a GPR first and then move them
        // unmodified into a SIMD.
        if (vixl::Assembler::IsImmFP64(src->dblVal())) {
          m_as.  Fmov (x2simd(rDst), src->dblVal());
        } else if (src->dblVal() == 0.0) {
          // 0 is not encodeable as an immediate to Fmov, but this works.
          m_as.  Fmov (x2simd(rDst), vixl::xzr);
        } else {
          m_as.  Mov  (rAsm, src->rawVal());
          m_as.  Fmov (x2simd(rDst), rAsm);
        }
      }
    }
    if (rd.numAllocated() == 2 && rs.numAllocated() < 2) {
      // Move src known type to register
      m_as.  Mov  (x2a(rd.reg(1)), src->type().toDataType());
    }
  }
}
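// doRegMoves returns a serialized plan of Move/Xchg steps that respects
// cycles among the requested reg-reg moves; cycles are broken either with the
// rAsm scratch register or, as above, with an in-place emitXorSwap.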
//////////////////////////////////////////////////////////////////////

static void shuffleArgs(vixl::MacroAssembler& a,
                        ArgGroup& args,
                        CppCall& call) {
  PhysReg::Map<PhysReg> moves;
  PhysReg::Map<ArgDesc*> argDescs;

  for (size_t i = 0; i < args.numGpArgs(); i++) {
    auto& arg = args.gpArg(i);
    auto kind = arg.kind();
    if (!(kind == ArgDesc::Kind::Reg ||
          kind == ArgDesc::Kind::Addr ||
          kind == ArgDesc::Kind::TypeReg)) {
      continue;
    }
    auto dstReg = arg.dstReg();
    auto srcReg = arg.srcReg();
    if (dstReg != srcReg) {
      moves[dstReg] = srcReg;
      argDescs[dstReg] = &arg;
    }
    switch (call.kind()) {
    case CppCall::Kind::IndirectReg:
      if (dstReg == call.reg()) {
        // an indirect call uses an argument register for the func ptr.
        // Use rAsm2 instead and update the CppCall
        moves[rAsm2] = call.reg();
        call.updateCallIndirect(rAsm2);
      }
      break;
    case CppCall::Kind::Direct:
    case CppCall::Kind::Virtual:
    case CppCall::Kind::ArrayVirt:
    case CppCall::Kind::Destructor:
      break;
    case CppCall::Kind::IndirectVreg:
      always_assert(false);
      break;
    }
  }

  auto const howTo = doRegMoves(moves, rAsm);

  for (auto& how : howTo) {
    vixl::CPURegister srcReg(how.m_src);
    vixl::CPURegister dstReg(how.m_dst);
    if (how.m_kind == MoveInfo::Kind::Move) {
      auto* argDesc = argDescs[how.m_dst];
      if (argDesc) {
        auto kind = argDesc->kind();
        if (kind == ArgDesc::Kind::Addr) {
          emitRegGetsRegPlusImm(a, vixl::Register{dstReg},
                                vixl::Register{srcReg}, argDesc->disp().l());
        } else {
          if (argDesc->isZeroExtend()) {
            // "Unsigned eXTend Byte". The dest reg is a 32-bit reg but this
            // zeroes the top 32 bits, so the intended effect is achieved.
            a.Uxtb (dstReg.W(), srcReg.W());
          } else {
            emitRegRegMove(a, dstReg, srcReg);
          }
        }
        if (kind != ArgDesc::Kind::TypeReg) {
          argDesc->markDone();
        }
      } else {
        emitRegRegMove(a, dstReg, srcReg);
      }
    } else {
      emitXorSwap(a, vixl::Register{dstReg}, vixl::Register{srcReg});
    }
  }

  for (size_t i = 0; i < args.numGpArgs(); ++i) {
    auto& arg = args.gpArg(i);
    if (arg.done()) continue;
    auto kind = arg.kind();
    auto dstReg = x2a(arg.dstReg());
    if (kind == ArgDesc::Kind::Imm) {
      a.  Mov  (dstReg, arg.imm().q());
    } else if (kind == ArgDesc::Kind::Reg || kind == ArgDesc::Kind::TypeReg) {
      // Should have already been done
    } else {
      not_implemented();
    }
  }
}
void CodeGenerator::cgCallNative(vixl::MacroAssembler& as,
                                 IRInstruction* inst) {
  using namespace NativeCalls;

  Opcode opc = inst->op();
  always_assert(CallMap::hasInfo(opc));

  auto const& info = CallMap::info(opc);
  ArgGroup argGroup = toArgGroup(info, m_state.regs, inst);

  auto call = [&]() -> CppCall {
    switch (info.func.type) {
    case FuncType::Call:
      return CppCall(info.func.call);
    case FuncType::SSA:
      return CppCall::direct(
        reinterpret_cast<void(*)()>(inst->src(info.func.srcIdx)->tcaVal()));
    }
    not_reached();
  }();

  auto const dest = [&]() -> CallDest {
    switch (info.dest) {
      case DestType::None: return kVoidDest;
      case DestType::TV:   return callDestTV(inst);
      case DestType::SIMD: return callDestTV(inst);
      case DestType::SSA:  return callDest(inst);
      case DestType::Dbl:  return callDestDbl(inst);
    }
    not_reached();
  }();

  cgCallHelper(as, call, dest, info.sync, argGroup);
}
void CodeGenerator::cgCallHelper(vixl::MacroAssembler& a,
                                 CppCall call,
                                 const CallDest& dstInfo,
                                 SyncOptions sync,
                                 ArgGroup& args,
                                 RegSet toSave) {
  assert(m_curInst->isNative());

  auto dstReg0 = dstInfo.reg0;
  DEBUG_ONLY auto dstReg1 = dstInfo.reg1;

  toSave = toSave & (kGPCallerSaved | kSIMDCallerSaved);
  assert((toSave & RegSet().add(dstReg0).add(dstReg1)).empty());

  RegSaver saver{toSave};
  saver.emitPushes(a);
  SCOPE_EXIT { saver.emitPops(a); };

  for (size_t i = 0; i < args.numGpArgs(); i++) {
    args.gpArg(i).setDstReg(PhysReg{argReg(i)});
  }
  always_assert_flog(
    args.numStackArgs() == 0,
    "Stack arguments not yet supported on ARM: `{}'\n\n{}",
    *m_curInst, m_unit
  );
  shuffleArgs(a, args, call);

  auto syncPoint = emitCall(a, call);

  if (RuntimeOption::HHProfServerEnabled || sync != SyncOptions::kNoSyncPoint) {
    recordHostCallSyncPoint(a, syncPoint);
  }

  auto* taken = m_curInst->taken();
  if (taken && taken->isCatch()) {
    auto& info = m_state.catches[taken];
    assert(!info.afterCall);
    info.afterCall = syncPoint;
    info.savedRegs = toSave;
    assert_not_implemented(args.numStackArgs() == 0);
    info.rspOffset = args.numStackArgs();
  } else if (!m_curInst->is(Call, CallArray, ContEnter)) {
    mcg->registerCatchBlock(a.frontier(), nullptr);
  }

  vixl::CPURegister armDst0(dstReg0);

  switch (dstInfo.type) {
    case DestType::TV: not_implemented();
    case DestType::SIMD: not_implemented();
    case DestType::SSA:
      assert(dstReg1 == InvalidReg);
      if (armDst0.IsValid() && !armDst0.Is(vixl::x0)) {
        emitRegRegMove(a, armDst0, vixl::x0);
      }
      break;
    case DestType::None:
      assert(dstReg0 == InvalidReg && dstReg1 == InvalidReg);
      break;
    case DestType::Dbl:
      assert(dstReg1 == InvalidReg);
      if (armDst0.IsValid() && !armDst0.Is(vixl::d0)) {
        emitRegRegMove(a, armDst0, vixl::d0);
      }
      break;
  }
}
void CodeGenerator::cgCallHelper(vixl::MacroAssembler& a,
                                 CppCall call,
                                 const CallDest& dstInfo,
                                 SyncOptions sync,
                                 ArgGroup& args) {
  cgCallHelper(a, call, dstInfo, sync, args, m_state.liveRegs[m_curInst]);
}
/*
 * XXX copypasta but has to be in the class because of curPhysLoc and
 * changing that would make callsites real messy
 */

CallDest CodeGenerator::callDest(PhysReg reg0,
                                 PhysReg reg1 /* = InvalidReg */) const {
  return { DestType::SSA, reg0, reg1 };
}

CallDest CodeGenerator::callDest(const IRInstruction* inst) const {
  if (!inst->numDsts()) return kVoidDest;
  auto loc = dstLoc(0);
  return { DestType::SSA, loc.reg(0), loc.reg(1) };
}

CallDest CodeGenerator::callDestTV(const IRInstruction* inst) const {
  if (!inst->numDsts()) return kVoidDest;
  auto loc = dstLoc(0);
  if (loc.isFullSIMD()) {
    return { DestType::SIMD, loc.reg(0), InvalidReg };
  }
  return { DestType::TV, loc.reg(0), loc.reg(1) };
}

CallDest CodeGenerator::callDestDbl(const IRInstruction* inst) const {
  if (!inst->numDsts()) return kVoidDest;
  auto loc = dstLoc(0);
  return { DestType::Dbl, loc.reg(0), loc.reg(1) };
}
//////////////////////////////////////////////////////////////////////

static vixl::Register enregister(vixl::MacroAssembler& a,
                                 vixl::MemOperand memRef,
                                 vixl::Register scratch) {
  a.  Ldr  (scratch, memRef);
  return scratch;
}

static vixl::Register enregister(vixl::MacroAssembler& a,
                                 vixl::Register reg,
                                 vixl::Register scratch) {
  return reg;
}
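// The two enregister overloads let emitTypeTest below take its data operand
// either as a MemOperand (loaded into the scratch register first) or as a
// Register (used directly), without duplicating the test logic.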
template<class Loc, class JmpFn>
void CodeGenerator::emitTypeTest(Type type, vixl::Register typeReg, Loc dataSrc,
                                 JmpFn doJcc) {
  assert(!(type <= Type::Cls));
  assert(typeReg.Is32Bits());

  if (type.equals(Type::Gen)) {
    return;
  }

  // (Assumed structure: each branch picks a condition code that doJcc then
  // consumes, mirroring the x64 emitTypeTest.)
  ConditionCode cc;
  if (type <= Type::Str) {
    // Note: ARM can actually do better here; it has a fused test-and-branch
    // instruction. The way this code is factored makes it difficult to use,
    // though; the jump instruction will be written by some other code.
    m_as.  Tst  (typeReg, KindOfStringBit);
    cc = CC_NZ;
  } else if (type == Type::Null) {
    m_as.  Cmp  (typeReg, KindOfNull);
    cc = CC_LE;
  } else if (type == Type::UncountedInit) {
    m_as.  Tst  (typeReg, KindOfUncountedInitBit);
    cc = CC_NZ;
  } else if (type == Type::Uncounted) {
    m_as.  Cmp  (typeReg, KindOfRefCountThreshold);
    cc = CC_LE;
  } else if (type == Type::Cell) {
    m_as.  Cmp  (typeReg, KindOfRef);
    cc = CC_L;
  } else {
    assert(type.isKnownDataType());
    DataType dataType = type.toDataType();
    assert(dataType == KindOfRef ||
           (dataType >= KindOfUninit && dataType <= KindOfResource));
    m_as.  Cmp  (typeReg, dataType);
    cc = CC_E;
  }
  doJcc(cc);

  if (type < Type::Obj) {
    assert(type.getClass()->attrs() & AttrNoOverride);
    auto dataReg = enregister(m_as, dataSrc, rAsm);
    emitLdLowPtr(m_as, rAsm, dataReg[ObjectData::getVMClassOffset()],
                 sizeof(LowClassPtr));
    emitCmpClass(m_as, rAsm, type.getClass());
    doJcc(CC_E);
  } else if (type < Type::Res) {
    CG_PUNT(TypeTest-on-Resource);
  } else if (type <= Type::Arr && type.hasArrayKind()) {
    auto dataReg = enregister(m_as, dataSrc, rAsm);
    m_as.  Ldrb (rAsm.W(), dataReg[ArrayData::offsetofKind()]);
    m_as.  Cmp  (rAsm.W(), type.getArrayKind());
    doJcc(CC_E);
  }
}
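// Callers hand emitTypeTest a doJcc callback, so the same test logic can emit
// either a guard-style fallback jump (cgGuardLoc, cgGuardStk) or a branch to
// the instruction's taken block (cgCheckStk, cgCheckType).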
void CodeGenerator::cgGuardLoc(IRInstruction* inst) {
  auto const rFP = x2a(srcLoc(0).reg());
  auto const baseOff = localOffset(inst->extra<GuardLoc>()->locId);
  m_as.  Ldrb (rAsm.W(), rFP[baseOff + TVOFF(m_type)]);
  emitTypeTest(
    inst->typeParam(),
    rAsm.W(),
    rFP[baseOff + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      auto const destSK = SrcKey(curFunc(), m_unit.bcOff(), resumed());
      auto const destSR = mcg->tx().getSrcRec(destSK);
      destSR->emitFallbackJump(this->m_mainCode, ccNegate(cc));
    });
}
void CodeGenerator::cgGuardStk(IRInstruction* inst) {
  auto const rSP = x2a(srcLoc(0).reg());
  auto const baseOff = cellsToBytes(inst->extra<GuardStk>()->offset);
  m_as.  Ldrb (rAsm.W(), rSP[baseOff + TVOFF(m_type)]);
  emitTypeTest(
    inst->typeParam(),
    rAsm.W(),
    rSP[baseOff + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      auto const destSK = SrcKey(curFunc(), m_unit.bcOff(), resumed());
      auto const destSR = mcg->tx().getSrcRec(destSK);
      destSR->emitFallbackJump(this->m_mainCode, ccNegate(cc));
    });
}
void CodeGenerator::cgCheckStk(IRInstruction* inst) {
  auto const rSP = x2a(srcLoc(0).reg());
  auto const baseOff = cellsToBytes(inst->extra<CheckStk>()->offset);
  m_as.  Ldrb (rAsm.W(), rSP[baseOff + TVOFF(m_type)]);
  emitTypeTest(
    inst->typeParam(),
    rAsm.W(),
    rSP[baseOff + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      emitJumpToBlock(m_mainCode, inst->taken(), ccNegate(cc), m_state);
    });
}
void CodeGenerator::cgCheckType(IRInstruction* inst) {
  auto const src = inst->src(0);
  Type srcType = src->type();
  auto const rVal = x2a(srcLoc(0).reg(0));
  auto const rType = x2a(srcLoc(0).reg(1));

  auto doMov = [&] {
    auto const valDst = x2a(dstLoc(0).reg(0));
    auto const typeDst = x2a(dstLoc(0).reg(1));
    // TODO: #3626251: XLS: Let Uses say whether a constant is
    // allowed, and if not, assign a register.
    if (valDst.IsValid()) {
      if (rVal.IsValid()) {
        if (!valDst.Is(rVal)) m_as.Mov(valDst, rVal);
      } else {
        if (src->isConst()) m_as.Mov(valDst, src->rawVal());
      }
    }
    if (typeDst.IsValid()) {
      if (rType.IsValid()) {
        if (!typeDst.Is(rType)) m_as.Mov(typeDst, rType);
      } else {
        m_as.Mov(typeDst, srcType.toDataType());
      }
    }
  };

  auto doJcc = [&] (ConditionCode cc) {
    emitJumpToBlock(m_mainCode, inst->taken(), ccNegate(cc), m_state);
  };

  Type typeParam = inst->typeParam();
  if (src->isA(typeParam) ||
      // Boxed types are checked lazily, so there's nothing to be done here.
      (srcType.isBoxed() && typeParam.isBoxed())) {
    doMov();
    return;
  }
  if (srcType.not(typeParam)) {
    emitJumpToBlock(m_mainCode, inst->taken(), CC_None, m_state);
    return;
  }

  if (rType.IsValid()) {
    emitTypeTest(typeParam, rType.W(), rVal, doJcc);
  } else if (typeParam <= Type::Uncounted &&
             ((srcType == Type::Str && typeParam.maybe(Type::StaticStr)) ||
              (srcType == Type::Arr && typeParam.maybe(Type::StaticArr)))) {
    // We carry Str and Arr operands around without a type register,
    // even though they're union types. The static and non-static
    // subtypes are distinguished by the refcount field.
    assert(rVal.IsValid());
    m_as.  Ldr  (rAsm.W(), rVal[FAST_REFCOUNT_OFFSET]);
    m_as.  Cmp  (rAsm, 0);
    doJcc(CC_L);
  } else {
    always_assert_log(false, [&] {
      return folly::format("Bad src: {} and dst: {} types in '{}'",
                           srcType, typeParam, *inst).str();
    });
  }
  doMov();
}
void CodeGenerator::cgSideExitGuardStk(IRInstruction* inst) {
  auto const sp = x2a(srcLoc(0).reg());
  auto const extra = inst->extra<SideExitGuardStk>();

  m_as.  Ldrb (rAsm.W(), sp[cellsToBytes(extra->checkedSlot) + TVOFF(m_type)]);
  emitTypeTest(
    inst->typeParam(),
    rAsm.W(),
    sp[cellsToBytes(extra->checkedSlot) + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      auto const sk = SrcKey(curFunc(), extra->taken, resumed());
      emitBindSideExit(this->m_mainCode, this->m_frozenCode, sk, ccNegate(cc));
    });
}
template <class JmpFn>
void CodeGenerator::emitReffinessTest(IRInstruction* inst, JmpFn doJcc) {
  assert(inst->numSrcs() == 5);

  DEBUG_ONLY SSATmp* nParamsTmp = inst->src(1);
  DEBUG_ONLY SSATmp* firstBitNumTmp = inst->src(2);
  DEBUG_ONLY SSATmp* mask64Tmp = inst->src(3);
  DEBUG_ONLY SSATmp* vals64Tmp = inst->src(4);

  auto funcPtrLoc = srcLoc(0);
  auto nParamsLoc = srcLoc(1);
  auto mask64Loc = srcLoc(3);
  auto vals64Loc = srcLoc(4);

  // Get values in place
  auto funcPtrReg = x2a(funcPtrLoc.reg());
  assert(funcPtrReg.IsValid());

  auto nParamsReg = x2a(nParamsLoc.reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  auto firstBitNum = static_cast<uint32_t>(firstBitNumTmp->intVal());
  auto mask64Reg = x2a(mask64Loc.reg());
  uint64_t mask64 = mask64Tmp->intVal();
  assert(mask64Reg.IsValid() || mask64 == uint32_t(mask64));

  auto vals64Reg = x2a(vals64Loc.reg());
  uint64_t vals64 = vals64Tmp->intVal();
  assert(vals64Reg.IsValid() || vals64 == uint32_t(vals64));
  assert((vals64 & mask64) == vals64);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;  // (assumed; consumed by doJcc below)
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.  Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    // Don't need the bits pointer after this point
    auto bitsReg = rAsm;
    m_as.  Ldr  (bitsReg, bitsPtrReg[bitsOff]);

    // Mask the bits. There are restrictions on what can be encoded as an
    // immediate in ARM's logical instructions, and if they're not met,
    // the assembler will compensate using ip0 or ip1 as tmps.
    if (mask64Reg.IsValid()) {
      m_as.  And  (bitsReg, bitsReg, mask64Reg);
    } else {
      m_as.  And  (bitsReg, bitsReg, mask64);
    }

    // Now do the compare. There are also restrictions on immediates in
    // arithmetic instructions (of which Cmp is one; it's just a subtract that
    // sets flags), so same deal as with the mask immediate above.
    if (vals64Reg.IsValid()) {
      m_as.  Cmp  (bitsReg, vals64Reg);
    } else {
      m_as.  Cmp  (bitsReg, vals64);
    }
    doJcc(cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.  Cmp  (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // If we're beyond nParams, then either all params
      // are refs, or all params are non-refs, so if vals64
      // isn't 0 and isn't mask64, there's no possibility of
      // a match.
      doJcc(CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
        // If not special builtin...
        m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
        m_as.  Tst  (rAsm, AttrVariadicByRef);
        doJcc(vals64 ? CC_Z : CC_NZ);
      });
    }
  }
}
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  emitReffinessTest(inst,
    [&](ConditionCode cc) {
      auto const destSK = SrcKey(curFunc(), inst->marker().bcOff(), resumed());
      auto const destSR = mcg->tx().getSrcRec(destSK);
      destSR->emitFallbackJump(m_mainCode, cc);
    });
}
void CodeGenerator::cgCheckRefs(IRInstruction* inst) {
  emitReffinessTest(inst,
    [&](ConditionCode cc) {
      emitJumpToBlock(m_mainCode, inst->taken(), cc, m_state);
    });
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgSyncABIRegs(IRInstruction* inst) {
  emitRegGetsRegPlusImm(m_as, rVmFp, x2a(srcLoc(0).reg()), 0);
  emitRegGetsRegPlusImm(m_as, rVmSp, x2a(srcLoc(1).reg()), 0);
}

void CodeGenerator::cgReqBindJmp(IRInstruction* inst) {
  emitBindJmp(
    m_mainCode,
    m_frozenCode,
    SrcKey(curFunc(), inst->extra<ReqBindJmp>()->offset, resumed())
  );
}
void CodeGenerator::cgReqRetranslate(IRInstruction* inst) {
  assert(m_unit.bcOff() == inst->marker().bcOff());
  auto const destSK = SrcKey(curFunc(), m_unit.bcOff(), resumed());
  auto const destSR = mcg->tx().getSrcRec(destSK);
  destSR->emitFallbackJump(m_mainCode);
}
void CodeGenerator::cgSpillFrame(IRInstruction* inst) {
  auto const func = inst->src(1);
  auto const objOrCls = inst->src(2);
  auto const invName = inst->extra<SpillFrame>()->invName;
  auto const nArgs = inst->extra<SpillFrame>()->numArgs;

  auto spReg = x2a(srcLoc(0).reg());
  auto funcLoc = srcLoc(1);
  auto objClsReg = x2a(srcLoc(2).reg());
  auto spOff = -kNumActRecCells * sizeof(Cell);

  // Num args. Careful here: nArgs is 32 bits and the high bit may be set. Mov's
  // immediate argument is intptr_t, and the implicit int32->intptr conversion
  // will sign-extend, which isn't what we want.
  m_as.  Mov  (rAsm.W(), (uint32_t)nArgs);
  m_as.  Str  (rAsm.W(), spReg[spOff + AROFF(m_numArgsAndFlags)]);

  // Magic-call invocation name.
  if (invName) {
    auto bits = reinterpret_cast<uintptr_t>(invName) | ActRec::kInvNameBit;
    m_as.  Mov  (rAsm, bits);
    m_as.  Str  (rAsm, spReg[spOff + AROFF(m_invName)]);
  } else {
    m_as.  Str  (vixl::xzr, spReg[spOff + AROFF(m_invName)]);
  }

  // Func and this/class are slightly tricky. The func may be a tuple of a Func*
  // and a this/class pointer; the this/class slot may hold an object, a class
  // (tagged in its low bit), or nullptr.
  if (objOrCls->isA(Type::Cls)) {
    if (objOrCls->isConst()) {
      m_as.  Mov  (rAsm, uintptr_t(objOrCls->clsVal()) | 1);
      m_as.  Str  (rAsm, spReg[spOff + AROFF(m_this)]);
    } else {
      m_as.  Orr  (rAsm, objClsReg, 1);
      m_as.  Str  (rAsm, spReg[spOff + AROFF(m_this)]);
    }
  } else if (objOrCls->isA(Type::Obj) || objOrCls->isA(Type::Ctx)) {
    m_as.  Str  (objClsReg, spReg[spOff + AROFF(m_this)]);
  } else {
    assert(objOrCls->isA(Type::Nullptr));
    m_as.  Str  (vixl::xzr, spReg[spOff + AROFF(m_this)]);
  }

  // Now set func, and possibly this/cls
  if (!func->isA(Type::Nullptr)) {
    auto reg0 = x2a(funcLoc.reg(0));
    m_as.  Str  (reg0, spReg[spOff + AROFF(m_func)]);
  }

  // Adjust stack pointer
  emitRegGetsRegPlusImm(m_as, x2a(dstLoc(0).reg()), spReg, spOff);
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgCallBuiltin(IRInstruction* inst) {
  auto const func = inst->extra<CallBuiltinData>()->callee;
  auto const numArgs = func->numParams();
  auto const funcReturnType = func->returnType();
  int returnOffset = MISOFF(tvBuiltinReturn);

  if (FixupMap::eagerRecord(func)) {
    // Save VM registers
    auto const* pc = curFunc()->unit()->entry() + m_curInst->marker().bcOff();
    m_as.  Str  (rVmFp, rVmTl[RDS::kVmfpOff]);
    m_as.  Str  (rVmSp, rVmTl[RDS::kVmspOff]);
    m_as.  Mov  (rAsm, pc);
    m_as.  Str  (rAsm, rVmTl[RDS::kVmpcOff]);
  }

  // The stack pointer currently points to the MInstrState we need to use.
  auto misReg = rAsm;
  m_as.  Mov  (rAsm, vixl::sp);

  auto callArgs = argGroup();
  if (isCppByRef(funcReturnType)) {
    if (isSmartPtrRef(funcReturnType)) {
      // first arg is pointer to storage for the return value
      returnOffset += TVOFF(m_data);
    }
    callArgs.addr(misReg, returnOffset);
  }

  auto srcNum = uint32_t{0};
  if (func->isMethod()) {
    callArgs.ssa(srcNum);
    ++srcNum;
  }
  for (auto i = uint32_t{0}; i < numArgs; ++i, ++srcNum) {
    auto const& pi = func->params()[i];
    if (TVOFF(m_data) && isSmartPtrRef(pi.builtinType)) {
      callArgs.addr(srcLoc(srcNum).reg(), TVOFF(m_data));
    } else {
      callArgs.ssa(srcNum);
    }
  }

  auto dloc = dstLoc(0);
  auto dstReg = x2a(dloc.reg(0));
  auto dstTypeReg = x2a(dloc.reg(1));

  cgCallHelper(m_as,
               CppCall::direct(func->nativeFuncPtr()),
               isCppByRef(funcReturnType) ? kVoidDest : callDest(dstReg),
               SyncOptions::kSyncPoint,
               callArgs);

  auto returnType = inst->typeParam();
  if (!dstReg.IsValid() || returnType.isSimpleType()) {
    return;
  }

  if (returnType.isReferenceType()) {
    assert(isCppByRef(funcReturnType) && isSmartPtrRef(funcReturnType));
    vixl::Label notNullptr;
    vixl::Label done;
    m_as.  Ldr  (dstReg, misReg[returnOffset + TVOFF(m_data)]);
    m_as.  Cbnz (dstReg, &notNullptr);
    m_as.  Mov  (dstTypeReg, KindOfNull);
    m_as.  B    (&done);
    m_as.  bind (&notNullptr);
    m_as.  Mov  (dstTypeReg, returnType.toDataType());
    m_as.  bind (&done);
    return;
  }

  if (returnType <= Type::Cell || returnType <= Type::BoxedCell) {
    assert(isCppByRef(funcReturnType) && !isSmartPtrRef(funcReturnType));
    vixl::Label notUninit;
    vixl::Label done;
    m_as.  Ldrb (dstTypeReg.W(), misReg[returnOffset + TVOFF(m_type)]);
    m_as.  Cbnz (dstTypeReg, &notUninit);
    m_as.  Mov  (dstTypeReg, KindOfNull);
    m_as.  B    (&done);
    m_as.  bind (&notUninit);
    m_as.  Ldr  (dstReg, misReg[returnOffset + TVOFF(m_data)]);
    m_as.  bind (&done);
    return;
  }

  always_assert(false);
}
void CodeGenerator::cgCall(IRInstruction* inst) {
  auto const extra = inst->extra<Call>();
  auto const rSP = x2a(srcLoc(0).reg());
  auto const rFP = x2a(srcLoc(1).reg());
  auto& a = m_as;

  auto const ar = extra->numParams * sizeof(TypedValue);
  a.    Str  (rFP, rSP[ar + AROFF(m_sfp)]);
  a.    Mov  (rAsm.W(), extra->after);
  a.    Str  (rAsm.W(), rSP[ar + AROFF(m_soff)]);

  auto const srcKey = m_curInst->marker().sk();
  // (Assumed emitBindCall argument order: code blocks, source key, callee,
  // and argument count -- the original lines were elided.)
  auto const adjust = emitBindCall(m_mainCode,
                                   m_coldCode,
                                   m_frozenCode,
                                   srcKey,
                                   extra->callee,
                                   extra->numParams);
  assert(dstLoc(0).reg() == rVmSp);
  emitRegGetsRegPlusImm(a, rVmSp, rVmSp, adjust);
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgBeginCatch(IRInstruction* inst) {
  auto const& info = m_state.catches[inst->block()];
  assert(info.afterCall);

  mcg->registerCatchBlock(info.afterCall, m_as.frontier());

  assert(info.rspOffset == 0);
  RegSaver regSaver(info.savedRegs);
  regSaver.emitPops(m_as);
}
static void unwindResumeHelper() {
  // We don't have this sorted out for native mode yet
  always_assert(RuntimeOption::EvalSimulateARM);

  tl_regState = VMRegState::CLEAN;
  g_context->m_activeSims.back()->resume_last_exception();
}

void CodeGenerator::cgEndCatch(IRInstruction* inst) {
  emitCall(m_as, CppCall::direct(unwindResumeHelper));
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::emitLoadTypedValue(PhysLoc dst,
                                       vixl::Register base,
                                       ptrdiff_t offset,
                                       Block* label) {
  if (label) not_implemented();
  if (dst.reg(0).isSIMD()) {
    not_implemented();
  }

  auto valueDstReg = x2a(dst.reg(0));
  auto typeDstReg = x2a(dst.reg(1));

  // Avoid clobbering the base reg if we'll need it later
  if (base.Is(typeDstReg) && valueDstReg.IsValid()) {
    m_as.  Mov  (rAsm, base);
    base = rAsm;
  }

  if (typeDstReg.IsValid()) {
    m_as.  Ldrb (typeDstReg.W(), base[offset + TVOFF(m_type)]);
  }

  if (valueDstReg.IsValid()) {
    m_as.  Ldr  (valueDstReg, base[offset + TVOFF(m_data)]);
  }
}
void CodeGenerator::emitStoreTypedValue(vixl::Register base,
                                        ptrdiff_t offset,
                                        PhysLoc src) {
  assert(src.numWords() == 2);
  auto reg0 = x2a(src.reg(0));
  auto reg1 = x2a(src.reg(1));
  m_as.  Str  (reg0, base[offset + TVOFF(m_data)]);
  m_as.  Strb (reg1.W(), base[offset + TVOFF(m_type)]);
}
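// Layout assumption made explicit: a TypedValue keeps m_data in a 64-bit word
// and m_type in a single byte, hence the Str/Strb pair above and the matching
// Ldr/Ldrb pair in emitLoadTypedValue.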
void CodeGenerator::emitLoad(Type type, PhysLoc dstLoc,
                             vixl::Register base,
                             ptrdiff_t offset,
                             Block* label /* = nullptr */) {
  if (type.needsReg()) {
    return emitLoadTypedValue(dstLoc, base, offset, label);
  }
  if (label) not_implemented();
  if (type <= Type::Null) return;

  auto dst = dstLoc.reg();
  if (dst == InvalidReg) return; // nothing to load.

  if (dst.isGP()) {
    m_as.  Ldr  (x2a(dst), base[offset + TVOFF(m_data)]);
  } else {
    assert(type <= Type::Dbl);
    m_as.  Ldr  (x2simd(dstLoc.reg()), base[offset + TVOFF(m_data)]);
  }
}
void CodeGenerator::emitStore(vixl::Register base,
                              ptrdiff_t offset,
                              SSATmp* src, PhysLoc srcLoc,
                              bool genStoreType /* = true */) {
  auto type = src->type();
  if (type.needsReg()) {
    return emitStoreTypedValue(base, offset, srcLoc);
  }
  if (genStoreType) {
    auto dt = type.toDataType();
    if (dt == KindOfUninit) {
      static_assert(KindOfUninit == 0, "zero register hack");
      m_as.  Strb (vixl::wzr, base[offset + TVOFF(m_type)]);
    } else {
      m_as.  Mov  (rAsm, dt);
      m_as.  Strb (rAsm.W(), base[offset + TVOFF(m_type)]);
    }
  }
  if (type <= Type::Null) {
    return;
  }

  if (srcLoc.reg().isGP()) {
    auto reg = x2a(srcLoc.reg());
    assert(reg.IsValid());
    if (src->isA(Type::Bool)) {
      m_as.  Uxtb (reg.W(), reg.W());
    }
    m_as.  Str  (x2a(srcLoc.reg()), base[offset + TVOFF(m_data)]);
  } else {
    assert(type <= Type::Dbl);
    m_as.  Str  (x2simd(srcLoc.reg()), base[offset + TVOFF(m_data)]);
  }
}
void CodeGenerator::cgLdLoc(IRInstruction* inst) {
  auto baseReg = x2a(srcLoc(0).reg());
  auto offset = localOffset(inst->extra<LdLoc>()->locId);
  emitLoad(inst->dst()->type(), dstLoc(0), baseReg, offset);
}

void CodeGenerator::cgStLocWork(IRInstruction* inst) {
  auto baseReg = x2a(srcLoc(0).reg());
  auto offset = localOffset(inst->extra<LocalId>()->locId);
  emitStore(baseReg, offset, inst->src(1), srcLoc(1), true /* store type */);
}

void CodeGenerator::cgStLoc(IRInstruction* inst) { cgStLocWork(inst); }
void CodeGenerator::cgStGbl(IRInstruction* inst) { cgStLocWork(inst); }

void CodeGenerator::cgLdStack(IRInstruction* inst) {
  assert(inst->taken() == nullptr);
  auto srcReg = x2a(srcLoc(0).reg());
  auto offset = cellsToBytes(inst->extra<LdStack>()->offset);
  emitLoad(inst->dst()->type(), dstLoc(0), srcReg, offset);
}
void CodeGenerator::emitLdRaw(IRInstruction* inst, size_t extraOff) {
  auto destReg = x2a(dstLoc(0).reg());
  auto offset  = inst->extra<RawMemData>()->info().offset;
  auto src     = x2a(srcLoc(0).reg())[offset + extraOff];

  switch (inst->extra<RawMemData>()->info().size) {
    case sz::byte:  m_as.  Ldrb  (destReg.W(), src); break;
    case sz::dword: m_as.  Ldr   (destReg.W(), src); break;
    case sz::qword: m_as.  Ldr   (destReg, src); break;
    default:        not_implemented();
  }
}

void CodeGenerator::cgLdRaw(IRInstruction* inst) {
  emitLdRaw(inst, 0);
}

void CodeGenerator::cgLdContArRaw(IRInstruction* inst) {
  emitLdRaw(inst, -c_Generator::arOff());
}

void CodeGenerator::cgLdARFuncPtr(IRInstruction* inst) {
  auto dstReg  = x2a(dstLoc(0).reg());
  auto baseReg = x2a(srcLoc(0).reg());
  auto offset  = inst->src(1)->intVal();
  m_as.  Ldr  (dstReg, baseReg[offset + AROFF(m_func)]);
}
void CodeGenerator::cgLdFuncCached(IRInstruction* inst) {
  auto dstReg = x2a(dstLoc(0).reg());
  auto const name = inst->extra<LdFuncCachedData>()->name;
  auto const ch = NamedEntity::get(name)->getFuncHandle();
  vixl::Label noLookup;

  if (!dstReg.IsValid()) {
    m_as.  Ldr  (rAsm, rVmTl[ch]);
    dstReg = rAsm;
  } else {
    m_as.  Ldr  (dstReg, rVmTl[ch]);
  }
  m_as.  Cbnz (dstReg, &noLookup);

  const Func* (*const func)(const StringData*) = lookupUnknownFunc;
  cgCallHelper(
    m_as,
    CppCall::direct(func),
    callDest(inst),
    SyncOptions::kSyncPoint,
    argGroup().immPtr(inst->extra<LdFuncCached>()->name)
  );

  m_as.  bind (&noLookup);
}
void CodeGenerator::cgLdStackAddr(IRInstruction* inst) {
  auto const dstReg = x2a(dstLoc(0).reg());
  auto const baseReg = x2a(srcLoc(0).reg());
  auto const offset = cellsToBytes(inst->extra<LdStackAddr>()->offset);
  emitRegGetsRegPlusImm(m_as, dstReg, baseReg, offset);
}
void CodeGenerator::cgSpillStack(IRInstruction* inst) {
  // TODO(2966414): so much of this logic could be shared. The opcode itself
  // should probably be broken up.
  auto const spDeficit = inst->src(1)->intVal();
  auto const spillVals = inst->srcs().subpiece(2);
  auto const numSpillSrcs = spillVals.size();
  auto const dstReg = x2a(dstLoc(0).reg());
  auto const spReg = x2a(srcLoc(0).reg());
  auto const spillCells = spillValueCells(inst);

  int64_t adjustment = (spDeficit - spillCells) * sizeof(Cell);
  for (uint32_t i = 0; i < numSpillSrcs; ++i) {
    const int64_t offset = i * sizeof(Cell) + adjustment;
    emitStore(spReg, offset, spillVals[i], srcLoc(i + 2));
  }
  emitRegGetsRegPlusImm(m_as, dstReg, spReg, adjustment);
}
void CodeGenerator::cgInterpOneCommon(IRInstruction* inst) {
  auto pcOff = inst->extra<InterpOneData>()->bcOff;

  auto opc = *(curFunc()->unit()->at(pcOff));
  auto* interpOneHelper = interpOneEntryPoints[opc];

  cgCallHelper(m_as,
               CppCall::direct(reinterpret_cast<void (*)()>(interpOneHelper)),
               callDest(InvalidReg),
               SyncOptions::kSyncPoint,
               argGroup().ssa(1/*fp*/).ssa(0/*sp*/).imm(pcOff));
}
void CodeGenerator::cgInterpOne(IRInstruction* inst) {
  cgInterpOneCommon(inst);

  auto const& extra = *inst->extra<InterpOne>();
  auto newSpReg = x2a(dstLoc(0).reg());

  auto spAdjustBytes = cellsToBytes(extra.cellsPopped - extra.cellsPushed);
  emitRegGetsRegPlusImm(m_as, newSpReg, newSpReg, spAdjustBytes);
}
void CodeGenerator::cgInterpOneCF(IRInstruction* inst) {
  cgInterpOneCommon(inst);

  m_as.  Ldr  (rVmFp, rVmTl[RDS::kVmfpOff]);
  m_as.  Ldr  (rVmSp, rVmTl[RDS::kVmspOff]);

  emitServiceReq(m_mainCode, REQ_RESUME);
}
void CodeGenerator::cgLdClsName(IRInstruction* inst) {
  auto const dstReg = x2a(dstLoc(0).reg());
  auto const srcReg = x2a(srcLoc(0).reg());

  m_as.  Ldr  (dstReg, srcReg[Class::preClassOff()]);
  m_as.  Ldr  (dstReg, dstReg[PreClass::nameOffset()]);
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgCountArrayFast(IRInstruction* inst) {
  auto const baseReg = x2a(srcLoc(0).reg());
  auto const dstReg = x2a(dstLoc(0).reg());
  m_as.  Ldr  (dstReg.W(), baseReg[ArrayData::offsetofSize()]);
}

void CodeGenerator::cgCountCollection(IRInstruction* inst) {
  auto const baseReg = x2a(srcLoc(0).reg());
  auto const dstReg = x2a(dstLoc(0).reg());
  m_as.  Ldr  (dstReg.W(), baseReg[FAST_COLLECTION_SIZE_OFFSET]);
}
//////////////////////////////////////////////////////////////////////

void CodeGenerator::cgInst(IRInstruction* inst) {
  Opcode opc = inst->op();
  m_curInst = inst;
  SCOPE_EXIT { m_curInst = nullptr; };

  switch (opc) {
#define O(name, dsts, srcs, flags)                              \
    case name: FTRACE(7, "cg" #name "\n");                      \
      cg ## name (inst);                                        \
      break;
    IR_OPCODES
#undef O
  default:
    always_assert(false);
  }
}

}}}