[llvm.git] / lib / CodeGen / SelectionDAG / FastISel.cpp

//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

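// Illustrative note (added, not in the original source): for IR such as
//   %a = add i32 %x, %y
//   %b = mul i32 %a, %z    ; same block, sole use of %a
// hasTrivialKill(%a) returns true, so the register holding %a can be marked
// killed at the mul. Values defined by BitCast/PtrToInt/IntToPtr are
// excluded because fast-isel may coalesce such no-op casts with their
// operand, in which case the underlying register outlives the cast's use.
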
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end()) {
    unsigned Reg = I->second;
    return Reg;
  }
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

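// Note (added for clarity): the two-level lookup above is the caching scheme
// used throughout this file. Instructions land in FuncInfo.ValueMap and
// persist across blocks, since SSA guarantees their defs dominate their uses;
// constants and other materializations land in LocalValueMap, which
// startNewBlock() clears, because they are only known to dominate uses
// within the current block.
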
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }

  return Reg;
}

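// Worked example (added, illustrative): materializing the FP constant 2.0
// when FastEmit_f fails: convertToInteger produces the exact integer 2,
// which is emitted via ISD::Constant and converted back with
// ISD::SINT_TO_FP. An inexact value such as 0.1 is rejected by the isExact
// test and falls through to TargetMaterializeConstant, typically a
// constant-pool load.
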
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;

    AssignedReg = Reg;
  }

  return AssignedReg;
}

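// Note (added for clarity): RegFixups defers the rewrite. Uses of the old
// register that were emitted before this definition was selected get patched
// to the new register after selection finishes, rather than being rewritten
// here on the spot.
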
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

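// Note (added for clarity): the "local value area" is the run of
// materialization instructions kept at the top of the block, after any
// EH_LABELs. enterLocalValueArea moves the insert point there and clears DL
// so the spliced-in instructions carry no source location;
// leaveLocalValueArea records the new end of the area and restores both the
// insert point and DL.
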
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill,
                                     CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

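// Worked example (added, illustrative): for "%y = add i32 %x, 7" the code
// above first tries FastEmit_ri (add reg, imm). If the target provides no
// such pattern, control falls through, 7 is materialized into a register
// with getRegForValue, and the reg-reg form is tried via FastEmit_rr; if
// that also fails, selection bails to SelectionDAG.
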
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

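// Worked example (added, illustrative): for
//   %p = getelementptr {i32, i32}* %s, i32 1, i32 1
// the loop first handles the array index (1 * TD.getTypeAllocSize(struct),
// i.e. 8 bytes, one ISD::ADD via FastEmit_ri_), then the struct field
// (offset 4 from the struct layout, a second ISD::ADD), which is exactly
// the folding opportunity the FIXME above mentions.
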
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(FuncInfo.MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (FuncInfo.MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);

      bool ResultRegIsKill = hasTrivialKill(I);

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg, ResultRegIsKill);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

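// Note (added for clarity): the debug intrinsics above return true even when
// they decide to drop the information, since returning false would punt the
// whole call to the SelectionDAG path only to reach the same outcome more
// slowly.
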
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

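// Illustrative (added): for "%e = zext i1 %c to i32", SrcVT is MVT::i1, so
// the operand is first promoted and masked with FastEmitZExtFromI1 (an AND
// with 1; see the helper near the end of this file), and the zero-extend
// then proceeds on the promoted type.
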
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

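// Illustrative (added): for a 64-bit double with no ISD::FNEG available,
// the fallback above emits a bitcast to i64, an xor with 1<<63 (flipping
// only the IEEE sign bit), and a bitcast back: the classic sign-flip idiom.
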
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

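// Note (added for clarity): the FastEmit_* methods below are the hooks that
// target-specific, tablegen-generated code overrides. These base-class
// versions simply return 0, meaning "no register": the target provided no
// pattern for the requested opcode/type combination.
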
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

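// Usage sketch (added, illustrative): SelectGetElementPtr above calls
//   FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT)
// so a small constant offset becomes an add-reg-imm when the target has
// one, and otherwise degrades gracefully to materialize-plus-add-reg-reg.
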
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

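// Note (added for clarity; applies to all FastEmitInst_* variants below):
// when the instruction description declares no explicit def, the result is
// assumed to appear in the instruction's first implicit def, typically a
// fixed physical register, so a COPY into the fresh virtual register is
// emitted after the instruction itself.
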
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}

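// Illustrative (added): given a successor PHI
//   %r = phi i32 [ 7, %this ], [ %v, %other ]
// reached from block %this, the loop above materializes 7 into a virtual
// register and records the (machine PHI, register) pair in
// PHINodesToUpdate; the machine PHI's incoming operands are filled in
// later, once machine basic blocks exist for every predecessor.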