Silence some -Asserts uninitialized variable warnings.
lib/CodeGen/SelectionDAG/FastISel.cpp
//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
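// As an illustration of the constant folding mentioned above: for IR such as
// "%sum = add i32 %x, 42", fast-isel can fold the 42 directly into the
// instruction's immediate field (e.g. "addl $42, %reg" on x86) rather than
// materializing the constant into a register first.
//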
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
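  // For example, in:
  //   %x = add i32 %a, %b
  //   %y = mul i32 %x, %c    ; sole use of %x, in the same block
  // the register holding %x can be marked killed at its use in the mul.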
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end()) {
    unsigned Reg = I->second;
    return Reg;
  }
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;

    AssignedReg = Reg;
  }

  return AssignedReg;
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
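  // For example, an i32 index on a 64-bit target is sign-extended to the
  // 64-bit pointer width before taking part in the address arithmetic.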
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill,
                                     CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
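      // For example, for "getelementptr i32* %p, i64 %i" the element size
      // is 4 bytes, so the address computed below is %p + %i * 4.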
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address)
      return true;
    if (isa<UndefValue>(Address))
      return true;
    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI)
      // Building the map above is target independent. Generating DBG_VALUE
      // inline is target dependent; do this now.
      (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(FuncInfo.MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (FuncInfo.MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);

      bool ResultRegIsKill = hasTrivialKill(I);

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg, ResultRegIsKill);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
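  // For a 64-bit double, for example, this XORs the value with
  // 0x8000000000000000, flipping bit 63, the IEEE-754 sign bit.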
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

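// The FastEmit_* methods below are the hooks through which targets emit
// individual instructions. These default implementations return 0, meaning
// "not handled"; targets override the ones they support.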
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
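/// For example, SelectGetElementPtr uses this to add a struct field offset:
///   N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);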
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

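// The FastEmitInst_* routines below build a machine instruction with the
// given operands. When the instruction description declares no explicit
// defs (i.e. the result lands in an implicit physical-register def), the
// result is copied out of II.ImplicitDefs[0] into a fresh virtual register.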
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}