[X86] LowerFunnelShift - use modulo constant shift amount.
[llvm-core.git] / lib / Target / X86 / X86ISelLowering.cpp
blob 65a72fea3796f1863c4e842183edc5f52855e01c
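The commit subject refers to the modulo semantics of ISD::FSHL/FSHR: the funnel-shift amount is interpreted modulo the bit width, so a constant amount can be reduced before the X86 lowering picks an SHLD/SHRD immediate. A minimal reference sketch of those semantics follows; the helper name is hypothetical and this is not the LowerFunnelShift code from the file below.

#include <cstdint>

// Reference behavior of a 32-bit fshl with a constant shift amount.
// The amount is reduced modulo the bit width, matching ISD::FSHL.
static uint32_t fshl32(uint32_t Hi, uint32_t Lo, uint32_t Amt) {
  Amt %= 32;                 // modulo reduction of the constant amount
  if (Amt == 0)
    return Hi;               // avoid shifting by the full bit width
  return (Hi << Amt) | (Lo >> (32 - Amt));
}

With the reduction done up front, an out-of-range constant such as 40 behaves exactly like 8 for a 32-bit funnel shift.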
1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that X86 uses to lower LLVM code into a
10 // selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "X86ISelLowering.h"
15 #include "Utils/X86ShuffleDecode.h"
16 #include "X86CallingConv.h"
17 #include "X86FrameLowering.h"
18 #include "X86InstrBuilder.h"
19 #include "X86IntrinsicsInfo.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/CodeGen/IntrinsicLowering.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/TargetLowering.h"
37 #include "llvm/CodeGen/WinEHFuncInfo.h"
38 #include "llvm/IR/CallSite.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/GlobalAlias.h"
45 #include "llvm/IR/GlobalVariable.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/MC/MCAsmInfo.h"
49 #include "llvm/MC/MCContext.h"
50 #include "llvm/MC/MCExpr.h"
51 #include "llvm/MC/MCSymbol.h"
52 #include "llvm/Support/CommandLine.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/KnownBits.h"
56 #include "llvm/Support/MathExtras.h"
57 #include "llvm/Target/TargetOptions.h"
58 #include <algorithm>
59 #include <bitset>
60 #include <cctype>
61 #include <numeric>
62 using namespace llvm;
64 #define DEBUG_TYPE "x86-isel"
66 STATISTIC(NumTailCalls, "Number of tail calls");
68 static cl::opt<bool> ExperimentalVectorWideningLegalization(
69 "x86-experimental-vector-widening-legalization", cl::init(false),
70 cl::desc("Enable an experimental vector type legalization through widening "
71 "rather than promotion."),
72 cl::Hidden);
74 static cl::opt<int> ExperimentalPrefLoopAlignment(
75 "x86-experimental-pref-loop-alignment", cl::init(4),
76 cl::desc("Sets the preferable loop alignment for experiments "
77 "(the last x86-experimental-pref-loop-alignment bits"
78 " of the loop header PC will be 0)."),
79 cl::Hidden);
81 static cl::opt<bool> MulConstantOptimization(
82 "mul-constant-optimization", cl::init(true),
83 cl::desc("Replace 'mul x, Const' with more effective instructions like "
84 "SHIFT, LEA, etc."),
85 cl::Hidden);
87 /// Call this when the user attempts to do something unsupported, like
88 /// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
89 /// report_fatal_error, so calling code should attempt to recover without
90 /// crashing.
91 static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
92 const char *Msg) {
93 MachineFunction &MF = DAG.getMachineFunction();
94 DAG.getContext()->diagnose(
95 DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
96 }
98 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
99 const X86Subtarget &STI)
100 : TargetLowering(TM), Subtarget(STI) {
101 bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
102 X86ScalarSSEf64 = Subtarget.hasSSE2();
103 X86ScalarSSEf32 = Subtarget.hasSSE1();
104 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
106 // Set up the TargetLowering object.
108 // X86 is weird. It always uses i8 for shift amounts and setcc results.
109 setBooleanContents(ZeroOrOneBooleanContent);
110 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
111 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
113 // For 64-bit, since we have so many registers, use the ILP scheduler.
114 // For 32-bit, use the register pressure specific scheduling.
115 // For Atom, always use ILP scheduling.
116 if (Subtarget.isAtom())
117 setSchedulingPreference(Sched::ILP);
118 else if (Subtarget.is64Bit())
119 setSchedulingPreference(Sched::ILP);
120 else
121 setSchedulingPreference(Sched::RegPressure);
122 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
123 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
125 // Bypass expensive divides and use cheaper ones.
126 if (TM.getOptLevel() >= CodeGenOpt::Default) {
127 if (Subtarget.hasSlowDivide32())
128 addBypassSlowDiv(32, 8);
129 if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
130 addBypassSlowDiv(64, 32);
131 }
133 if (Subtarget.isTargetKnownWindowsMSVC() ||
134 Subtarget.isTargetWindowsItanium()) {
135 // Setup Windows compiler runtime calls.
136 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
137 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
138 setLibcallName(RTLIB::SREM_I64, "_allrem");
139 setLibcallName(RTLIB::UREM_I64, "_aullrem");
140 setLibcallName(RTLIB::MUL_I64, "_allmul");
141 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
142 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
143 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
144 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
145 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
146 }
148 if (Subtarget.isTargetDarwin()) {
149 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
150 setUseUnderscoreSetJmp(false);
151 setUseUnderscoreLongJmp(false);
152 } else if (Subtarget.isTargetWindowsGNU()) {
153 // MS runtime is weird: it exports _setjmp, but plain longjmp!
154 setUseUnderscoreSetJmp(true);
155 setUseUnderscoreLongJmp(false);
156 } else {
157 setUseUnderscoreSetJmp(true);
158 setUseUnderscoreLongJmp(true);
159 }
161 // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size to
162 // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
163 // FIXME: Should we be limiting the atomic size on other configs? Default is
164 // 1024.
165 if (!Subtarget.hasCmpxchg8b())
166 setMaxAtomicSizeInBitsSupported(32);
168 // Set up the register classes.
169 addRegisterClass(MVT::i8, &X86::GR8RegClass);
170 addRegisterClass(MVT::i16, &X86::GR16RegClass);
171 addRegisterClass(MVT::i32, &X86::GR32RegClass);
172 if (Subtarget.is64Bit())
173 addRegisterClass(MVT::i64, &X86::GR64RegClass);
175 for (MVT VT : MVT::integer_valuetypes())
176 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
178 // We don't accept any truncstore of integer registers.
179 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
180 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
181 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
182 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
183 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
184 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
186 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
188 // SETOEQ and SETUNE require checking two conditions.
189 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
190 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
191 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
192 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
193 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
194 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
196 // Integer absolute.
197 if (Subtarget.hasCMov()) {
198 setOperationAction(ISD::ABS , MVT::i16 , Custom);
199 setOperationAction(ISD::ABS , MVT::i32 , Custom);
200 }
201 setOperationAction(ISD::ABS , MVT::i64 , Custom);
203 // Funnel shifts.
204 for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
205 setOperationAction(ShiftOp , MVT::i16 , Custom);
206 setOperationAction(ShiftOp , MVT::i32 , Custom);
207 if (Subtarget.is64Bit())
208 setOperationAction(ShiftOp , MVT::i64 , Custom);
209 }
211 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
212 // operation.
213 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
214 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
215 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
217 if (Subtarget.is64Bit()) {
218 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
219 // f32/f64 are legal, f80 is custom.
220 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
221 else
222 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
223 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
224 } else if (!Subtarget.useSoftFloat()) {
225 // We have an algorithm for SSE2->double, and we turn this into a
226 // 64-bit FILD followed by conditional FADD for other targets.
227 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
228 // We have an algorithm for SSE2, and we turn this into a 64-bit
229 // FILD or VCVTUSI2SS/SD for other targets.
230 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
231 } else {
232 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
233 }
235 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
236 // this operation.
237 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
238 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
240 if (!Subtarget.useSoftFloat()) {
241 // SSE has no i16 to fp conversion, only i32.
242 if (X86ScalarSSEf32) {
243 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
244 // f32 and f64 cases are Legal, f80 case is not
245 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
246 } else {
247 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
248 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
249 }
250 } else {
251 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
252 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Expand);
253 }
255 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
256 // this operation.
257 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
258 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
260 if (!Subtarget.useSoftFloat()) {
261 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
262 // are Legal, f80 is custom lowered.
263 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
264 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
266 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
267 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
268 } else {
269 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
270 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Expand);
271 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Expand);
272 }
274 // Handle FP_TO_UINT by promoting the destination to a larger signed
275 // conversion.
276 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
277 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
278 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
280 if (Subtarget.is64Bit()) {
281 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
282 // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
283 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
284 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
285 } else {
286 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
287 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
288 }
289 } else if (!Subtarget.useSoftFloat()) {
290 // Since AVX is a superset of SSE3, only check for SSE here.
291 if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
292 // Expand FP_TO_UINT into a select.
293 // FIXME: We would like to use a Custom expander here eventually to do
294 // the optimal thing for SSE vs. the default expansion in the legalizer.
295 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
296 else
297 // With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
298 // With SSE3 we can use fisttpll to convert to a signed i64; without
299 // SSE, we're stuck with a fistpll.
300 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
302 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
303 }
305 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
306 if (!X86ScalarSSEf64) {
307 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
308 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
309 if (Subtarget.is64Bit()) {
310 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
311 // Without SSE, i64->f64 goes through memory.
312 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
313 }
314 } else if (!Subtarget.is64Bit())
315 setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
317 // Scalar integer divide and remainder are lowered to use operations that
318 // produce two results, to match the available instructions. This exposes
319 // the two-result form to trivial CSE, which is able to combine x/y and x%y
320 // into a single instruction.
322 // Scalar integer multiply-high is also lowered to use two-result
323 // operations, to match the available instructions. However, plain multiply
324 // (low) operations are left as Legal, as there are single-result
325 // instructions for this in x86. Using the two-result multiply instructions
326 // when both high and low results are needed must be arranged by dagcombine.
327 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
328 setOperationAction(ISD::MULHS, VT, Expand);
329 setOperationAction(ISD::MULHU, VT, Expand);
330 setOperationAction(ISD::SDIV, VT, Expand);
331 setOperationAction(ISD::UDIV, VT, Expand);
332 setOperationAction(ISD::SREM, VT, Expand);
333 setOperationAction(ISD::UREM, VT, Expand);
334 }
336 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
337 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
338 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
339 MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
340 setOperationAction(ISD::BR_CC, VT, Expand);
341 setOperationAction(ISD::SELECT_CC, VT, Expand);
342 }
343 if (Subtarget.is64Bit())
344 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
345 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
346 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
347 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
348 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
350 setOperationAction(ISD::FREM , MVT::f32 , Expand);
351 setOperationAction(ISD::FREM , MVT::f64 , Expand);
352 setOperationAction(ISD::FREM , MVT::f80 , Expand);
353 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
355 // Promote the i8 variants and force them on up to i32 which has a shorter
356 // encoding.
357 setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
358 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
359 if (!Subtarget.hasBMI()) {
360 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
361 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
362 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
363 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
364 if (Subtarget.is64Bit()) {
365 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
366 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
367 }
368 }
370 if (Subtarget.hasLZCNT()) {
371 // When promoting the i8 variants, force them to i32 for a shorter
372 // encoding.
373 setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
374 setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
375 } else {
376 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
377 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
378 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
379 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
380 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
381 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
382 if (Subtarget.is64Bit()) {
383 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
384 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
385 }
386 }
388 // Special handling for half-precision floating point conversions.
389 // If we don't have F16C support, then lower half float conversions
390 // into library calls.
391 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
392 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
393 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
394 }
396 // There's never any support for operations beyond MVT::f32.
397 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
398 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
399 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
400 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
402 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
403 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
404 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
405 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
406 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
407 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
409 if (Subtarget.hasPOPCNT()) {
410 setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
411 } else {
412 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
413 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
414 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
415 if (Subtarget.is64Bit())
416 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
417 else
418 setOperationAction(ISD::CTPOP , MVT::i64 , Custom);
419 }
421 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
423 if (!Subtarget.hasMOVBE())
424 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
426 // These should be promoted to a larger select which is supported.
427 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
428 // X86 wants to expand cmov itself.
429 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
430 setOperationAction(ISD::SELECT, VT, Custom);
431 setOperationAction(ISD::SETCC, VT, Custom);
432 }
433 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
434 if (VT == MVT::i64 && !Subtarget.is64Bit())
435 continue;
436 setOperationAction(ISD::SELECT, VT, Custom);
437 setOperationAction(ISD::SETCC, VT, Custom);
438 }
440 // Custom action for SELECT MMX and expand action for SELECT_CC MMX
441 setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
442 setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
444 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
445 // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
446 // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
447 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
448 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
449 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
450 if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
451 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
453 // Darwin ABI issue.
454 for (auto VT : { MVT::i32, MVT::i64 }) {
455 if (VT == MVT::i64 && !Subtarget.is64Bit())
456 continue;
457 setOperationAction(ISD::ConstantPool , VT, Custom);
458 setOperationAction(ISD::JumpTable , VT, Custom);
459 setOperationAction(ISD::GlobalAddress , VT, Custom);
460 setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
461 setOperationAction(ISD::ExternalSymbol , VT, Custom);
462 setOperationAction(ISD::BlockAddress , VT, Custom);
463 }
465 // 64-bit shl, sra, srl (iff 32-bit x86)
466 for (auto VT : { MVT::i32, MVT::i64 }) {
467 if (VT == MVT::i64 && !Subtarget.is64Bit())
468 continue;
469 setOperationAction(ISD::SHL_PARTS, VT, Custom);
470 setOperationAction(ISD::SRA_PARTS, VT, Custom);
471 setOperationAction(ISD::SRL_PARTS, VT, Custom);
472 }
474 if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
475 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
477 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
479 // Expand certain atomics
480 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
481 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
482 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
483 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
484 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
485 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
486 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
487 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
488 }
490 if (!Subtarget.is64Bit())
491 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
493 if (Subtarget.hasCmpxchg16b()) {
494 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
495 }
497 // FIXME - use subtarget debug flags
498 if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
499 !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
500 TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
501 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
502 }
504 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
505 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
507 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
508 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
510 setOperationAction(ISD::TRAP, MVT::Other, Legal);
511 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
513 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
514 setOperationAction(ISD::VASTART , MVT::Other, Custom);
515 setOperationAction(ISD::VAEND , MVT::Other, Expand);
516 bool Is64Bit = Subtarget.is64Bit();
517 setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
518 setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
520 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
521 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
523 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
525 // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
526 setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
527 setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
529 if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
530 // f32 and f64 use SSE.
531 // Set up the FP register classes.
532 addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
533 : &X86::FR32RegClass);
534 addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
535 : &X86::FR64RegClass);
537 // Disable f32->f64 extload as we can only generate this in one instruction
538 // under optsize. So it's easier to pattern match (fpext (load)) for that
539 // case instead of needing to emit 2 instructions for extload in the
540 // non-optsize case.
541 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
543 for (auto VT : { MVT::f32, MVT::f64 }) {
544 // Use ANDPD to simulate FABS.
545 setOperationAction(ISD::FABS, VT, Custom);
547 // Use XORP to simulate FNEG.
548 setOperationAction(ISD::FNEG, VT, Custom);
550 // Use ANDPD and ORPD to simulate FCOPYSIGN.
551 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
553 // These might be better off as horizontal vector ops.
554 setOperationAction(ISD::FADD, VT, Custom);
555 setOperationAction(ISD::FSUB, VT, Custom);
557 // We don't support sin/cos/fmod
558 setOperationAction(ISD::FSIN , VT, Expand);
559 setOperationAction(ISD::FCOS , VT, Expand);
560 setOperationAction(ISD::FSINCOS, VT, Expand);
561 }
563 // Lower this to MOVMSK plus an AND.
564 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
565 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
567 } else if (!useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
568 // Use SSE for f32, x87 for f64.
569 // Set up the FP register classes.
570 addRegisterClass(MVT::f32, &X86::FR32RegClass);
571 if (UseX87)
572 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
574 // Use ANDPS to simulate FABS.
575 setOperationAction(ISD::FABS , MVT::f32, Custom);
577 // Use XORP to simulate FNEG.
578 setOperationAction(ISD::FNEG , MVT::f32, Custom);
580 if (UseX87)
581 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
583 // Use ANDPS and ORPS to simulate FCOPYSIGN.
584 if (UseX87)
585 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
586 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
588 // We don't support sin/cos/fmod
589 setOperationAction(ISD::FSIN , MVT::f32, Expand);
590 setOperationAction(ISD::FCOS , MVT::f32, Expand);
591 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
593 if (UseX87) {
594 // Always expand sin/cos functions even though x87 has an instruction.
595 setOperationAction(ISD::FSIN, MVT::f64, Expand);
596 setOperationAction(ISD::FCOS, MVT::f64, Expand);
597 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
598 }
599 } else if (UseX87) {
600 // f32 and f64 in x87.
601 // Set up the FP register classes.
602 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
603 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
605 for (auto VT : { MVT::f32, MVT::f64 }) {
606 setOperationAction(ISD::UNDEF, VT, Expand);
607 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
609 // Always expand sin/cos functions even though x87 has an instruction.
610 setOperationAction(ISD::FSIN , VT, Expand);
611 setOperationAction(ISD::FCOS , VT, Expand);
612 setOperationAction(ISD::FSINCOS, VT, Expand);
613 }
614 }
616 // Expand FP32 immediates into loads from the stack, save special cases.
617 if (isTypeLegal(MVT::f32)) {
618 if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
619 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
620 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
621 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
622 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
623 } else // SSE immediates.
624 addLegalFPImmediate(APFloat(+0.0f)); // xorps
625 }
626 // Expand FP64 immediates into loads from the stack, save special cases.
627 if (isTypeLegal(MVT::f64)) {
628 if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
629 addLegalFPImmediate(APFloat(+0.0)); // FLD0
630 addLegalFPImmediate(APFloat(+1.0)); // FLD1
631 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
632 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
633 } else // SSE immediates.
634 addLegalFPImmediate(APFloat(+0.0)); // xorpd
635 }
637 // We don't support FMA.
638 setOperationAction(ISD::FMA, MVT::f64, Expand);
639 setOperationAction(ISD::FMA, MVT::f32, Expand);
641 // Long double always uses X87, except f128 in MMX.
642 if (UseX87) {
643 if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
644 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
645 : &X86::VR128RegClass);
646 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
647 setOperationAction(ISD::FABS , MVT::f128, Custom);
648 setOperationAction(ISD::FNEG , MVT::f128, Custom);
649 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
650 }
652 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
653 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
654 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
655 {
656 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
657 addLegalFPImmediate(TmpFlt); // FLD0
658 TmpFlt.changeSign();
659 addLegalFPImmediate(TmpFlt); // FLD0/FCHS
661 bool ignored;
662 APFloat TmpFlt2(+1.0);
663 TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
664 &ignored);
665 addLegalFPImmediate(TmpFlt2); // FLD1
666 TmpFlt2.changeSign();
667 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
668 }
670 // Always expand sin/cos functions even though x87 has an instruction.
671 setOperationAction(ISD::FSIN , MVT::f80, Expand);
672 setOperationAction(ISD::FCOS , MVT::f80, Expand);
673 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
675 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
676 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
677 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
678 setOperationAction(ISD::FRINT, MVT::f80, Expand);
679 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
680 setOperationAction(ISD::FMA, MVT::f80, Expand);
681 setOperationAction(ISD::LROUND, MVT::f80, Expand);
682 setOperationAction(ISD::LLROUND, MVT::f80, Expand);
683 setOperationAction(ISD::LRINT, MVT::f80, Expand);
684 setOperationAction(ISD::LLRINT, MVT::f80, Expand);
685 }
687 // Always use a library call for pow.
688 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
689 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
690 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
692 setOperationAction(ISD::FLOG, MVT::f80, Expand);
693 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
694 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
695 setOperationAction(ISD::FEXP, MVT::f80, Expand);
696 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
697 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
698 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
700 // Some FP actions are always expanded for vector types.
701 for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
702 MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
703 setOperationAction(ISD::FSIN, VT, Expand);
704 setOperationAction(ISD::FSINCOS, VT, Expand);
705 setOperationAction(ISD::FCOS, VT, Expand);
706 setOperationAction(ISD::FREM, VT, Expand);
707 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
708 setOperationAction(ISD::FPOW, VT, Expand);
709 setOperationAction(ISD::FLOG, VT, Expand);
710 setOperationAction(ISD::FLOG2, VT, Expand);
711 setOperationAction(ISD::FLOG10, VT, Expand);
712 setOperationAction(ISD::FEXP, VT, Expand);
713 setOperationAction(ISD::FEXP2, VT, Expand);
714 }
716 // First set operation action for all vector types to either promote
717 // (for widening) or expand (for scalarization). Then we will selectively
718 // turn on ones that can be effectively codegen'd.
719 for (MVT VT : MVT::vector_valuetypes()) {
720 setOperationAction(ISD::SDIV, VT, Expand);
721 setOperationAction(ISD::UDIV, VT, Expand);
722 setOperationAction(ISD::SREM, VT, Expand);
723 setOperationAction(ISD::UREM, VT, Expand);
724 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
725 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
726 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
727 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
728 setOperationAction(ISD::FMA, VT, Expand);
729 setOperationAction(ISD::FFLOOR, VT, Expand);
730 setOperationAction(ISD::FCEIL, VT, Expand);
731 setOperationAction(ISD::FTRUNC, VT, Expand);
732 setOperationAction(ISD::FRINT, VT, Expand);
733 setOperationAction(ISD::FNEARBYINT, VT, Expand);
734 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
735 setOperationAction(ISD::MULHS, VT, Expand);
736 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
737 setOperationAction(ISD::MULHU, VT, Expand);
738 setOperationAction(ISD::SDIVREM, VT, Expand);
739 setOperationAction(ISD::UDIVREM, VT, Expand);
740 setOperationAction(ISD::CTPOP, VT, Expand);
741 setOperationAction(ISD::CTTZ, VT, Expand);
742 setOperationAction(ISD::CTLZ, VT, Expand);
743 setOperationAction(ISD::ROTL, VT, Expand);
744 setOperationAction(ISD::ROTR, VT, Expand);
745 setOperationAction(ISD::BSWAP, VT, Expand);
746 setOperationAction(ISD::SETCC, VT, Expand);
747 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
748 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
749 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
750 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
751 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
752 setOperationAction(ISD::TRUNCATE, VT, Expand);
753 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
754 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
755 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
756 setOperationAction(ISD::SELECT_CC, VT, Expand);
757 for (MVT InnerVT : MVT::vector_valuetypes()) {
758 setTruncStoreAction(InnerVT, VT, Expand);
760 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
761 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
763 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
764 // types, we have to deal with them whether we ask for Expansion or not.
765 // Setting Expand causes its own optimisation problems though, so leave
766 // them legal.
767 if (VT.getVectorElementType() == MVT::i1)
768 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
770 // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
771 // split/scalarized right now.
772 if (VT.getVectorElementType() == MVT::f16)
773 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
774 }
775 }
777 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
778 // with -msoft-float, disable use of MMX as well.
779 if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
780 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
781 // No operations on x86mmx supported, everything uses intrinsics.
782 }
784 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
785 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
786 : &X86::VR128RegClass);
788 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
789 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
790 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
791 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
792 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
793 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
794 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
795 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
796 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
797 }
799 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
800 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
801 : &X86::VR128RegClass);
803 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
804 // registers cannot be used even for integer operations.
805 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
806 : &X86::VR128RegClass);
807 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
808 : &X86::VR128RegClass);
809 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
810 : &X86::VR128RegClass);
811 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
812 : &X86::VR128RegClass);
814 for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
815 MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
816 setOperationAction(ISD::SDIV, VT, Custom);
817 setOperationAction(ISD::SREM, VT, Custom);
818 setOperationAction(ISD::UDIV, VT, Custom);
819 setOperationAction(ISD::UREM, VT, Custom);
820 }
822 setOperationAction(ISD::MUL, MVT::v2i8, Custom);
823 setOperationAction(ISD::MUL, MVT::v2i16, Custom);
824 setOperationAction(ISD::MUL, MVT::v2i32, Custom);
825 setOperationAction(ISD::MUL, MVT::v4i8, Custom);
826 setOperationAction(ISD::MUL, MVT::v4i16, Custom);
827 setOperationAction(ISD::MUL, MVT::v8i8, Custom);
829 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
830 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
831 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
832 setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
833 setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
834 setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
835 setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
836 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
837 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
838 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
839 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
840 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
841 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
843 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
844 setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
845 setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
846 setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
847 setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
848 }
850 setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
851 setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
852 setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
853 setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
854 setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
855 setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
856 setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
857 setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
858 setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
859 setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
860 setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
861 setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
863 if (!ExperimentalVectorWideningLegalization) {
864 // Use widening instead of promotion.
865 for (auto VT : { MVT::v8i8, MVT::v4i8, MVT::v2i8,
866 MVT::v4i16, MVT::v2i16 }) {
867 setOperationAction(ISD::UADDSAT, VT, Custom);
868 setOperationAction(ISD::SADDSAT, VT, Custom);
869 setOperationAction(ISD::USUBSAT, VT, Custom);
870 setOperationAction(ISD::SSUBSAT, VT, Custom);
871 }
872 }
874 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
875 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
876 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
878 // Provide custom widening for v2f32 setcc. This is really for VLX when
879 // setcc result type returns v2i1/v4i1 vector for v2f32/v4f32 leading to
880 // type legalization changing the result type to v4i1 during widening.
881 // It works fine for SSE2 and is probably faster so no need to qualify with
882 // VLX support.
883 setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
885 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
886 setOperationAction(ISD::SETCC, VT, Custom);
887 setOperationAction(ISD::CTPOP, VT, Custom);
888 setOperationAction(ISD::ABS, VT, Custom);
890 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
891 // setcc all the way to isel and prefer SETGT in some isel patterns.
892 setCondCodeAction(ISD::SETLT, VT, Custom);
893 setCondCodeAction(ISD::SETLE, VT, Custom);
894 }
896 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
897 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
898 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
899 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
900 setOperationAction(ISD::VSELECT, VT, Custom);
901 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
902 }
904 // We support custom legalizing of sext and anyext loads for specific
905 // memory vector types which we can load as a scalar (or sequence of
906 // scalars) and extend in-register to a legal 128-bit vector type. For sext
907 // loads these must work with a single scalar load.
908 for (MVT VT : MVT::integer_vector_valuetypes()) {
909 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
910 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
911 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
912 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
913 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
914 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
915 }
917 for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
918 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
919 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
920 setOperationAction(ISD::VSELECT, VT, Custom);
922 if (VT == MVT::v2i64 && !Subtarget.is64Bit())
923 continue;
925 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
926 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
927 }
929 // Custom lower v2i64 and v2f64 selects.
930 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
931 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
932 setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
933 setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
934 setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
936 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
937 setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
938 setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
940 // Custom legalize these to avoid over promotion or custom promotion.
941 setOperationAction(ISD::FP_TO_SINT, MVT::v2i8, Custom);
942 setOperationAction(ISD::FP_TO_SINT, MVT::v4i8, Custom);
943 setOperationAction(ISD::FP_TO_SINT, MVT::v8i8, Custom);
944 setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
945 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
946 setOperationAction(ISD::FP_TO_UINT, MVT::v2i8, Custom);
947 setOperationAction(ISD::FP_TO_UINT, MVT::v4i8, Custom);
948 setOperationAction(ISD::FP_TO_UINT, MVT::v8i8, Custom);
949 setOperationAction(ISD::FP_TO_UINT, MVT::v2i16, Custom);
950 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
952 // By marking FP_TO_SINT v8i16 as Custom, we will trick type legalization into
953 // promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
954 // split again based on the input type, this will cause an AssertSExt i16 to
955 // be emitted instead of an AssertZExt. This will allow packssdw followed by
956 // packuswb to be used to truncate to v8i8. This is necessary since packusdw
957 // isn't available until sse4.1.
958 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
960 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
961 setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
963 setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
965 // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
966 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
968 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
969 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
971 // We want to legalize this to an f64 load rather than an i64 load on
972 // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
973 // store.
974 setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
975 setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
976 setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
977 setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
978 setOperationAction(ISD::STORE, MVT::v2f32, Custom);
979 setOperationAction(ISD::STORE, MVT::v2i32, Custom);
980 setOperationAction(ISD::STORE, MVT::v4i16, Custom);
981 setOperationAction(ISD::STORE, MVT::v8i8, Custom);
983 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
984 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
985 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
986 if (!Subtarget.hasAVX512())
987 setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
989 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
990 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
991 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
993 if (ExperimentalVectorWideningLegalization) {
994 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
996 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
997 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
998 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
999 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
1000 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
1001 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
1002 } else {
1003 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
1004 }
1006 // In the customized shift lowering, the legal v4i32/v2i64 cases
1007 // in AVX2 will be recognized.
1008 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1009 setOperationAction(ISD::SRL, VT, Custom);
1010 setOperationAction(ISD::SHL, VT, Custom);
1011 setOperationAction(ISD::SRA, VT, Custom);
1012 }
1014 setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
1015 setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
1017 // With AVX512, expanding (and promoting the shifts) is better.
1018 if (!Subtarget.hasAVX512())
1019 setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
1020 }
1022 if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1023 setOperationAction(ISD::ABS, MVT::v16i8, Legal);
1024 setOperationAction(ISD::ABS, MVT::v8i16, Legal);
1025 setOperationAction(ISD::ABS, MVT::v4i32, Legal);
1026 setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
1027 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1028 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1029 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1030 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1032 // These might be better off as horizontal vector ops.
1033 setOperationAction(ISD::ADD, MVT::i16, Custom);
1034 setOperationAction(ISD::ADD, MVT::i32, Custom);
1035 setOperationAction(ISD::SUB, MVT::i16, Custom);
1036 setOperationAction(ISD::SUB, MVT::i32, Custom);
1037 }
1039 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1040 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1041 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
1042 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
1043 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
1044 setOperationAction(ISD::FRINT, RoundedTy, Legal);
1045 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
1046 }
1048 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
1049 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
1050 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
1051 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
1052 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
1053 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
1054 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
1055 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
1057 // FIXME: Do we need to handle scalar-to-vector here?
1058 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1060 // We directly match byte blends in the backend as they match the VSELECT
1061 // condition form.
1062 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1064 // SSE41 brings specific instructions for doing vector sign extend even in
1065 // cases where we don't have SRA.
1066 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1067 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1068 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1069 }
1071 if (!ExperimentalVectorWideningLegalization) {
1072 // Avoid narrow result types when widening. The legal types are listed
1073 // in the next loop.
1074 for (MVT VT : MVT::integer_vector_valuetypes()) {
1075 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
1076 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
1077 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
1078 }
1079 }
1081 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1082 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1083 setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
1084 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
1085 if (!ExperimentalVectorWideningLegalization)
1086 setLoadExtAction(LoadExtOp, MVT::v2i32, MVT::v2i8, Legal);
1087 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
1088 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1089 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1090 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1091 }
1093 // i8 vectors are custom because the source register and source
1094 // memory operand types are not the same width.
1095 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1096 }
1098 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1099 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1100 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1101 setOperationAction(ISD::ROTL, VT, Custom);
1103 // XOP can efficiently perform BITREVERSE with VPPERM.
1104 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1105 setOperationAction(ISD::BITREVERSE, VT, Custom);
1107 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1108 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1109 setOperationAction(ISD::BITREVERSE, VT, Custom);
1110 }
1112 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1113 bool HasInt256 = Subtarget.hasInt256();
1115 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1116 : &X86::VR256RegClass);
1117 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1118 : &X86::VR256RegClass);
1119 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1120 : &X86::VR256RegClass);
1121 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1122 : &X86::VR256RegClass);
1123 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1124 : &X86::VR256RegClass);
1125 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1126 : &X86::VR256RegClass);
1128 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1129 setOperationAction(ISD::FFLOOR, VT, Legal);
1130 setOperationAction(ISD::FCEIL, VT, Legal);
1131 setOperationAction(ISD::FTRUNC, VT, Legal);
1132 setOperationAction(ISD::FRINT, VT, Legal);
1133 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1134 setOperationAction(ISD::FNEG, VT, Custom);
1135 setOperationAction(ISD::FABS, VT, Custom);
1136 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1137 }
1139 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1140 // even though v8i16 is a legal type.
1141 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1142 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1143 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1145 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1147 if (!Subtarget.hasAVX512())
1148 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1150 // In the customized shift lowering, the legal v8i32/v4i64 cases
1151 // in AVX2 will be recognized.
1152 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1153 setOperationAction(ISD::SRL, VT, Custom);
1154 setOperationAction(ISD::SHL, VT, Custom);
1155 setOperationAction(ISD::SRA, VT, Custom);
1156 }
1158 // These types need custom splitting if their input is a 128-bit vector.
1159 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1160 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1161 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1162 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1164 setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
1165 setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
1167 // With BWI, expanding (and promoting the shifts) is better.
1168 if (!Subtarget.hasBWI())
1169 setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
1171 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1172 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1173 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1174 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1175 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1176 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1178 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1179 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1180 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1181 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1182 }
1184 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1185 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1186 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1187 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1189 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1190 setOperationAction(ISD::SETCC, VT, Custom);
1191 setOperationAction(ISD::CTPOP, VT, Custom);
1192 setOperationAction(ISD::CTLZ, VT, Custom);
1194 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1195 // setcc all the way to isel and prefer SETGT in some isel patterns.
1196 setCondCodeAction(ISD::SETLT, VT, Custom);
1197 setCondCodeAction(ISD::SETLE, VT, Custom);
1198 }
1200 if (Subtarget.hasAnyFMA()) {
1201 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1202 MVT::v2f64, MVT::v4f64 })
1203 setOperationAction(ISD::FMA, VT, Legal);
1204 }
1206 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1207 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1208 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1209 }
1211 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1212 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1213 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1214 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1216 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1217 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1218 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1219 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1220 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1221 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1223 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1224 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1225 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1226 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1227 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1229 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1230 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1231 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1232 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1233 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1234 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1235 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1236 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1238 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1239 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1240 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1241 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1242 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1243 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1244 }
1246 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1247 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1248 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1249 }
1251 if (HasInt256) {
1252 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1253 // when we have a 256bit-wide blend with immediate.
1254 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1256 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1257 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1258 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1259 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1260 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1261 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1262 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1263 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1264 }
1265 }
1267 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1268 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1269 setOperationAction(ISD::MLOAD, VT, Custom);
1270 setOperationAction(ISD::MSTORE, VT, Legal);
1271 }
1273 // Extract subvector is special because the value type
1274 // (result) is 128-bit but the source is 256-bit wide.
1275 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1276 MVT::v4f32, MVT::v2f64 }) {
1277 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1278 }
1280 // Custom lower several nodes for 256-bit types.
1281 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1282 MVT::v8f32, MVT::v4f64 }) {
1283 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1284 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1285 setOperationAction(ISD::VSELECT, VT, Custom);
1286 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1287 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1288 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1289 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1290 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1291 setOperationAction(ISD::STORE, VT, Custom);
1292 }
1294 if (HasInt256)
1295 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1297 if (HasInt256) {
1298 // Custom legalize 2x32 to get a little better code.
1299 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1300 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1302 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1303 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1304 setOperationAction(ISD::MGATHER, VT, Custom);
1305 }
1306 }
1308 // This block controls legalization of the mask vector sizes that are
1309 // available with AVX512. 512-bit vectors are in a separate block controlled
1310 // by useAVX512Regs.
1311 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1312 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1313 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1314 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1315 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1316 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1318 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1319 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1320 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1322 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1323 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1324 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1325 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1326 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1327 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1329 // There is no byte sized k-register load or store without AVX512DQ.
1330 if (!Subtarget.hasDQI()) {
1331 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1332 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1333 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1334 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1336 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1337 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1338 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1339 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1342 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1343 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1344 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1345 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1346 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1349 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1350 setOperationAction(ISD::ADD, VT, Custom);
1351 setOperationAction(ISD::SUB, VT, Custom);
1352 setOperationAction(ISD::MUL, VT, Custom);
1353 setOperationAction(ISD::SETCC, VT, Custom);
1354 setOperationAction(ISD::SELECT, VT, Custom);
1355 setOperationAction(ISD::TRUNCATE, VT, Custom);
1356 setOperationAction(ISD::UADDSAT, VT, Custom);
1357 setOperationAction(ISD::SADDSAT, VT, Custom);
1358 setOperationAction(ISD::USUBSAT, VT, Custom);
1359 setOperationAction(ISD::SSUBSAT, VT, Custom);
1361 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1362 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1363 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1364 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1365 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1366 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1367 setOperationAction(ISD::VSELECT, VT, Expand);
1370 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1371 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1374 // This block controls legalization for 512-bit operations with 32/64 bit
1375 // elements. 512-bits can be disabled based on prefer-vector-width and
1376 // required-vector-width function attributes.
1377 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1378 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1379 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1380 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1381 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1383 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1384 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1385 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1386 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1387 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1388 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1391 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1392 setOperationAction(ISD::FNEG, VT, Custom);
1393 setOperationAction(ISD::FABS, VT, Custom);
1394 setOperationAction(ISD::FMA, VT, Legal);
1395 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1398 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1399 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
1400 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
1401 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
1402 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1403 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
1404 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
1405 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
1406 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1407 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1409 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1410 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1411 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1412 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1413 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1415 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1416 // to 512-bit rather than use the AVX2 instructions so that we can use
1417 // k-masks.
1418 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1419 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1420 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1421 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1424 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1425 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1426 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1427 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1428 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1429 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1430 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1431 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1433 if (ExperimentalVectorWideningLegalization) {
1434 // Need to custom widen this if we don't have AVX512BW.
1435 setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom);
1436 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom);
1437 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom);
1440 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1441 setOperationAction(ISD::FFLOOR, VT, Legal);
1442 setOperationAction(ISD::FCEIL, VT, Legal);
1443 setOperationAction(ISD::FTRUNC, VT, Legal);
1444 setOperationAction(ISD::FRINT, VT, Legal);
1445 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1447 setOperationAction(ISD::SELECT, VT, Custom);
1450 // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1451 for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1452 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1453 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1456 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1457 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1458 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1459 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1461 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1462 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1464 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1465 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1467 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1468 setOperationAction(ISD::SMAX, VT, Legal);
1469 setOperationAction(ISD::UMAX, VT, Legal);
1470 setOperationAction(ISD::SMIN, VT, Legal);
1471 setOperationAction(ISD::UMIN, VT, Legal);
1472 setOperationAction(ISD::ABS, VT, Legal);
1473 setOperationAction(ISD::SRL, VT, Custom);
1474 setOperationAction(ISD::SHL, VT, Custom);
1475 setOperationAction(ISD::SRA, VT, Custom);
1476 setOperationAction(ISD::CTPOP, VT, Custom);
1477 setOperationAction(ISD::ROTL, VT, Custom);
1478 setOperationAction(ISD::ROTR, VT, Custom);
1479 setOperationAction(ISD::SETCC, VT, Custom);
1480 setOperationAction(ISD::SELECT, VT, Custom);
1482 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1483 // setcc all the way to isel and prefer SETGT in some isel patterns.
1484 setCondCodeAction(ISD::SETLT, VT, Custom);
1485 setCondCodeAction(ISD::SETLE, VT, Custom);
1488 if (Subtarget.hasDQI()) {
1489 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1490 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1491 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1492 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1494 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1497 if (Subtarget.hasCDI()) {
1498 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1499 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1500 setOperationAction(ISD::CTLZ, VT, Legal);
1502 } // Subtarget.hasCDI()
1504 if (Subtarget.hasVPOPCNTDQ()) {
1505 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1506 setOperationAction(ISD::CTPOP, VT, Legal);
1509 // Extract subvector is special because the value type
1510 // (result) is 256-bit but the source is 512-bit wide.
1511 // 128-bit was made Legal under AVX1.
1512 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1513 MVT::v8f32, MVT::v4f64 })
1514 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1516 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1517 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1518 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1519 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1520 setOperationAction(ISD::VSELECT, VT, Custom);
1521 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1522 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1523 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1524 setOperationAction(ISD::MLOAD, VT, Legal);
1525 setOperationAction(ISD::MSTORE, VT, Legal);
1526 setOperationAction(ISD::MGATHER, VT, Custom);
1527 setOperationAction(ISD::MSCATTER, VT, Custom);
1529 // Need to custom split v32i16/v64i8 bitcasts.
1530 if (!Subtarget.hasBWI()) {
1531 setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1532 setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
1535 if (Subtarget.hasVBMI2()) {
1536 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1537 setOperationAction(ISD::FSHL, VT, Custom);
1538 setOperationAction(ISD::FSHR, VT, Custom);
1541 }// has AVX-512
1543 // This block controls legalization for operations that don't have
1544 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1545 // narrower widths.
1546 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1547 // These operations are handled on non-VLX by artificially widening in
1548 // isel patterns.
1549 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1551 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1552 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1553 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1554 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1555 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1557 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1558 setOperationAction(ISD::SMAX, VT, Legal);
1559 setOperationAction(ISD::UMAX, VT, Legal);
1560 setOperationAction(ISD::SMIN, VT, Legal);
1561 setOperationAction(ISD::UMIN, VT, Legal);
1562 setOperationAction(ISD::ABS, VT, Legal);
1565 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1566 setOperationAction(ISD::ROTL, VT, Custom);
1567 setOperationAction(ISD::ROTR, VT, Custom);
1570 // Custom legalize 2x32 to get a little better code.
1571 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1572 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1574 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1575 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1576 setOperationAction(ISD::MSCATTER, VT, Custom);
1578 if (Subtarget.hasDQI()) {
1579 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1580 setOperationAction(ISD::SINT_TO_FP, VT, Legal);
1581 setOperationAction(ISD::UINT_TO_FP, VT, Legal);
1582 setOperationAction(ISD::FP_TO_SINT, VT, Legal);
1583 setOperationAction(ISD::FP_TO_UINT, VT, Legal);
1585 setOperationAction(ISD::MUL, VT, Legal);
1589 if (Subtarget.hasCDI()) {
1590 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1591 setOperationAction(ISD::CTLZ, VT, Legal);
1593 } // Subtarget.hasCDI()
1595 if (Subtarget.hasVPOPCNTDQ()) {
1596 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1597 setOperationAction(ISD::CTPOP, VT, Legal);
1601 // This block controls legalization of v32i1/v64i1, which are available with
1602 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1603 // useBWIRegs.
1604 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1605 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1606 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1608 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1609 setOperationAction(ISD::ADD, VT, Custom);
1610 setOperationAction(ISD::SUB, VT, Custom);
1611 setOperationAction(ISD::MUL, VT, Custom);
1612 setOperationAction(ISD::VSELECT, VT, Expand);
1613 setOperationAction(ISD::UADDSAT, VT, Custom);
1614 setOperationAction(ISD::SADDSAT, VT, Custom);
1615 setOperationAction(ISD::USUBSAT, VT, Custom);
1616 setOperationAction(ISD::SSUBSAT, VT, Custom);
1618 setOperationAction(ISD::TRUNCATE, VT, Custom);
1619 setOperationAction(ISD::SETCC, VT, Custom);
1620 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1621 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1622 setOperationAction(ISD::SELECT, VT, Custom);
1623 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1624 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1627 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1628 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1629 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1630 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1631 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1632 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1634 // Extends from v32i1 masks to 256-bit vectors.
1635 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1636 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1637 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
1640 // This block controls legalization for v32i16 and v64i8. 512-bits can be
1641 // disabled based on prefer-vector-width and required-vector-width function
1642 // attributes.
1643 if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1644 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1645 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1647 // Extends from v64i1 masks to 512-bit vectors.
1648 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1649 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1650 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1652 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1653 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1654 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1655 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1656 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1657 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1658 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1659 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1660 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
1661 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
1662 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1663 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1664 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1665 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1666 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1667 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1668 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1669 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1670 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1671 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1672 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1673 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1674 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1676 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1677 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1679 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1681 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1682 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1683 setOperationAction(ISD::VSELECT, VT, Custom);
1684 setOperationAction(ISD::ABS, VT, Legal);
1685 setOperationAction(ISD::SRL, VT, Custom);
1686 setOperationAction(ISD::SHL, VT, Custom);
1687 setOperationAction(ISD::SRA, VT, Custom);
1688 setOperationAction(ISD::MLOAD, VT, Legal);
1689 setOperationAction(ISD::MSTORE, VT, Legal);
1690 setOperationAction(ISD::CTPOP, VT, Custom);
1691 setOperationAction(ISD::CTLZ, VT, Custom);
1692 setOperationAction(ISD::SMAX, VT, Legal);
1693 setOperationAction(ISD::UMAX, VT, Legal);
1694 setOperationAction(ISD::SMIN, VT, Legal);
1695 setOperationAction(ISD::UMIN, VT, Legal);
1696 setOperationAction(ISD::SETCC, VT, Custom);
1697 setOperationAction(ISD::UADDSAT, VT, Legal);
1698 setOperationAction(ISD::SADDSAT, VT, Legal);
1699 setOperationAction(ISD::USUBSAT, VT, Legal);
1700 setOperationAction(ISD::SSUBSAT, VT, Legal);
1701 setOperationAction(ISD::SELECT, VT, Custom);
1703 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1704 // setcc all the way to isel and prefer SETGT in some isel patterns.
1705 setCondCodeAction(ISD::SETLT, VT, Custom);
1706 setCondCodeAction(ISD::SETLE, VT, Custom);
1709 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1710 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1713 if (Subtarget.hasBITALG()) {
1714 for (auto VT : { MVT::v64i8, MVT::v32i16 })
1715 setOperationAction(ISD::CTPOP, VT, Legal);
1718 if (Subtarget.hasVBMI2()) {
1719 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1720 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1724 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1725 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1726 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1727 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1730 // These operations are handled on non-VLX by artificially widening in
1731 // isel patterns.
1732 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1734 if (Subtarget.hasBITALG()) {
1735 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1736 setOperationAction(ISD::CTPOP, VT, Legal);
1740 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1741 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1742 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1743 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1744 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1745 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1747 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1748 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1749 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1750 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1751 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1753 if (Subtarget.hasDQI()) {
1754 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1755 // v2f32 UINT_TO_FP is already custom under SSE2.
1756 setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
1757 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1758 "Unexpected operation action!");
1759 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1760 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1761 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1764 if (Subtarget.hasBWI()) {
1765 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1766 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1769 if (Subtarget.hasVBMI2()) {
1770 // TODO: Make these legal even without VLX?
1771 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1772 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1773 setOperationAction(ISD::FSHL, VT, Custom);
1774 setOperationAction(ISD::FSHR, VT, Custom);
1779 // We want to custom lower some of our intrinsics.
1780 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1781 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1782 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1783 if (!Subtarget.is64Bit()) {
1784 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1787 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1788 // handle type legalization for these operations here.
1790 // FIXME: We really should do custom legalization for addition and
1791 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1792 // than generic legalization for 64-bit multiplication-with-overflow, though.
1793 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1794 if (VT == MVT::i64 && !Subtarget.is64Bit())
1795 continue;
1796 // Add/Sub/Mul with overflow operations are custom lowered.
1797 setOperationAction(ISD::SADDO, VT, Custom);
1798 setOperationAction(ISD::UADDO, VT, Custom);
1799 setOperationAction(ISD::SSUBO, VT, Custom);
1800 setOperationAction(ISD::USUBO, VT, Custom);
1801 setOperationAction(ISD::SMULO, VT, Custom);
1802 setOperationAction(ISD::UMULO, VT, Custom);
1804 // Support carry in as value rather than glue.
1805 setOperationAction(ISD::ADDCARRY, VT, Custom);
1806 setOperationAction(ISD::SUBCARRY, VT, Custom);
1807 setOperationAction(ISD::SETCCCARRY, VT, Custom);
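// ADDCARRY/SUBCARRY model the carry/borrow as an ordinary boolean value
// rather than as a glue edge, which makes multi-word add/sub chains easier
// to form and map onto ADC/SBB; SETCCCARRY tests that carry value.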
1810 if (!Subtarget.is64Bit()) {
1811 // These libcalls are not available on 32-bit targets.
1812 setLibcallName(RTLIB::SHL_I128, nullptr);
1813 setLibcallName(RTLIB::SRL_I128, nullptr);
1814 setLibcallName(RTLIB::SRA_I128, nullptr);
1815 setLibcallName(RTLIB::MUL_I128, nullptr);
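// With these libcall names cleared, the legalizer cannot emit calls to the
// usual compiler-rt helpers for these operations (e.g. __ashlti3, __lshrti3,
// __ashrti3, __multi3), so 128-bit shifts and multiplies get expanded some
// other way on 32-bit targets instead of calling into a missing runtime.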
1818 // Combine sin / cos into _sincos_stret if it is available.
1819 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1820 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1821 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1822 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1825 if (Subtarget.isTargetWin64()) {
1826 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1827 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1828 setOperationAction(ISD::SREM, MVT::i128, Custom);
1829 setOperationAction(ISD::UREM, MVT::i128, Custom);
1830 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1831 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1834 // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1835 // is. We should promote the value to 64 bits to solve this.
1836 // This is what the CRT headers do - `fmodf` is an inline header
1837 // function that casts to f64 and calls `fmod`.
1838 if (Subtarget.is32Bit() && (Subtarget.isTargetKnownWindowsMSVC() ||
1839 Subtarget.isTargetWindowsItanium()))
1840 for (ISD::NodeType Op :
1841 {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
1842 ISD::FLOG10, ISD::FPOW, ISD::FSIN})
1843 if (isOperationExpand(Op, MVT::f32))
1844 setOperationAction(Op, MVT::f32, Promote);
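// For example, with FREM promoted on f32, a call like fmodf(x, y) is lowered
// roughly as (float)fmod((double)x, (double)y), mirroring the CRT's inline
// fmodf definition described above.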
1846 // We have target-specific dag combine patterns for the following nodes:
1847 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1848 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1849 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1850 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1851 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1852 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1853 setTargetDAGCombine(ISD::BITCAST);
1854 setTargetDAGCombine(ISD::VSELECT);
1855 setTargetDAGCombine(ISD::SELECT);
1856 setTargetDAGCombine(ISD::SHL);
1857 setTargetDAGCombine(ISD::SRA);
1858 setTargetDAGCombine(ISD::SRL);
1859 setTargetDAGCombine(ISD::OR);
1860 setTargetDAGCombine(ISD::AND);
1861 setTargetDAGCombine(ISD::ADD);
1862 setTargetDAGCombine(ISD::FADD);
1863 setTargetDAGCombine(ISD::FSUB);
1864 setTargetDAGCombine(ISD::FNEG);
1865 setTargetDAGCombine(ISD::FMA);
1866 setTargetDAGCombine(ISD::FMINNUM);
1867 setTargetDAGCombine(ISD::FMAXNUM);
1868 setTargetDAGCombine(ISD::SUB);
1869 setTargetDAGCombine(ISD::LOAD);
1870 setTargetDAGCombine(ISD::MLOAD);
1871 setTargetDAGCombine(ISD::STORE);
1872 setTargetDAGCombine(ISD::MSTORE);
1873 setTargetDAGCombine(ISD::TRUNCATE);
1874 setTargetDAGCombine(ISD::ZERO_EXTEND);
1875 setTargetDAGCombine(ISD::ANY_EXTEND);
1876 setTargetDAGCombine(ISD::SIGN_EXTEND);
1877 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1878 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
1879 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1880 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1881 setTargetDAGCombine(ISD::SINT_TO_FP);
1882 setTargetDAGCombine(ISD::UINT_TO_FP);
1883 setTargetDAGCombine(ISD::SETCC);
1884 setTargetDAGCombine(ISD::MUL);
1885 setTargetDAGCombine(ISD::XOR);
1886 setTargetDAGCombine(ISD::MSCATTER);
1887 setTargetDAGCombine(ISD::MGATHER);
1889 computeRegisterProperties(Subtarget.getRegisterInfo());
1891 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1892 MaxStoresPerMemsetOptSize = 8;
1893 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1894 MaxStoresPerMemcpyOptSize = 4;
1895 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1896 MaxStoresPerMemmoveOptSize = 4;
1898 // TODO: These control memcmp expansion in CGP and could be raised higher, but
1899 // that needs to be benchmarked and balanced with the potential use of vector
1900 // load/store types (PR33329, PR33914).
1901 MaxLoadsPerMemcmp = 2;
1902 MaxLoadsPerMemcmpOptSize = 2;
1904 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
1905 setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
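// For example, with the default value of 4 this aligns loop headers to
// 2^4 = 16 bytes; a value of 5 would request 32-byte alignment.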
1907 // An out-of-order CPU can speculatively execute past a predictable branch,
1908 // but a conditional move could be stalled by an expensive earlier operation.
1909 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
1910 EnableExtLdPromotion = true;
1911 setPrefFunctionAlignment(4); // 2^4 bytes.
1913 verifyIntrinsicTables();
1916 // This has so far only been implemented for 64-bit MachO.
1917 bool X86TargetLowering::useLoadStackGuardNode() const {
1918 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
1921 bool X86TargetLowering::useStackGuardXorFP() const {
1922 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
1923 return Subtarget.getTargetTriple().isOSMSVCRT();
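// emitStackGuardXorFP (below) combines the loaded guard value with the frame
// pointer via the XOR*_FP pseudos, so the later comparison matches what the
// MSVC CRT expects given that it XORs the frame pointer into the cookie.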
1926 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
1927 const SDLoc &DL) const {
1928 EVT PtrTy = getPointerTy(DAG.getDataLayout());
1929 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
1930 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
1931 return SDValue(Node, 0);
1934 TargetLoweringBase::LegalizeTypeAction
1935 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
1936 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1937 return TypeSplitVector;
1939 if (ExperimentalVectorWideningLegalization &&
1940 VT.getVectorNumElements() != 1 &&
1941 VT.getVectorElementType() != MVT::i1)
1942 return TypeWidenVector;
1944 return TargetLoweringBase::getPreferredVectorAction(VT);
1947 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1948 CallingConv::ID CC,
1949 EVT VT) const {
1950 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1951 return MVT::v32i8;
1952 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1955 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1956 CallingConv::ID CC,
1957 EVT VT) const {
1958 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1959 return 1;
1960 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
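// Taken together, the two overrides above mean that without BWI a v32i1
// argument or return value travels through the calling convention as a
// single v32i8 value rather than being split across mask registers.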
1963 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
1964 LLVMContext& Context,
1965 EVT VT) const {
1966 if (!VT.isVector())
1967 return MVT::i8;
1969 if (Subtarget.hasAVX512()) {
1970 const unsigned NumElts = VT.getVectorNumElements();
1972 // Figure out what this type will be legalized to.
1973 EVT LegalVT = VT;
1974 while (getTypeAction(Context, LegalVT) != TypeLegal)
1975 LegalVT = getTypeToTransformTo(Context, LegalVT);
1977 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
1978 if (LegalVT.getSimpleVT().is512BitVector())
1979 return EVT::getVectorVT(Context, MVT::i1, NumElts);
1981 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
1982 // If we legalized to less than a 512-bit vector, then we will use a vXi1
1983 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
1984 // vXi16/vXi8.
1985 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
1986 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
1987 return EVT::getVectorVT(Context, MVT::i1, NumElts);
1991 return VT.changeVectorElementTypeToInteger();
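// Illustrative results of the logic above: with AVX512 and VLX a v8i32
// compare produces a v8i1 mask; with AVX512 but no VLX the same compare
// still produces v8i32; and without AVX512 the result simply mirrors the
// operand type with integer elements (e.g. v4f32 -> v4i32).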
1994 /// Helper for getByValTypeAlignment to determine
1995 /// the desired ByVal argument alignment.
1996 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1997 if (MaxAlign == 16)
1998 return;
1999 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2000 if (VTy->getBitWidth() == 128)
2001 MaxAlign = 16;
2002 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2003 unsigned EltAlign = 0;
2004 getMaxByValAlign(ATy->getElementType(), EltAlign);
2005 if (EltAlign > MaxAlign)
2006 MaxAlign = EltAlign;
2007 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2008 for (auto *EltTy : STy->elements()) {
2009 unsigned EltAlign = 0;
2010 getMaxByValAlign(EltTy, EltAlign);
2011 if (EltAlign > MaxAlign)
2012 MaxAlign = EltAlign;
2013 if (MaxAlign == 16)
2014 break;
2019 /// Return the desired alignment for ByVal aggregate
2020 /// function arguments in the caller parameter area. For X86, aggregates
2021 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2022 /// are at 4-byte boundaries.
2023 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2024 const DataLayout &DL) const {
2025 if (Subtarget.is64Bit()) {
2026 // Max of 8 and alignment of type.
2027 unsigned TyAlign = DL.getABITypeAlignment(Ty);
2028 if (TyAlign > 8)
2029 return TyAlign;
2030 return 8;
2033 unsigned Align = 4;
2034 if (Subtarget.hasSSE1())
2035 getMaxByValAlign(Ty, Align);
2036 return Align;
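// For example, on a 32-bit target with SSE a struct containing a 128-bit
// vector member is placed on a 16-byte boundary by the rules above, while a
// struct of plain i32 fields keeps the default 4-byte alignment.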
2039 /// Returns the target-specific optimal type for load
2040 /// and store operations as a result of memset, memcpy, and memmove
2041 /// lowering. If DstAlign is zero, the destination alignment is
2042 /// unconstrained and can satisfy any requirement. Similarly, if SrcAlign is
2043 /// zero there is no need to check it against an alignment requirement,
2044 /// probably because the source does not need to be loaded. If 'IsMemset' is
2045 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2046 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2047 /// source is constant so it does not need to be loaded.
2048 /// It returns EVT::Other if the type should be determined using generic
2049 /// target-independent logic.
2050 /// For vector ops we check that the overall size isn't larger than our
2051 /// preferred vector width.
2052 EVT X86TargetLowering::getOptimalMemOpType(
2053 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2054 bool ZeroMemset, bool MemcpyStrSrc,
2055 const AttributeList &FuncAttributes) const {
2056 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2057 if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2058 ((DstAlign == 0 || DstAlign >= 16) &&
2059 (SrcAlign == 0 || SrcAlign >= 16)))) {
2060 // FIXME: Check if unaligned 32-byte accesses are slow.
2061 if (Size >= 32 && Subtarget.hasAVX() &&
2062 (Subtarget.getPreferVectorWidth() >= 256)) {
2063 // Although this isn't a well-supported type for AVX1, we'll let
2064 // legalization and shuffle lowering produce the optimal codegen. If we
2065 // choose an optimal type with a vector element larger than a byte,
2066 // getMemsetStores() may create an intermediate splat (using an integer
2067 // multiply) before we splat as a vector.
2068 return MVT::v32i8;
2070 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2071 return MVT::v16i8;
2072 // TODO: Can SSE1 handle a byte vector?
2073 // If we have SSE1 registers we should be able to use them.
2074 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2075 (Subtarget.getPreferVectorWidth() >= 128))
2076 return MVT::v4f32;
2077 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2078 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2079 // Do not use f64 to lower memcpy if source is string constant. It's
2080 // better to use i32 to avoid the loads.
2081 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2082 // The gymnastics of splatting a byte value into an XMM register and then
2083 // only using 8-byte stores (because this is a CPU with slow unaligned
2084 // 16-byte accesses) makes that a loser.
2085 return MVT::f64;
2088 // This is a compromise. If we reach here, unaligned accesses may be slow on
2089 // this target. However, creating smaller, aligned accesses could be even
2090 // slower and would certainly be a lot more code.
2091 if (Subtarget.is64Bit() && Size >= 8)
2092 return MVT::i64;
2093 return MVT::i32;
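// Rough examples of the result: a large memcpy on an AVX target with fast
// (or sufficiently aligned) unaligned accesses uses v32i8; with only SSE2 it
// uses v16i8; and when vectors are not an option a 64-bit target falls back
// to i64 chunks and everything else to i32.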
2096 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2097 if (VT == MVT::f32)
2098 return X86ScalarSSEf32;
2099 else if (VT == MVT::f64)
2100 return X86ScalarSSEf64;
2101 return true;
2104 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2105 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2106 bool *Fast) const {
2107 if (Fast) {
2108 switch (VT.getSizeInBits()) {
2109 default:
2110 // 8-byte and under are always assumed to be fast.
2111 *Fast = true;
2112 break;
2113 case 128:
2114 *Fast = !Subtarget.isUnalignedMem16Slow();
2115 break;
2116 case 256:
2117 *Fast = !Subtarget.isUnalignedMem32Slow();
2118 break;
2119 // TODO: What about AVX-512 (512-bit) accesses?
2122 // NonTemporal vector memory ops must be aligned.
2123 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2124 // NT loads can only be vector aligned, so if it's less aligned than the
2125 // minimum vector size (which we can split the vector down to), we might as
2126 // well use a regular unaligned vector load.
2127 // We don't have any NT loads pre-SSE41.
2128 if (!!(Flags & MachineMemOperand::MOLoad))
2129 return (Align < 16 || !Subtarget.hasSSE41());
2130 return false;
2132 // Misaligned accesses of any size are always allowed.
2133 return true;
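// In practice this reports *Fast = false for misaligned 16-byte accesses on
// CPUs where unaligned 128-bit memory ops are slow (and likewise for 32-byte
// accesses), while still permitting the access; only under-aligned
// non-temporal vector ops are actually rejected.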
2136 /// Return the entry encoding for a jump table in the
2137 /// current function. The returned value is a member of the
2138 /// MachineJumpTableInfo::JTEntryKind enum.
2139 unsigned X86TargetLowering::getJumpTableEncoding() const {
2140 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2141 // symbol.
2142 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2143 return MachineJumpTableInfo::EK_Custom32;
2145 // Otherwise, use the normal jump table encoding heuristics.
2146 return TargetLowering::getJumpTableEncoding();
2149 bool X86TargetLowering::useSoftFloat() const {
2150 return Subtarget.useSoftFloat();
2153 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2154 ArgListTy &Args) const {
2156 // Only relabel X86-32 for C / Stdcall CCs.
2157 if (Subtarget.is64Bit())
2158 return;
2159 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2160 return;
2161 unsigned ParamRegs = 0;
2162 if (auto *M = MF->getFunction().getParent())
2163 ParamRegs = M->getNumberRegisterParameters();
2165 // Mark the first N integer arguments as being passed in registers.
2166 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2167 Type *T = Args[Idx].Ty;
2168 if (T->isIntOrPtrTy())
2169 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2170 unsigned numRegs = 1;
2171 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2172 numRegs = 2;
2173 if (ParamRegs < numRegs)
2174 return;
2175 ParamRegs -= numRegs;
2176 Args[Idx].IsInReg = true;
2181 const MCExpr *
2182 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2183 const MachineBasicBlock *MBB,
2184 unsigned uid,MCContext &Ctx) const{
2185 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2186 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2187 // entries.
2188 return MCSymbolRefExpr::create(MBB->getSymbol(),
2189 MCSymbolRefExpr::VK_GOTOFF, Ctx);
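// Each custom entry therefore becomes, roughly, a 32-bit <bb>@GOTOFF value,
// and getPICJumpTableRelocBase below supplies the matching PIC base that is
// added to it at runtime.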
2192 /// Returns relocation base for the given PIC jumptable.
2193 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2194 SelectionDAG &DAG) const {
2195 if (!Subtarget.is64Bit())
2196 // This doesn't have SDLoc associated with it, but is not really the
2197 // same as a Register.
2198 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2199 getPointerTy(DAG.getDataLayout()));
2200 return Table;
2203 /// This returns the relocation base for the given PIC jumptable,
2204 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2205 const MCExpr *X86TargetLowering::
2206 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2207 MCContext &Ctx) const {
2208 // X86-64 uses RIP relative addressing based on the jump table label.
2209 if (Subtarget.isPICStyleRIPRel())
2210 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2212 // Otherwise, the reference is relative to the PIC base.
2213 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2216 std::pair<const TargetRegisterClass *, uint8_t>
2217 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2218 MVT VT) const {
2219 const TargetRegisterClass *RRC = nullptr;
2220 uint8_t Cost = 1;
2221 switch (VT.SimpleTy) {
2222 default:
2223 return TargetLowering::findRepresentativeClass(TRI, VT);
2224 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2225 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2226 break;
2227 case MVT::x86mmx:
2228 RRC = &X86::VR64RegClass;
2229 break;
2230 case MVT::f32: case MVT::f64:
2231 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2232 case MVT::v4f32: case MVT::v2f64:
2233 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2234 case MVT::v8f32: case MVT::v4f64:
2235 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2236 case MVT::v16f32: case MVT::v8f64:
2237 RRC = &X86::VR128XRegClass;
2238 break;
2240 return std::make_pair(RRC, Cost);
2243 unsigned X86TargetLowering::getAddressSpace() const {
2244 if (Subtarget.is64Bit())
2245 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2246 return 256;
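// Address spaces 256 and 257 are the x86 segment-override address spaces
// (conventionally %gs and %fs respectively), which is how the TLS-relative
// stack protector and SafeStack slots below are addressed.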
2249 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2250 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2251 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2254 static Constant* SegmentOffset(IRBuilder<> &IRB,
2255 unsigned Offset, unsigned AddressSpace) {
2256 return ConstantExpr::getIntToPtr(
2257 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2258 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
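// Illustrative IR for the constant built above: SegmentOffset(IRB, 0x28, 257)
// yields roughly inttoptr (i32 40 to i8* addrspace(257)*), i.e. an address
// relative to the %fs segment base.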
2261 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2262 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2263 // tcbhead_t; use it instead of the usual global variable (see
2264 // sysdeps/{i386,x86_64}/nptl/tls.h)
2265 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2266 if (Subtarget.isTargetFuchsia()) {
2267 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2268 return SegmentOffset(IRB, 0x10, getAddressSpace());
2269 } else {
2270 // %fs:0x28 on x86_64, unless we're using a Kernel code model, in which
2271 // case it's %gs:0x28. On i386 it's %gs:0x14.
2272 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2273 return SegmentOffset(IRB, Offset, getAddressSpace());
2277 return TargetLowering::getIRStackGuard(IRB);
2280 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2281 // MSVC CRT provides functionalities for stack protection.
2282 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2283 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2284 // MSVC CRT has a global variable holding security cookie.
2285 M.getOrInsertGlobal("__security_cookie",
2286 Type::getInt8PtrTy(M.getContext()));
2288 // MSVC CRT has a function to validate security cookie.
2289 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2290 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2291 Type::getInt8PtrTy(M.getContext()));
2292 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2293 F->setCallingConv(CallingConv::X86_FastCall);
2294 F->addAttribute(1, Attribute::AttrKind::InReg);
2296 return;
2298 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2299 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2300 return;
2301 TargetLowering::insertSSPDeclarations(M);
2304 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2305 // MSVC CRT has a global variable holding security cookie.
2306 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2307 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2308 return M.getGlobalVariable("__security_cookie");
2310 return TargetLowering::getSDagStackGuard(M);
2313 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2314 // MSVC CRT has a function to validate security cookie.
2315 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2316 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2317 return M.getFunction("__security_check_cookie");
2319 return TargetLowering::getSSPStackGuardCheck(M);
2322 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2323 if (Subtarget.getTargetTriple().isOSContiki())
2324 return getDefaultSafeStackPointerLocation(IRB, false);
2326 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2327 // definition of TLS_SLOT_SAFESTACK in
2328 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2329 if (Subtarget.isTargetAndroid()) {
2330 // %fs:0x48 on x86_64 (or %gs:0x48 under a Kernel code model), and
2331 // %gs:0x24 on i386.
2332 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2333 return SegmentOffset(IRB, Offset, getAddressSpace());
2336 // Fuchsia is similar.
2337 if (Subtarget.isTargetFuchsia()) {
2338 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2339 return SegmentOffset(IRB, 0x18, getAddressSpace());
2342 return TargetLowering::getSafeStackPointerLocation(IRB);
2345 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2346 unsigned DestAS) const {
2347 assert(SrcAS != DestAS && "Expected different address spaces!");
2349 return SrcAS < 256 && DestAS < 256;
2352 //===----------------------------------------------------------------------===//
2353 // Return Value Calling Convention Implementation
2354 //===----------------------------------------------------------------------===//
2356 bool X86TargetLowering::CanLowerReturn(
2357 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2358 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2359 SmallVector<CCValAssign, 16> RVLocs;
2360 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2361 return CCInfo.CheckReturn(Outs, RetCC_X86);
2364 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2365 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2366 return ScratchRegs;
2369 /// Lowers mask values (v*i1) to the local register values.
2370 /// \returns DAG node after lowering to register type
2371 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2372 const SDLoc &Dl, SelectionDAG &DAG) {
2373 EVT ValVT = ValArg.getValueType();
2375 if (ValVT == MVT::v1i1)
2376 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2377 DAG.getIntPtrConstant(0, Dl));
2379 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2380 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2381 // Two stage lowering might be required
2382 // bitcast: v8i1 -> i8 / v16i1 -> i16
2383 // anyextend: i8 -> i32 / i16 -> i32
2384 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2385 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2386 if (ValLoc == MVT::i32)
2387 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2388 return ValToCopy;
2391 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2392 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2393 // One stage lowering is required
2394 // bitcast: v32i1 -> i32 / v64i1 -> i64
2395 return DAG.getBitcast(ValLoc, ValArg);
2398 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
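// For example, a v16i1 value that must live in an i32 location is first
// bitcast to i16 and then any-extended to i32, matching the two-stage path
// described above.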
2401 /// Breaks v64i1 value into two registers and adds the new node to the DAG
2402 static void Passv64i1ArgInRegs(
2403 const SDLoc &Dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg,
2404 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, CCValAssign &VA,
2405 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2406 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2407 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2408 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2409 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2410 "The value should reside in two registers");
2412 // Before splitting the value we cast it to i64
2413 Arg = DAG.getBitcast(MVT::i64, Arg);
2415 // Split the value into two i32 halves.
2416 SDValue Lo, Hi;
2417 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2418 DAG.getConstant(0, Dl, MVT::i32));
2419 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2420 DAG.getConstant(1, Dl, MVT::i32));
2422 // Attach the two i32 halves to their corresponding registers.
2423 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2424 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
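// For example, a v64i1 mask argument on a 32-bit AVX512BW target is bitcast
// to i64 and its low and high halves are passed in the two GPRs that VA and
// NextVA describe.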
2427 SDValue
2428 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2429 bool isVarArg,
2430 const SmallVectorImpl<ISD::OutputArg> &Outs,
2431 const SmallVectorImpl<SDValue> &OutVals,
2432 const SDLoc &dl, SelectionDAG &DAG) const {
2433 MachineFunction &MF = DAG.getMachineFunction();
2434 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2436 // In some cases we need to disable registers from the default CSR list.
2437 // For example, when they are used for argument passing.
2438 bool ShouldDisableCalleeSavedRegister =
2439 CallConv == CallingConv::X86_RegCall ||
2440 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2442 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2443 report_fatal_error("X86 interrupts may not return any value");
2445 SmallVector<CCValAssign, 16> RVLocs;
2446 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2447 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2449 SDValue Flag;
2450 SmallVector<SDValue, 6> RetOps;
2451 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2452 // Operand #1 = Bytes To Pop
2453 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2454 MVT::i32));
2456 // Copy the result values into the output registers.
2457 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2458 ++I, ++OutsIndex) {
2459 CCValAssign &VA = RVLocs[I];
2460 assert(VA.isRegLoc() && "Can only return in registers!");
2462 // Add the register to the CalleeSaveDisableRegs list.
2463 if (ShouldDisableCalleeSavedRegister)
2464 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2466 SDValue ValToCopy = OutVals[OutsIndex];
2467 EVT ValVT = ValToCopy.getValueType();
2469 // Promote values to the appropriate types.
2470 if (VA.getLocInfo() == CCValAssign::SExt)
2471 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2472 else if (VA.getLocInfo() == CCValAssign::ZExt)
2473 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2474 else if (VA.getLocInfo() == CCValAssign::AExt) {
2475 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2476 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2477 else
2478 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2480 else if (VA.getLocInfo() == CCValAssign::BCvt)
2481 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2483 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2484 "Unexpected FP-extend for return value.");
2486 // If this is x86-64, and we disabled SSE, we can't return FP values,
2487 // or SSE or MMX vectors.
2488 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2489 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2490 (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
2491 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2492 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2493 } else if (ValVT == MVT::f64 &&
2494 (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
2495 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2496 // llvm-gcc has never done it right and no one has noticed, so this
2497 // should be OK for now.
2498 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2499 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2502 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2503 // the RET instruction and handled by the FP Stackifier.
2504 if (VA.getLocReg() == X86::FP0 ||
2505 VA.getLocReg() == X86::FP1) {
2506 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2507 // change the value to the FP stack register class.
2508 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2509 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2510 RetOps.push_back(ValToCopy);
2511 // Don't emit a copytoreg.
2512 continue;
2515 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2516 // which is returned in RAX / RDX.
2517 if (Subtarget.is64Bit()) {
2518 if (ValVT == MVT::x86mmx) {
2519 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2520 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2521 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2522 ValToCopy);
2523 // If we don't have SSE2 available, convert to v4f32 so the generated
2524 // register is legal.
2525 if (!Subtarget.hasSSE2())
2526 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2531 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2533 if (VA.needsCustom()) {
2534 assert(VA.getValVT() == MVT::v64i1 &&
2535 "Currently the only custom case is when we split v64i1 to 2 regs");
2537 Passv64i1ArgInRegs(dl, DAG, Chain, ValToCopy, RegsToPass, VA, RVLocs[++I],
2538 Subtarget);
2540 assert(2 == RegsToPass.size() &&
2541 "Expecting two registers after Pass64BitArgInRegs");
2543 // Add the second register to the CalleeSaveDisableRegs list.
2544 if (ShouldDisableCalleeSavedRegister)
2545 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2546 } else {
2547 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2550 // Add nodes to the DAG and add the values into the RetOps list
2551 for (auto &Reg : RegsToPass) {
2552 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2553 Flag = Chain.getValue(1);
2554 RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2558 // The Swift calling convention does not require that we copy the sret argument
2559 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2561 // All x86 ABIs require that for returning structs by value we copy
2562 // the sret argument into %rax/%eax (depending on ABI) for the return.
2563 // We saved the argument into a virtual register in the entry block,
2564 // so now we copy the value out and into %rax/%eax.
2566 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2567 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2568 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2569 // either case FuncInfo->setSRetReturnReg() will have been called.
2570 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2571 // When we have both sret and another return value, we should use the
2572 // original Chain stored in RetOps[0], instead of the current Chain updated
2573 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
2575 // For the case of sret and another return value, we have
2576 // Chain_0 at the function entry
2577 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2578 // If we use Chain_1 in getCopyFromReg, we will have
2579 // Val = getCopyFromReg(Chain_1)
2580 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2582 // getCopyToReg(Chain_0) will be glued together with
2583 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2584 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2585 // Data dependency from Unit B to Unit A due to usage of Val in
2586 // getCopyToReg(Chain_1, Val)
2587 // Chain dependency from Unit A to Unit B
2589 // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
2590 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2591 getPointerTy(MF.getDataLayout()));
2593 unsigned RetValReg
2594 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2595 X86::RAX : X86::EAX;
2596 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2597 Flag = Chain.getValue(1);
2599 // RAX/EAX now acts like a return value.
2600 RetOps.push_back(
2601 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2603 // Add the returned register to the CalleeSaveDisableRegs list.
2604 if (ShouldDisableCalleeSavedRegister)
2605 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2608 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2609 const MCPhysReg *I =
2610 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2611 if (I) {
2612 for (; *I; ++I) {
2613 if (X86::GR64RegClass.contains(*I))
2614 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2615 else
2616 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2620 RetOps[0] = Chain; // Update chain.
2622 // Add the flag if we have it.
2623 if (Flag.getNode())
2624 RetOps.push_back(Flag);
2626 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2627 if (CallConv == CallingConv::X86_INTR)
2628 opcode = X86ISD::IRET;
2629 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2632 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2633 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2634 return false;
2636 SDValue TCChain = Chain;
2637 SDNode *Copy = *N->use_begin();
2638 if (Copy->getOpcode() == ISD::CopyToReg) {
2639 // If the copy has a glue operand, we conservatively assume it isn't safe to
2640 // perform a tail call.
2641 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2642 return false;
2643 TCChain = Copy->getOperand(0);
2644 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2645 return false;
2647 bool HasRet = false;
2648 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2649 UI != UE; ++UI) {
2650 if (UI->getOpcode() != X86ISD::RET_FLAG)
2651 return false;
2652 // If we are returning more than one value, we can definitely
2653 // not make a tail call; see PR19530.
2654 if (UI->getNumOperands() > 4)
2655 return false;
2656 if (UI->getNumOperands() == 4 &&
2657 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2658 return false;
2659 HasRet = true;
2662 if (!HasRet)
2663 return false;
2665 Chain = TCChain;
2666 return true;
2669 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2670 ISD::NodeType ExtendKind) const {
2671 MVT ReturnMVT = MVT::i32;
2673 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2674 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2675 // The ABI does not require i1, i8 or i16 to be extended.
2677 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2678 // always extending i8/i16 return values, so keep doing that for now.
2679 // (PR26665).
2680 ReturnMVT = MVT::i8;
2683 EVT MinVT = getRegisterType(Context, ReturnMVT);
2684 return VT.bitsLT(MinVT) ? MinVT : VT;
2687 /// Reads two 32 bit registers and creates a 64 bit mask value.
2688 /// \param VA The current 32 bit value that needs to be assigned.
2689 /// \param NextVA The next 32 bit value that needs to be assigned.
2690 /// \param Root The parent DAG node.
2691 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
2692 /// for glue purposes. In case the DAG is already using a
2693 /// physical register instead of a virtual one, we should glue
2694 /// our new SDValue to the InFlag SDValue.
2695 /// \return a new 64 bit wide SDValue.
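/// For example, on a 32 bit regcall target a v64i1 argument arrives in two
/// GR32 locations; each half is bitcast to v32i1 and the two halves are
/// concatenated into the final v64i1 value.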
2696 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2697 SDValue &Root, SelectionDAG &DAG,
2698 const SDLoc &Dl, const X86Subtarget &Subtarget,
2699 SDValue *InFlag = nullptr) {
2700 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2701 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2702 assert(VA.getValVT() == MVT::v64i1 &&
2703 "Expecting first location of 64 bit width type");
2704 assert(NextVA.getValVT() == VA.getValVT() &&
2705 "The locations should have the same type");
2706 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2707 "The values should reside in two registers");
2709 SDValue Lo, Hi;
2710 SDValue ArgValueLo, ArgValueHi;
2712 MachineFunction &MF = DAG.getMachineFunction();
2713 const TargetRegisterClass *RC = &X86::GR32RegClass;
2715 // Read the 32 bit values from the registers.
2716 if (nullptr == InFlag) {
2717 // When no physical register is present,
2718 // create an intermediate virtual register.
2719 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2720 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2721 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2722 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2723 } else {
2724 // When a physical register is available read the value from it and glue
2725 // the reads together.
2726 ArgValueLo =
2727 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2728 *InFlag = ArgValueLo.getValue(2);
2729 ArgValueHi =
2730 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2731 *InFlag = ArgValueHi.getValue(2);
2734 // Convert the i32 type into v32i1 type.
2735 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2737 // Convert the i32 type into v32i1 type.
2738 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2740 // Concatenate the two values together.
2741 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2744 /// The function will lower a register of various sizes (8/16/32/64)
2745 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
2746 /// \returns a DAG node containing the operand after lowering to a mask type.
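/// For example, a v16i1 value that was promoted into an i32 location is first
/// truncated to i16 and then bitcast to v16i1, while a v64i1 value in an i64
/// location (64 bit targets only) is bitcast directly.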
2747 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2748 const EVT &ValLoc, const SDLoc &Dl,
2749 SelectionDAG &DAG) {
2750 SDValue ValReturned = ValArg;
2752 if (ValVT == MVT::v1i1)
2753 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2755 if (ValVT == MVT::v64i1) {
2756 // On a 32 bit machine this case is handled by getv64i1Argument.
2757 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2758 // On a 64 bit machine there is no need to truncate the value, only bitcast it.
2759 } else {
2760 MVT maskLen;
2761 switch (ValVT.getSimpleVT().SimpleTy) {
2762 case MVT::v8i1:
2763 maskLen = MVT::i8;
2764 break;
2765 case MVT::v16i1:
2766 maskLen = MVT::i16;
2767 break;
2768 case MVT::v32i1:
2769 maskLen = MVT::i32;
2770 break;
2771 default:
2772 llvm_unreachable("Expecting a vector of i1 types");
2775 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2777 return DAG.getBitcast(ValVT, ValReturned);
2780 /// Lower the result values of a call into the
2781 /// appropriate copies out of appropriate physical registers.
2783 SDValue X86TargetLowering::LowerCallResult(
2784 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2785 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2786 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2787 uint32_t *RegMask) const {
2789 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2790 // Assign locations to each value returned by this call.
2791 SmallVector<CCValAssign, 16> RVLocs;
2792 bool Is64Bit = Subtarget.is64Bit();
2793 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2794 *DAG.getContext());
2795 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2797 // Copy all of the result registers out of their specified physreg.
2798 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
2799 ++I, ++InsIndex) {
2800 CCValAssign &VA = RVLocs[I];
2801 EVT CopyVT = VA.getLocVT();
2803 // In some calling conventions we need to remove the used registers
2804 // from the register mask.
2805 if (RegMask) {
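// The mask stores one bit per physical register, packed 32 registers per
// 32-bit word; clearing a bit removes that register (and, via the loop below,
// each of its sub-registers) from the set preserved across the call.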
2806 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
2807 SubRegs.isValid(); ++SubRegs)
2808 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
2811 // If this is x86-64, and we disabled SSE, we can't return FP values
2812 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
2813 ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
2814 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2815 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2818 // If we prefer to use the value in xmm registers, copy it out as f80 and
2819 // use a truncate to move it from fp stack reg to xmm reg.
2820 bool RoundAfterCopy = false;
2821 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2822 isScalarFPTypeInSSEReg(VA.getValVT())) {
2823 if (!Subtarget.hasX87())
2824 report_fatal_error("X87 register return with X87 disabled");
2825 CopyVT = MVT::f80;
2826 RoundAfterCopy = (CopyVT != VA.getLocVT());
2829 SDValue Val;
2830 if (VA.needsCustom()) {
2831 assert(VA.getValVT() == MVT::v64i1 &&
2832 "Currently the only custom case is when we split v64i1 to 2 regs");
2833 Val =
2834 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
2835 } else {
2836 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
2837 .getValue(1);
2838 Val = Chain.getValue(0);
2839 InFlag = Chain.getValue(2);
2842 if (RoundAfterCopy)
2843 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2844 // This truncation won't change the value.
2845 DAG.getIntPtrConstant(1, dl));
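// (The trailing constant 1 is FP_ROUND's flag asserting that the rounding is
// known not to lose precision, matching the comment above.)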
2847 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
2848 if (VA.getValVT().isVector() &&
2849 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
2850 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
2851 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
2852 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
2853 } else
2854 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
2857 InVals.push_back(Val);
2860 return Chain;
2863 //===----------------------------------------------------------------------===//
2864 // C & StdCall & Fast Calling Convention implementation
2865 //===----------------------------------------------------------------------===//
2866 // The StdCall calling convention is the standard for many Windows API
2867 // routines. It differs from the C calling convention only slightly: the
2868 // callee cleans up the stack instead of the caller, and symbols are also
2869 // decorated in some fancy way :) It doesn't support any vector arguments.
2870 // For info on the fast calling convention see the Fast Calling Convention
2871 // (tail call) implementation, LowerX86_32FastCCCallTo.
2873 /// CallIsStructReturn - Determines whether a call uses struct return
2874 /// semantics.
2875 enum StructReturnType {
2876 NotStructReturn,
2877 RegStructReturn,
2878 StackStructReturn
2880 static StructReturnType
2881 callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
2882 if (Outs.empty())
2883 return NotStructReturn;
2885 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2886 if (!Flags.isSRet())
2887 return NotStructReturn;
2888 if (Flags.isInReg() || IsMCU)
2889 return RegStructReturn;
2890 return StackStructReturn;
2893 /// Determines whether a function uses struct return semantics.
2894 static StructReturnType
2895 argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
2896 if (Ins.empty())
2897 return NotStructReturn;
2899 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2900 if (!Flags.isSRet())
2901 return NotStructReturn;
2902 if (Flags.isInReg() || IsMCU)
2903 return RegStructReturn;
2904 return StackStructReturn;
2907 /// Make a copy of an aggregate at the address specified by "Src" to the
2908 /// address "Dst", with size and alignment information specified by the byval
2909 /// parameter attribute. The copy will be passed as a byval function parameter.
2910 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
2911 SDValue Chain, ISD::ArgFlagsTy Flags,
2912 SelectionDAG &DAG, const SDLoc &dl) {
2913 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
2915 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2916 /*isVolatile*/false, /*AlwaysInline=*/true,
2917 /*isTailCall*/false,
2918 MachinePointerInfo(), MachinePointerInfo());
2921 /// Return true if the calling convention is one that we can guarantee TCO for.
2922 static bool canGuaranteeTCO(CallingConv::ID CC) {
2923 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2924 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
2925 CC == CallingConv::HHVM);
2928 /// Return true if we might ever do TCO for calls with this calling convention.
2929 static bool mayTailCallThisCC(CallingConv::ID CC) {
2930 switch (CC) {
2931 // C calling conventions:
2932 case CallingConv::C:
2933 case CallingConv::Win64:
2934 case CallingConv::X86_64_SysV:
2935 // Callee pop conventions:
2936 case CallingConv::X86_ThisCall:
2937 case CallingConv::X86_StdCall:
2938 case CallingConv::X86_VectorCall:
2939 case CallingConv::X86_FastCall:
2940 // Swift:
2941 case CallingConv::Swift:
2942 return true;
2943 default:
2944 return canGuaranteeTCO(CC);
2948 /// Return true if the function is being made into a tailcall target by
2949 /// changing its ABI.
2950 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
2951 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
2954 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2955 auto Attr =
2956 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2957 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2958 return false;
2960 ImmutableCallSite CS(CI);
2961 CallingConv::ID CalleeCC = CS.getCallingConv();
2962 if (!mayTailCallThisCC(CalleeCC))
2963 return false;
2965 return true;
2968 SDValue
2969 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
2970 const SmallVectorImpl<ISD::InputArg> &Ins,
2971 const SDLoc &dl, SelectionDAG &DAG,
2972 const CCValAssign &VA,
2973 MachineFrameInfo &MFI, unsigned i) const {
2974 // Create the nodes corresponding to a load from this parameter slot.
2975 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2976 bool AlwaysUseMutable = shouldGuaranteeTCO(
2977 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2978 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2979 EVT ValVT;
2980 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2982 // If the value is passed by pointer, we receive the address instead of the
2983 // value itself. There is no need to extend if the mask value and its location
2984 // share the same absolute size.
2985 bool ExtendedInMem =
2986 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
2987 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
2989 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
2990 ValVT = VA.getLocVT();
2991 else
2992 ValVT = VA.getValVT();
2994 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2995 // changed with more analysis.
2996 // In case of tail call optimization, mark all arguments mutable, since they
2997 // could be overwritten by the lowering of the arguments of a tail call.
2998 if (Flags.isByVal()) {
2999 unsigned Bytes = Flags.getByValSize();
3000 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3002 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3003 // can be improved with deeper analysis.
3004 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3005 /*isAliased=*/true);
3006 return DAG.getFrameIndex(FI, PtrVT);
3009 // This is an argument in memory. We might be able to perform copy elision.
3010 // If the argument is passed directly in memory without any extension, then we
3011 // can perform copy elision. Large vector types, for example, may be passed
3012 // indirectly by pointer.
3013 if (Flags.isCopyElisionCandidate() &&
3014 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3015 EVT ArgVT = Ins[i].ArgVT;
3016 SDValue PartAddr;
3017 if (Ins[i].PartOffset == 0) {
3018 // If this is a one-part value or the first part of a multi-part value,
3019 // create a stack object for the entire argument value type and return a
3020 // load from our portion of it. This assumes that if the first part of an
3021 // argument is in memory, the rest will also be in memory.
3022 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3023 /*Immutable=*/false);
3024 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3025 return DAG.getLoad(
3026 ValVT, dl, Chain, PartAddr,
3027 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3028 } else {
3029 // This is not the first piece of an argument in memory. See if there is
3030 // already a fixed stack object including this offset. If so, assume it
3031 // was created by the PartOffset == 0 branch above and create a load from
3032 // the appropriate offset into it.
3033 int64_t PartBegin = VA.getLocMemOffset();
3034 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3035 int FI = MFI.getObjectIndexBegin();
3036 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3037 int64_t ObjBegin = MFI.getObjectOffset(FI);
3038 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3039 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3040 break;
3042 if (MFI.isFixedObjectIndex(FI)) {
3043 SDValue Addr =
3044 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3045 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3046 return DAG.getLoad(
3047 ValVT, dl, Chain, Addr,
3048 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3049 Ins[i].PartOffset));
3054 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3055 VA.getLocMemOffset(), isImmutable);
3057 // Set SExt or ZExt flag.
3058 if (VA.getLocInfo() == CCValAssign::ZExt) {
3059 MFI.setObjectZExt(FI, true);
3060 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3061 MFI.setObjectSExt(FI, true);
3064 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3065 SDValue Val = DAG.getLoad(
3066 ValVT, dl, Chain, FIN,
3067 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3068 return ExtendedInMem
3069 ? (VA.getValVT().isVector()
3070 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3071 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3072 : Val;
3075 // FIXME: Get this from tablegen.
3076 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3077 const X86Subtarget &Subtarget) {
3078 assert(Subtarget.is64Bit());
3080 if (Subtarget.isCallingConvWin64(CallConv)) {
3081 static const MCPhysReg GPR64ArgRegsWin64[] = {
3082 X86::RCX, X86::RDX, X86::R8, X86::R9
3084 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3087 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3088 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3090 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3093 // FIXME: Get this from tablegen.
3094 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3095 CallingConv::ID CallConv,
3096 const X86Subtarget &Subtarget) {
3097 assert(Subtarget.is64Bit());
3098 if (Subtarget.isCallingConvWin64(CallConv)) {
3099 // The XMM registers which might contain var arg parameters are shadowed
3100 // in their paired GPR. So we only need to save the GPR to their home
3101 // slots.
3102 // TODO: __vectorcall will change this.
3103 return None;
3106 const Function &F = MF.getFunction();
3107 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3108 bool isSoftFloat = Subtarget.useSoftFloat();
3109 assert(!(isSoftFloat && NoImplicitFloatOps) &&
3110 "SSE register cannot be used when SSE is disabled!");
3111 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3112 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3113 // registers.
3114 return None;
3116 static const MCPhysReg XMMArgRegs64Bit[] = {
3117 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3118 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3120 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3123 #ifndef NDEBUG
3124 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3125 return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
3126 [](const CCValAssign &A, const CCValAssign &B) -> bool {
3127 return A.getValNo() < B.getValNo();
3130 #endif
3132 SDValue X86TargetLowering::LowerFormalArguments(
3133 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3134 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3135 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3136 MachineFunction &MF = DAG.getMachineFunction();
3137 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3138 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3140 const Function &F = MF.getFunction();
3141 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3142 F.getName() == "main")
3143 FuncInfo->setForceFramePointer(true);
3145 MachineFrameInfo &MFI = MF.getFrameInfo();
3146 bool Is64Bit = Subtarget.is64Bit();
3147 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3149 assert(
3150 !(isVarArg && canGuaranteeTCO(CallConv)) &&
3151 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3153 // Assign locations to all of the incoming arguments.
3154 SmallVector<CCValAssign, 16> ArgLocs;
3155 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3157 // Allocate shadow area for Win64.
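// (The Win64 ABI reserves 32 bytes of shadow space: home slots for the four
// register parameters.)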
3158 if (IsWin64)
3159 CCInfo.AllocateStack(32, 8);
3161 CCInfo.AnalyzeArguments(Ins, CC_X86);
3163 // In vectorcall calling convention a second pass is required for the HVA
3164 // types.
3165 if (CallingConv::X86_VectorCall == CallConv) {
3166 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3169 // The next loop assumes that the locations are in the same order as the
3170 // input arguments.
3171 assert(isSortedByValueNo(ArgLocs) &&
3172 "Argument Location list must be sorted before lowering");
3174 SDValue ArgValue;
3175 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3176 ++I, ++InsIndex) {
3177 assert(InsIndex < Ins.size() && "Invalid Ins index");
3178 CCValAssign &VA = ArgLocs[I];
3180 if (VA.isRegLoc()) {
3181 EVT RegVT = VA.getLocVT();
3182 if (VA.needsCustom()) {
3183 assert(
3184 VA.getValVT() == MVT::v64i1 &&
3185 "Currently the only custom case is when we split v64i1 to 2 regs");
3187 // In the regcall calling convention, v64i1 values compiled for a 32 bit
3188 // arch are split up into two registers.
3189 ArgValue =
3190 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3191 } else {
3192 const TargetRegisterClass *RC;
3193 if (RegVT == MVT::i8)
3194 RC = &X86::GR8RegClass;
3195 else if (RegVT == MVT::i16)
3196 RC = &X86::GR16RegClass;
3197 else if (RegVT == MVT::i32)
3198 RC = &X86::GR32RegClass;
3199 else if (Is64Bit && RegVT == MVT::i64)
3200 RC = &X86::GR64RegClass;
3201 else if (RegVT == MVT::f32)
3202 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3203 else if (RegVT == MVT::f64)
3204 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3205 else if (RegVT == MVT::f80)
3206 RC = &X86::RFP80RegClass;
3207 else if (RegVT == MVT::f128)
3208 RC = &X86::VR128RegClass;
3209 else if (RegVT.is512BitVector())
3210 RC = &X86::VR512RegClass;
3211 else if (RegVT.is256BitVector())
3212 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3213 else if (RegVT.is128BitVector())
3214 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3215 else if (RegVT == MVT::x86mmx)
3216 RC = &X86::VR64RegClass;
3217 else if (RegVT == MVT::v1i1)
3218 RC = &X86::VK1RegClass;
3219 else if (RegVT == MVT::v8i1)
3220 RC = &X86::VK8RegClass;
3221 else if (RegVT == MVT::v16i1)
3222 RC = &X86::VK16RegClass;
3223 else if (RegVT == MVT::v32i1)
3224 RC = &X86::VK32RegClass;
3225 else if (RegVT == MVT::v64i1)
3226 RC = &X86::VK64RegClass;
3227 else
3228 llvm_unreachable("Unknown argument type!");
3230 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3231 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3234 // If this is an 8 or 16-bit value, it is really passed promoted to 32
3235 // bits. Insert an assert[sz]ext to capture this, then truncate to the
3236 // right size.
3237 if (VA.getLocInfo() == CCValAssign::SExt)
3238 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3239 DAG.getValueType(VA.getValVT()));
3240 else if (VA.getLocInfo() == CCValAssign::ZExt)
3241 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3242 DAG.getValueType(VA.getValVT()));
3243 else if (VA.getLocInfo() == CCValAssign::BCvt)
3244 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3246 if (VA.isExtInLoc()) {
3247 // Handle MMX values passed in XMM regs.
3248 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3249 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3250 else if (VA.getValVT().isVector() &&
3251 VA.getValVT().getScalarType() == MVT::i1 &&
3252 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3253 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3254 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3255 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3256 } else
3257 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3259 } else {
3260 assert(VA.isMemLoc());
3261 ArgValue =
3262 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3265 // If value is passed via pointer - do a load.
3266 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3267 ArgValue =
3268 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3270 InVals.push_back(ArgValue);
3273 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3274 // The Swift calling convention does not require us to copy the sret argument
3275 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3276 if (CallConv == CallingConv::Swift)
3277 continue;
3279 // All x86 ABIs require that for returning structs by value we copy the
3280 // sret argument into %rax/%eax (depending on ABI) for the return. Save
3281 // the argument into a virtual register so that we can access it from the
3282 // return points.
3283 if (Ins[I].Flags.isSRet()) {
3284 unsigned Reg = FuncInfo->getSRetReturnReg();
3285 if (!Reg) {
3286 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3287 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3288 FuncInfo->setSRetReturnReg(Reg);
3290 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3291 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3292 break;
3296 unsigned StackSize = CCInfo.getNextStackOffset();
3297 // Align stack specially for tail calls.
3298 if (shouldGuaranteeTCO(CallConv,
3299 MF.getTarget().Options.GuaranteedTailCallOpt))
3300 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3302 // If the function takes variable number of arguments, make a frame index for
3303 // the start of the first vararg value... for expansion of llvm.va_start. We
3304 // can skip this if there are no va_start calls.
3305 if (MFI.hasVAStart() &&
3306 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3307 CallConv != CallingConv::X86_ThisCall))) {
3308 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3311 // Figure out if XMM registers are in use.
3312 assert(!(Subtarget.useSoftFloat() &&
3313 F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3314 "SSE register cannot be used when SSE is disabled!");
3316 // 64-bit calling conventions support varargs and register parameters, so we
3317 // have to do extra work to spill them in the prologue.
3318 if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3319 // Find the first unallocated argument registers.
3320 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3321 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3322 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3323 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3324 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3325 "SSE register cannot be used when SSE is disabled!");
3327 // Gather all the live in physical registers.
3328 SmallVector<SDValue, 6> LiveGPRs;
3329 SmallVector<SDValue, 8> LiveXMMRegs;
3330 SDValue ALVal;
3331 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3332 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3333 LiveGPRs.push_back(
3334 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3336 if (!ArgXMMs.empty()) {
3337 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3338 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3339 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3340 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3341 LiveXMMRegs.push_back(
3342 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3346 if (IsWin64) {
3347 // Get to the caller-allocated home save location. Add 8 to account
3348 // for the return address.
3349 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3350 FuncInfo->setRegSaveFrameIndex(
3351 MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3352 // Fix up to place the vararg frame index on the shadow area (4 x i64).
3353 if (NumIntRegs < 4)
3354 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3355 } else {
3356 // For X86-64, if there are vararg parameters that are passed via
3357 // registers, then we must store them to their spots on the stack so
3358 // they may be loaded by dereferencing the result of va_next.
3359 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3360 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3361 FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3362 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
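// (The save area always has room for all 6 GPRs and all 8 XMMs; the GP/FP
// offsets recorded above mark where the still-unallocated portion begins.)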
3365 // Store the integer parameter registers.
3366 SmallVector<SDValue, 8> MemOps;
3367 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3368 getPointerTy(DAG.getDataLayout()));
3369 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3370 for (SDValue Val : LiveGPRs) {
3371 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3372 RSFIN, DAG.getIntPtrConstant(Offset, dl));
3373 SDValue Store =
3374 DAG.getStore(Val.getValue(1), dl, Val, FIN,
3375 MachinePointerInfo::getFixedStack(
3376 DAG.getMachineFunction(),
3377 FuncInfo->getRegSaveFrameIndex(), Offset));
3378 MemOps.push_back(Store);
3379 Offset += 8;
3382 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3383 // Now store the XMM (fp + vector) parameter registers.
3384 SmallVector<SDValue, 12> SaveXMMOps;
3385 SaveXMMOps.push_back(Chain);
3386 SaveXMMOps.push_back(ALVal);
3387 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3388 FuncInfo->getRegSaveFrameIndex(), dl));
3389 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3390 FuncInfo->getVarArgsFPOffset(), dl));
3391 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3392 LiveXMMRegs.end());
3393 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3394 MVT::Other, SaveXMMOps));
3397 if (!MemOps.empty())
3398 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3401 if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3402 // Find the largest legal vector type.
3403 MVT VecVT = MVT::Other;
3404 // FIXME: Only some x86_32 calling conventions support AVX512.
3405 if (Subtarget.hasAVX512() &&
3406 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3407 CallConv == CallingConv::Intel_OCL_BI)))
3408 VecVT = MVT::v16f32;
3409 else if (Subtarget.hasAVX())
3410 VecVT = MVT::v8f32;
3411 else if (Subtarget.hasSSE2())
3412 VecVT = MVT::v4f32;
3414 // We forward some GPRs and some vector types.
3415 SmallVector<MVT, 2> RegParmTypes;
3416 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3417 RegParmTypes.push_back(IntVT);
3418 if (VecVT != MVT::Other)
3419 RegParmTypes.push_back(VecVT);
3421 // Compute the set of forwarded registers. The rest are scratch.
3422 SmallVectorImpl<ForwardedRegister> &Forwards =
3423 FuncInfo->getForwardedMustTailRegParms();
3424 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3426 // Conservatively forward AL on x86_64, since it might be used for varargs.
3427 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
3428 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3429 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3432 // Copy all forwards from physical to virtual registers.
3433 for (ForwardedRegister &FR : Forwards) {
3434 // FIXME: Can we use a less constrained schedule?
3435 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
3436 FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
3437 Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
3441 // Some CCs need callee pop.
3442 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3443 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3444 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3445 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3446 // X86 interrupts must pop the error code (and the alignment padding) if
3447 // present.
3448 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3449 } else {
3450 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3451 // If this is an sret function, the return should pop the hidden pointer.
3452 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3453 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3454 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3455 FuncInfo->setBytesToPopOnReturn(4);
3458 if (!Is64Bit) {
3459 // RegSaveFrameIndex is X86-64 only.
3460 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3461 if (CallConv == CallingConv::X86_FastCall ||
3462 CallConv == CallingConv::X86_ThisCall)
3463 // fastcc functions can't have varargs.
3464 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3467 FuncInfo->setArgumentStackSize(StackSize);
3469 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3470 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3471 if (Personality == EHPersonality::CoreCLR) {
3472 assert(Is64Bit);
3473 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3474 // that we'd prefer this slot be allocated towards the bottom of the frame
3475 // (i.e. near the stack pointer after allocating the frame). Every
3476 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3477 // offset from the bottom of this and each funclet's frame must be the
3478 // same, so the size of funclets' (mostly empty) frames is dictated by
3479 // how far this slot is from the bottom (since they allocate just enough
3480 // space to accommodate holding this slot at the correct offset).
3481 int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3482 EHInfo->PSPSymFrameIdx = PSPSymFI;
3486 if (CallConv == CallingConv::X86_RegCall ||
3487 F.hasFnAttribute("no_caller_saved_registers")) {
3488 MachineRegisterInfo &MRI = MF.getRegInfo();
3489 for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
3490 MRI.disableCalleeSavedRegister(Pair.first);
3493 return Chain;
3496 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3497 SDValue Arg, const SDLoc &dl,
3498 SelectionDAG &DAG,
3499 const CCValAssign &VA,
3500 ISD::ArgFlagsTy Flags) const {
3501 unsigned LocMemOffset = VA.getLocMemOffset();
3502 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3503 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3504 StackPtr, PtrOff);
3505 if (Flags.isByVal())
3506 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3508 return DAG.getStore(
3509 Chain, dl, Arg, PtrOff,
3510 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3513 /// Emit a load of the return address if tail call
3514 /// optimization is performed and it is required.
3515 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3516 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3517 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3518 // Adjust the Return address stack slot.
3519 EVT VT = getPointerTy(DAG.getDataLayout());
3520 OutRetAddr = getReturnAddressFrameIndex(DAG);
3522 // Load the "old" Return address.
3523 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3524 return SDValue(OutRetAddr.getNode(), 1);
3527 /// Emit a store of the return address if tail call
3528 /// optimization is performed and it is required (FPDiff!=0).
3529 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3530 SDValue Chain, SDValue RetAddrFrIdx,
3531 EVT PtrVT, unsigned SlotSize,
3532 int FPDiff, const SDLoc &dl) {
3533 // Store the return address to the appropriate stack slot.
3534 if (!FPDiff) return Chain;
3535 // Calculate the new stack slot for the return address.
3536 int NewReturnAddrFI =
3537 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3538 false);
3539 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3540 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3541 MachinePointerInfo::getFixedStack(
3542 DAG.getMachineFunction(), NewReturnAddrFI));
3543 return Chain;
3546 /// Returns a vector_shuffle mask for a movs{s|d}, movd
3547 /// operation of the specified width.
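/// For example, for a 4-element type the mask is <4, 1, 2, 3>: element 0 is
/// taken from V2 and the remaining elements come from V1.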
3548 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3549 SDValue V2) {
3550 unsigned NumElems = VT.getVectorNumElements();
3551 SmallVector<int, 8> Mask;
3552 Mask.push_back(NumElems);
3553 for (unsigned i = 1; i != NumElems; ++i)
3554 Mask.push_back(i);
3555 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3558 SDValue
3559 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3560 SmallVectorImpl<SDValue> &InVals) const {
3561 SelectionDAG &DAG = CLI.DAG;
3562 SDLoc &dl = CLI.DL;
3563 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3564 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3565 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3566 SDValue Chain = CLI.Chain;
3567 SDValue Callee = CLI.Callee;
3568 CallingConv::ID CallConv = CLI.CallConv;
3569 bool &isTailCall = CLI.IsTailCall;
3570 bool isVarArg = CLI.IsVarArg;
3572 MachineFunction &MF = DAG.getMachineFunction();
3573 bool Is64Bit = Subtarget.is64Bit();
3574 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3575 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3576 bool IsSibcall = false;
3577 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3578 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
3579 const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
3580 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3581 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3582 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3583 const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
3584 bool HasNoCfCheck =
3585 (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3586 const Module *M = MF.getMMI().getModule();
3587 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3589 MachineFunction::CallSiteInfo CSInfo;
3591 if (CallConv == CallingConv::X86_INTR)
3592 report_fatal_error("X86 interrupts may not be called directly");
3594 if (Attr.getValueAsString() == "true")
3595 isTailCall = false;
3597 if (Subtarget.isPICStyleGOT() &&
3598 !MF.getTarget().Options.GuaranteedTailCallOpt) {
3599 // If we are using a GOT, disable tail calls to external symbols with
3600 // default visibility. Tail calling such a symbol requires using a GOT
3601 // relocation, which forces early binding of the symbol. This breaks code
3602 // that requires lazy function symbol resolution. Using musttail or
3603 // GuaranteedTailCallOpt will override this.
3604 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3605 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3606 G->getGlobal()->hasDefaultVisibility()))
3607 isTailCall = false;
3610 bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
3611 if (IsMustTail) {
3612 // Force this to be a tail call. The verifier rules are enough to ensure
3613 // that we can lower this successfully without moving the return address
3614 // around.
3615 isTailCall = true;
3616 } else if (isTailCall) {
3617 // Check if it's really possible to do a tail call.
3618 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3619 isVarArg, SR != NotStructReturn,
3620 MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3621 Outs, OutVals, Ins, DAG);
3623 // Sibcalls are automatically detected tailcalls which do not require
3624 // ABI changes.
3625 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
3626 IsSibcall = true;
3628 if (isTailCall)
3629 ++NumTailCalls;
3632 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3633 "Var args not supported with calling convention fastcc, ghc or hipe");
3635 // Analyze operands of the call, assigning locations to each operand.
3636 SmallVector<CCValAssign, 16> ArgLocs;
3637 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3639 // Allocate shadow area for Win64.
3640 if (IsWin64)
3641 CCInfo.AllocateStack(32, 8);
3643 CCInfo.AnalyzeArguments(Outs, CC_X86);
3645 // In vectorcall calling convention a second pass is required for the HVA
3646 // types.
3647 if (CallingConv::X86_VectorCall == CallConv) {
3648 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3651 // Get a count of how many bytes are to be pushed on the stack.
3652 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3653 if (IsSibcall)
3654 // This is a sibcall. The memory operands are already available in our
3655 // own caller's stack.
3656 NumBytes = 0;
3657 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
3658 canGuaranteeTCO(CallConv))
3659 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3661 int FPDiff = 0;
3662 if (isTailCall && !IsSibcall && !IsMustTail) {
3663 // Lower arguments at fp - stackoffset + fpdiff.
3664 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3666 FPDiff = NumBytesCallerPushed - NumBytes;
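// FPDiff is negative when the callee needs more argument stack space than the
// caller currently provides.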
3668 // Record the delta by which the return address stack slot moves, keeping
3669 // the most negative FPDiff seen so far (i.e. the largest required movement).
3670 if (FPDiff < X86Info->getTCReturnAddrDelta())
3671 X86Info->setTCReturnAddrDelta(FPDiff);
3674 unsigned NumBytesToPush = NumBytes;
3675 unsigned NumBytesToPop = NumBytes;
3677 // If we have an inalloca argument, all stack space has already been allocated
3678 // for us and will be right at the top of the stack. We don't support multiple
3679 // arguments passed in memory when using inalloca.
3680 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3681 NumBytesToPush = 0;
3682 if (!ArgLocs.back().isMemLoc())
3683 report_fatal_error("cannot use inalloca attribute on a register "
3684 "parameter");
3685 if (ArgLocs.back().getLocMemOffset() != 0)
3686 report_fatal_error("any parameter with the inalloca attribute must be "
3687 "the only memory argument");
3690 if (!IsSibcall)
3691 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3692 NumBytes - NumBytesToPush, dl);
3694 SDValue RetAddrFrIdx;
3695 // Load return address for tail calls.
3696 if (isTailCall && FPDiff)
3697 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3698 Is64Bit, FPDiff, dl);
3700 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3701 SmallVector<SDValue, 8> MemOpChains;
3702 SDValue StackPtr;
3704 // The next loop assumes that the locations are in the same order as the
3705 // input arguments.
3706 assert(isSortedByValueNo(ArgLocs) &&
3707 "Argument Location list must be sorted before lowering");
3709 // Walk the register/memloc assignments, inserting copies/loads. In the case
3710 // of tail call optimization, arguments are handled later.
3711 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3712 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3713 ++I, ++OutIndex) {
3714 assert(OutIndex < Outs.size() && "Invalid Out index");
3715 // Skip inalloca arguments, they have already been written.
3716 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3717 if (Flags.isInAlloca())
3718 continue;
3720 CCValAssign &VA = ArgLocs[I];
3721 EVT RegVT = VA.getLocVT();
3722 SDValue Arg = OutVals[OutIndex];
3723 bool isByVal = Flags.isByVal();
3725 // Promote the value if needed.
3726 switch (VA.getLocInfo()) {
3727 default: llvm_unreachable("Unknown loc info!");
3728 case CCValAssign::Full: break;
3729 case CCValAssign::SExt:
3730 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3731 break;
3732 case CCValAssign::ZExt:
3733 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3734 break;
3735 case CCValAssign::AExt:
3736 if (Arg.getValueType().isVector() &&
3737 Arg.getValueType().getVectorElementType() == MVT::i1)
3738 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3739 else if (RegVT.is128BitVector()) {
3740 // Special case: passing MMX values in XMM registers.
3741 Arg = DAG.getBitcast(MVT::i64, Arg);
3742 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3743 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3744 } else
3745 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3746 break;
3747 case CCValAssign::BCvt:
3748 Arg = DAG.getBitcast(RegVT, Arg);
3749 break;
3750 case CCValAssign::Indirect: {
3751 if (isByVal) {
3752 // Memcpy the argument to a temporary stack slot to prevent
3753 // the caller from seeing any modifications the callee may make
3754 // as guaranteed by the `byval` attribute.
3755 int FrameIdx = MF.getFrameInfo().CreateStackObject(
3756 Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
3757 false);
3758 SDValue StackSlot =
3759 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
3760 Chain =
3761 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
3762 // From now on treat this as a regular pointer
3763 Arg = StackSlot;
3764 isByVal = false;
3765 } else {
3766 // Store the argument.
3767 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3768 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3769 Chain = DAG.getStore(
3770 Chain, dl, Arg, SpillSlot,
3771 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3772 Arg = SpillSlot;
3774 break;
3778 if (VA.needsCustom()) {
3779 assert(VA.getValVT() == MVT::v64i1 &&
3780 "Currently the only custom case is when we split v64i1 to 2 regs");
3781 // Split v64i1 value into two registers
3782 Passv64i1ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++I],
3783 Subtarget);
3784 } else if (VA.isRegLoc()) {
3785 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3786 const TargetOptions &Options = DAG.getTarget().Options;
3787 if (Options.EnableDebugEntryValues)
3788 CSInfo.emplace_back(VA.getLocReg(), I);
3789 if (isVarArg && IsWin64) {
3790 // The Win64 ABI requires an argument passed in an XMM reg to also be copied
3791 // to the corresponding shadow reg if the callee is a varargs function.
3792 unsigned ShadowReg = 0;
3793 switch (VA.getLocReg()) {
3794 case X86::XMM0: ShadowReg = X86::RCX; break;
3795 case X86::XMM1: ShadowReg = X86::RDX; break;
3796 case X86::XMM2: ShadowReg = X86::R8; break;
3797 case X86::XMM3: ShadowReg = X86::R9; break;
3799 if (ShadowReg)
3800 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
3802 } else if (!IsSibcall && (!isTailCall || isByVal)) {
3803 assert(VA.isMemLoc());
3804 if (!StackPtr.getNode())
3805 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3806 getPointerTy(DAG.getDataLayout()));
3807 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
3808 dl, DAG, VA, Flags));
3812 if (!MemOpChains.empty())
3813 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
3815 if (Subtarget.isPICStyleGOT()) {
3816 // ELF / PIC requires the GOT pointer to be in the EBX register before
3817 // making function calls via the PLT.
3818 if (!isTailCall) {
3819 RegsToPass.push_back(std::make_pair(
3820 unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
3821 getPointerTy(DAG.getDataLayout()))));
3822 } else {
3823 // If we are tail calling and generating PIC/GOT style code load the
3824 // address of the callee into ECX. The value in ecx is used as target of
3825 // the tail jump. This is done to circumvent the ebx/callee-saved problem
3826 // for tail calls on PIC/GOT architectures. Normally we would just put the
3827 // address of GOT into ebx and then call target@PLT. But for tail calls
3828 // ebx would be restored (since ebx is callee saved) before jumping to the
3829 // target@PLT.
3831 // Note: The actual moving to ECX is done further down.
3832 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3833 if (G && !G->getGlobal()->hasLocalLinkage() &&
3834 G->getGlobal()->hasDefaultVisibility())
3835 Callee = LowerGlobalAddress(Callee, DAG);
3836 else if (isa<ExternalSymbolSDNode>(Callee))
3837 Callee = LowerExternalSymbol(Callee, DAG);
3841 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3842 // From AMD64 ABI document:
3843 // For calls that may call functions that use varargs or stdargs
3844 // (prototype-less calls or calls to functions containing ellipsis (...) in
3845 // the declaration) %al is used as hidden argument to specify the number
3846 // of SSE registers used. The contents of %al do not need to match exactly
3847 // the number of registers, but must be an upper bound on the number of SSE
3848 // registers used and must be in the range 0 - 8 inclusive.
3850 // Count the number of XMM registers allocated.
3851 static const MCPhysReg XMMArgRegs[] = {
3852 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3853 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3855 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3856 assert((Subtarget.hasSSE1() || !NumXMMRegs)
3857 && "SSE registers cannot be used when SSE is disabled");
3859 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3860 DAG.getConstant(NumXMMRegs, dl,
3861 MVT::i8)));
3864 if (isVarArg && IsMustTail) {
3865 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3866 for (const auto &F : Forwards) {
3867 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3868 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3872 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3873 // don't need this because the eligibility check rejects calls that require
3874 // shuffling arguments passed in memory.
3875 if (!IsSibcall && isTailCall) {
3876 // Force all the incoming stack arguments to be loaded from the stack
3877 // before any new outgoing arguments are stored to the stack, because the
3878 // outgoing stack slots may alias the incoming argument stack slots, and
3879 // the alias isn't otherwise explicit. This is slightly more conservative
3880 // than necessary, because it means that each store effectively depends
3881 // on every argument instead of just those arguments it would clobber.
3882 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3884 SmallVector<SDValue, 8> MemOpChains2;
3885 SDValue FIN;
3886 int FI = 0;
3887 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
3888 ++I, ++OutsIndex) {
3889 CCValAssign &VA = ArgLocs[I];
3891 if (VA.isRegLoc()) {
3892 if (VA.needsCustom()) {
3893 assert((CallConv == CallingConv::X86_RegCall) &&
3894 "Expecting custom case only in regcall calling convention");
3895 // This means that we are in a special case where one argument was
3896 // passed through two register locations - skip the next location.
3897 ++I;
3900 continue;
3903 assert(VA.isMemLoc());
3904 SDValue Arg = OutVals[OutsIndex];
3905 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
3906 // Skip inalloca arguments. They don't require any work.
3907 if (Flags.isInAlloca())
3908 continue;
3909 // Create frame index.
3910 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3911 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3912 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
3913 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3915 if (Flags.isByVal()) {
3916 // Copy relative to framepointer.
3917 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
3918 if (!StackPtr.getNode())
3919 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3920 getPointerTy(DAG.getDataLayout()));
3921 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3922 StackPtr, Source);
3924 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3925 ArgChain,
3926 Flags, DAG, dl));
3927 } else {
3928 // Store relative to framepointer.
3929 MemOpChains2.push_back(DAG.getStore(
3930 ArgChain, dl, Arg, FIN,
3931 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
3935 if (!MemOpChains2.empty())
3936 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3938 // Store the return address to the appropriate stack slot.
3939 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3940 getPointerTy(DAG.getDataLayout()),
3941 RegInfo->getSlotSize(), FPDiff, dl);
3944 // Build a sequence of copy-to-reg nodes chained together with token chain
3945 // and flag operands which copy the outgoing args into registers.
3946 SDValue InFlag;
3947 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3948 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3949 RegsToPass[i].second, InFlag);
3950 InFlag = Chain.getValue(1);
3953 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3954 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3955 // In the 64-bit large code model, we have to make all calls
3956 // through a register, since the call instruction's 32-bit
3957 // pc-relative offset may not be large enough to hold the whole
3958 // address.
3959 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
3960 Callee->getOpcode() == ISD::ExternalSymbol) {
3961 // Lower direct calls to global addresses and external symbols. Setting
3962 // ForCall to true here has the effect of removing WrapperRIP when possible
3963 // to allow direct calls to be selected without first materializing the
3964 // address into a register.
3965 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
3966 } else if (Subtarget.isTarget64BitILP32() &&
3967 Callee->getValueType(0) == MVT::i32) {
3968 // Zero-extend the 32-bit Callee address into a 64-bit one according to the x32 ABI.
3969 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3972 // Returns a chain & a flag for retval copy to use.
3973 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3974 SmallVector<SDValue, 8> Ops;
3976 if (!IsSibcall && isTailCall) {
3977 Chain = DAG.getCALLSEQ_END(Chain,
3978 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
3979 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
3980 InFlag = Chain.getValue(1);
3983 Ops.push_back(Chain);
3984 Ops.push_back(Callee);
3986 if (isTailCall)
3987 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
3989 // Add argument registers to the end of the list so that they are known live
3990 // into the call.
3991 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3992 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3993 RegsToPass[i].second.getValueType()));
3995 // Add a register mask operand representing the call-preserved registers.
3996 // If HasNCSR is asserted (the NoCallerSavedRegisters attribute is present),
3997 // then we use the X86_INTR calling convention because it has the same CSR
3998 // mask (same preserved registers).
3999 const uint32_t *Mask = RegInfo->getCallPreservedMask(
4000 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4001 assert(Mask && "Missing call preserved mask for calling convention");
4003 // If this is an invoke in a 32-bit function using a funclet-based
4004 // personality, assume the function clobbers all registers. If an exception
4005 // is thrown, the runtime will not restore CSRs.
4006 // FIXME: Model this more precisely so that we can register allocate across
4007 // the normal edge and spill and fill across the exceptional edge.
4008 if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
4009 const Function &CallerFn = MF.getFunction();
4010 EHPersonality Pers =
4011 CallerFn.hasPersonalityFn()
4012 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4013 : EHPersonality::Unknown;
4014 if (isFuncletEHPersonality(Pers))
4015 Mask = RegInfo->getNoPreservedMask();
4018 // Define a new register mask from the existing mask.
4019 uint32_t *RegMask = nullptr;
4021 // In some calling conventions we need to remove the used physical registers
4022 // from the reg mask.
4023 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4024 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4026 // Allocate a new Reg Mask and copy Mask.
4027 RegMask = MF.allocateRegMask();
4028 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4029 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4031 // Make sure all sub registers of the argument registers are reset
4032 // in the RegMask.
4033 for (auto const &RegPair : RegsToPass)
4034 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4035 SubRegs.isValid(); ++SubRegs)
4036 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4038 // Create the RegMask Operand according to our updated mask.
4039 Ops.push_back(DAG.getRegisterMask(RegMask));
4040 } else {
4041 // Create the RegMask Operand according to the static mask.
4042 Ops.push_back(DAG.getRegisterMask(Mask));
4045 if (InFlag.getNode())
4046 Ops.push_back(InFlag);
4048 if (isTailCall) {
4049 // We used to do:
4050 //// If this is the first return lowered for this function, add the regs
4051 //// to the liveout set for the function.
4052 // This isn't right, although it's probably harmless on x86; liveouts
4053 // should be computed from returns not tail calls. Consider a void
4054 // function making a tail call to a function returning int.
4055 MF.getFrameInfo().setHasTailCall();
4056 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4057 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4058 return Ret;
4061 if (HasNoCfCheck && IsCFProtectionSupported) {
4062 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4063 } else {
4064 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4066 InFlag = Chain.getValue(1);
4067 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4069 // Create the CALLSEQ_END node.
4070 unsigned NumBytesForCalleeToPop;
4071 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4072 DAG.getTarget().Options.GuaranteedTailCallOpt))
4073 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4074 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4075 !Subtarget.getTargetTriple().isOSMSVCRT() &&
4076 SR == StackStructReturn)
4077 // If this is a call to a struct-return function, the callee
4078 // pops the hidden struct pointer, so we have to push it back.
4079 // This is common for Darwin/X86, Linux & Mingw32 targets.
4080 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4081 NumBytesForCalleeToPop = 4;
4082 else
4083 NumBytesForCalleeToPop = 0; // Callee pops nothing.
4085 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
4086 // No need to reset the stack after the call if the call doesn't return. To
4087 // make the MI verifier happy, we'll pretend the callee does it for us.
4088 NumBytesForCalleeToPop = NumBytes;
4091 // Returns a flag for retval copy to use.
4092 if (!IsSibcall) {
4093 Chain = DAG.getCALLSEQ_END(Chain,
4094 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4095 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4096 true),
4097 InFlag, dl);
4098 InFlag = Chain.getValue(1);
4101 // Handle result values, copying them out of physregs into vregs that we
4102 // return.
4103 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4104 InVals, RegMask);
4107 //===----------------------------------------------------------------------===//
4108 // Fast Calling Convention (tail call) implementation
4109 //===----------------------------------------------------------------------===//
4111 // Like stdcall, the callee cleans up the arguments, except that ECX is
4112 // reserved for storing the tail called function address. Only 2 registers are
4113 // free for argument passing (inreg). Tail call optimization is performed
4114 // provided:
4115 // * tailcallopt is enabled
4116 // * caller/callee are fastcc
4117 // On X86_64 architecture with GOT-style position independent code only local
4118 // (within module) calls are supported at the moment.
4119 // To keep the stack aligned according to the platform ABI, the function
4120 // GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
4121 // of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
4122 // If a tail-called callee has more arguments than the caller, the
4123 // caller needs to make sure that there is room to move the RETADDR to. This is
4124 // achieved by reserving an area the size of the argument delta right after the
4125 // original RETADDR, but before the saved framepointer or the spilled registers
4126 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
4127 // stack layout:
4128 // arg1
4129 // arg2
4130 // RETADDR
4131 // [ new RETADDR
4132 // move area ]
4133 // (possible EBP)
4134 // ESI
4135 // EDI
4136 // local1 ..
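// Illustrative example (not from the original source): with 4-byte int
// arguments, the caller above provides 8 bytes of incoming argument space but
// the callee needs 16, so an 8-byte move area is reserved directly below the
// original RETADDR and the return address is moved into it before the
// tail-call jump.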
4138 /// Round up the stack size so that it is, e.g., of the form 16n + 12 to satisfy a
4139 /// 16-byte alignment requirement with a 4-byte return-address slot.
4140 unsigned
4141 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
4142 SelectionDAG& DAG) const {
4143 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4144 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
4145 unsigned StackAlignment = TFI.getStackAlignment();
4146 uint64_t AlignMask = StackAlignment - 1;
4147 int64_t Offset = StackSize;
4148 unsigned SlotSize = RegInfo->getSlotSize();
4149 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
4150 // The low bits are at most StackAlignment - SlotSize, so just add the difference.
4151 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
4152 } else {
4153 // Mask out the lower bits, then add one full stack alignment plus (StackAlignment - SlotSize) bytes.
4154 Offset = ((~AlignMask) & Offset) + StackAlignment +
4155 (StackAlignment-SlotSize);
4157 return Offset;
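// Illustrative worked example (not from the original source), assuming
// StackAlignment == 16 and SlotSize == 4 so results have the form 16n + 12:
//   StackSize = 20: (20 & 15) == 4  <= 12, so Offset = 20 + (12 - 4)        == 28.
//   StackSize = 30: (30 & 15) == 14 >  12, so Offset = (30 & ~15) + 16 + 12 == 44.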
4160 /// Return true if the given stack call argument is already available in the
4161 /// same position (relatively) of the caller's incoming argument stack.
4162 static
4163 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4164 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4165 const X86InstrInfo *TII, const CCValAssign &VA) {
4166 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4168 for (;;) {
4169 // Look through nodes that don't alter the bits of the incoming value.
4170 unsigned Op = Arg.getOpcode();
4171 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4172 Arg = Arg.getOperand(0);
4173 continue;
4175 if (Op == ISD::TRUNCATE) {
4176 const SDValue &TruncInput = Arg.getOperand(0);
4177 if (TruncInput.getOpcode() == ISD::AssertZext &&
4178 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4179 Arg.getValueType()) {
4180 Arg = TruncInput.getOperand(0);
4181 continue;
4184 break;
4187 int FI = INT_MAX;
4188 if (Arg.getOpcode() == ISD::CopyFromReg) {
4189 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4190 if (!TargetRegisterInfo::isVirtualRegister(VR))
4191 return false;
4192 MachineInstr *Def = MRI->getVRegDef(VR);
4193 if (!Def)
4194 return false;
4195 if (!Flags.isByVal()) {
4196 if (!TII->isLoadFromStackSlot(*Def, FI))
4197 return false;
4198 } else {
4199 unsigned Opcode = Def->getOpcode();
4200 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4201 Opcode == X86::LEA64_32r) &&
4202 Def->getOperand(1).isFI()) {
4203 FI = Def->getOperand(1).getIndex();
4204 Bytes = Flags.getByValSize();
4205 } else
4206 return false;
4208 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4209 if (Flags.isByVal())
4210 // ByVal argument is passed in as a pointer but it's now being
4211 // dereferenced. e.g.
4212 // define @foo(%struct.X* %A) {
4213 // tail call @bar(%struct.X* byval %A)
4214 // }
4215 return false;
4216 SDValue Ptr = Ld->getBasePtr();
4217 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4218 if (!FINode)
4219 return false;
4220 FI = FINode->getIndex();
4221 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4222 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4223 FI = FINode->getIndex();
4224 Bytes = Flags.getByValSize();
4225 } else
4226 return false;
4228 assert(FI != INT_MAX);
4229 if (!MFI.isFixedObjectIndex(FI))
4230 return false;
4232 if (Offset != MFI.getObjectOffset(FI))
4233 return false;
4235 // If this is not byval, check that the argument stack object is immutable.
4236 // inalloca and argument copy elision can create mutable argument stack
4237 // objects. Byval objects can be mutated, but a byval call intends to pass the
4238 // mutated memory.
4239 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4240 return false;
4242 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4243 // If the argument location is wider than the argument type, check that any
4244 // extension flags match.
4245 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4246 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4247 return false;
4251 return Bytes == MFI.getObjectSize(FI);
4254 /// Check whether the call is eligible for tail call optimization. Targets
4255 /// that want to do tail call optimization should implement this function.
4256 bool X86TargetLowering::IsEligibleForTailCallOptimization(
4257 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4258 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4259 const SmallVectorImpl<ISD::OutputArg> &Outs,
4260 const SmallVectorImpl<SDValue> &OutVals,
4261 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4262 if (!mayTailCallThisCC(CalleeCC))
4263 return false;
4265 // If -tailcallopt is specified, make fastcc functions tail-callable.
4266 MachineFunction &MF = DAG.getMachineFunction();
4267 const Function &CallerF = MF.getFunction();
4269 // If the function return type is x86_fp80 and the callee return type is not,
4270 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4271 // perform a tailcall optimization here.
4272 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4273 return false;
4275 CallingConv::ID CallerCC = CallerF.getCallingConv();
4276 bool CCMatch = CallerCC == CalleeCC;
4277 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4278 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4280 // Win64 functions have extra shadow space for argument homing. Don't do the
4281 // sibcall if the caller and callee have mismatched expectations for this
4282 // space.
4283 if (IsCalleeWin64 != IsCallerWin64)
4284 return false;
4286 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
4287 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4288 return true;
4289 return false;
4292 // Look for obvious safe cases to perform tail call optimization that do not
4293 // require ABI changes. This is what gcc calls sibcall.
4295 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4296 // emit a special epilogue.
4297 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4298 if (RegInfo->needsStackRealignment(MF))
4299 return false;
4301 // Also avoid sibcall optimization if either caller or callee uses struct
4302 // return semantics.
4303 if (isCalleeStructRet || isCallerStructRet)
4304 return false;
4306 // Do not sibcall optimize vararg calls unless all arguments are passed via
4307 // registers.
4308 LLVMContext &C = *DAG.getContext();
4309 if (isVarArg && !Outs.empty()) {
4310 // Optimizing for varargs on Win64 is unlikely to be safe without
4311 // additional testing.
4312 if (IsCalleeWin64 || IsCallerWin64)
4313 return false;
4315 SmallVector<CCValAssign, 16> ArgLocs;
4316 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4318 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4319 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4320 if (!ArgLocs[i].isRegLoc())
4321 return false;
4324 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4325 // stack. Therefore, if it's not used by the call it is not safe to optimize
4326 // this into a sibcall.
4327 bool Unused = false;
4328 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4329 if (!Ins[i].Used) {
4330 Unused = true;
4331 break;
4334 if (Unused) {
4335 SmallVector<CCValAssign, 16> RVLocs;
4336 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4337 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4338 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4339 CCValAssign &VA = RVLocs[i];
4340 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4341 return false;
4345 // Check that the call results are passed in the same way.
4346 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4347 RetCC_X86, RetCC_X86))
4348 return false;
4349 // The callee has to preserve all registers the caller needs to preserve.
4350 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4351 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4352 if (!CCMatch) {
4353 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4354 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4355 return false;
4358 unsigned StackArgsSize = 0;
4360 // If the callee takes no arguments then go on to check the results of the
4361 // call.
4362 if (!Outs.empty()) {
4363 // Check if stack adjustment is needed. For now, do not do this if any
4364 // argument is passed on the stack.
4365 SmallVector<CCValAssign, 16> ArgLocs;
4366 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4368 // Allocate shadow area for Win64
4369 if (IsCalleeWin64)
4370 CCInfo.AllocateStack(32, 8);
4372 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4373 StackArgsSize = CCInfo.getNextStackOffset();
4375 if (CCInfo.getNextStackOffset()) {
4376 // Check if the arguments are already laid out in the right way as
4377 // the caller's fixed stack objects.
4378 MachineFrameInfo &MFI = MF.getFrameInfo();
4379 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4380 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4381 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4382 CCValAssign &VA = ArgLocs[i];
4383 SDValue Arg = OutVals[i];
4384 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4385 if (VA.getLocInfo() == CCValAssign::Indirect)
4386 return false;
4387 if (!VA.isRegLoc()) {
4388 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4389 MFI, MRI, TII, VA))
4390 return false;
4395 bool PositionIndependent = isPositionIndependent();
4396 // If the tailcall address may be in a register, then make sure it's
4397 // possible to register allocate for it. In 32-bit, the call address can
4398 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4399 // callee-saved registers are restored. These happen to be the same
4400 // registers used to pass 'inreg' arguments so watch out for those.
4401 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4402 !isa<ExternalSymbolSDNode>(Callee)) ||
4403 PositionIndependent)) {
4404 unsigned NumInRegs = 0;
4405 // In PIC we need an extra register to formulate the address computation
4406 // for the callee.
4407 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4409 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4410 CCValAssign &VA = ArgLocs[i];
4411 if (!VA.isRegLoc())
4412 continue;
4413 unsigned Reg = VA.getLocReg();
4414 switch (Reg) {
4415 default: break;
4416 case X86::EAX: case X86::EDX: case X86::ECX:
4417 if (++NumInRegs == MaxInRegs)
4418 return false;
4419 break;
4424 const MachineRegisterInfo &MRI = MF.getRegInfo();
4425 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4426 return false;
4429 bool CalleeWillPop =
4430 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4431 MF.getTarget().Options.GuaranteedTailCallOpt);
4433 if (unsigned BytesToPop =
4434 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4435 // If we have bytes to pop, the callee must pop them.
4436 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4437 if (!CalleePopMatches)
4438 return false;
4439 } else if (CalleeWillPop && StackArgsSize > 0) {
4440 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4441 return false;
4444 return true;
4447 FastISel *
4448 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4449 const TargetLibraryInfo *libInfo) const {
4450 return X86::createFastISel(funcInfo, libInfo);
4453 //===----------------------------------------------------------------------===//
4454 // Other Lowering Hooks
4455 //===----------------------------------------------------------------------===//
4457 static bool MayFoldLoad(SDValue Op) {
4458 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4461 static bool MayFoldIntoStore(SDValue Op) {
4462 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4465 static bool MayFoldIntoZeroExtend(SDValue Op) {
4466 if (Op.hasOneUse()) {
4467 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4468 return (ISD::ZERO_EXTEND == Opcode);
4470 return false;
4473 static bool isTargetShuffle(unsigned Opcode) {
4474 switch(Opcode) {
4475 default: return false;
4476 case X86ISD::BLENDI:
4477 case X86ISD::PSHUFB:
4478 case X86ISD::PSHUFD:
4479 case X86ISD::PSHUFHW:
4480 case X86ISD::PSHUFLW:
4481 case X86ISD::SHUFP:
4482 case X86ISD::INSERTPS:
4483 case X86ISD::EXTRQI:
4484 case X86ISD::INSERTQI:
4485 case X86ISD::PALIGNR:
4486 case X86ISD::VSHLDQ:
4487 case X86ISD::VSRLDQ:
4488 case X86ISD::MOVLHPS:
4489 case X86ISD::MOVHLPS:
4490 case X86ISD::MOVSHDUP:
4491 case X86ISD::MOVSLDUP:
4492 case X86ISD::MOVDDUP:
4493 case X86ISD::MOVSS:
4494 case X86ISD::MOVSD:
4495 case X86ISD::UNPCKL:
4496 case X86ISD::UNPCKH:
4497 case X86ISD::VBROADCAST:
4498 case X86ISD::VPERMILPI:
4499 case X86ISD::VPERMILPV:
4500 case X86ISD::VPERM2X128:
4501 case X86ISD::SHUF128:
4502 case X86ISD::VPERMIL2:
4503 case X86ISD::VPERMI:
4504 case X86ISD::VPPERM:
4505 case X86ISD::VPERMV:
4506 case X86ISD::VPERMV3:
4507 case X86ISD::VZEXT_MOVL:
4508 return true;
4512 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4513 switch (Opcode) {
4514 default: return false;
4515 // Target Shuffles.
4516 case X86ISD::PSHUFB:
4517 case X86ISD::VPERMILPV:
4518 case X86ISD::VPERMIL2:
4519 case X86ISD::VPPERM:
4520 case X86ISD::VPERMV:
4521 case X86ISD::VPERMV3:
4522 return true;
4523 // 'Faux' Target Shuffles.
4524 case ISD::OR:
4525 case ISD::AND:
4526 case X86ISD::ANDNP:
4527 return true;
4531 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4532 MachineFunction &MF = DAG.getMachineFunction();
4533 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4534 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4535 int ReturnAddrIndex = FuncInfo->getRAIndex();
4537 if (ReturnAddrIndex == 0) {
4538 // Set up a frame object for the return address.
4539 unsigned SlotSize = RegInfo->getSlotSize();
4540 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4541 -(int64_t)SlotSize,
4542 false);
4543 FuncInfo->setRAIndex(ReturnAddrIndex);
4546 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4549 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4550 bool hasSymbolicDisplacement) {
4551 // Offset should fit into 32 bit immediate field.
4552 if (!isInt<32>(Offset))
4553 return false;
4555 // If we don't have a symbolic displacement - we don't have any extra
4556 // restrictions.
4557 if (!hasSymbolicDisplacement)
4558 return true;
4560 // FIXME: Some tweaks might be needed for medium code model.
4561 if (M != CodeModel::Small && M != CodeModel::Kernel)
4562 return false;
4564 // For the small code model we assume that the last object ends at least 16MB before
4565 // the 31-bit boundary. We may also accept fairly large negative constants, knowing
4566 // that all objects are in the positive half of the address space.
4567 if (M == CodeModel::Small && Offset < 16*1024*1024)
4568 return true;
4570 // For the kernel code model we know that all objects reside in the negative half
4571 // of the 32-bit address space. We must not accept negative offsets, since they may
4572 // push the address out of that range, but we may accept fairly large positive ones.
4573 if (M == CodeModel::Kernel && Offset >= 0)
4574 return true;
4576 return false;
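// Illustrative examples (not from the original source) for a symbolic displacement:
//   Small code model,  Offset ==  4MB -> suitable (offsets below 16MB are assumed safe).
//   Small code model,  Offset == 20MB -> rejected.
//   Kernel code model, Offset ==  -8  -> rejected (only non-negative offsets are accepted).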
4579 /// Determines whether the callee is required to pop its own arguments.
4580 /// Callee pop is necessary to support tail calls.
4581 bool X86::isCalleePop(CallingConv::ID CallingConv,
4582 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4583 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4584 // can guarantee TCO.
4585 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4586 return true;
4588 switch (CallingConv) {
4589 default:
4590 return false;
4591 case CallingConv::X86_StdCall:
4592 case CallingConv::X86_FastCall:
4593 case CallingConv::X86_ThisCall:
4594 case CallingConv::X86_VectorCall:
4595 return !is64Bit;
4599 /// Return true if the condition is an unsigned comparison operation.
4600 static bool isX86CCUnsigned(unsigned X86CC) {
4601 switch (X86CC) {
4602 default:
4603 llvm_unreachable("Invalid integer condition!");
4604 case X86::COND_E:
4605 case X86::COND_NE:
4606 case X86::COND_B:
4607 case X86::COND_A:
4608 case X86::COND_BE:
4609 case X86::COND_AE:
4610 return true;
4611 case X86::COND_G:
4612 case X86::COND_GE:
4613 case X86::COND_L:
4614 case X86::COND_LE:
4615 return false;
4619 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4620 switch (SetCCOpcode) {
4621 default: llvm_unreachable("Invalid integer condition!");
4622 case ISD::SETEQ: return X86::COND_E;
4623 case ISD::SETGT: return X86::COND_G;
4624 case ISD::SETGE: return X86::COND_GE;
4625 case ISD::SETLT: return X86::COND_L;
4626 case ISD::SETLE: return X86::COND_LE;
4627 case ISD::SETNE: return X86::COND_NE;
4628 case ISD::SETULT: return X86::COND_B;
4629 case ISD::SETUGT: return X86::COND_A;
4630 case ISD::SETULE: return X86::COND_BE;
4631 case ISD::SETUGE: return X86::COND_AE;
4635 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4636 /// condition code, returning the condition code and the LHS/RHS of the
4637 /// comparison to make.
4638 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4639 bool isFP, SDValue &LHS, SDValue &RHS,
4640 SelectionDAG &DAG) {
4641 if (!isFP) {
4642 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4643 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4644 // X > -1 -> X == 0, jump !sign.
4645 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4646 return X86::COND_NS;
4648 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4649 // X < 0 -> X == 0, jump on sign.
4650 return X86::COND_S;
4652 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
4653 // X < 1 -> X <= 0
4654 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4655 return X86::COND_LE;
4659 return TranslateIntegerX86CC(SetCCOpcode);
4662 // First determine if it is required or is profitable to flip the operands.
4664 // If LHS is a foldable load, but RHS is not, flip the condition.
4665 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4666 !ISD::isNON_EXTLoad(RHS.getNode())) {
4667 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4668 std::swap(LHS, RHS);
4671 switch (SetCCOpcode) {
4672 default: break;
4673 case ISD::SETOLT:
4674 case ISD::SETOLE:
4675 case ISD::SETUGT:
4676 case ISD::SETUGE:
4677 std::swap(LHS, RHS);
4678 break;
4681 // On a floating point condition, the flags are set as follows:
4682 // ZF PF CF op
4683 // 0 | 0 | 0 | X > Y
4684 // 0 | 0 | 1 | X < Y
4685 // 1 | 0 | 0 | X == Y
4686 // 1 | 1 | 1 | unordered
4687 switch (SetCCOpcode) {
4688 default: llvm_unreachable("Condcode should be pre-legalized away");
4689 case ISD::SETUEQ:
4690 case ISD::SETEQ: return X86::COND_E;
4691 case ISD::SETOLT: // flipped
4692 case ISD::SETOGT:
4693 case ISD::SETGT: return X86::COND_A;
4694 case ISD::SETOLE: // flipped
4695 case ISD::SETOGE:
4696 case ISD::SETGE: return X86::COND_AE;
4697 case ISD::SETUGT: // flipped
4698 case ISD::SETULT:
4699 case ISD::SETLT: return X86::COND_B;
4700 case ISD::SETUGE: // flipped
4701 case ISD::SETULE:
4702 case ISD::SETLE: return X86::COND_BE;
4703 case ISD::SETONE:
4704 case ISD::SETNE: return X86::COND_NE;
4705 case ISD::SETUO: return X86::COND_P;
4706 case ISD::SETO: return X86::COND_NP;
4707 case ISD::SETOEQ:
4708 case ISD::SETUNE: return X86::COND_INVALID;
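// Illustrative example (not from the original source): an ordered (x < y)
// compare (ISD::SETOLT) has its operands swapped above and maps to COND_A,
// i.e. the lowered compare tests y against x and branches on CF == 0 && ZF == 0,
// which is correctly false for unordered inputs (where ZF, PF and CF are all set).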
4712 /// Is there a floating point cmov for the specific X86 condition code?
4713 /// The current x86 ISA includes the following FP cmov instructions:
4714 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4715 static bool hasFPCMov(unsigned X86CC) {
4716 switch (X86CC) {
4717 default:
4718 return false;
4719 case X86::COND_B:
4720 case X86::COND_BE:
4721 case X86::COND_E:
4722 case X86::COND_P:
4723 case X86::COND_A:
4724 case X86::COND_AE:
4725 case X86::COND_NE:
4726 case X86::COND_NP:
4727 return true;
4732 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4733 const CallInst &I,
4734 MachineFunction &MF,
4735 unsigned Intrinsic) const {
4737 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4738 if (!IntrData)
4739 return false;
4741 Info.flags = MachineMemOperand::MONone;
4742 Info.offset = 0;
4744 switch (IntrData->Type) {
4745 case TRUNCATE_TO_MEM_VI8:
4746 case TRUNCATE_TO_MEM_VI16:
4747 case TRUNCATE_TO_MEM_VI32: {
4748 Info.opc = ISD::INTRINSIC_VOID;
4749 Info.ptrVal = I.getArgOperand(0);
4750 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4751 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4752 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4753 ScalarVT = MVT::i8;
4754 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4755 ScalarVT = MVT::i16;
4756 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4757 ScalarVT = MVT::i32;
4759 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4760 Info.align = 1;
4761 Info.flags |= MachineMemOperand::MOStore;
4762 break;
4764 case GATHER:
4765 case GATHER_AVX2: {
4766 Info.opc = ISD::INTRINSIC_W_CHAIN;
4767 Info.ptrVal = nullptr;
4768 MVT DataVT = MVT::getVT(I.getType());
4769 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4770 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4771 IndexVT.getVectorNumElements());
4772 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4773 Info.align = 1;
4774 Info.flags |= MachineMemOperand::MOLoad;
4775 break;
4777 case SCATTER: {
4778 Info.opc = ISD::INTRINSIC_VOID;
4779 Info.ptrVal = nullptr;
4780 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4781 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4782 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4783 IndexVT.getVectorNumElements());
4784 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4785 Info.align = 1;
4786 Info.flags |= MachineMemOperand::MOStore;
4787 break;
4789 default:
4790 return false;
4793 return true;
4796 /// Returns true if the target can instruction select the
4797 /// specified FP immediate natively. If false, the legalizer will
4798 /// materialize the FP immediate as a load from a constant pool.
4799 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4800 bool ForCodeSize) const {
4801 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
4802 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
4803 return true;
4805 return false;
4808 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
4809 ISD::LoadExtType ExtTy,
4810 EVT NewVT) const {
4811 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
4812 // relocation target a movq or addq instruction: don't let the load shrink.
4813 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
4814 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
4815 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
4816 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
4818 // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
4819 // those uses are extracted directly into a store, then the extract + store
4820 // can be store-folded. Therefore, it's probably not worth splitting the load.
4821 EVT VT = Load->getValueType(0);
4822 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
4823 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
4824 // Skip uses of the chain value. Result 0 of the node is the load value.
4825 if (UI.getUse().getResNo() != 0)
4826 continue;
4828 // If this use is not an extract + store, it's probably worth splitting.
4829 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
4830 UI->use_begin()->getOpcode() != ISD::STORE)
4831 return true;
4833 // All non-chain uses are extract + store.
4834 return false;
4837 return true;
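// Illustrative example (not from the original source): a 256-bit load whose
// only non-chain uses are two EXTRACT_SUBVECTOR nodes, each feeding a single
// store, returns false above, so the load is kept whole and the extracts can
// be folded into the stores.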
4840 /// Returns true if it is beneficial to convert a load of a constant
4841 /// to just the constant itself.
4842 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
4843 Type *Ty) const {
4844 assert(Ty->isIntegerTy());
4846 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4847 if (BitSize == 0 || BitSize > 64)
4848 return false;
4849 return true;
4852 bool X86TargetLowering::reduceSelectOfFPConstantLoads(bool IsFPSetCC) const {
4853 // If we are using XMM registers in the ABI and the condition of the select is
4854 // a floating-point compare and we have blendv or conditional move, then it is
4855 // cheaper to select instead of doing a cross-register move and creating a
4856 // load that depends on the compare result.
4857 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
4860 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
4861 // TODO: It might be a win to ease or lift this restriction, but the generic
4862 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
4863 if (VT.isVector() && Subtarget.hasAVX512())
4864 return false;
4866 return true;
4869 bool X86TargetLowering::decomposeMulByConstant(EVT VT, SDValue C) const {
4870 // TODO: We handle scalars using custom code, but generic combining could make
4871 // that unnecessary.
4872 APInt MulC;
4873 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
4874 return false;
4876 // If vector multiply is legal, assume that's faster than shl + add/sub.
4877 // TODO: Multiply is a complex op with higher latency and lower throughput in
4878 // most implementations, so this check could be loosened based on type
4879 // and/or a CPU attribute.
4880 if (isOperationLegal(ISD::MUL, VT))
4881 return false;
4883 // shl+add, shl+sub, shl+add+neg
4884 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
4885 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
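// Illustrative examples (not from the original source) of splat constants the
// check above accepts:
//   MulC ==  5: (5 - 1)   == 4 is a power of 2 -> x * 5  == (x << 2) + x.
//   MulC ==  7: (7 + 1)   == 8 is a power of 2 -> x * 7  == (x << 3) - x.
//   MulC == -9: -(-9 + 1) == 8 is a power of 2 -> x * -9 == -((x << 3) + x).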
4888 bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
4889 bool IsSigned) const {
4890 // f80 UINT_TO_FP is more efficient using Strict code if FCMOV is available.
4891 return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
4894 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
4895 unsigned Index) const {
4896 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
4897 return false;
4899 // Mask vectors support all subregister combinations and operations that
4900 // extract half of the vector.
4901 if (ResVT.getVectorElementType() == MVT::i1)
4902 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
4903 (Index == ResVT.getVectorNumElements()));
4905 return (Index % ResVT.getVectorNumElements()) == 0;
4908 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
4909 unsigned Opc = VecOp.getOpcode();
4911 // Assume target opcodes can't be scalarized.
4912 // TODO - do we have any exceptions?
4913 if (Opc >= ISD::BUILTIN_OP_END)
4914 return false;
4916 // If the vector op is not supported, try to convert to scalar.
4917 EVT VecVT = VecOp.getValueType();
4918 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
4919 return true;
4921 // If the vector op is supported, but the scalar op is not, the transform may
4922 // not be worthwhile.
4923 EVT ScalarVT = VecVT.getScalarType();
4924 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
4927 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
4928 // TODO: Allow vectors?
4929 if (VT.isVector())
4930 return false;
4931 return VT.isSimple() || !isOperationExpand(Opcode, VT);
4934 bool X86TargetLowering::isCheapToSpeculateCttz() const {
4935 // Speculate cttz only if we can directly use TZCNT.
4936 return Subtarget.hasBMI();
4939 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
4940 // Speculate ctlz only if we can directly use LZCNT.
4941 return Subtarget.hasLZCNT();
4944 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT,
4945 EVT BitcastVT) const {
4946 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
4947 BitcastVT.getVectorElementType() == MVT::i1)
4948 return false;
4950 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
4951 return false;
4953 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT);
4956 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
4957 const SelectionDAG &DAG) const {
4958 // Do not merge to float value size (128 bits) if no implicit
4959 // float attribute is set.
4960 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
4961 Attribute::NoImplicitFloat);
4963 if (NoFloat) {
4964 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
4965 return (MemVT.getSizeInBits() <= MaxIntSize);
4967 // Make sure we don't merge greater than our preferred vector
4968 // width.
4969 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
4970 return false;
4971 return true;
4974 bool X86TargetLowering::isCtlzFast() const {
4975 return Subtarget.hasFastLZCNT();
4978 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
4979 const Instruction &AndI) const {
4980 return true;
4983 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
4984 EVT VT = Y.getValueType();
4986 if (VT.isVector())
4987 return false;
4989 if (!Subtarget.hasBMI())
4990 return false;
4992 // There are only 32-bit and 64-bit forms for 'andn'.
4993 if (VT != MVT::i32 && VT != MVT::i64)
4994 return false;
4996 return !isa<ConstantSDNode>(Y);
4999 bool X86TargetLowering::hasAndNot(SDValue Y) const {
5000 EVT VT = Y.getValueType();
5002 if (!VT.isVector())
5003 return hasAndNotCompare(Y);
5005 // Vector.
5007 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5008 return false;
5010 if (VT == MVT::v4i32)
5011 return true;
5013 return Subtarget.hasSSE2();
5016 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5017 const SDNode *N, CombineLevel Level) const {
5018 assert(((N->getOpcode() == ISD::SHL &&
5019 N->getOperand(0).getOpcode() == ISD::SRL) ||
5020 (N->getOpcode() == ISD::SRL &&
5021 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5022 "Expected shift-shift mask");
5023 EVT VT = N->getValueType(0);
5024 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5025 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5026 // Only fold if the shift values are equal - so it folds to AND.
5027 // TODO - we should fold if either is a non-uniform vector but we don't do
5028 // the fold for non-splats yet.
5029 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5031 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5034 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5035 EVT VT = Y.getValueType();
5037 // For vectors, we don't have a preference, but we probably want a mask.
5038 if (VT.isVector())
5039 return false;
5041 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5042 if (VT == MVT::i64 && !Subtarget.is64Bit())
5043 return false;
5045 return true;
5048 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5049 // Any legal vector type can be splatted more efficiently than
5050 // loading/spilling from memory.
5051 return isTypeLegal(VT);
5054 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5055 MVT VT = MVT::getIntegerVT(NumBits);
5056 if (isTypeLegal(VT))
5057 return VT;
5059 // PMOVMSKB can handle this.
5060 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5061 return MVT::v16i8;
5063 // VPMOVMSKB can handle this.
5064 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5065 return MVT::v32i8;
5067 // TODO: Allow 64-bit type for 32-bit target.
5068 // TODO: 512-bit types should be allowed, but make sure that those
5069 // cases are handled in combineVectorSizedSetCCEquality().
5071 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5074 /// Val is the undef sentinel value or equal to the specified value.
5075 static bool isUndefOrEqual(int Val, int CmpVal) {
5076 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5079 /// Val is either the undef or zero sentinel value.
5080 static bool isUndefOrZero(int Val) {
5081 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5084 /// Return true if every element in Mask, beginning from position Pos and ending
5085 /// in Pos+Size is the undef sentinel value.
5086 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5087 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
5088 if (Mask[i] != SM_SentinelUndef)
5089 return false;
5090 return true;
5093 /// Return true if the mask creates a vector whose lower half is undefined.
5094 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5095 unsigned NumElts = Mask.size();
5096 return isUndefInRange(Mask, 0, NumElts / 2);
5099 /// Return true if the mask creates a vector whose upper half is undefined.
5100 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5101 unsigned NumElts = Mask.size();
5102 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5105 /// Return true if Val falls within the specified range [Low, Hi).
5106 static bool isInRange(int Val, int Low, int Hi) {
5107 return (Val >= Low && Val < Hi);
5110 /// Return true if the value of any element in Mask falls within the specified
5111 /// range [Low, Hi).
5112 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5113 for (int M : Mask)
5114 if (isInRange(M, Low, Hi))
5115 return true;
5116 return false;
5119 /// Return true if Val is undef or if its value falls within the
5120 /// specified range [Low, Hi).
5121 static bool isUndefOrInRange(int Val, int Low, int Hi) {
5122 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5125 /// Return true if every element in Mask is undef or if its value
5126 /// falls within the specified range [Low, Hi).
5127 static bool isUndefOrInRange(ArrayRef<int> Mask,
5128 int Low, int Hi) {
5129 for (int M : Mask)
5130 if (!isUndefOrInRange(M, Low, Hi))
5131 return false;
5132 return true;
5135 /// Return true if Val is undef, zero or if its value falls within the
5136 /// specified range [Low, Hi).
5137 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5138 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5141 /// Return true if every element in Mask is undef, zero or if its value
5142 /// falls within the specified range [Low, Hi).
5143 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5144 for (int M : Mask)
5145 if (!isUndefOrZeroOrInRange(M, Low, Hi))
5146 return false;
5147 return true;
5150 /// Return true if every element in Mask, beginning
5151 /// from position Pos and ending in Pos + Size, falls within the specified
5152 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5153 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5154 unsigned Size, int Low, int Step = 1) {
5155 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5156 if (!isUndefOrEqual(Mask[i], Low))
5157 return false;
5158 return true;
5161 /// Return true if every element in Mask, beginning
5162 /// from position Pos and ending in Pos+Size, falls within the specified
5163 /// sequential range [Low, Low + Size), or is undef or is zero.
5164 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5165 unsigned Size, int Low) {
5166 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
5167 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5168 return false;
5169 return true;
5172 /// Return true if every element in Mask, beginning
5173 /// from position Pos and ending in Pos+Size is undef or is zero.
5174 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5175 unsigned Size) {
5176 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
5177 if (!isUndefOrZero(Mask[i]))
5178 return false;
5179 return true;
5182 /// Helper function to test whether a shuffle mask could be
5183 /// simplified by widening the elements being shuffled.
5185 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5186 /// leaves it in an unspecified state.
5188 /// NOTE: This must handle normal vector shuffle masks and *target* vector
5189 /// shuffle masks. The latter have the special property of a '-2' representing
5190 /// a zero-ed lane of a vector.
5191 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5192 SmallVectorImpl<int> &WidenedMask) {
5193 WidenedMask.assign(Mask.size() / 2, 0);
5194 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5195 int M0 = Mask[i];
5196 int M1 = Mask[i + 1];
5198 // If both elements are undef, it's trivial.
5199 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5200 WidenedMask[i / 2] = SM_SentinelUndef;
5201 continue;
5204 // Check for an undef mask and a mask value properly aligned to fit with
5205 // a pair of values. If we find such a case, use the non-undef mask's value.
5206 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5207 WidenedMask[i / 2] = M1 / 2;
5208 continue;
5210 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5211 WidenedMask[i / 2] = M0 / 2;
5212 continue;
5215 // When zeroing, we need to spread the zeroing across both lanes to widen.
5216 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5217 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5218 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5219 WidenedMask[i / 2] = SM_SentinelZero;
5220 continue;
5222 return false;
5225 // Finally check if the two mask values are adjacent and aligned with
5226 // a pair.
5227 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5228 WidenedMask[i / 2] = M0 / 2;
5229 continue;
5232 // Otherwise we can't safely widen the elements used in this shuffle.
5233 return false;
5235 assert(WidenedMask.size() == Mask.size() / 2 &&
5236 "Incorrect size of mask after widening the elements!");
5238 return true;
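// Illustrative examples (not from the original source) of widening a v4 mask to v2:
//   {0, 1, 6, 7}   -> {0, 3}             (both pairs are adjacent and aligned)
//   {-1, 1, -1, 7} -> {0, 3}             (undef elements adopt their partner's pair)
//   {0, 2, 4, 6}   -> cannot be widened  (pairs are not adjacent)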
5241 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5242 const APInt &Zeroable,
5243 SmallVectorImpl<int> &WidenedMask) {
5244 SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
5245 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
5246 if (TargetMask[i] == SM_SentinelUndef)
5247 continue;
5248 if (Zeroable[i])
5249 TargetMask[i] = SM_SentinelZero;
5251 return canWidenShuffleElements(TargetMask, WidenedMask);
5254 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5255 SmallVector<int, 32> WidenedMask;
5256 return canWidenShuffleElements(Mask, WidenedMask);
5259 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
5260 bool X86::isZeroNode(SDValue Elt) {
5261 return isNullConstant(Elt) || isNullFPConstant(Elt);
5264 // Build a vector of constants.
5265 // Use an UNDEF node if MaskElt == -1.
5266 // Split 64-bit constants in 32-bit mode.
5267 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5268 const SDLoc &dl, bool IsMask = false) {
5270 SmallVector<SDValue, 32> Ops;
5271 bool Split = false;
5273 MVT ConstVecVT = VT;
5274 unsigned NumElts = VT.getVectorNumElements();
5275 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5276 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5277 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5278 Split = true;
5281 MVT EltVT = ConstVecVT.getVectorElementType();
5282 for (unsigned i = 0; i < NumElts; ++i) {
5283 bool IsUndef = Values[i] < 0 && IsMask;
5284 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5285 DAG.getConstant(Values[i], dl, EltVT);
5286 Ops.push_back(OpNode);
5287 if (Split)
5288 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5289 DAG.getConstant(0, dl, EltVT));
5291 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5292 if (Split)
5293 ConstsNode = DAG.getBitcast(VT, ConstsNode);
5294 return ConstsNode;
5297 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5298 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5299 assert(Bits.size() == Undefs.getBitWidth() &&
5300 "Unequal constant and undef arrays");
5301 SmallVector<SDValue, 32> Ops;
5302 bool Split = false;
5304 MVT ConstVecVT = VT;
5305 unsigned NumElts = VT.getVectorNumElements();
5306 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5307 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5308 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5309 Split = true;
5312 MVT EltVT = ConstVecVT.getVectorElementType();
5313 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5314 if (Undefs[i]) {
5315 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5316 continue;
5318 const APInt &V = Bits[i];
5319 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5320 if (Split) {
5321 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5322 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5323 } else if (EltVT == MVT::f32) {
5324 APFloat FV(APFloat::IEEEsingle(), V);
5325 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5326 } else if (EltVT == MVT::f64) {
5327 APFloat FV(APFloat::IEEEdouble(), V);
5328 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5329 } else {
5330 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5334 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5335 return DAG.getBitcast(VT, ConstsNode);
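// Illustrative example (not from the original source): on a 32-bit target a
// v2i64 constant element 0x0000000100000002 is emitted into the v4i32
// build_vector as the pair {0x2, 0x1} (low half first), and the whole vector
// is bitcast back to v2i64 afterwards.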
5338 /// Returns a vector of specified type with all zero elements.
5339 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5340 SelectionDAG &DAG, const SDLoc &dl) {
5341 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5342 VT.getVectorElementType() == MVT::i1) &&
5343 "Unexpected vector type");
5345 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5346 // type. This ensures they get CSE'd. But if the integer type is not
5347 // available, use a floating-point +0.0 instead.
5348 SDValue Vec;
5349 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5350 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5351 } else if (VT.getVectorElementType() == MVT::i1) {
5352 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5353 "Unexpected vector type");
5354 Vec = DAG.getConstant(0, dl, VT);
5355 } else {
5356 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5357 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5359 return DAG.getBitcast(VT, Vec);
5362 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5363 const SDLoc &dl, unsigned vectorWidth) {
5364 EVT VT = Vec.getValueType();
5365 EVT ElVT = VT.getVectorElementType();
5366 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5367 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5368 VT.getVectorNumElements()/Factor);
5370 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5371 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5372 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5374 // This is the index of the first element of the vectorWidth-bit chunk
5375 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5376 IdxVal &= ~(ElemsPerChunk - 1);
5378 // If the input is a buildvector just emit a smaller one.
5379 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5380 return DAG.getBuildVector(ResultVT, dl,
5381 Vec->ops().slice(IdxVal, ElemsPerChunk));
5383 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5384 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
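// Illustrative example (not from the original source): extracting 128 bits
// from a v8f32 gives ElemsPerChunk == 4, so a requested IdxVal of 6 is rounded
// down to 4 and the helper returns the upper half (elements 4..7) as a v4f32.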
5387 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5388 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5389 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5390 /// instructions or a simple subregister reference. Idx is an index in the
5391 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5392 /// lowering EXTRACT_VECTOR_ELT operations easier.
5393 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5394 SelectionDAG &DAG, const SDLoc &dl) {
5395 assert((Vec.getValueType().is256BitVector() ||
5396 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5397 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5400 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5401 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5402 SelectionDAG &DAG, const SDLoc &dl) {
5403 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5404 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5407 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5408 SelectionDAG &DAG, const SDLoc &dl,
5409 unsigned vectorWidth) {
5410 assert((vectorWidth == 128 || vectorWidth == 256) &&
5411 "Unsupported vector width");
5412 // Inserting UNDEF is a no-op; just return Result.
5413 if (Vec.isUndef())
5414 return Result;
5415 EVT VT = Vec.getValueType();
5416 EVT ElVT = VT.getVectorElementType();
5417 EVT ResultVT = Result.getValueType();
5419 // Insert the relevant vectorWidth bits.
5420 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5421 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5423 // This is the index of the first element of the vectorWidth-bit chunk
5424 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5425 IdxVal &= ~(ElemsPerChunk - 1);
5427 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5428 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5431 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5432 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5433 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5434 /// simple superregister reference. Idx is an index in the 128 bits
5435 /// we want. It need not be aligned to a 128-bit boundary. That makes
5436 /// lowering INSERT_VECTOR_ELT operations easier.
5437 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5438 SelectionDAG &DAG, const SDLoc &dl) {
5439 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5440 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5443 /// Widen a vector to a larger size with the same scalar type, with the new
5444 /// elements either zero or undef.
5445 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5446 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5447 const SDLoc &dl) {
5448 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5449 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5450 "Unsupported vector widening type");
5451 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5452 : DAG.getUNDEF(VT);
5453 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5454 DAG.getIntPtrConstant(0, dl));
5457 /// Widen a vector to a larger size with the same scalar type, with the new
5458 /// elements either zero or undef.
5459 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5460 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5461 const SDLoc &dl, unsigned WideSizeInBits) {
5462 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5463 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5464 "Unsupported vector widening type");
5465 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5466 MVT SVT = Vec.getSimpleValueType().getScalarType();
5467 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5468 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5471 // Helper function to collect subvector ops that are concatenated together,
5472 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
5473 // The subvectors in Ops are guaranteed to be the same type.
5474 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5475 assert(Ops.empty() && "Expected an empty ops vector");
5477 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5478 Ops.append(N->op_begin(), N->op_end());
5479 return true;
5482 if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5483 isa<ConstantSDNode>(N->getOperand(2))) {
5484 SDValue Src = N->getOperand(0);
5485 SDValue Sub = N->getOperand(1);
5486 const APInt &Idx = N->getConstantOperandAPInt(2);
5487 EVT VT = Src.getValueType();
5488 EVT SubVT = Sub.getValueType();
5490 // TODO - Handle more general insert_subvector chains.
5491 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5492 Idx == (VT.getVectorNumElements() / 2) &&
5493 Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5494 isNullConstant(Src.getOperand(2))) {
5495 Ops.push_back(Src.getOperand(1));
5496 Ops.push_back(Sub);
5497 return true;
5501 return false;
5504 // Helper for splitting operands of an operation to legal target size and
5505 // applying a function on each part.
5506 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5507 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5508 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5509 // The argument Builder is a function that will be applied on each split part:
5510 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
5511 template <typename F>
5512 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5513 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5514 F Builder, bool CheckBWI = true) {
5515 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5516 unsigned NumSubs = 1;
5517 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5518 (!CheckBWI && Subtarget.useAVX512Regs())) {
5519 if (VT.getSizeInBits() > 512) {
5520 NumSubs = VT.getSizeInBits() / 512;
5521 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5523 } else if (Subtarget.hasAVX2()) {
5524 if (VT.getSizeInBits() > 256) {
5525 NumSubs = VT.getSizeInBits() / 256;
5526 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5528 } else {
5529 if (VT.getSizeInBits() > 128) {
5530 NumSubs = VT.getSizeInBits() / 128;
5531 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5535 if (NumSubs == 1)
5536 return Builder(DAG, DL, Ops);
5538 SmallVector<SDValue, 4> Subs;
5539 for (unsigned i = 0; i != NumSubs; ++i) {
5540 SmallVector<SDValue, 2> SubOps;
5541 for (SDValue Op : Ops) {
5542 EVT OpVT = Op.getValueType();
5543 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5544 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5545 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5547 Subs.push_back(Builder(DAG, DL, SubOps));
5549 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
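// Illustrative usage sketch (not from the original source; the lambda name is
// hypothetical). A caller provides a builder that emits one sub-sized operation:
//   auto AddBuilder = [](SelectionDAG &DAG, const SDLoc &DL, ArrayRef<SDValue> Ops) {
//     return DAG.getNode(ISD::ADD, DL, Ops[0].getValueType(), Ops[0], Ops[1]);
//   };
//   SDValue R = SplitOpsAndApply(DAG, Subtarget, DL, VT, {A, B}, AddBuilder);
// With VT == v64i8 on an AVX2-only target this splits into two 256-bit adds
// whose results are concatenated back into the 512-bit result.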
5552 /// Insert i1-subvector to i1-vector.
5553 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5554 const X86Subtarget &Subtarget) {
5556 SDLoc dl(Op);
5557 SDValue Vec = Op.getOperand(0);
5558 SDValue SubVec = Op.getOperand(1);
5559 SDValue Idx = Op.getOperand(2);
5561 if (!isa<ConstantSDNode>(Idx))
5562 return SDValue();
5564 // Inserting undef is a nop. We can just return the original vector.
5565 if (SubVec.isUndef())
5566 return Vec;
5568 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5569 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5570 return Op;
5572 MVT OpVT = Op.getSimpleValueType();
5573 unsigned NumElems = OpVT.getVectorNumElements();
5575 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5577 // Extend to natively supported kshift.
5578 MVT WideOpVT = OpVT;
5579 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5580 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5582 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
5583 // if necessary.
5584 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5585 // May need to promote to a legal type.
5586 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5587 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5588 SubVec, Idx);
5589 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5592 MVT SubVecVT = SubVec.getSimpleValueType();
5593 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5595 assert(IdxVal + SubVecNumElems <= NumElems &&
5596 IdxVal % SubVecVT.getSizeInBits() == 0 &&
5597 "Unexpected index value in INSERT_SUBVECTOR");
5599 SDValue Undef = DAG.getUNDEF(WideOpVT);
5601 if (IdxVal == 0) {
5602 // Zero lower bits of the Vec
5603 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
5604 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5605 ZeroIdx);
5606 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5607 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5608 // Merge them together, SubVec should be zero extended.
5609 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5610 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5611 SubVec, ZeroIdx);
5612 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5613 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5616 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5617 Undef, SubVec, ZeroIdx);
5619 if (Vec.isUndef()) {
5620 assert(IdxVal != 0 && "Unexpected index");
5621 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5622 DAG.getConstant(IdxVal, dl, MVT::i8));
5623 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5626 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5627 assert(IdxVal != 0 && "Unexpected index");
5628 NumElems = WideOpVT.getVectorNumElements();
5629 unsigned ShiftLeft = NumElems - SubVecNumElems;
5630 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5631 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5632 DAG.getConstant(ShiftLeft, dl, MVT::i8));
5633 if (ShiftRight != 0)
5634 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5635 DAG.getConstant(ShiftRight, dl, MVT::i8));
5636 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5639 // Simple case when we put the subvector into the upper part.
5640 if (IdxVal + SubVecNumElems == NumElems) {
5641 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5642 DAG.getConstant(IdxVal, dl, MVT::i8));
5643 if (SubVecNumElems * 2 == NumElems) {
5644 // Special case, use legal zero extending insert_subvector. This allows
5645 // isel to optimize when bits are known zero.
5646 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5647 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5648 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5649 Vec, ZeroIdx);
5650 } else {
5651 // Otherwise use explicit shifts to zero the bits.
5652 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5653 Undef, Vec, ZeroIdx);
5654 NumElems = WideOpVT.getVectorNumElements();
5655 SDValue ShiftBits = DAG.getConstant(NumElems - IdxVal, dl, MVT::i8);
5656 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5657 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5659 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5660 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5663 // Inserting into the middle is more complicated.
5665 NumElems = WideOpVT.getVectorNumElements();
5667 // Widen the vector if needed.
5668 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5669 // Move the current value of the bits being replaced to the LSBs.
5670 Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5671 DAG.getConstant(IdxVal, dl, MVT::i8));
5672 // Xor with the new bits.
5673 Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Op, SubVec);
5674 // Shift to MSB, filling bottom bits with 0.
5675 unsigned ShiftLeft = NumElems - SubVecNumElems;
5676 Op = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Op,
5677 DAG.getConstant(ShiftLeft, dl, MVT::i8));
5678 // Shift to the final position, filling upper bits with 0.
5679 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5680 Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Op,
5681 DAG.getConstant(ShiftRight, dl, MVT::i8));
5682 // Xor with original vector leaving the new value.
5683 Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Vec, Op);
5684 // Reduce to original width if needed.
5685 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
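// A rough scalar sketch of the shift/xor trick above (illustrative values,
// not taken from this file): inserting a 4-bit SubVec S into a 16-bit mask
// Vec at IdxVal, using only shifts and xors on a plain integer:
//   uint16_t T = Vec >> IdxVal;   // old bits of the target range at the LSBs
//   T ^= S;                       // diff between the old and new bits
//   T <<= 16 - 4;                 // drop everything above the low 4 bits
//   T >>= 16 - 4 - IdxVal;        // place the diff at [IdxVal, IdxVal + 4)
//   return Vec ^ T;               // outside the range Vec ^ 0 == Vec; inside,
//                                 // Vec ^ (Vec ^ S) == S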
5688 static SDValue concatSubVectors(SDValue V1, SDValue V2, EVT VT,
5689 unsigned NumElems, SelectionDAG &DAG,
5690 const SDLoc &dl, unsigned VectorWidth) {
5691 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, VectorWidth);
5692 return insertSubVector(V, V2, NumElems / 2, DAG, dl, VectorWidth);
5695 /// Returns a vector of the specified type with all bits set.
5696 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5697 /// Then bitcast to their original type, ensuring they get CSE'd.
5698 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5699 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5700 "Expected a 128/256/512-bit vector type");
5702 APInt Ones = APInt::getAllOnesValue(32);
5703 unsigned NumElts = VT.getSizeInBits() / 32;
5704 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5705 return DAG.getBitcast(VT, Vec);
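// For illustration (an assumed call, not present in the original): requesting
// getOnesVector for MVT::v16i8 builds a <4 x i32> splat of 0xFFFFFFFF and
// bitcasts it back to v16i8, so the all-ones constants for v16i8, v8i16,
// v4i32 and v2i64 all share (CSE to) the same v4i32 node.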
5708 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
5709 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
5710 switch (Opcode) {
5711 case ISD::ANY_EXTEND:
5712 case ISD::ANY_EXTEND_VECTOR_INREG:
5713 return ISD::ANY_EXTEND_VECTOR_INREG;
5714 case ISD::ZERO_EXTEND:
5715 case ISD::ZERO_EXTEND_VECTOR_INREG:
5716 return ISD::ZERO_EXTEND_VECTOR_INREG;
5717 case ISD::SIGN_EXTEND:
5718 case ISD::SIGN_EXTEND_VECTOR_INREG:
5719 return ISD::SIGN_EXTEND_VECTOR_INREG;
5721 llvm_unreachable("Unknown opcode");
5724 static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
5725 SDValue In, SelectionDAG &DAG) {
5726 EVT InVT = In.getValueType();
5727 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
5728 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
5729 ISD::ZERO_EXTEND == Opcode) &&
5730 "Unknown extension opcode");
5732 // For 256-bit vectors, we only need the lower (128-bit) input half.
5733 // For 512-bit vectors, we only need the lower input half or quarter.
5734 if (InVT.getSizeInBits() > 128) {
5735 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
5736 "Expected VTs to be the same size!");
5737 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
5738 In = extractSubVector(In, 0, DAG, DL,
5739 std::max(128U, VT.getSizeInBits() / Scale));
5740 InVT = In.getValueType();
5743 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
5744 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
5746 return DAG.getNode(Opcode, DL, VT, In);
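// A worked example (types chosen for illustration only): extending a 256-bit
// input with Opcode == ISD::ZERO_EXTEND, VT == v8i32 and In : v32i8. Scale is
// 32/8 == 4, so only the low max(128, 256/4) == 128 bits are extracted,
// giving a v16i8. The element counts now differ (8 vs 16), so the opcode is
// switched to ZERO_EXTEND_VECTOR_INREG, which extends just the low 8 bytes.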
5749 /// Returns a vector_shuffle node for an unpackl operation.
5750 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5751 SDValue V1, SDValue V2) {
5752 SmallVector<int, 8> Mask;
5753 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
5754 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5757 /// Returns a vector_shuffle node for an unpackh operation.
5758 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5759 SDValue V1, SDValue V2) {
5760 SmallVector<int, 8> Mask;
5761 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
5762 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
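// For reference (masks reproduced here for illustration): with VT == v4i32
// the unpackl mask is <0,4,1,5> and the unpackh mask is <2,6,3,7>; for
// v8i16 they are <0,8,1,9,2,10,3,11> and <4,12,5,13,6,14,7,15>. Wider types
// repeat the same pattern within each 128-bit lane.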
5765 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
5766 /// This produces a shuffle where the low element of V2 is swizzled into the
5767 /// zero/undef vector, landing at element Idx.
5768 /// For example, the mask is 4,1,2,3 for idx=0, or 0,1,2,4 for idx=3.
5769 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
5770 bool IsZero,
5771 const X86Subtarget &Subtarget,
5772 SelectionDAG &DAG) {
5773 MVT VT = V2.getSimpleValueType();
5774 SDValue V1 = IsZero
5775 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5776 int NumElems = VT.getVectorNumElements();
5777 SmallVector<int, 16> MaskVec(NumElems);
5778 for (int i = 0; i != NumElems; ++i)
5779 // If this is the insertion idx, put the low elt of V2 here.
5780 MaskVec[i] = (i == Idx) ? NumElems : i;
5781 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
5784 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
5785 if (!Load)
5786 return nullptr;
5788 SDValue Ptr = Load->getBasePtr();
5789 if (Ptr->getOpcode() == X86ISD::Wrapper ||
5790 Ptr->getOpcode() == X86ISD::WrapperRIP)
5791 Ptr = Ptr->getOperand(0);
5793 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
5794 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
5795 return nullptr;
5797 return CNode->getConstVal();
5800 static const Constant *getTargetConstantFromNode(SDValue Op) {
5801 Op = peekThroughBitcasts(Op);
5802 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
5805 const Constant *
5806 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
5807 assert(LD && "Unexpected null LoadSDNode");
5808 return getTargetConstantFromNode(LD);
5811 // Extract raw constant bits from constant pools.
5812 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
5813 APInt &UndefElts,
5814 SmallVectorImpl<APInt> &EltBits,
5815 bool AllowWholeUndefs = true,
5816 bool AllowPartialUndefs = true) {
5817 assert(EltBits.empty() && "Expected an empty EltBits vector");
5819 Op = peekThroughBitcasts(Op);
5821 EVT VT = Op.getValueType();
5822 unsigned SizeInBits = VT.getSizeInBits();
5823 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
5824 unsigned NumElts = SizeInBits / EltSizeInBits;
5826 // Bitcast a source array of element bits to the target size.
5827 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
5828 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
5829 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
5830 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
5831 "Constant bit sizes don't match");
5833 // Don't split if we don't allow undef bits.
5834 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
5835 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
5836 return false;
5838 // If we're already the right size, don't bother bitcasting.
5839 if (NumSrcElts == NumElts) {
5840 UndefElts = UndefSrcElts;
5841 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
5842 return true;
5845 // Extract all the undef/constant element data and pack into single bitsets.
5846 APInt UndefBits(SizeInBits, 0);
5847 APInt MaskBits(SizeInBits, 0);
5849 for (unsigned i = 0; i != NumSrcElts; ++i) {
5850 unsigned BitOffset = i * SrcEltSizeInBits;
5851 if (UndefSrcElts[i])
5852 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
5853 MaskBits.insertBits(SrcEltBits[i], BitOffset);
5856 // Split the undef/constant single bitset data into the target elements.
5857 UndefElts = APInt(NumElts, 0);
5858 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
5860 for (unsigned i = 0; i != NumElts; ++i) {
5861 unsigned BitOffset = i * EltSizeInBits;
5862 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
5864 // Only treat an element as UNDEF if all bits are UNDEF.
5865 if (UndefEltBits.isAllOnesValue()) {
5866 if (!AllowWholeUndefs)
5867 return false;
5868 UndefElts.setBit(i);
5869 continue;
5872 // If only some bits are UNDEF then treat them as zero (or bail if not
5873 // supported).
5874 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
5875 return false;
5877 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
5879 return true;
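// A small worked example of the repacking above (constants invented for
// illustration): a v2i32 source <0x0000FFFF, undef> queried with
// EltSizeInBits == 16 packs into MaskBits == 0x000000000000FFFF with
// UndefBits == 0xFFFFFFFF00000000, then splits into four i16 elements
// {0xFFFF, 0x0000, undef, undef} with UndefElts == 0b1100 (the last two
// elements are wholly undef, so they stay undef when AllowWholeUndefs).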
5882 // Collect constant bits and insert into mask/undef bit masks.
5883 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
5884 unsigned UndefBitIndex) {
5885 if (!Cst)
5886 return false;
5887 if (isa<UndefValue>(Cst)) {
5888 Undefs.setBit(UndefBitIndex);
5889 return true;
5891 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
5892 Mask = CInt->getValue();
5893 return true;
5895 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
5896 Mask = CFP->getValueAPF().bitcastToAPInt();
5897 return true;
5899 return false;
5902 // Handle UNDEFs.
5903 if (Op.isUndef()) {
5904 APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
5905 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
5906 return CastBitData(UndefSrcElts, SrcEltBits);
5909 // Extract scalar constant bits.
5910 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
5911 APInt UndefSrcElts = APInt::getNullValue(1);
5912 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
5913 return CastBitData(UndefSrcElts, SrcEltBits);
5915 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
5916 APInt UndefSrcElts = APInt::getNullValue(1);
5917 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
5918 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
5919 return CastBitData(UndefSrcElts, SrcEltBits);
5922 // Extract constant bits from build vector.
5923 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
5924 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
5925 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5927 APInt UndefSrcElts(NumSrcElts, 0);
5928 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
5929 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
5930 const SDValue &Src = Op.getOperand(i);
5931 if (Src.isUndef()) {
5932 UndefSrcElts.setBit(i);
5933 continue;
5935 auto *Cst = cast<ConstantSDNode>(Src);
5936 SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
5938 return CastBitData(UndefSrcElts, SrcEltBits);
5940 if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
5941 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
5942 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5944 APInt UndefSrcElts(NumSrcElts, 0);
5945 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
5946 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
5947 const SDValue &Src = Op.getOperand(i);
5948 if (Src.isUndef()) {
5949 UndefSrcElts.setBit(i);
5950 continue;
5952 auto *Cst = cast<ConstantFPSDNode>(Src);
5953 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
5954 SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
5956 return CastBitData(UndefSrcElts, SrcEltBits);
5959 // Extract constant bits from constant pool vector.
5960 if (auto *Cst = getTargetConstantFromNode(Op)) {
5961 Type *CstTy = Cst->getType();
5962 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
5963 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
5964 return false;
5966 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
5967 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5969 APInt UndefSrcElts(NumSrcElts, 0);
5970 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
5971 for (unsigned i = 0; i != NumSrcElts; ++i)
5972 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
5973 UndefSrcElts, i))
5974 return false;
5976 return CastBitData(UndefSrcElts, SrcEltBits);
5979 // Extract constant bits from a broadcasted constant pool scalar.
5980 if (Op.getOpcode() == X86ISD::VBROADCAST &&
5981 EltSizeInBits <= VT.getScalarSizeInBits()) {
5982 if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
5983 unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
5984 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5986 APInt UndefSrcElts(NumSrcElts, 0);
5987 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
5988 if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
5989 if (UndefSrcElts[0])
5990 UndefSrcElts.setBits(0, NumSrcElts);
5991 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
5992 return CastBitData(UndefSrcElts, SrcEltBits);
5997 // Extract constant bits from a subvector broadcast.
5998 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
5999 SmallVector<APInt, 16> SubEltBits;
6000 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6001 UndefElts, SubEltBits, AllowWholeUndefs,
6002 AllowPartialUndefs)) {
6003 UndefElts = APInt::getSplat(NumElts, UndefElts);
6004 while (EltBits.size() < NumElts)
6005 EltBits.append(SubEltBits.begin(), SubEltBits.end());
6006 return true;
6010 // Extract a rematerialized scalar constant insertion.
6011 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6012 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6013 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6014 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6015 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6017 APInt UndefSrcElts(NumSrcElts, 0);
6018 SmallVector<APInt, 64> SrcEltBits;
6019 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6020 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6021 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6022 return CastBitData(UndefSrcElts, SrcEltBits);
6025 // Insert constant bits from base and subvector sources.
6026 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
6027 isa<ConstantSDNode>(Op.getOperand(2))) {
6028 // TODO - support insert_subvector through bitcasts.
6029 if (EltSizeInBits != VT.getScalarSizeInBits())
6030 return false;
6032 APInt UndefSubElts;
6033 SmallVector<APInt, 32> EltSubBits;
6034 if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6035 UndefSubElts, EltSubBits,
6036 AllowWholeUndefs, AllowPartialUndefs) &&
6037 getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6038 UndefElts, EltBits, AllowWholeUndefs,
6039 AllowPartialUndefs)) {
6040 unsigned BaseIdx = Op.getConstantOperandVal(2);
6041 UndefElts.insertBits(UndefSubElts, BaseIdx);
6042 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6043 EltBits[BaseIdx + i] = EltSubBits[i];
6044 return true;
6048 // Extract constant bits from a subvector's source.
6049 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6050 isa<ConstantSDNode>(Op.getOperand(1))) {
6051 // TODO - support extract_subvector through bitcasts.
6052 if (EltSizeInBits != VT.getScalarSizeInBits())
6053 return false;
6055 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6056 UndefElts, EltBits, AllowWholeUndefs,
6057 AllowPartialUndefs)) {
6058 EVT SrcVT = Op.getOperand(0).getValueType();
6059 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6060 unsigned NumSubElts = VT.getVectorNumElements();
6061 unsigned BaseIdx = Op.getConstantOperandVal(1);
6062 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6063 if ((BaseIdx + NumSubElts) != NumSrcElts)
6064 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6065 if (BaseIdx != 0)
6066 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6067 return true;
6071 // Extract constant bits from shuffle node sources.
6072 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6073 // TODO - support shuffle through bitcasts.
6074 if (EltSizeInBits != VT.getScalarSizeInBits())
6075 return false;
6077 ArrayRef<int> Mask = SVN->getMask();
6078 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6079 llvm::any_of(Mask, [](int M) { return M < 0; }))
6080 return false;
6082 APInt UndefElts0, UndefElts1;
6083 SmallVector<APInt, 32> EltBits0, EltBits1;
6084 if (isAnyInRange(Mask, 0, NumElts) &&
6085 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6086 UndefElts0, EltBits0, AllowWholeUndefs,
6087 AllowPartialUndefs))
6088 return false;
6089 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6090 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6091 UndefElts1, EltBits1, AllowWholeUndefs,
6092 AllowPartialUndefs))
6093 return false;
6095 UndefElts = APInt::getNullValue(NumElts);
6096 for (int i = 0; i != (int)NumElts; ++i) {
6097 int M = Mask[i];
6098 if (M < 0) {
6099 UndefElts.setBit(i);
6100 EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6101 } else if (M < (int)NumElts) {
6102 if (UndefElts0[M])
6103 UndefElts.setBit(i);
6104 EltBits.push_back(EltBits0[M]);
6105 } else {
6106 if (UndefElts1[M - NumElts])
6107 UndefElts.setBit(i);
6108 EltBits.push_back(EltBits1[M - NumElts]);
6111 return true;
6114 return false;
6117 static bool isConstantSplat(SDValue Op, APInt &SplatVal) {
6118 APInt UndefElts;
6119 SmallVector<APInt, 16> EltBits;
6120 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6121 UndefElts, EltBits, true, false)) {
6122 int SplatIndex = -1;
6123 for (int i = 0, e = EltBits.size(); i != e; ++i) {
6124 if (UndefElts[i])
6125 continue;
6126 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6127 SplatIndex = -1;
6128 break;
6130 SplatIndex = i;
6132 if (0 <= SplatIndex) {
6133 SplatVal = EltBits[SplatIndex];
6134 return true;
6138 return false;
6141 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6142 unsigned MaskEltSizeInBits,
6143 SmallVectorImpl<uint64_t> &RawMask,
6144 APInt &UndefElts) {
6145 // Extract the raw target constant bits.
6146 SmallVector<APInt, 64> EltBits;
6147 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6148 EltBits, /* AllowWholeUndefs */ true,
6149 /* AllowPartialUndefs */ false))
6150 return false;
6152 // Insert the extracted elements into the mask.
6153 for (APInt Elt : EltBits)
6154 RawMask.push_back(Elt.getZExtValue());
6156 return true;
6159 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6160 /// Note: This ignores saturation, so inputs must be checked first.
6161 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6162 bool Unary) {
6163 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6164 unsigned NumElts = VT.getVectorNumElements();
6165 unsigned NumLanes = VT.getSizeInBits() / 128;
6166 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6167 unsigned Offset = Unary ? 0 : NumElts;
6169 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6170 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6171 Mask.push_back(Elt + (Lane * NumEltsPerLane));
6172 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6173 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
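// For example (a single-lane case, shown for illustration): a v16i8 pack of
// two v8i16 inputs produces the mask
//   <0,2,4,6,8,10,12,14, 16,18,20,22,24,26,28,30>
// i.e. the even (low) byte of every word of the first operand followed by
// the even bytes of the second operand; the unary form uses <0,2,...,14>
// twice. 256-bit types repeat this per 128-bit lane.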
6177 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
6178 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6179 APInt &DemandedLHS, APInt &DemandedRHS) {
6180 int NumLanes = VT.getSizeInBits() / 128;
6181 int NumElts = DemandedElts.getBitWidth();
6182 int NumInnerElts = NumElts / 2;
6183 int NumEltsPerLane = NumElts / NumLanes;
6184 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6186 DemandedLHS = APInt::getNullValue(NumInnerElts);
6187 DemandedRHS = APInt::getNullValue(NumInnerElts);
6189 // Map DemandedElts to the packed operands.
6190 for (int Lane = 0; Lane != NumLanes; ++Lane) {
6191 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6192 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6193 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6194 if (DemandedElts[OuterIdx])
6195 DemandedLHS.setBit(InnerIdx);
6196 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6197 DemandedRHS.setBit(InnerIdx);
6202 // Split the demanded elts of a HADD/HSUB node between its operands.
6203 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6204 APInt &DemandedLHS, APInt &DemandedRHS) {
6205 int NumLanes = VT.getSizeInBits() / 128;
6206 int NumElts = DemandedElts.getBitWidth();
6207 int NumEltsPerLane = NumElts / NumLanes;
6208 int HalfEltsPerLane = NumEltsPerLane / 2;
6210 DemandedLHS = APInt::getNullValue(NumElts);
6211 DemandedRHS = APInt::getNullValue(NumElts);
6213 // Map DemandedElts to the horizontal operands.
6214 for (int Idx = 0; Idx != NumElts; ++Idx) {
6215 if (!DemandedElts[Idx])
6216 continue;
6217 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6218 int LocalIdx = Idx % NumEltsPerLane;
6219 if (LocalIdx < HalfEltsPerLane) {
6220 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6221 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6222 } else {
6223 LocalIdx -= HalfEltsPerLane;
6224 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6225 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
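// A worked example (element choice is illustrative): for a v8i32 HADD with
// only result element 5 demanded, LaneIdx == 4 and LocalIdx == 1, so bits 6
// and 7 of DemandedLHS are set; element 5 is computed from elements 6 and 7
// of the first operand. Demanding element 6 instead sets bits 4 and 5 of
// DemandedRHS.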
6230 /// Calculates the shuffle mask corresponding to the target-specific opcode.
6231 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6232 /// operands in \p Ops, and returns true.
6233 /// Sets \p IsUnary to true if only one source is used. Note that this will set
6234 /// IsUnary for shuffles which use a single input multiple times, and in those
6235 /// cases it will adjust the mask to only have indices within that single input.
6236 /// It is an error to call this with non-empty Mask/Ops vectors.
6237 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6238 SmallVectorImpl<SDValue> &Ops,
6239 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6240 unsigned NumElems = VT.getVectorNumElements();
6241 unsigned MaskEltSize = VT.getScalarSizeInBits();
6242 SmallVector<uint64_t, 32> RawMask;
6243 APInt RawUndefs;
6244 SDValue ImmN;
6246 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6247 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6249 IsUnary = false;
6250 bool IsFakeUnary = false;
6251 switch (N->getOpcode()) {
6252 case X86ISD::BLENDI:
6253 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6254 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6255 ImmN = N->getOperand(N->getNumOperands() - 1);
6256 DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6257 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6258 break;
6259 case X86ISD::SHUFP:
6260 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6261 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6262 ImmN = N->getOperand(N->getNumOperands() - 1);
6263 DecodeSHUFPMask(NumElems, MaskEltSize,
6264 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6265 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6266 break;
6267 case X86ISD::INSERTPS:
6268 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6269 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6270 ImmN = N->getOperand(N->getNumOperands() - 1);
6271 DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6272 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6273 break;
6274 case X86ISD::EXTRQI:
6275 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6276 if (isa<ConstantSDNode>(N->getOperand(1)) &&
6277 isa<ConstantSDNode>(N->getOperand(2))) {
6278 int BitLen = N->getConstantOperandVal(1);
6279 int BitIdx = N->getConstantOperandVal(2);
6280 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6281 IsUnary = true;
6283 break;
6284 case X86ISD::INSERTQI:
6285 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6286 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6287 if (isa<ConstantSDNode>(N->getOperand(2)) &&
6288 isa<ConstantSDNode>(N->getOperand(3))) {
6289 int BitLen = N->getConstantOperandVal(2);
6290 int BitIdx = N->getConstantOperandVal(3);
6291 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6292 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6294 break;
6295 case X86ISD::UNPCKH:
6296 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6297 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6298 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6299 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6300 break;
6301 case X86ISD::UNPCKL:
6302 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6303 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6304 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6305 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6306 break;
6307 case X86ISD::MOVHLPS:
6308 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6309 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6310 DecodeMOVHLPSMask(NumElems, Mask);
6311 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6312 break;
6313 case X86ISD::MOVLHPS:
6314 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6315 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6316 DecodeMOVLHPSMask(NumElems, Mask);
6317 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6318 break;
6319 case X86ISD::PALIGNR:
6320 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6321 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6322 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6323 ImmN = N->getOperand(N->getNumOperands() - 1);
6324 DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6325 Mask);
6326 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6327 Ops.push_back(N->getOperand(1));
6328 Ops.push_back(N->getOperand(0));
6329 break;
6330 case X86ISD::VSHLDQ:
6331 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6332 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6333 ImmN = N->getOperand(N->getNumOperands() - 1);
6334 DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6335 Mask);
6336 IsUnary = true;
6337 break;
6338 case X86ISD::VSRLDQ:
6339 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6340 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6341 ImmN = N->getOperand(N->getNumOperands() - 1);
6342 DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6343 Mask);
6344 IsUnary = true;
6345 break;
6346 case X86ISD::PSHUFD:
6347 case X86ISD::VPERMILPI:
6348 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6349 ImmN = N->getOperand(N->getNumOperands() - 1);
6350 DecodePSHUFMask(NumElems, MaskEltSize,
6351 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6352 IsUnary = true;
6353 break;
6354 case X86ISD::PSHUFHW:
6355 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6356 ImmN = N->getOperand(N->getNumOperands() - 1);
6357 DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6358 Mask);
6359 IsUnary = true;
6360 break;
6361 case X86ISD::PSHUFLW:
6362 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6363 ImmN = N->getOperand(N->getNumOperands() - 1);
6364 DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6365 Mask);
6366 IsUnary = true;
6367 break;
6368 case X86ISD::VZEXT_MOVL:
6369 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6370 DecodeZeroMoveLowMask(NumElems, Mask);
6371 IsUnary = true;
6372 break;
6373 case X86ISD::VBROADCAST: {
6374 SDValue N0 = N->getOperand(0);
6375 // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
6376 // add the pre-extracted value to the Ops vector.
6377 if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6378 N0.getOperand(0).getValueType() == VT &&
6379 N0.getConstantOperandVal(1) == 0)
6380 Ops.push_back(N0.getOperand(0));
6382 // We only decode broadcasts of same-sized vectors, unless the broadcast
6383 // came from an extract from the original width. If we found one, we
6384 // pushed it onto the Ops vector above.
6385 if (N0.getValueType() == VT || !Ops.empty()) {
6386 DecodeVectorBroadcast(NumElems, Mask);
6387 IsUnary = true;
6388 break;
6390 return false;
6392 case X86ISD::VPERMILPV: {
6393 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6394 IsUnary = true;
6395 SDValue MaskNode = N->getOperand(1);
6396 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6397 RawUndefs)) {
6398 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6399 break;
6401 return false;
6403 case X86ISD::PSHUFB: {
6404 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6405 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6406 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6407 IsUnary = true;
6408 SDValue MaskNode = N->getOperand(1);
6409 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6410 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6411 break;
6413 return false;
6415 case X86ISD::VPERMI:
6416 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6417 ImmN = N->getOperand(N->getNumOperands() - 1);
6418 DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6419 IsUnary = true;
6420 break;
6421 case X86ISD::MOVSS:
6422 case X86ISD::MOVSD:
6423 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6424 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6425 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6426 break;
6427 case X86ISD::VPERM2X128:
6428 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6429 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6430 ImmN = N->getOperand(N->getNumOperands() - 1);
6431 DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6432 Mask);
6433 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6434 break;
6435 case X86ISD::SHUF128:
6436 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6437 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6438 ImmN = N->getOperand(N->getNumOperands() - 1);
6439 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
6440 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6441 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6442 break;
6443 case X86ISD::MOVSLDUP:
6444 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6445 DecodeMOVSLDUPMask(NumElems, Mask);
6446 IsUnary = true;
6447 break;
6448 case X86ISD::MOVSHDUP:
6449 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6450 DecodeMOVSHDUPMask(NumElems, Mask);
6451 IsUnary = true;
6452 break;
6453 case X86ISD::MOVDDUP:
6454 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6455 DecodeMOVDDUPMask(NumElems, Mask);
6456 IsUnary = true;
6457 break;
6458 case X86ISD::VPERMIL2: {
6459 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6460 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6461 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6462 SDValue MaskNode = N->getOperand(2);
6463 SDValue CtrlNode = N->getOperand(3);
6464 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
6465 unsigned CtrlImm = CtrlOp->getZExtValue();
6466 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6467 RawUndefs)) {
6468 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
6469 Mask);
6470 break;
6473 return false;
6475 case X86ISD::VPPERM: {
6476 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6477 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6478 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6479 SDValue MaskNode = N->getOperand(2);
6480 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6481 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
6482 break;
6484 return false;
6486 case X86ISD::VPERMV: {
6487 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6488 IsUnary = true;
6489 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
6490 Ops.push_back(N->getOperand(1));
6491 SDValue MaskNode = N->getOperand(0);
6492 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6493 RawUndefs)) {
6494 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
6495 break;
6497 return false;
6499 case X86ISD::VPERMV3: {
6500 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6501 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
6502 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
6503 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
6504 Ops.push_back(N->getOperand(0));
6505 Ops.push_back(N->getOperand(2));
6506 SDValue MaskNode = N->getOperand(1);
6507 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6508 RawUndefs)) {
6509 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
6510 break;
6512 return false;
6514 default: llvm_unreachable("unknown target shuffle node");
6517 // Empty mask indicates the decode failed.
6518 if (Mask.empty())
6519 return false;
6521 // Check if we're getting a shuffle mask with zero'd elements.
6522 if (!AllowSentinelZero)
6523 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
6524 return false;
6526 // If we have a fake unary shuffle, the shuffle mask is spread across two
6527 // inputs that are actually the same node. Re-map the mask to always point
6528 // into the first input.
6529 if (IsFakeUnary)
6530 for (int &M : Mask)
6531 if (M >= (int)Mask.size())
6532 M -= Mask.size();
6534 // If we didn't already add operands in the opcode-specific code, default to
6535 // adding 1 or 2 operands starting at 0.
6536 if (Ops.empty()) {
6537 Ops.push_back(N->getOperand(0));
6538 if (!IsUnary || IsFakeUnary)
6539 Ops.push_back(N->getOperand(1));
6542 return true;
6545 /// Check a target shuffle mask's inputs to see if we can set any values to
6546 /// SM_SentinelZero - this is for elements that are known to be zero
6547 /// (not just zeroable) from their inputs.
6548 /// Returns true if the target shuffle mask was decoded.
6549 static bool setTargetShuffleZeroElements(SDValue N,
6550 SmallVectorImpl<int> &Mask,
6551 SmallVectorImpl<SDValue> &Ops) {
6552 bool IsUnary;
6553 if (!isTargetShuffle(N.getOpcode()))
6554 return false;
6556 MVT VT = N.getSimpleValueType();
6557 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
6558 return false;
6560 SDValue V1 = Ops[0];
6561 SDValue V2 = IsUnary ? V1 : Ops[1];
6563 V1 = peekThroughBitcasts(V1);
6564 V2 = peekThroughBitcasts(V2);
6566 assert((VT.getSizeInBits() % Mask.size()) == 0 &&
6567 "Illegal split of shuffle value type");
6568 unsigned EltSizeInBits = VT.getSizeInBits() / Mask.size();
6570 // Extract known constant input data.
6571 APInt UndefSrcElts[2];
6572 SmallVector<APInt, 32> SrcEltBits[2];
6573 bool IsSrcConstant[2] = {
6574 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
6575 SrcEltBits[0], true, false),
6576 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
6577 SrcEltBits[1], true, false)};
6579 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6580 int M = Mask[i];
6582 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
6583 if (M < 0)
6584 continue;
6586 // Determine shuffle input and normalize the mask.
6587 unsigned SrcIdx = M / Size;
6588 SDValue V = M < Size ? V1 : V2;
6589 M %= Size;
6591 // We are referencing an UNDEF input.
6592 if (V.isUndef()) {
6593 Mask[i] = SM_SentinelUndef;
6594 continue;
6597 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
6598 // TODO: We currently only set UNDEF for integer types - floats use the same
6599 // registers as vectors and many of the scalar folded loads rely on the
6600 // SCALAR_TO_VECTOR pattern.
6601 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
6602 (Size % V.getValueType().getVectorNumElements()) == 0) {
6603 int Scale = Size / V.getValueType().getVectorNumElements();
6604 int Idx = M / Scale;
6605 if (Idx != 0 && !VT.isFloatingPoint())
6606 Mask[i] = SM_SentinelUndef;
6607 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
6608 Mask[i] = SM_SentinelZero;
6609 continue;
6612 // Attempt to extract from the source's constant bits.
6613 if (IsSrcConstant[SrcIdx]) {
6614 if (UndefSrcElts[SrcIdx][M])
6615 Mask[i] = SM_SentinelUndef;
6616 else if (SrcEltBits[SrcIdx][M] == 0)
6617 Mask[i] = SM_SentinelZero;
6621 assert(VT.getVectorNumElements() == Mask.size() &&
6622 "Different mask size from vector size!");
6623 return true;
6626 // Forward declaration (for getFauxShuffleMask recursive check).
6627 static bool resolveTargetShuffleInputs(SDValue Op,
6628 SmallVectorImpl<SDValue> &Inputs,
6629 SmallVectorImpl<int> &Mask,
6630 SelectionDAG &DAG);
6632 // Attempt to decode ops that could be represented as a shuffle mask.
6633 // The decoded shuffle mask may contain a different number of elements than the
6634 // destination value type.
6635 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
6636 SmallVectorImpl<int> &Mask,
6637 SmallVectorImpl<SDValue> &Ops,
6638 SelectionDAG &DAG) {
6639 Mask.clear();
6640 Ops.clear();
6642 MVT VT = N.getSimpleValueType();
6643 unsigned NumElts = VT.getVectorNumElements();
6644 unsigned NumSizeInBits = VT.getSizeInBits();
6645 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6646 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
6647 return false;
6648 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
6650 unsigned Opcode = N.getOpcode();
6651 switch (Opcode) {
6652 case ISD::VECTOR_SHUFFLE: {
6653 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
6654 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
6655 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
6656 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
6657 Ops.push_back(N.getOperand(0));
6658 Ops.push_back(N.getOperand(1));
6659 return true;
6661 return false;
6663 case ISD::AND:
6664 case X86ISD::ANDNP: {
6665 // Attempt to decode as a per-byte mask.
6666 APInt UndefElts;
6667 SmallVector<APInt, 32> EltBits;
6668 SDValue N0 = N.getOperand(0);
6669 SDValue N1 = N.getOperand(1);
6670 bool IsAndN = (X86ISD::ANDNP == Opcode);
6671 uint64_t ZeroMask = IsAndN ? 255 : 0;
6672 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
6673 return false;
6674 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
6675 if (UndefElts[i]) {
6676 Mask.push_back(SM_SentinelUndef);
6677 continue;
6679 uint64_t ByteBits = EltBits[i].getZExtValue();
6680 if (ByteBits != 0 && ByteBits != 255)
6681 return false;
6682 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
6684 Ops.push_back(IsAndN ? N1 : N0);
6685 return true;
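// For example (constant invented for illustration): AND(X, v4i32 splat of
// 0x0000FFFF) has a per-element byte pattern of <FF,FF,00,00>, giving
// Mask = <0,1,Z,Z, 4,5,Z,Z, 8,9,Z,Z, 12,13,Z,Z> (Z = SM_SentinelZero) with
// the single input X.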
6687 case ISD::OR: {
6688 // Inspect each operand at the byte level. We can merge these into a
6689 // blend shuffle mask if, for each byte, at least one operand is masked out (zero).
6690 KnownBits Known0 = DAG.computeKnownBits(N.getOperand(0), DemandedElts);
6691 KnownBits Known1 = DAG.computeKnownBits(N.getOperand(1), DemandedElts);
6692 if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
6693 bool IsByteMask = true;
6694 unsigned NumSizeInBytes = NumSizeInBits / 8;
6695 unsigned NumBytesPerElt = NumBitsPerElt / 8;
6696 APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
6697 APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
6698 for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
6699 unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
6700 unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
6701 if (LHS == 255 && RHS == 0)
6702 SelectMask.setBit(i);
6703 else if (LHS == 255 && RHS == 255)
6704 ZeroMask.setBit(i);
6705 else if (!(LHS == 0 && RHS == 255))
6706 IsByteMask = false;
6708 if (IsByteMask) {
6709 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
6710 for (unsigned j = 0; j != NumBytesPerElt; ++j) {
6711 unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
6712 int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
6713 Mask.push_back(Idx);
6716 Ops.push_back(N.getOperand(0));
6717 Ops.push_back(N.getOperand(1));
6718 return true;
6722 // Handle the OR(SHUFFLE,SHUFFLE) case where, for each element, one source is
6723 // zero and the other provides a valid shuffle index.
6724 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
6725 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
6726 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
6727 return false;
6728 SmallVector<int, 64> SrcMask0, SrcMask1;
6729 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
6730 if (!resolveTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG) ||
6731 !resolveTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG))
6732 return false;
6733 int MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
6734 SmallVector<int, 64> Mask0, Mask1;
6735 scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
6736 scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
6737 for (int i = 0; i != MaskSize; ++i) {
6738 if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
6739 Mask.push_back(SM_SentinelUndef);
6740 else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
6741 Mask.push_back(SM_SentinelZero);
6742 else if (Mask1[i] == SM_SentinelZero)
6743 Mask.push_back(Mask0[i]);
6744 else if (Mask0[i] == SM_SentinelZero)
6745 Mask.push_back(Mask1[i] + (MaskSize * SrcInputs0.size()));
6746 else
6747 return false;
6749 for (SDValue &Op : SrcInputs0)
6750 Ops.push_back(Op);
6751 for (SDValue &Op : SrcInputs1)
6752 Ops.push_back(Op);
6753 return true;
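// A sketch of the byte-blend path above (operands invented for illustration):
// OR(AND(X, splat 0x0000FFFF), AND(Y, splat 0xFFFF0000)) on v4i32 has no
// known-one bits; bytes 0-1 of each element are known zero in the second
// operand and bytes 2-3 in the first, so SelectMask == {0,0,1,1} and the
// final mask is <0,1,18,19, 4,5,22,23, 8,9,26,27, 12,13,30,31> over
// Ops = {op0, op1}.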
6755 case ISD::INSERT_SUBVECTOR: {
6756 SDValue Src = N.getOperand(0);
6757 SDValue Sub = N.getOperand(1);
6758 EVT SubVT = Sub.getValueType();
6759 unsigned NumSubElts = SubVT.getVectorNumElements();
6760 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
6761 !N->isOnlyUserOf(Sub.getNode()))
6762 return false;
6763 int InsertIdx = N.getConstantOperandVal(2);
6764 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
6765 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6766 Sub.getOperand(0).getValueType() == VT &&
6767 isa<ConstantSDNode>(Sub.getOperand(1))) {
6768 int ExtractIdx = Sub.getConstantOperandVal(1);
6769 for (int i = 0; i != (int)NumElts; ++i)
6770 Mask.push_back(i);
6771 for (int i = 0; i != (int)NumSubElts; ++i)
6772 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
6773 Ops.push_back(Src);
6774 Ops.push_back(Sub.getOperand(0));
6775 return true;
6777 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
6778 SmallVector<int, 64> SubMask;
6779 SmallVector<SDValue, 2> SubInputs;
6780 if (!resolveTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
6781 SubMask, DAG))
6782 return false;
6783 if (SubMask.size() != NumSubElts) {
6784 assert(((SubMask.size() % NumSubElts) == 0 ||
6785 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
6786 if ((NumSubElts % SubMask.size()) == 0) {
6787 int Scale = NumSubElts / SubMask.size();
6788 SmallVector<int,64> ScaledSubMask;
6789 scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
6790 SubMask = ScaledSubMask;
6791 } else {
6792 int Scale = SubMask.size() / NumSubElts;
6793 NumSubElts = SubMask.size();
6794 NumElts *= Scale;
6795 InsertIdx *= Scale;
6798 Ops.push_back(Src);
6799 for (SDValue &SubInput : SubInputs) {
6800 EVT SubSVT = SubInput.getValueType().getScalarType();
6801 EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
6802 NumSizeInBits / SubSVT.getSizeInBits());
6803 Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
6804 DAG.getUNDEF(AltVT), SubInput,
6805 DAG.getIntPtrConstant(0, SDLoc(N))));
6807 for (int i = 0; i != (int)NumElts; ++i)
6808 Mask.push_back(i);
6809 for (int i = 0; i != (int)NumSubElts; ++i) {
6810 int M = SubMask[i];
6811 if (0 <= M) {
6812 int InputIdx = M / NumSubElts;
6813 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
6815 Mask[i + InsertIdx] = M;
6817 return true;
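// For example (values invented for illustration): with VT == v8i32,
// insert_subvector(A, extract_subvector(B, 4), 0) decodes to
// Mask = <12,13,14,15, 4,5,6,7> over Ops = {A, B}, since the inserted lanes
// are remapped to NumElts + ExtractIdx + i.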
6819 case ISD::SCALAR_TO_VECTOR: {
6820 // Match against a scalar_to_vector of an extract from a vector;
6821 // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
6822 SDValue N0 = N.getOperand(0);
6823 SDValue SrcExtract;
6825 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6826 N0.getOperand(0).getValueType() == VT) ||
6827 (N0.getOpcode() == X86ISD::PEXTRW &&
6828 N0.getOperand(0).getValueType() == MVT::v8i16) ||
6829 (N0.getOpcode() == X86ISD::PEXTRB &&
6830 N0.getOperand(0).getValueType() == MVT::v16i8)) {
6831 SrcExtract = N0;
6834 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
6835 return false;
6837 SDValue SrcVec = SrcExtract.getOperand(0);
6838 EVT SrcVT = SrcVec.getValueType();
6839 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6840 unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
6842 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
6843 if (NumSrcElts <= SrcIdx)
6844 return false;
6846 Ops.push_back(SrcVec);
6847 Mask.push_back(SrcIdx);
6848 Mask.append(NumZeros, SM_SentinelZero);
6849 Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
6850 return true;
6852 case X86ISD::PINSRB:
6853 case X86ISD::PINSRW: {
6854 SDValue InVec = N.getOperand(0);
6855 SDValue InScl = N.getOperand(1);
6856 SDValue InIndex = N.getOperand(2);
6857 if (!isa<ConstantSDNode>(InIndex) ||
6858 cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
6859 return false;
6860 uint64_t InIdx = N.getConstantOperandVal(2);
6862 // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
6863 if (X86::isZeroNode(InScl)) {
6864 Ops.push_back(InVec);
6865 for (unsigned i = 0; i != NumElts; ++i)
6866 Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
6867 return true;
6870 // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
6871 // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
6872 unsigned ExOp =
6873 (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
6874 if (InScl.getOpcode() != ExOp)
6875 return false;
6877 SDValue ExVec = InScl.getOperand(0);
6878 SDValue ExIndex = InScl.getOperand(1);
6879 if (!isa<ConstantSDNode>(ExIndex) ||
6880 cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
6881 return false;
6882 uint64_t ExIdx = InScl.getConstantOperandVal(1);
6884 Ops.push_back(InVec);
6885 Ops.push_back(ExVec);
6886 for (unsigned i = 0; i != NumElts; ++i)
6887 Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
6888 return true;
6890 case X86ISD::PACKSS:
6891 case X86ISD::PACKUS: {
6892 SDValue N0 = N.getOperand(0);
6893 SDValue N1 = N.getOperand(1);
6894 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
6895 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
6896 "Unexpected input value type");
6898 APInt EltsLHS, EltsRHS;
6899 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
6901 // If we know input saturation won't happen, we can treat this
6902 // as a truncation shuffle.
6903 if (Opcode == X86ISD::PACKSS) {
6904 if ((!N0.isUndef() &&
6905 DAG.ComputeNumSignBits(N0, EltsLHS) <= NumBitsPerElt) ||
6906 (!N1.isUndef() &&
6907 DAG.ComputeNumSignBits(N1, EltsRHS) <= NumBitsPerElt))
6908 return false;
6909 } else {
6910 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
6911 if ((!N0.isUndef() && !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS)) ||
6912 (!N1.isUndef() && !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS)))
6913 return false;
6916 bool IsUnary = (N0 == N1);
6918 Ops.push_back(N0);
6919 if (!IsUnary)
6920 Ops.push_back(N1);
6922 createPackShuffleMask(VT, Mask, IsUnary);
6923 return true;
6925 case X86ISD::VSHLI:
6926 case X86ISD::VSRLI: {
6927 uint64_t ShiftVal = N.getConstantOperandVal(1);
6928 // Out of range bit shifts are guaranteed to be zero.
6929 if (NumBitsPerElt <= ShiftVal) {
6930 Mask.append(NumElts, SM_SentinelZero);
6931 return true;
6934 // We can only decode 'whole byte' bit shifts as shuffles.
6935 if ((ShiftVal % 8) != 0)
6936 break;
6938 uint64_t ByteShift = ShiftVal / 8;
6939 unsigned NumBytes = NumSizeInBits / 8;
6940 unsigned NumBytesPerElt = NumBitsPerElt / 8;
6941 Ops.push_back(N.getOperand(0));
6943 // Clear mask to all zeros and insert the shifted byte indices.
6944 Mask.append(NumBytes, SM_SentinelZero);
6946 if (X86ISD::VSHLI == Opcode) {
6947 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
6948 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6949 Mask[i + j] = i + j - ByteShift;
6950 } else {
6951 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
6952 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6953 Mask[i + j - ByteShift] = i + j;
6955 return true;
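// A worked example (shift amount chosen for illustration): VSRLI of a v2i64
// by 16 bits is a whole-byte shift (ByteShift == 2), and decodes to the byte
// shuffle <2,3,4,5,6,7,Z,Z, 10,11,12,13,14,15,Z,Z> (Z = SM_SentinelZero),
// i.e. each 64-bit lane's bytes move down by two and the top two bytes of
// each lane become zero.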
6957 case X86ISD::VBROADCAST: {
6958 SDValue Src = N.getOperand(0);
6959 MVT SrcVT = Src.getSimpleValueType();
6960 if (!SrcVT.isVector())
6961 return false;
6963 if (NumSizeInBits != SrcVT.getSizeInBits()) {
6964 assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
6965 "Illegal broadcast type");
6966 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
6967 NumSizeInBits / SrcVT.getScalarSizeInBits());
6968 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
6969 DAG.getUNDEF(SrcVT), Src,
6970 DAG.getIntPtrConstant(0, SDLoc(N)));
6973 Ops.push_back(Src);
6974 Mask.append(NumElts, 0);
6975 return true;
6977 case ISD::ZERO_EXTEND:
6978 case ISD::ANY_EXTEND:
6979 case ISD::ZERO_EXTEND_VECTOR_INREG:
6980 case ISD::ANY_EXTEND_VECTOR_INREG: {
6981 SDValue Src = N.getOperand(0);
6982 EVT SrcVT = Src.getValueType();
6984 // Extended source must be a simple vector.
6985 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6986 (SrcVT.getScalarSizeInBits() % 8) != 0)
6987 return false;
6989 unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
6990 bool IsAnyExtend =
6991 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
6992 DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
6993 Mask);
6995 if (NumSizeInBits != SrcVT.getSizeInBits()) {
6996 assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
6997 "Illegal zero-extension type");
6998 SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
6999 NumSizeInBits / NumSrcBitsPerElt);
7000 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7001 DAG.getUNDEF(SrcVT), Src,
7002 DAG.getIntPtrConstant(0, SDLoc(N)));
7005 Ops.push_back(Src);
7006 return true;
7010 return false;
7013 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
7014 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7015 SmallVectorImpl<int> &Mask) {
7016 int MaskWidth = Mask.size();
7017 SmallVector<SDValue, 16> UsedInputs;
7018 for (int i = 0, e = Inputs.size(); i < e; ++i) {
7019 int lo = UsedInputs.size() * MaskWidth;
7020 int hi = lo + MaskWidth;
7022 // Strip UNDEF input usage.
7023 if (Inputs[i].isUndef())
7024 for (int &M : Mask)
7025 if ((lo <= M) && (M < hi))
7026 M = SM_SentinelUndef;
7028 // Check for unused inputs.
7029 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7030 for (int &M : Mask)
7031 if (lo <= M)
7032 M -= MaskWidth;
7033 continue;
7036 // Check for repeated inputs.
7037 bool IsRepeat = false;
7038 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7039 if (UsedInputs[j] != Inputs[i])
7040 continue;
7041 for (int &M : Mask)
7042 if (lo <= M)
7043 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7044 IsRepeat = true;
7045 break;
7047 if (IsRepeat)
7048 continue;
7050 UsedInputs.push_back(Inputs[i]);
7052 Inputs = UsedInputs;
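// For example (inputs invented for illustration): Inputs = {A, A} with
// Mask = <0,1,6,7> (MaskWidth == 4) is detected as a repeated input; the
// second input's indices are remapped into the first, leaving Inputs = {A}
// and Mask = <0,1,2,3>.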
7055 /// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
7056 /// and set the SM_SentinelUndef and SM_SentinelZero values. Then check the
7057 /// remaining input indices in case we now have a unary shuffle and adjust the
7058 /// inputs accordingly.
7059 /// Returns true if the target shuffle mask was decoded.
7060 static bool resolveTargetShuffleInputs(SDValue Op,
7061 SmallVectorImpl<SDValue> &Inputs,
7062 SmallVectorImpl<int> &Mask,
7063 SelectionDAG &DAG) {
7064 unsigned NumElts = Op.getValueType().getVectorNumElements();
7065 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7066 if (!setTargetShuffleZeroElements(Op, Mask, Inputs))
7067 if (!getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG))
7068 return false;
7070 resolveTargetShuffleInputsAndMask(Inputs, Mask);
7071 return true;
7074 /// Returns the scalar element that will make up the ith
7075 /// element of the result of the vector shuffle.
7076 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
7077 unsigned Depth) {
7078 if (Depth == 6)
7079 return SDValue(); // Limit search depth.
7081 SDValue V = SDValue(N, 0);
7082 EVT VT = V.getValueType();
7083 unsigned Opcode = V.getOpcode();
7085 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7086 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
7087 int Elt = SV->getMaskElt(Index);
7089 if (Elt < 0)
7090 return DAG.getUNDEF(VT.getVectorElementType());
7092 unsigned NumElems = VT.getVectorNumElements();
7093 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
7094 : SV->getOperand(1);
7095 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
7098 // Recurse into target specific vector shuffles to find scalars.
7099 if (isTargetShuffle(Opcode)) {
7100 MVT ShufVT = V.getSimpleValueType();
7101 MVT ShufSVT = ShufVT.getVectorElementType();
7102 int NumElems = (int)ShufVT.getVectorNumElements();
7103 SmallVector<int, 16> ShuffleMask;
7104 SmallVector<SDValue, 16> ShuffleOps;
7105 bool IsUnary;
7107 if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
7108 return SDValue();
7110 int Elt = ShuffleMask[Index];
7111 if (Elt == SM_SentinelZero)
7112 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
7113 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
7114 if (Elt == SM_SentinelUndef)
7115 return DAG.getUNDEF(ShufSVT);
7117 assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
7118 SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7119 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
7120 Depth+1);
7123 // Recurse into insert_subvector base/sub vector to find scalars.
7124 if (Opcode == ISD::INSERT_SUBVECTOR &&
7125 isa<ConstantSDNode>(N->getOperand(2))) {
7126 SDValue Vec = N->getOperand(0);
7127 SDValue Sub = N->getOperand(1);
7128 EVT SubVT = Sub.getValueType();
7129 unsigned NumSubElts = SubVT.getVectorNumElements();
7130 uint64_t SubIdx = N->getConstantOperandVal(2);
7132 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7133 return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
7134 return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
7137 // Recurse into extract_subvector src vector to find scalars.
7138 if (Opcode == ISD::EXTRACT_SUBVECTOR &&
7139 isa<ConstantSDNode>(N->getOperand(1))) {
7140 SDValue Src = N->getOperand(0);
7141 uint64_t SrcIdx = N->getConstantOperandVal(1);
7142 return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
7145 // Actual nodes that may contain scalar elements
7146 if (Opcode == ISD::BITCAST) {
7147 V = V.getOperand(0);
7148 EVT SrcVT = V.getValueType();
7149 unsigned NumElems = VT.getVectorNumElements();
7151 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
7152 return SDValue();
7155 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
7156 return (Index == 0) ? V.getOperand(0)
7157 : DAG.getUNDEF(VT.getVectorElementType());
7159 if (V.getOpcode() == ISD::BUILD_VECTOR)
7160 return V.getOperand(Index);
7162 return SDValue();
7165 // Use PINSRB/PINSRW/PINSRD to create a build vector.
7166 static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7167 unsigned NumNonZero, unsigned NumZero,
7168 SelectionDAG &DAG,
7169 const X86Subtarget &Subtarget) {
7170 MVT VT = Op.getSimpleValueType();
7171 unsigned NumElts = VT.getVectorNumElements();
7172 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7173 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7174 "Illegal vector insertion");
7176 SDLoc dl(Op);
7177 SDValue V;
7178 bool First = true;
7180 for (unsigned i = 0; i < NumElts; ++i) {
7181 bool IsNonZero = (NonZeros & (1 << i)) != 0;
7182 if (!IsNonZero)
7183 continue;
7185 // If the build vector contains zeros, or our first insertion is not at the
7186 // first index, insert into a zero vector to break any register dependency;
7187 // otherwise use SCALAR_TO_VECTOR.
7188 if (First) {
7189 First = false;
7190 if (NumZero || 0 != i)
7191 V = getZeroVector(VT, Subtarget, DAG, dl);
7192 else {
7193 assert(0 == i && "Expected insertion into zero-index");
7194 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7195 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7196 V = DAG.getBitcast(VT, V);
7197 continue;
7200 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7201 DAG.getIntPtrConstant(i, dl));
7204 return V;
7207 /// Custom lower build_vector of v16i8.
7208 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7209 unsigned NumNonZero, unsigned NumZero,
7210 SelectionDAG &DAG,
7211 const X86Subtarget &Subtarget) {
7212 if (NumNonZero > 8 && !Subtarget.hasSSE41())
7213 return SDValue();
7215 // SSE4.1 - use PINSRB to insert each byte directly.
7216 if (Subtarget.hasSSE41())
7217 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7218 Subtarget);
7220 SDLoc dl(Op);
7221 SDValue V;
7223 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
7224 for (unsigned i = 0; i < 16; i += 2) {
7225 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7226 bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7227 if (!ThisIsNonZero && !NextIsNonZero)
7228 continue;
7230 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7231 SDValue Elt;
7232 if (ThisIsNonZero) {
7233 if (NumZero || NextIsNonZero)
7234 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7235 else
7236 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7239 if (NextIsNonZero) {
7240 SDValue NextElt;
7241 if (i == 0 && NumZero)
7242 NextElt = DAG.getZExtOrTrunc(Op.getOperand(i+1), dl, MVT::i32);
7243 else
7244 NextElt = DAG.getAnyExtOrTrunc(Op.getOperand(i+1), dl, MVT::i32);
7245 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7246 DAG.getConstant(8, dl, MVT::i8));
7247 if (ThisIsNonZero)
7248 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7249 else
7250 Elt = NextElt;
7253 // If our first insertion is not the first index, then insert into a zero
7254 // vector to break any register dependency; else use SCALAR_TO_VECTOR.
7255 if (!V) {
7256 if (i != 0)
7257 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
7258 else {
7259 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
7260 V = DAG.getBitcast(MVT::v8i16, V);
7261 continue;
7264 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
7265 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
7266 DAG.getIntPtrConstant(i / 2, dl));
7269 return DAG.getBitcast(MVT::v16i8, V);
7272 /// Custom lower build_vector of v8i16.
7273 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
7274 unsigned NumNonZero, unsigned NumZero,
7275 SelectionDAG &DAG,
7276 const X86Subtarget &Subtarget) {
7277 if (NumNonZero > 4 && !Subtarget.hasSSE41())
7278 return SDValue();
7280 // Use PINSRW to insert each 16-bit element directly.
7281 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7282 Subtarget);
7285 /// Custom lower build_vector of v4i32 or v4f32.
7286 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
7287 const X86Subtarget &Subtarget) {
7288 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
7289 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
7290 // Because we're creating a less complicated build vector here, we may enable
7291 // further folding of the MOVDDUP via shuffle transforms.
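// For example (illustrative): (v4f32 <x, y, x, y>) is rebuilt as
// (v4f32 (bitcast (MOVDDUP (v2f64 (bitcast (build_vector x, y, undef, undef)))))).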
7292 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
7293 Op.getOperand(0) == Op.getOperand(2) &&
7294 Op.getOperand(1) == Op.getOperand(3) &&
7295 Op.getOperand(0) != Op.getOperand(1)) {
7296 SDLoc DL(Op);
7297 MVT VT = Op.getSimpleValueType();
7298 MVT EltVT = VT.getVectorElementType();
7299 // Create a new build vector with the first 2 elements followed by undef
7300 // padding, bitcast to v2f64, duplicate, and bitcast back.
7301 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
7302 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
7303 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
7304 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
7305 return DAG.getBitcast(VT, Dup);
7308 // Find all zeroable elements.
7309 std::bitset<4> Zeroable, Undefs;
7310 for (int i = 0; i < 4; ++i) {
7311 SDValue Elt = Op.getOperand(i);
7312 Undefs[i] = Elt.isUndef();
7313 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
7315 assert(Zeroable.size() - Zeroable.count() > 1 &&
7316 "We expect at least two non-zero elements!");
7318 // We only know how to deal with build_vector nodes where elements are either
7319 // zeroable or extract_vector_elt with constant index.
7320 SDValue FirstNonZero;
7321 unsigned FirstNonZeroIdx;
7322 for (unsigned i = 0; i < 4; ++i) {
7323 if (Zeroable[i])
7324 continue;
7325 SDValue Elt = Op.getOperand(i);
7326 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7327 !isa<ConstantSDNode>(Elt.getOperand(1)))
7328 return SDValue();
7329 // Make sure that this node is extracting from a 128-bit vector.
7330 MVT VT = Elt.getOperand(0).getSimpleValueType();
7331 if (!VT.is128BitVector())
7332 return SDValue();
7333 if (!FirstNonZero.getNode()) {
7334 FirstNonZero = Elt;
7335 FirstNonZeroIdx = i;
7339 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
7340 SDValue V1 = FirstNonZero.getOperand(0);
7341 MVT VT = V1.getSimpleValueType();
7343 // See if this build_vector can be lowered as a blend with zero.
7344 SDValue Elt;
7345 unsigned EltMaskIdx, EltIdx;
7346 int Mask[4];
7347 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
7348 if (Zeroable[EltIdx]) {
7349 // The zero vector will be on the right hand side.
7350 Mask[EltIdx] = EltIdx+4;
7351 continue;
7354 Elt = Op->getOperand(EltIdx);
7355 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
7356 EltMaskIdx = Elt.getConstantOperandVal(1);
7357 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
7358 break;
7359 Mask[EltIdx] = EltIdx;
7362 if (EltIdx == 4) {
7363 // Let the shuffle legalizer deal with blend operations.
7364 SDValue VZeroOrUndef = (Zeroable == Undefs)
7365 ? DAG.getUNDEF(VT)
7366 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
7367 if (V1.getSimpleValueType() != VT)
7368 V1 = DAG.getBitcast(VT, V1);
7369 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
7372 // See if we can lower this build_vector to an INSERTPS.
7373 if (!Subtarget.hasSSE41())
7374 return SDValue();
7376 SDValue V2 = Elt.getOperand(0);
7377 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7378 V1 = SDValue();
7380 bool CanFold = true;
7381 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7382 if (Zeroable[i])
7383 continue;
7385 SDValue Current = Op->getOperand(i);
7386 SDValue SrcVector = Current->getOperand(0);
7387 if (!V1.getNode())
7388 V1 = SrcVector;
7389 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7392 if (!CanFold)
7393 return SDValue();
7395 assert(V1.getNode() && "Expected at least two non-zero elements!");
7396 if (V1.getSimpleValueType() != MVT::v4f32)
7397 V1 = DAG.getBitcast(MVT::v4f32, V1);
7398 if (V2.getSimpleValueType() != MVT::v4f32)
7399 V2 = DAG.getBitcast(MVT::v4f32, V2);
7401 // Ok, we can emit an INSERTPS instruction.
7402 unsigned ZMask = Zeroable.to_ulong();
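// The INSERTPS immediate encodes the source element of V2 in bits [7:6],
// the destination lane in bits [5:4], and the zero mask in bits [3:0].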
7404 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7405 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7406 SDLoc DL(Op);
7407 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7408 DAG.getIntPtrConstant(InsertPSMask, DL));
7409 return DAG.getBitcast(VT, Result);
7412 /// Return a vector logical shift node.
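/// The shift is emitted as a whole-register byte shift (VSHLDQ/VSRLDQ, i.e.
/// PSLLDQ/PSRLDQ) of a v16i8 bitcast, so \p NumBits must be a multiple of 8;
/// e.g. a 32-bit left shift becomes a 4-byte VSHLDQ.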
7413 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7414 SelectionDAG &DAG, const TargetLowering &TLI,
7415 const SDLoc &dl) {
7416 assert(VT.is128BitVector() && "Unknown type for VShift");
7417 MVT ShVT = MVT::v16i8;
7418 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7419 SrcOp = DAG.getBitcast(ShVT, SrcOp);
7420 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7421 SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, MVT::i8);
7422 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7425 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7426 SelectionDAG &DAG) {
7428 // Check if the scalar load can be widened into a vector load, and if
7429 // the address is "base + cst", see if the cst can be "absorbed" into
7430 // the shuffle mask.
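// For example (assuming a 16-byte aligned stack slot): a scalar i32 load at
// "slot + 8" can be widened to a v4i32 load of the whole slot followed by a
// splat shuffle of element 2, absorbing the +8 offset into the mask.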
7431 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7432 SDValue Ptr = LD->getBasePtr();
7433 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
7434 return SDValue();
7435 EVT PVT = LD->getValueType(0);
7436 if (PVT != MVT::i32 && PVT != MVT::f32)
7437 return SDValue();
7439 int FI = -1;
7440 int64_t Offset = 0;
7441 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7442 FI = FINode->getIndex();
7443 Offset = 0;
7444 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7445 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7446 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7447 Offset = Ptr.getConstantOperandVal(1);
7448 Ptr = Ptr.getOperand(0);
7449 } else {
7450 return SDValue();
7453 // FIXME: 256-bit vector instructions don't require a strict alignment,
7454 // improve this code to support it better.
7455 unsigned RequiredAlign = VT.getSizeInBits()/8;
7456 SDValue Chain = LD->getChain();
7457 // Make sure the stack object alignment is at least 16 or 32.
7458 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7459 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7460 if (MFI.isFixedObjectIndex(FI)) {
7461 // Can't change the alignment. FIXME: It's possible to compute
7462 // the exact stack offset and reference FI + adjusted offset instead.
7463 // If someone *really* cares about this, that's the way to implement it.
7464 return SDValue();
7465 } else {
7466 MFI.setObjectAlignment(FI, RequiredAlign);
7470 // (Offset % 16 or 32) must be a multiple of 4. The address is then
7471 // Ptr + (Offset & ~(RequiredAlign - 1)).
7472 if (Offset < 0)
7473 return SDValue();
7474 if ((Offset % RequiredAlign) & 3)
7475 return SDValue();
7476 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7477 if (StartOffset) {
7478 SDLoc DL(Ptr);
7479 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7480 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7483 int EltNo = (Offset - StartOffset) >> 2;
7484 unsigned NumElems = VT.getVectorNumElements();
7486 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7487 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
7488 LD->getPointerInfo().getWithOffset(StartOffset));
7490 SmallVector<int, 8> Mask(NumElems, EltNo);
7492 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
7495 return SDValue();
7498 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
7499 /// elements can be replaced by a single large load which has the same value as
7500 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
7502 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
7503 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
7504 const SDLoc &DL, SelectionDAG &DAG,
7505 const X86Subtarget &Subtarget,
7506 bool isAfterLegalize) {
7507 unsigned NumElems = Elts.size();
7509 int LastLoadedElt = -1;
7510 APInt LoadMask = APInt::getNullValue(NumElems);
7511 APInt ZeroMask = APInt::getNullValue(NumElems);
7512 APInt UndefMask = APInt::getNullValue(NumElems);
7514 // For each element in the initializer, see if we've found a load, zero or an
7515 // undef.
7516 for (unsigned i = 0; i < NumElems; ++i) {
7517 SDValue Elt = peekThroughBitcasts(Elts[i]);
7518 if (!Elt.getNode())
7519 return SDValue();
7521 if (Elt.isUndef())
7522 UndefMask.setBit(i);
7523 else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
7524 ZeroMask.setBit(i);
7525 else if (ISD::isNON_EXTLoad(Elt.getNode())) {
7526 LoadMask.setBit(i);
7527 LastLoadedElt = i;
7528 // Each loaded element must be the correct fractional portion of the
7529 // requested vector load.
7530 if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
7531 return SDValue();
7532 } else
7533 return SDValue();
7535 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
7536 LoadMask.countPopulation()) == NumElems &&
7537 "Incomplete element masks");
7539 // Handle Special Cases - all undef or undef/zero.
7540 if (UndefMask.countPopulation() == NumElems)
7541 return DAG.getUNDEF(VT);
7543 // FIXME: Should we return this as a BUILD_VECTOR instead?
7544 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
7545 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
7546 : DAG.getConstantFP(0.0, DL, VT);
7548 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7549 int FirstLoadedElt = LoadMask.countTrailingZeros();
7550 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
7551 LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
7552 EVT LDBaseVT = EltBase.getValueType();
7554 // Consecutive loads can contain UNDEFs but not ZERO elements.
7555 // Consecutive loads with UNDEF and ZERO elements require an
7556 // additional shuffle stage to clear the ZERO elements.
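// For example (v4i32): <load a+0, load a+4, zero, load a+12> is loaded as a
// full v4i32 vector from 'a' and then shuffled against a zero vector to
// clear element 2.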
7557 bool IsConsecutiveLoad = true;
7558 bool IsConsecutiveLoadWithZeros = true;
7559 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
7560 if (LoadMask[i]) {
7561 SDValue Elt = peekThroughBitcasts(Elts[i]);
7562 LoadSDNode *LD = cast<LoadSDNode>(Elt);
7563 if (!DAG.areNonVolatileConsecutiveLoads(
7564 LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
7565 i - FirstLoadedElt)) {
7566 IsConsecutiveLoad = false;
7567 IsConsecutiveLoadWithZeros = false;
7568 break;
7570 } else if (ZeroMask[i]) {
7571 IsConsecutiveLoad = false;
7575 SmallVector<LoadSDNode *, 8> Loads;
7576 for (int i = FirstLoadedElt; i <= LastLoadedElt; ++i)
7577 if (LoadMask[i])
7578 Loads.push_back(cast<LoadSDNode>(peekThroughBitcasts(Elts[i])));
7580 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
7581 auto MMOFlags = LDBase->getMemOperand()->getFlags();
7582 assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
7583 "Cannot merge volatile loads.");
7584 SDValue NewLd =
7585 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
7586 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
7587 for (auto *LD : Loads)
7588 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
7589 return NewLd;
7592 // LOAD - all consecutive load/undefs (must start/end with a load).
7593 // If we have found an entire vector of loads and undefs, then return a large
7594 // load of the entire vector width starting at the base pointer.
7595 // If the vector contains zeros, then attempt to shuffle those elements.
7596 if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
7597 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
7598 assert(LDBase && "Did not find base load for merging consecutive loads");
7599 EVT EltVT = LDBase->getValueType(0);
7600 // Ensure that the input vector size for the merged loads matches the
7601 // cumulative size of the input elements.
7602 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
7603 return SDValue();
7605 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
7606 return SDValue();
7608 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
7609 // will lower to regular temporal loads and use the cache.
7610 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
7611 VT.is256BitVector() && !Subtarget.hasInt256())
7612 return SDValue();
7614 if (NumElems == 1)
7615 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
7617 if (IsConsecutiveLoad)
7618 return CreateLoad(VT, LDBase);
7620 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
7621 // vector and a zero vector to clear out the zero elements.
7622 if (!isAfterLegalize && VT.isVector() && NumElems == VT.getVectorNumElements()) {
7623 SmallVector<int, 4> ClearMask(NumElems, -1);
7624 for (unsigned i = 0; i < NumElems; ++i) {
7625 if (ZeroMask[i])
7626 ClearMask[i] = i + NumElems;
7627 else if (LoadMask[i])
7628 ClearMask[i] = i;
7630 SDValue V = CreateLoad(VT, LDBase);
7631 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
7632 : DAG.getConstantFP(0.0, DL, VT);
7633 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
7637 unsigned BaseSize = LDBaseVT.getStoreSizeInBits();
7638 int LoadSize = (1 + LastLoadedElt - FirstLoadedElt) * BaseSize;
7640 // If the upper half of a ymm/zmm load is undef then just load the lower half.
7641 if (VT.is256BitVector() || VT.is512BitVector()) {
7642 unsigned HalfNumElems = NumElems / 2;
7643 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
7644 EVT HalfVT =
7645 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
7646 SDValue HalfLD =
7647 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
7648 DAG, Subtarget, isAfterLegalize);
7649 if (HalfLD)
7650 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
7651 HalfLD, DAG.getIntPtrConstant(0, DL));
7655 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
7656 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
7657 (LoadSize == 32 || LoadSize == 64) &&
7658 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
7659 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSize)
7660 : MVT::getIntegerVT(LoadSize);
7661 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSize);
7662 if (TLI.isTypeLegal(VecVT)) {
7663 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
7664 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
7665 SDValue ResNode =
7666 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
7667 LDBase->getPointerInfo(),
7668 LDBase->getAlignment(),
7669 MachineMemOperand::MOLoad);
7670 for (auto *LD : Loads)
7671 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
7672 return DAG.getBitcast(VT, ResNode);
7676 // BROADCAST - match the smallest possible repetition pattern, load that
7677 // scalar/subvector element and then broadcast to the entire vector.
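// For example (illustrative): a v8i32 build_vector of loads <a,b,a,b,a,b,a,b>
// repeats every 64 bits, so the repeated pair can be loaded once as a single
// scalar and broadcast with VBROADCAST; repetition patterns wider than 64 bits
// use SUBV_BROADCAST of the repeated subvector instead.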
7678 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) &&
7679 (BaseSize % 8) == 0 && Subtarget.hasAVX() &&
7680 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
7681 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
7682 unsigned RepeatSize = SubElems * BaseSize;
7683 unsigned ScalarSize = std::min(RepeatSize, 64u);
7684 if (!Subtarget.hasAVX2() && ScalarSize < 32)
7685 continue;
7687 bool Match = true;
7688 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(LDBaseVT));
7689 for (unsigned i = 0; i != NumElems && Match; ++i) {
7690 if (!LoadMask[i])
7691 continue;
7692 SDValue Elt = peekThroughBitcasts(Elts[i]);
7693 if (RepeatedLoads[i % SubElems].isUndef())
7694 RepeatedLoads[i % SubElems] = Elt;
7695 else
7696 Match &= (RepeatedLoads[i % SubElems] == Elt);
7699 // We must have loads at both ends of the repetition.
7700 Match &= !RepeatedLoads.front().isUndef();
7701 Match &= !RepeatedLoads.back().isUndef();
7702 if (!Match)
7703 continue;
7705 EVT RepeatVT =
7706 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
7707 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
7708 : EVT::getFloatingPointVT(ScalarSize);
7709 if (RepeatSize > ScalarSize)
7710 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
7711 RepeatSize / ScalarSize);
7712 EVT BroadcastVT =
7713 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
7714 VT.getSizeInBits() / ScalarSize);
7715 if (TLI.isTypeLegal(BroadcastVT)) {
7716 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
7717 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
7718 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
7719 : X86ISD::VBROADCAST;
7720 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
7721 return DAG.getBitcast(VT, Broadcast);
7727 return SDValue();
7730 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
7731 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
7732 // are consecutive, non-overlapping, and in the right order.
7733 static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
7734 SelectionDAG &DAG,
7735 const X86Subtarget &Subtarget,
7736 bool isAfterLegalize) {
7737 SmallVector<SDValue, 64> Elts;
7738 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7739 if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
7740 Elts.push_back(Elt);
7741 continue;
7743 return SDValue();
7745 assert(Elts.size() == VT.getVectorNumElements());
7746 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
7747 isAfterLegalize);
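// Split a SplatBitSize-bit splat value into SplatBitSize / ScalarSize scalar
// constants of VT's element type and return them as an IR ConstantVector.
// For example, a 128-bit repeated pattern for a v8i32 type is split into four
// i32 constants (lowest bits first), which can then be broadcast.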
7750 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
7751 unsigned SplatBitSize, LLVMContext &C) {
7752 unsigned ScalarSize = VT.getScalarSizeInBits();
7753 unsigned NumElm = SplatBitSize / ScalarSize;
7755 SmallVector<Constant *, 32> ConstantVec;
7756 for (unsigned i = 0; i < NumElm; i++) {
7757 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
7758 Constant *Const;
7759 if (VT.isFloatingPoint()) {
7760 if (ScalarSize == 32) {
7761 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7762 } else {
7763 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7764 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7766 } else
7767 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
7768 ConstantVec.push_back(Const);
7770 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
7773 static bool isFoldableUseOfShuffle(SDNode *N) {
7774 for (auto *U : N->uses()) {
7775 unsigned Opc = U->getOpcode();
7776 // VPERMV/VPERMV3 shuffles can never fold their index operands.
7777 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
7778 return false;
7779 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
7780 return false;
7781 if (isTargetShuffle(Opc))
7782 return true;
7783 if (Opc == ISD::BITCAST) // Ignore bitcasts
7784 return isFoldableUseOfShuffle(U);
7785 if (N->hasOneUse())
7786 return true;
7788 return false;
7791 // Check if the current node of the build vector is a zero-extended vector.
7792 // If so, return the value that is being zero-extended.
7793 // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
7794 // NumElt - returns the number of zero-extended identical values.
7795 // EltType - returns the type of the value including the zero extension.
7796 static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
7797 unsigned &NumElt, MVT &EltType) {
7798 SDValue ExtValue = Op->getOperand(0);
7799 unsigned NumElts = Op->getNumOperands();
7800 unsigned Delta = NumElts;
7802 for (unsigned i = 1; i < NumElts; i++) {
7803 if (Op->getOperand(i) == ExtValue) {
7804 Delta = i;
7805 break;
7807 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
7808 return SDValue();
7810 if (!isPowerOf2_32(Delta) || Delta == 1)
7811 return SDValue();
7813 for (unsigned i = Delta; i < NumElts; i++) {
7814 if (i % Delta == 0) {
7815 if (Op->getOperand(i) != ExtValue)
7816 return SDValue();
7817 } else if (!(isNullConstant(Op->getOperand(i)) ||
7818 Op->getOperand(i).isUndef()))
7819 return SDValue();
7821 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
7822 unsigned ExtVTSize = EltSize * Delta;
7823 EltType = MVT::getIntegerVT(ExtVTSize);
7824 NumElt = NumElts / Delta;
7825 return ExtValue;
7828 /// Attempt to use the vbroadcast instruction to generate a splat value
7829 /// from a splat BUILD_VECTOR which uses:
7830 /// a. A single scalar load, or a constant.
7831 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
7833 /// The VBROADCAST node is returned when a pattern is found,
7834 /// or SDValue() otherwise.
7835 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
7836 const X86Subtarget &Subtarget,
7837 SelectionDAG &DAG) {
7838 // VBROADCAST requires AVX.
7839 // TODO: Splats could be generated for non-AVX CPUs using SSE
7840 // instructions, but there's less potential gain for only 128-bit vectors.
7841 if (!Subtarget.hasAVX())
7842 return SDValue();
7844 MVT VT = BVOp->getSimpleValueType(0);
7845 SDLoc dl(BVOp);
7847 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7848 "Unsupported vector type for broadcast.");
7850 BitVector UndefElements;
7851 SDValue Ld = BVOp->getSplatValue(&UndefElements);
7853 // Attempt to use VBROADCASTM
7854 // From this pattern:
7855 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
7856 // b. t1 = (build_vector t0 t0)
7858 // Create (VBROADCASTM v2i1 X)
7859 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
7860 MVT EltType = VT.getScalarType();
7861 unsigned NumElts = VT.getVectorNumElements();
7862 SDValue BOperand;
7863 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
7864 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
7865 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
7866 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
7867 if (ZeroExtended)
7868 BOperand = ZeroExtended.getOperand(0);
7869 else
7870 BOperand = Ld.getOperand(0).getOperand(0);
7871 MVT MaskVT = BOperand.getSimpleValueType();
7872 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
7873 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
7874 SDValue Brdcst =
7875 DAG.getNode(X86ISD::VBROADCASTM, dl,
7876 MVT::getVectorVT(EltType, NumElts), BOperand);
7877 return DAG.getBitcast(VT, Brdcst);
7882 unsigned NumElts = VT.getVectorNumElements();
7883 unsigned NumUndefElts = UndefElements.count();
7884 if (!Ld || (NumElts - NumUndefElts) <= 1) {
7885 APInt SplatValue, Undef;
7886 unsigned SplatBitSize;
7887 bool HasUndef;
7888 // Check if this is a repeated constant pattern suitable for broadcasting.
7889 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
7890 SplatBitSize > VT.getScalarSizeInBits() &&
7891 SplatBitSize < VT.getSizeInBits()) {
7892 // Avoid replacing with a broadcast when the build_vector is used by a
7893 // shuffle instruction, to preserve the present custom lowering of shuffles.
7894 if (isFoldableUseOfShuffle(BVOp))
7895 return SDValue();
7896 // replace BUILD_VECTOR with broadcast of the repeated constants.
7897 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7898 LLVMContext *Ctx = DAG.getContext();
7899 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
7900 if (Subtarget.hasAVX()) {
7901 if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
7902 !(SplatBitSize == 64 && Subtarget.is32Bit())) {
7903 // Splatted value can fit in one INTEGER constant in constant pool.
7904 // Load the constant and broadcast it.
7905 MVT CVT = MVT::getIntegerVT(SplatBitSize);
7906 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
7907 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
7908 SDValue CP = DAG.getConstantPool(C, PVT);
7909 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
7911 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
7912 Ld = DAG.getLoad(
7913 CVT, dl, DAG.getEntryNode(), CP,
7914 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
7915 Alignment);
7916 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
7917 MVT::getVectorVT(CVT, Repeat), Ld);
7918 return DAG.getBitcast(VT, Brdcst);
7919 } else if (SplatBitSize == 32 || SplatBitSize == 64) {
7920 // Splatted value can fit in one FLOAT constant in constant pool.
7921 // Load the constant and broadcast it.
7922 // AVX has support for 32-bit and 64-bit broadcasts for floats only.
7923 // There is no 64-bit integer broadcast on a 32-bit subtarget.
7924 MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
7925 // Lower the splat via APFloat directly, to avoid any conversion.
7926 Constant *C =
7927 SplatBitSize == 32
7928 ? ConstantFP::get(*Ctx,
7929 APFloat(APFloat::IEEEsingle(), SplatValue))
7930 : ConstantFP::get(*Ctx,
7931 APFloat(APFloat::IEEEdouble(), SplatValue));
7932 SDValue CP = DAG.getConstantPool(C, PVT);
7933 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
7935 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
7936 Ld = DAG.getLoad(
7937 CVT, dl, DAG.getEntryNode(), CP,
7938 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
7939 Alignment);
7940 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
7941 MVT::getVectorVT(CVT, Repeat), Ld);
7942 return DAG.getBitcast(VT, Brdcst);
7943 } else if (SplatBitSize > 64) {
7944 // Load the vector of constants and broadcast it.
7945 MVT CVT = VT.getScalarType();
7946 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
7947 *Ctx);
7948 SDValue VCP = DAG.getConstantPool(VecC, PVT);
7949 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
7950 unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
7951 Ld = DAG.getLoad(
7952 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
7953 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
7954 Alignment);
7955 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
7956 return DAG.getBitcast(VT, Brdcst);
7961 // If we are moving a scalar into a vector (Ld must be set and all elements
7962 // but 1 are undef) and that operation is not obviously supported by
7963 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
7964 // That's better than general shuffling and may eliminate a load to GPR and
7965 // move from scalar to vector register.
7966 if (!Ld || NumElts - NumUndefElts != 1)
7967 return SDValue();
7968 unsigned ScalarSize = Ld.getValueSizeInBits();
7969 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
7970 return SDValue();
7973 bool ConstSplatVal =
7974 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
7976 // Make sure that all of the users of a non-constant load are from the
7977 // BUILD_VECTOR node.
7978 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
7979 return SDValue();
7981 unsigned ScalarSize = Ld.getValueSizeInBits();
7982 bool IsGE256 = (VT.getSizeInBits() >= 256);
7984 // When optimizing for size, generate up to 5 extra bytes for a broadcast
7985 // instruction to save 8 or more bytes of constant pool data.
7986 // TODO: If multiple splats are generated to load the same constant,
7987 // it may be detrimental to overall size. There needs to be a way to detect
7988 // that condition to know if this is truly a size win.
7989 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
7991 // Handle broadcasting a single constant scalar from the constant pool
7992 // into a vector.
7993 // On Sandybridge (no AVX2), it is still better to load a constant vector
7994 // from the constant pool and not to broadcast it from a scalar.
7995 // But override that restriction when optimizing for size.
7996 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
7997 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
7998 EVT CVT = Ld.getValueType();
7999 assert(!CVT.isVector() && "Must not broadcast a vector type");
8001 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8002 // For size optimization, also splat v2f64 and v2i64, and for size opt
8003 // with AVX2, also splat i8 and i16.
8004 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8005 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8006 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8007 const Constant *C = nullptr;
8008 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8009 C = CI->getConstantIntValue();
8010 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8011 C = CF->getConstantFPValue();
8013 assert(C && "Invalid constant type");
8015 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8016 SDValue CP =
8017 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8018 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8019 Ld = DAG.getLoad(
8020 CVT, dl, DAG.getEntryNode(), CP,
8021 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8022 Alignment);
8024 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8028 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8030 // Handle AVX2 in-register broadcasts.
8031 if (!IsLoad && Subtarget.hasInt256() &&
8032 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8033 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8035 // The scalar source must be a normal load.
8036 if (!IsLoad)
8037 return SDValue();
8039 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8040 (Subtarget.hasVLX() && ScalarSize == 64))
8041 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8043 // The integer check is needed for broadcasting 64-bit elements into 128-bit
8044 // vectors, so it doesn't match double; there is no vbroadcastsd xmm.
8045 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8046 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8047 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8050 // Unsupported broadcast.
8051 return SDValue();
8054 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
8055 /// underlying vector and index.
8057 /// Modifies \p ExtractedFromVec to the real vector and returns the real
8058 /// index.
8059 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8060 SDValue ExtIdx) {
8061 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8062 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8063 return Idx;
8065 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8066 // lowered this:
8067 // (extract_vector_elt (v8f32 %1), Constant<6>)
8068 // to:
8069 // (extract_vector_elt (vector_shuffle<2,u,u,u>
8070 // (extract_subvector (v8f32 %0), Constant<4>),
8071 // undef)
8072 // Constant<0>)
8073 // In this case the vector is the extract_subvector expression and the index
8074 // is 2, as specified by the shuffle.
8075 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8076 SDValue ShuffleVec = SVOp->getOperand(0);
8077 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8078 assert(ShuffleVecVT.getVectorElementType() ==
8079 ExtractedFromVec.getSimpleValueType().getVectorElementType());
8081 int ShuffleIdx = SVOp->getMaskElt(Idx);
8082 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8083 ExtractedFromVec = ShuffleVec;
8084 return ShuffleIdx;
8086 return Idx;
8089 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8090 MVT VT = Op.getSimpleValueType();
8092 // Skip if insert_vec_elt is not supported.
8093 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8094 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8095 return SDValue();
8097 SDLoc DL(Op);
8098 unsigned NumElems = Op.getNumOperands();
8100 SDValue VecIn1;
8101 SDValue VecIn2;
8102 SmallVector<unsigned, 4> InsertIndices;
8103 SmallVector<int, 8> Mask(NumElems, -1);
8105 for (unsigned i = 0; i != NumElems; ++i) {
8106 unsigned Opc = Op.getOperand(i).getOpcode();
8108 if (Opc == ISD::UNDEF)
8109 continue;
8111 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8112 // Quit if more than 1 element needs inserting.
8113 if (InsertIndices.size() > 1)
8114 return SDValue();
8116 InsertIndices.push_back(i);
8117 continue;
8120 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8121 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8123 // Quit if non-constant index.
8124 if (!isa<ConstantSDNode>(ExtIdx))
8125 return SDValue();
8126 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8128 // Quit if extracted from vector of different type.
8129 if (ExtractedFromVec.getValueType() != VT)
8130 return SDValue();
8132 if (!VecIn1.getNode())
8133 VecIn1 = ExtractedFromVec;
8134 else if (VecIn1 != ExtractedFromVec) {
8135 if (!VecIn2.getNode())
8136 VecIn2 = ExtractedFromVec;
8137 else if (VecIn2 != ExtractedFromVec)
8138 // Quit if more than 2 vectors to shuffle
8139 return SDValue();
8142 if (ExtractedFromVec == VecIn1)
8143 Mask[i] = Idx;
8144 else if (ExtractedFromVec == VecIn2)
8145 Mask[i] = Idx + NumElems;
8148 if (!VecIn1.getNode())
8149 return SDValue();
8151 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8152 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8154 for (unsigned Idx : InsertIndices)
8155 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8156 DAG.getIntPtrConstant(Idx, DL));
8158 return NV;
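// Pack a constant vXi1 build_vector into an integer where bit 'idx' holds the
// value of operand 'idx' (undefs contribute 0). For example, an illustrative
// v8i1 <1,0,1,1,0,0,0,0> becomes the i8 constant 0b00001101.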
8161 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8162 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8163 Op.getScalarValueSizeInBits() == 1 &&
8164 "Can not convert non-constant vector");
8165 uint64_t Immediate = 0;
8166 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8167 SDValue In = Op.getOperand(idx);
8168 if (!In.isUndef())
8169 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8171 SDLoc dl(Op);
8172 MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8173 return DAG.getConstant(Immediate, dl, VT);
8175 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8176 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8177 const X86Subtarget &Subtarget) {
8179 MVT VT = Op.getSimpleValueType();
8180 assert((VT.getVectorElementType() == MVT::i1) &&
8181 "Unexpected type in LowerBUILD_VECTORvXi1!");
8183 SDLoc dl(Op);
8184 if (ISD::isBuildVectorAllZeros(Op.getNode()))
8185 return Op;
8187 if (ISD::isBuildVectorAllOnes(Op.getNode()))
8188 return Op;
8190 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
8191 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
8192 // Split the pieces.
8193 SDValue Lower =
8194 DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32));
8195 SDValue Upper =
8196 DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32));
8197 // We have to manually lower both halves so getNode doesn't try to
8198 // reassemble the build_vector.
8199 Lower = LowerBUILD_VECTORvXi1(Lower, DAG, Subtarget);
8200 Upper = LowerBUILD_VECTORvXi1(Upper, DAG, Subtarget);
8201 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper);
8203 SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
8204 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
8205 return DAG.getBitcast(VT, Imm);
8206 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
8207 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
8208 DAG.getIntPtrConstant(0, dl));
8211 // Vector has one or more non-const elements
8212 uint64_t Immediate = 0;
8213 SmallVector<unsigned, 16> NonConstIdx;
8214 bool IsSplat = true;
8215 bool HasConstElts = false;
8216 int SplatIdx = -1;
8217 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8218 SDValue In = Op.getOperand(idx);
8219 if (In.isUndef())
8220 continue;
8221 if (!isa<ConstantSDNode>(In))
8222 NonConstIdx.push_back(idx);
8223 else {
8224 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8225 HasConstElts = true;
8227 if (SplatIdx < 0)
8228 SplatIdx = idx;
8229 else if (In != Op.getOperand(SplatIdx))
8230 IsSplat = false;
8233 // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
8234 if (IsSplat)
8235 return DAG.getSelect(dl, VT, Op.getOperand(SplatIdx),
8236 DAG.getConstant(1, dl, VT),
8237 DAG.getConstant(0, dl, VT));
8239 // Insert elements one by one.
8240 SDValue DstVec;
8241 SDValue Imm;
8242 if (Immediate) {
8243 MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
8244 Imm = DAG.getConstant(Immediate, dl, ImmVT);
8246 else if (HasConstElts)
8247 Imm = DAG.getConstant(0, dl, VT);
8248 else
8249 Imm = DAG.getUNDEF(VT);
8250 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
8251 DstVec = DAG.getBitcast(VT, Imm);
8252 else {
8253 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
8254 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
8255 DAG.getIntPtrConstant(0, dl));
8258 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8259 unsigned InsertIdx = NonConstIdx[i];
8260 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8261 Op.getOperand(InsertIdx),
8262 DAG.getIntPtrConstant(InsertIdx, dl));
8264 return DstVec;
8267 /// This is a helper function of LowerToHorizontalOp().
8268 /// This function checks that the build_vector \p N in input implements a
8269 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8270 /// may not match the layout of an x86 256-bit horizontal instruction.
8271 /// In other words, if this returns true, then some extraction/insertion will
8272 /// be required to produce a valid horizontal instruction.
8274 /// Parameter \p Opcode defines the kind of horizontal operation to match.
8275 /// For example, if \p Opcode is equal to ISD::ADD, then this function
8276 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8277 /// is equal to ISD::SUB, then this function checks if this is a horizontal
8278 /// arithmetic sub.
8280 /// This function only analyzes elements of \p N whose indices are
8281 /// in range [BaseIdx, LastIdx).
8283 /// TODO: This function was originally used to match both real and fake partial
8284 /// horizontal operations, but the index-matching logic is incorrect for that.
8285 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
8286 /// code because it is only used for partial h-op matching now?
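/// For example (illustrative), with \p Opcode == ISD::ADD and the range
/// [0, 4) on a v8i32 build_vector, this matches operands of the form:
///   (add (extract_vector_elt V0, 0), (extract_vector_elt V0, 1)),
///   (add (extract_vector_elt V0, 2), (extract_vector_elt V0, 3)),
///   (add (extract_vector_elt V1, 0), (extract_vector_elt V1, 1)),
///   (add (extract_vector_elt V1, 2), (extract_vector_elt V1, 3))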
8287 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8288 SelectionDAG &DAG,
8289 unsigned BaseIdx, unsigned LastIdx,
8290 SDValue &V0, SDValue &V1) {
8291 EVT VT = N->getValueType(0);
8292 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8293 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8294 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8295 "Invalid Vector in input!");
8297 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8298 bool CanFold = true;
8299 unsigned ExpectedVExtractIdx = BaseIdx;
8300 unsigned NumElts = LastIdx - BaseIdx;
8301 V0 = DAG.getUNDEF(VT);
8302 V1 = DAG.getUNDEF(VT);
8304 // Check if N implements a horizontal binop.
8305 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8306 SDValue Op = N->getOperand(i + BaseIdx);
8308 // Skip UNDEFs.
8309 if (Op->isUndef()) {
8310 // Update the expected vector extract index.
8311 if (i * 2 == NumElts)
8312 ExpectedVExtractIdx = BaseIdx;
8313 ExpectedVExtractIdx += 2;
8314 continue;
8317 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8319 if (!CanFold)
8320 break;
8322 SDValue Op0 = Op.getOperand(0);
8323 SDValue Op1 = Op.getOperand(1);
8325 // Try to match the following pattern:
8326 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8327 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8328 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8329 Op0.getOperand(0) == Op1.getOperand(0) &&
8330 isa<ConstantSDNode>(Op0.getOperand(1)) &&
8331 isa<ConstantSDNode>(Op1.getOperand(1)));
8332 if (!CanFold)
8333 break;
8335 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8336 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8338 if (i * 2 < NumElts) {
8339 if (V0.isUndef()) {
8340 V0 = Op0.getOperand(0);
8341 if (V0.getValueType() != VT)
8342 return false;
8344 } else {
8345 if (V1.isUndef()) {
8346 V1 = Op0.getOperand(0);
8347 if (V1.getValueType() != VT)
8348 return false;
8350 if (i * 2 == NumElts)
8351 ExpectedVExtractIdx = BaseIdx;
8354 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8355 if (I0 == ExpectedVExtractIdx)
8356 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8357 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8358 // Try to match the following dag sequence:
8359 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8360 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8361 } else
8362 CanFold = false;
8364 ExpectedVExtractIdx += 2;
8367 return CanFold;
8370 /// Emit a sequence of two 128-bit horizontal add/sub followed by
8371 /// a concat_vector.
8373 /// This is a helper function of LowerToHorizontalOp().
8374 /// This function expects two 256-bit vectors called V0 and V1.
8375 /// At first, each vector is split into two separate 128-bit vectors.
8376 /// Then, the resulting 128-bit vectors are used to implement two
8377 /// horizontal binary operations.
8379 /// The kind of horizontal binary operation is defined by \p X86Opcode.
8381 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
8382 /// the two new horizontal binops.
8383 /// When Mode is set, the first horizontal binop dag node would take as input
8384 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
8385 /// horizontal binop dag node would take as input the lower 128-bit of V1
8386 /// and the upper 128-bit of V1.
8387 /// Example:
8388 /// HADD V0_LO, V0_HI
8389 /// HADD V1_LO, V1_HI
8391 /// Otherwise, the first horizontal binop dag node takes as input the lower
8392 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
8393 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
8394 /// Example:
8395 /// HADD V0_LO, V1_LO
8396 /// HADD V0_HI, V1_HI
8398 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8399 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8400 /// the upper 128-bits of the result.
8401 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8402 const SDLoc &DL, SelectionDAG &DAG,
8403 unsigned X86Opcode, bool Mode,
8404 bool isUndefLO, bool isUndefHI) {
8405 MVT VT = V0.getSimpleValueType();
8406 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8407 "Invalid nodes in input!");
8409 unsigned NumElts = VT.getVectorNumElements();
8410 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8411 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8412 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8413 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8414 MVT NewVT = V0_LO.getSimpleValueType();
8416 SDValue LO = DAG.getUNDEF(NewVT);
8417 SDValue HI = DAG.getUNDEF(NewVT);
8419 if (Mode) {
8420 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8421 if (!isUndefLO && !V0->isUndef())
8422 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8423 if (!isUndefHI && !V1->isUndef())
8424 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
8425 } else {
8426 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8427 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
8428 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
8430 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
8431 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
8434 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
8437 /// Returns true iff \p BV builds a vector whose result is equivalent to
8438 /// the result of an ADDSUB/SUBADD operation.
8439 /// If true is returned, then the operands of the ADDSUB (Opnd0 +- Opnd1)
8440 /// or SUBADD (Opnd0 -+ Opnd1) operation are written to the parameters
8441 /// \p Opnd0 and \p Opnd1.
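/// For example (illustrative), a v4f32 build_vector
///   <a0-b0, a1+b1, a2-b2, a3+b3>
/// built from matching extract_vector_elts of A and B is recognized as
/// ADDSUB(A, B) with \p IsSubAdd == false.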
8442 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
8443 const X86Subtarget &Subtarget, SelectionDAG &DAG,
8444 SDValue &Opnd0, SDValue &Opnd1,
8445 unsigned &NumExtracts,
8446 bool &IsSubAdd) {
8448 MVT VT = BV->getSimpleValueType(0);
8449 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
8450 return false;
8452 unsigned NumElts = VT.getVectorNumElements();
8453 SDValue InVec0 = DAG.getUNDEF(VT);
8454 SDValue InVec1 = DAG.getUNDEF(VT);
8456 NumExtracts = 0;
8458 // Odd-numbered elements in the input build vector are obtained from
8459 // adding/subtracting two integer/float elements.
8460 // Even-numbered elements in the input build vector are obtained from
8461 // subtracting/adding two integer/float elements.
8462 unsigned Opc[2] = {0, 0};
8463 for (unsigned i = 0, e = NumElts; i != e; ++i) {
8464 SDValue Op = BV->getOperand(i);
8466 // Skip 'undef' values.
8467 unsigned Opcode = Op.getOpcode();
8468 if (Opcode == ISD::UNDEF)
8469 continue;
8471 // Early exit if we found an unexpected opcode.
8472 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
8473 return false;
8475 SDValue Op0 = Op.getOperand(0);
8476 SDValue Op1 = Op.getOperand(1);
8478 // Try to match the following pattern:
8479 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
8480 // Early exit if we cannot match that sequence.
8481 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8482 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8483 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8484 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
8485 Op0.getOperand(1) != Op1.getOperand(1))
8486 return false;
8488 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8489 if (I0 != i)
8490 return false;
8492 // We found a valid add/sub node; make sure it's the same opcode as previous
8493 // elements for this parity.
8494 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
8495 return false;
8496 Opc[i % 2] = Opcode;
8498 // Update InVec0 and InVec1.
8499 if (InVec0.isUndef()) {
8500 InVec0 = Op0.getOperand(0);
8501 if (InVec0.getSimpleValueType() != VT)
8502 return false;
8504 if (InVec1.isUndef()) {
8505 InVec1 = Op1.getOperand(0);
8506 if (InVec1.getSimpleValueType() != VT)
8507 return false;
8510 // Make sure that the operands of each add/sub node always
8511 // come from the same pair of vectors.
8512 if (InVec0 != Op0.getOperand(0)) {
8513 if (Opcode == ISD::FSUB)
8514 return false;
8516 // FADD is commutable. Try to commute the operands
8517 // and then test again.
8518 std::swap(Op0, Op1);
8519 if (InVec0 != Op0.getOperand(0))
8520 return false;
8523 if (InVec1 != Op1.getOperand(0))
8524 return false;
8526 // Increment the number of extractions done.
8527 ++NumExtracts;
8530 // Ensure we have found an opcode for both parities and that they are
8531 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
8532 // inputs are undef.
8533 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
8534 InVec0.isUndef() || InVec1.isUndef())
8535 return false;
8537 IsSubAdd = Opc[0] == ISD::FADD;
8539 Opnd0 = InVec0;
8540 Opnd1 = InVec1;
8541 return true;
8544 /// Returns true if it is possible to fold MUL and an idiom that has already been
8545 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
8546 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
8547 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
8549 /// Prior to calling this function it should be known that there is some
8550 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
8551 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
8552 /// before replacement of such SDNode with ADDSUB operation. Thus the number
8553 /// of \p Opnd0 uses is expected to be equal to 2.
8554 /// For example, this function may be called for the following IR:
8555 /// %AB = fmul fast <2 x double> %A, %B
8556 /// %Sub = fsub fast <2 x double> %AB, %C
8557 /// %Add = fadd fast <2 x double> %AB, %C
8558 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
8559 /// <2 x i32> <i32 0, i32 3>
8560 /// There is a def for %Addsub here, which potentially can be replaced by
8561 /// X86ISD::ADDSUB operation:
8562 /// %Addsub = X86ISD::ADDSUB %AB, %C
8563 /// and such ADDSUB can further be replaced with FMADDSUB:
8564 /// %Addsub = FMADDSUB %A, %B, %C.
8566 /// The main reason why this method is called before the replacement of the
8567 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
8568 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
8569 /// FMADDSUB is.
8570 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
8571 SelectionDAG &DAG,
8572 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
8573 unsigned ExpectedUses) {
8574 if (Opnd0.getOpcode() != ISD::FMUL ||
8575 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
8576 return false;
8578 // FIXME: These checks must match the similar ones in
8579 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
8580 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
8581 // or MUL + ADDSUB to FMADDSUB.
8582 const TargetOptions &Options = DAG.getTarget().Options;
8583 bool AllowFusion =
8584 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
8585 if (!AllowFusion)
8586 return false;
8588 Opnd2 = Opnd1;
8589 Opnd1 = Opnd0.getOperand(1);
8590 Opnd0 = Opnd0.getOperand(0);
8592 return true;
8595 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub', or
8596 /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB, or
8597 /// X86ISD::FMSUBADD node accordingly.
8598 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
8599 const X86Subtarget &Subtarget,
8600 SelectionDAG &DAG) {
8601 SDValue Opnd0, Opnd1;
8602 unsigned NumExtracts;
8603 bool IsSubAdd;
8604 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
8605 IsSubAdd))
8606 return SDValue();
8608 MVT VT = BV->getSimpleValueType(0);
8609 SDLoc DL(BV);
8611 // Try to generate X86ISD::FMADDSUB node here.
8612 SDValue Opnd2;
8613 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
8614 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
8615 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
8618 // We only support ADDSUB.
8619 if (IsSubAdd)
8620 return SDValue();
8622 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
8623 // the ADDSUB idiom has been successfully recognized. There are no known
8624 // X86 targets with 512-bit ADDSUB instructions!
8625 // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
8626 // recognition.
8627 if (VT.is512BitVector())
8628 return SDValue();
8630 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
8633 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
8634 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
8635 // Initialize outputs to known values.
8636 MVT VT = BV->getSimpleValueType(0);
8637 HOpcode = ISD::DELETED_NODE;
8638 V0 = DAG.getUNDEF(VT);
8639 V1 = DAG.getUNDEF(VT);
8641 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
8642 // half of the result is calculated independently from the 128-bit halves of
8643 // the inputs, so that makes the index-checking logic below more complicated.
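// For example, for a v8i32 horizontal add the result lanes are produced as:
//   lanes 0-1 from the low 128 bits of V0, lanes 2-3 from the low 128 bits of
//   V1, lanes 4-5 from the high 128 bits of V0, lanes 6-7 from the high 128
//   bits of V1.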
8644 unsigned NumElts = VT.getVectorNumElements();
8645 unsigned GenericOpcode = ISD::DELETED_NODE;
8646 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
8647 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
8648 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
8649 for (unsigned i = 0; i != Num128BitChunks; ++i) {
8650 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
8651 // Ignore undef elements.
8652 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
8653 if (Op.isUndef())
8654 continue;
8656 // If there's an opcode mismatch, we're done.
8657 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
8658 return false;
8660 // Initialize horizontal opcode.
8661 if (HOpcode == ISD::DELETED_NODE) {
8662 GenericOpcode = Op.getOpcode();
8663 switch (GenericOpcode) {
8664 case ISD::ADD: HOpcode = X86ISD::HADD; break;
8665 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
8666 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
8667 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
8668 default: return false;
8672 SDValue Op0 = Op.getOperand(0);
8673 SDValue Op1 = Op.getOperand(1);
8674 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8675 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8676 Op0.getOperand(0) != Op1.getOperand(0) ||
8677 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8678 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8679 return false;
8681 // The source vector is chosen based on which 64-bit half of the
8682 // destination vector is being calculated.
8683 if (j < NumEltsIn64Bits) {
8684 if (V0.isUndef())
8685 V0 = Op0.getOperand(0);
8686 } else {
8687 if (V1.isUndef())
8688 V1 = Op0.getOperand(0);
8691 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8692 if (SourceVec != Op0.getOperand(0))
8693 return false;
8695 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
8696 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
8697 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
8698 unsigned ExpectedIndex = i * NumEltsIn128Bits +
8699 (j % NumEltsIn64Bits) * 2;
8700 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
8701 continue;
8703 // If this is not a commutative op, this does not match.
8704 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
8705 return false;
8707 // Addition is commutative, so try swapping the extract indexes.
8708 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
8709 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
8710 continue;
8712 // Extract indexes do not match horizontal requirement.
8713 return false;
8716 // We matched. Opcode and operands are returned by reference as arguments.
8717 return true;
8720 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
8721 SelectionDAG &DAG, unsigned HOpcode,
8722 SDValue V0, SDValue V1) {
8723 // If either input vector is not the same size as the build vector,
8724 // extract/insert the low bits to the correct size.
8725 // This is free (examples: zmm --> xmm, xmm --> ymm).
8726 MVT VT = BV->getSimpleValueType(0);
8727 unsigned Width = VT.getSizeInBits();
8728 if (V0.getValueSizeInBits() > Width)
8729 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
8730 else if (V0.getValueSizeInBits() < Width)
8731 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
8733 if (V1.getValueSizeInBits() > Width)
8734 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
8735 else if (V1.getValueSizeInBits() < Width)
8736 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
8738 unsigned NumElts = VT.getVectorNumElements();
8739 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
8740 for (unsigned i = 0; i != NumElts; ++i)
8741 if (BV->getOperand(i).isUndef())
8742 DemandedElts.clearBit(i);
8744 // If we don't need the upper xmm, then perform as a xmm hop.
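// E.g. a v8f32 build_vector whose top 4 elements are all undef only needs a
// v4f32 hop of the low 128 bits of each input.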
8745 unsigned HalfNumElts = NumElts / 2;
8746 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
8747 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), HalfNumElts);
8748 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
8749 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
8750 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
8751 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
8754 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
8757 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
8758 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
8759 const X86Subtarget &Subtarget,
8760 SelectionDAG &DAG) {
8761 // We need at least 2 non-undef elements to make this worthwhile by default.
8762 unsigned NumNonUndefs = 0;
8763 for (const SDValue &V : BV->op_values())
8764 if (!V.isUndef())
8765 ++NumNonUndefs;
8767 if (NumNonUndefs < 2)
8768 return SDValue();
8770 // There are 4 sets of horizontal math operations distinguished by type:
8771 // int/FP at 128-bit/256-bit. Each type was introduced with a different
8772 // subtarget feature. Try to match those "native" patterns first.
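// Roughly: SSE3 added the 128-bit FP horizontal ops (HADDPS/HADDPD etc.),
// SSSE3 added the 128-bit integer ones (PHADDW/PHADDD etc.), and AVX/AVX2
// extended the FP and integer forms respectively to 256 bits.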
8773 MVT VT = BV->getSimpleValueType(0);
8774 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
8775 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
8776 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
8777 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
8778 unsigned HOpcode;
8779 SDValue V0, V1;
8780 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
8781 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
8784 // Try harder to match 256-bit ops by using extract/concat.
8785 if (!Subtarget.hasAVX() || !VT.is256BitVector())
8786 return SDValue();
8788 // Count the number of UNDEF operands in the input build_vector.
8789 unsigned NumElts = VT.getVectorNumElements();
8790 unsigned Half = NumElts / 2;
8791 unsigned NumUndefsLO = 0;
8792 unsigned NumUndefsHI = 0;
8793 for (unsigned i = 0, e = Half; i != e; ++i)
8794 if (BV->getOperand(i)->isUndef())
8795 NumUndefsLO++;
8797 for (unsigned i = Half, e = NumElts; i != e; ++i)
8798 if (BV->getOperand(i)->isUndef())
8799 NumUndefsHI++;
8801 SDLoc DL(BV);
8802 SDValue InVec0, InVec1;
8803 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
8804 SDValue InVec2, InVec3;
8805 unsigned X86Opcode;
8806 bool CanFold = true;
8808 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
8809 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
8810 InVec3) &&
8811 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8812 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8813 X86Opcode = X86ISD::HADD;
8814 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
8815 InVec1) &&
8816 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
8817 InVec3) &&
8818 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8819 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8820 X86Opcode = X86ISD::HSUB;
8821 else
8822 CanFold = false;
8824 if (CanFold) {
8825 // Do not try to expand this build_vector into a pair of horizontal
8826 // add/sub if we can emit a pair of scalar add/sub.
8827 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8828 return SDValue();
8830 // Convert this build_vector into a pair of horizontal binops followed by
8831 // a concat vector. We must adjust the outputs from the partial horizontal
8832 // matching calls above to account for undefined vector halves.
8833 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
8834 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
8835 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
8836 bool isUndefLO = NumUndefsLO == Half;
8837 bool isUndefHI = NumUndefsHI == Half;
8838 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
8839 isUndefHI);
8843 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
8844 VT == MVT::v16i16) {
8845 unsigned X86Opcode;
8846 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
8847 X86Opcode = X86ISD::HADD;
8848 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
8849 InVec1))
8850 X86Opcode = X86ISD::HSUB;
8851 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
8852 InVec1))
8853 X86Opcode = X86ISD::FHADD;
8854 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
8855 InVec1))
8856 X86Opcode = X86ISD::FHSUB;
8857 else
8858 return SDValue();
8860 // Don't try to expand this build_vector into a pair of horizontal add/sub
8861 // if we can simply emit a pair of scalar add/sub.
8862 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8863 return SDValue();
8865 // Convert this build_vector into two horizontal add/sub followed by
8866 // a concat vector.
8867 bool isUndefLO = NumUndefsLO == Half;
8868 bool isUndefHI = NumUndefsHI == Half;
8869 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
8870 isUndefLO, isUndefHI);
8873 return SDValue();
8876 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
8877 /// one of their operands is constant, lower to a pair of BUILD_VECTORs and
8878 /// just apply the bit operation to the vectors.
8879 /// NOTE: It's not in our interest to start making a general purpose vectorizer
8880 /// from this, but enough scalar bit operations are created by the later
8881 /// legalization + scalarization stages to need basic support.
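/// For example (sketch):
///   (build_vector (and x0, 1), (and x1, 2), (and x2, 4), (and x3, 8))
///     --> (and (build_vector x0, x1, x2, x3), (build_vector 1, 2, 4, 8))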
8882 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
8883 SelectionDAG &DAG) {
8884 SDLoc DL(Op);
8885 MVT VT = Op->getSimpleValueType(0);
8886 unsigned NumElems = VT.getVectorNumElements();
8887 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8889 // Check that all elements have the same opcode.
8890 // TODO: Should we allow UNDEFS and if so how many?
8891 unsigned Opcode = Op->getOperand(0).getOpcode();
8892 for (unsigned i = 1; i < NumElems; ++i)
8893 if (Opcode != Op->getOperand(i).getOpcode())
8894 return SDValue();
8896 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
8897 bool IsShift = false;
8898 switch (Opcode) {
8899 default:
8900 return SDValue();
8901 case ISD::SHL:
8902 case ISD::SRL:
8903 case ISD::SRA:
8904 IsShift = true;
8905 break;
8906 case ISD::AND:
8907 case ISD::XOR:
8908 case ISD::OR:
8909 // Don't do this if the buildvector is a splat - we'd replace one
8910 // constant with an entire vector.
8911 if (Op->getSplatValue())
8912 return SDValue();
8913 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
8914 return SDValue();
8915 break;
8918 SmallVector<SDValue, 4> LHSElts, RHSElts;
8919 for (SDValue Elt : Op->ops()) {
8920 SDValue LHS = Elt.getOperand(0);
8921 SDValue RHS = Elt.getOperand(1);
8923 // We expect the canonicalized RHS operand to be the constant.
8924 if (!isa<ConstantSDNode>(RHS))
8925 return SDValue();
8927 // Extend shift amounts.
8928 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
8929 if (!IsShift)
8930 return SDValue();
8931 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
8934 LHSElts.push_back(LHS);
8935 RHSElts.push_back(RHS);
8938 // Limit to shifts by uniform immediates.
8939 // TODO: Only accept vXi8/vXi64 special cases?
8940 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
8941 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
8942 return SDValue();
8944 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
8945 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
8946 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
8949 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
8950 /// functionality to do this, so it's all zeros, all ones, or some derivation
8951 /// that is cheap to calculate.
8952 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
8953 const X86Subtarget &Subtarget) {
8954 SDLoc DL(Op);
8955 MVT VT = Op.getSimpleValueType();
8957 // Vectors containing all zeros can be matched by pxor and xorps.
8958 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
8959 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
8960 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
8961 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
8962 return Op;
8964 return getZeroVector(VT, Subtarget, DAG, DL);
8967 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
8968 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
8969 // vpcmpeqd on 256-bit vectors.
8970 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
8971 if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
8972 (VT == MVT::v8i32 && Subtarget.hasInt256()))
8973 return Op;
8975 return getOnesVector(VT, DAG, DL);
8978 return SDValue();
8981 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
8982 /// from a vector of source values and a vector of extraction indices.
8983 /// The vectors might be manipulated to match the type of the permute op.
8984 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
8985 SDLoc &DL, SelectionDAG &DAG,
8986 const X86Subtarget &Subtarget) {
8987 MVT ShuffleVT = VT;
8988 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8989 unsigned NumElts = VT.getVectorNumElements();
8990 unsigned SizeInBits = VT.getSizeInBits();
8992 // Adjust IndicesVec to match VT size.
8993 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
8994 "Illegal variable permute mask size");
8995 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
8996 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
8997 NumElts * VT.getScalarSizeInBits());
8998 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9000 // Handle a SrcVec whose size doesn't match VT.
9001 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9002 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9003 // Handle larger SrcVec by treating it as a larger permute.
9004 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9005 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9006 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9007 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9008 Subtarget, DAG, SDLoc(IndicesVec));
9009 return extractSubVector(
9010 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9011 DAG, DL, SizeInBits);
9012 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9013 // Widen smaller SrcVec to match VT.
9014 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9015 } else
9016 return SDValue();
9019 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9020 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9021 EVT SrcVT = Idx.getValueType();
9022 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9023 uint64_t IndexScale = 0;
9024 uint64_t IndexOffset = 0;
9026 // If we're scaling a smaller permute op, then we need to repeat the
9027 // indices, scaling and offsetting them as well.
9028 // e.g. v4i32 -> v16i8 (Scale = 4)
9029 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9030 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
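// The net effect (assuming the scaled indices stay in range) is that each
// original index I expands to { I*Scale + 0, ..., I*Scale + (Scale-1) } once
// the result is reinterpreted in the narrower element type.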
9031 for (uint64_t i = 0; i != Scale; ++i) {
9032 IndexScale |= Scale << (i * NumDstBits);
9033 IndexOffset |= i << (i * NumDstBits);
9036 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9037 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9038 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9039 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9040 return Idx;
9043 unsigned Opcode = 0;
9044 switch (VT.SimpleTy) {
9045 default:
9046 break;
9047 case MVT::v16i8:
9048 if (Subtarget.hasSSSE3())
9049 Opcode = X86ISD::PSHUFB;
9050 break;
9051 case MVT::v8i16:
9052 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9053 Opcode = X86ISD::VPERMV;
9054 else if (Subtarget.hasSSSE3()) {
9055 Opcode = X86ISD::PSHUFB;
9056 ShuffleVT = MVT::v16i8;
9058 break;
9059 case MVT::v4f32:
9060 case MVT::v4i32:
9061 if (Subtarget.hasAVX()) {
9062 Opcode = X86ISD::VPERMILPV;
9063 ShuffleVT = MVT::v4f32;
9064 } else if (Subtarget.hasSSSE3()) {
9065 Opcode = X86ISD::PSHUFB;
9066 ShuffleVT = MVT::v16i8;
9068 break;
9069 case MVT::v2f64:
9070 case MVT::v2i64:
9071 if (Subtarget.hasAVX()) {
9072 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9073 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9074 Opcode = X86ISD::VPERMILPV;
9075 ShuffleVT = MVT::v2f64;
9076 } else if (Subtarget.hasSSE41()) {
9077 // SSE41 can compare v2i64 - select between indices 0 and 1.
9078 return DAG.getSelectCC(
9079 DL, IndicesVec,
9080 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9081 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9082 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9083 ISD::CondCode::SETEQ);
9085 break;
9086 case MVT::v32i8:
9087 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9088 Opcode = X86ISD::VPERMV;
9089 else if (Subtarget.hasXOP()) {
9090 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9091 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9092 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9093 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9094 return DAG.getNode(
9095 ISD::CONCAT_VECTORS, DL, VT,
9096 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9097 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9098 } else if (Subtarget.hasAVX()) {
9099 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9100 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9101 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9102 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9103 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9104 ArrayRef<SDValue> Ops) {
9105 // Permute Lo and Hi and then select based on index range.
9106 // This works because PSHUFB uses bits[3:0] to permute elements and we
9107 // don't care about bit[7] as it's just an index vector.
9108 SDValue Idx = Ops[2];
9109 EVT VT = Idx.getValueType();
9110 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9111 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9112 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9113 ISD::CondCode::SETGT);
9115 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9116 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9117 PSHUFBBuilder);
9119 break;
9120 case MVT::v16i16:
9121 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9122 Opcode = X86ISD::VPERMV;
9123 else if (Subtarget.hasAVX()) {
9124 // Scale to v32i8 and perform as v32i8.
9125 IndicesVec = ScaleIndices(IndicesVec, 2);
9126 return DAG.getBitcast(
9127 VT, createVariablePermute(
9128 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9129 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9131 break;
9132 case MVT::v8f32:
9133 case MVT::v8i32:
9134 if (Subtarget.hasAVX2())
9135 Opcode = X86ISD::VPERMV;
9136 else if (Subtarget.hasAVX()) {
9137 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9138 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9139 {0, 1, 2, 3, 0, 1, 2, 3});
9140 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9141 {4, 5, 6, 7, 4, 5, 6, 7});
9142 if (Subtarget.hasXOP())
9143 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32,
9144 LoLo, HiHi, IndicesVec,
9145 DAG.getConstant(0, DL, MVT::i8)));
9146 // Permute Lo and Hi and then select based on index range.
9147 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9148 SDValue Res = DAG.getSelectCC(
9149 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9150 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9151 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9152 ISD::CondCode::SETGT);
9153 return DAG.getBitcast(VT, Res);
9155 break;
9156 case MVT::v4i64:
9157 case MVT::v4f64:
9158 if (Subtarget.hasAVX512()) {
9159 if (!Subtarget.hasVLX()) {
9160 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9161 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9162 SDLoc(SrcVec));
9163 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9164 DAG, SDLoc(IndicesVec));
9165 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9166 DAG, Subtarget);
9167 return extract256BitVector(Res, 0, DAG, DL);
9169 Opcode = X86ISD::VPERMV;
9170 } else if (Subtarget.hasAVX()) {
9171 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9172 SDValue LoLo =
9173 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9174 SDValue HiHi =
9175 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9176 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9177 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9178 if (Subtarget.hasXOP())
9179 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64,
9180 LoLo, HiHi, IndicesVec,
9181 DAG.getConstant(0, DL, MVT::i8)));
9182 // Permute Lo and Hi and then select based on index range.
9183 // This works as VPERMILPD only uses index bit[1] to permute elements.
9184 SDValue Res = DAG.getSelectCC(
9185 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9186 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9187 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9188 ISD::CondCode::SETGT);
9189 return DAG.getBitcast(VT, Res);
9191 break;
9192 case MVT::v64i8:
9193 if (Subtarget.hasVBMI())
9194 Opcode = X86ISD::VPERMV;
9195 break;
9196 case MVT::v32i16:
9197 if (Subtarget.hasBWI())
9198 Opcode = X86ISD::VPERMV;
9199 break;
9200 case MVT::v16f32:
9201 case MVT::v16i32:
9202 case MVT::v8f64:
9203 case MVT::v8i64:
9204 if (Subtarget.hasAVX512())
9205 Opcode = X86ISD::VPERMV;
9206 break;
9208 if (!Opcode)
9209 return SDValue();
9211 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9212 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9213 "Illegal variable permute shuffle type");
9215 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
9216 if (Scale > 1)
9217 IndicesVec = ScaleIndices(IndicesVec, Scale);
9219 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9220 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9222 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
9223 SDValue Res = Opcode == X86ISD::VPERMV
9224 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9225 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9226 return DAG.getBitcast(VT, Res);
9229 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9230 // reasoned to be a permutation of a vector by indices in a non-constant vector.
9231 // (build_vector (extract_elt V, (extract_elt I, 0)),
9232 // (extract_elt V, (extract_elt I, 1)),
9233 // ...
9234 // ->
9235 // (vpermv I, V)
9237 // TODO: Handle undefs
9238 // TODO: Utilize pshufb and zero mask blending to support more efficient
9239 // construction of vectors with constant-0 elements.
9240 static SDValue
9241 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9242 const X86Subtarget &Subtarget) {
9243 SDValue SrcVec, IndicesVec;
9244 // Check for a match of the permute source vector and permute index elements.
9245 // This is done by checking that the i-th build_vector operand is of the form:
9246 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9247 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9248 SDValue Op = V.getOperand(Idx);
9249 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9250 return SDValue();
9252 // If this is the first extract encountered in V, set the source vector,
9253 // otherwise verify the extract is from the previously defined source
9254 // vector.
9255 if (!SrcVec)
9256 SrcVec = Op.getOperand(0);
9257 else if (SrcVec != Op.getOperand(0))
9258 return SDValue();
9259 SDValue ExtractedIndex = Op->getOperand(1);
9260 // Peek through extends.
9261 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9262 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9263 ExtractedIndex = ExtractedIndex.getOperand(0);
9264 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9265 return SDValue();
9267 // If this is the first extract from the index vector candidate, set the
9268 // indices vector, otherwise verify the extract is from the previously
9269 // defined indices vector.
9270 if (!IndicesVec)
9271 IndicesVec = ExtractedIndex.getOperand(0);
9272 else if (IndicesVec != ExtractedIndex.getOperand(0))
9273 return SDValue();
9275 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9276 if (!PermIdx || PermIdx->getZExtValue() != Idx)
9277 return SDValue();
9280 SDLoc DL(V);
9281 MVT VT = V.getSimpleValueType();
9282 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9285 SDValue
9286 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9287 SDLoc dl(Op);
9289 MVT VT = Op.getSimpleValueType();
9290 MVT EltVT = VT.getVectorElementType();
9291 unsigned NumElems = Op.getNumOperands();
9293 // Lower vXi1 predicate vectors via their dedicated path.
9294 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9295 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9297 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9298 return VectorConstant;
9300 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9301 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9302 return AddSub;
9303 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9304 return HorizontalOp;
9305 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9306 return Broadcast;
9307 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9308 return BitOp;
9310 unsigned EVTBits = EltVT.getSizeInBits();
9312 unsigned NumZero = 0;
9313 unsigned NumNonZero = 0;
9314 uint64_t NonZeros = 0;
9315 bool IsAllConstants = true;
9316 SmallSet<SDValue, 8> Values;
9317 unsigned NumConstants = NumElems;
9318 for (unsigned i = 0; i < NumElems; ++i) {
9319 SDValue Elt = Op.getOperand(i);
9320 if (Elt.isUndef())
9321 continue;
9322 Values.insert(Elt);
9323 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9324 IsAllConstants = false;
9325 NumConstants--;
9327 if (X86::isZeroNode(Elt))
9328 NumZero++;
9329 else {
9330 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9331 NonZeros |= ((uint64_t)1 << i);
9332 NumNonZero++;
9336 // All-undef vector. Return an UNDEF. All-zero vectors were handled above.
9337 if (NumNonZero == 0)
9338 return DAG.getUNDEF(VT);
9340 // If we are inserting one variable into a vector of non-zero constants, try
9341 // to avoid loading each constant element as a scalar. Load the constants as a
9342 // vector and then insert the variable scalar element. If insertion is not
9343 // supported, fall back to a shuffle to get the scalar blended with the
9344 // constants. Insertion into a zero vector is handled as a special-case
9345 // somewhere below here.
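// For example (sketch), (v4i32 build_vector 1, 2, X, 4) becomes a constant
// pool load of <1, 2, undef, 4> followed by an insertion of X at index 2 (or
// a blend-style shuffle if insertion isn't supported).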
9346 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9347 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9348 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9349 // Create an all-constant vector. The variable element in the old
9350 // build vector is replaced by undef in the constant vector. Save the
9351 // variable scalar element and its index for use in the insertelement.
9352 LLVMContext &Context = *DAG.getContext();
9353 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9354 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9355 SDValue VarElt;
9356 SDValue InsIndex;
9357 for (unsigned i = 0; i != NumElems; ++i) {
9358 SDValue Elt = Op.getOperand(i);
9359 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9360 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9361 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9362 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9363 else if (!Elt.isUndef()) {
9364 assert(!VarElt.getNode() && !InsIndex.getNode() &&
9365 "Expected one variable element in this vector");
9366 VarElt = Elt;
9367 InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9370 Constant *CV = ConstantVector::get(ConstVecOps);
9371 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9373 // The constants we just created may not be legal (e.g., floating point). We
9374 // must lower the vector right here because we cannot guarantee that we'll
9375 // legalize it before loading it. This is also why we could not just create
9376 // a new build vector here. If the build vector contains illegal constants,
9377 // it could get split back up into a series of insert elements.
9378 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9379 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9380 MachineFunction &MF = DAG.getMachineFunction();
9381 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9382 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9383 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9384 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9385 if (InsertC < NumEltsInLow128Bits)
9386 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9388 // There's no good way to insert into the high elements of a >128-bit
9389 // vector, so use shuffles to avoid an extract/insert sequence.
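// E.g. for v8f32 with InsertC == 6 the mask built below is
// <0, 1, 2, 3, 4, 5, 8, 7>, where element 8 (== NumElts) selects the scalar
// from the second shuffle operand.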
9390 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9391 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9392 SmallVector<int, 8> ShuffleMask;
9393 unsigned NumElts = VT.getVectorNumElements();
9394 for (unsigned i = 0; i != NumElts; ++i)
9395 ShuffleMask.push_back(i == InsertC ? NumElts : i);
9396 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9397 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9400 // Special case for a single non-zero, non-undef element.
9401 if (NumNonZero == 1) {
9402 unsigned Idx = countTrailingZeros(NonZeros);
9403 SDValue Item = Op.getOperand(Idx);
9405 // If we have a constant or non-constant insertion into the low element of
9406 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9407 // the rest of the elements. This will be matched as movd/movq/movss/movsd
9408 // depending on what the source datatype is.
9409 if (Idx == 0) {
9410 if (NumZero == 0)
9411 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9413 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9414 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9415 assert((VT.is128BitVector() || VT.is256BitVector() ||
9416 VT.is512BitVector()) &&
9417 "Expected an SSE value type!");
9418 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9419 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9420 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9423 // We can't directly insert an i8 or i16 into a vector, so zero extend
9424 // it to i32 first.
9425 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9426 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9427 if (VT.getSizeInBits() >= 256) {
9428 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9429 if (Subtarget.hasAVX()) {
9430 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9431 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9432 } else {
9433 // Without AVX, we need to extend to a 128-bit vector and then
9434 // insert into the 256-bit vector.
9435 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
9436 SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
9437 Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
9439 } else {
9440 assert(VT.is128BitVector() && "Expected an SSE value type!");
9441 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
9442 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9444 return DAG.getBitcast(VT, Item);
9448 // Is it a vector logical left shift?
9449 if (NumElems == 2 && Idx == 1 &&
9450 X86::isZeroNode(Op.getOperand(0)) &&
9451 !X86::isZeroNode(Op.getOperand(1))) {
9452 unsigned NumBits = VT.getSizeInBits();
9453 return getVShift(true, VT,
9454 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
9455 VT, Op.getOperand(1)),
9456 NumBits/2, DAG, *this, dl);
9459 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
9460 return SDValue();
9462 // Otherwise, if this is a vector with i32 or f32 elements, and the element
9463 // is a non-constant being inserted into an element other than the low one,
9464 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
9465 // movd/movss) to move this into the low element, then shuffle it into
9466 // place.
9467 if (EVTBits == 32) {
9468 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9469 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
9473 // Splat is obviously ok. Let legalizer expand it to a shuffle.
9474 if (Values.size() == 1) {
9475 if (EVTBits == 32) {
9476 // Instead of a shuffle like this:
9477 //   shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
9478 // check if it's possible to issue this instead:
9479 //   shuffle (vload ptr), undef, <1, 1, 1, 1>
9480 unsigned Idx = countTrailingZeros(NonZeros);
9481 SDValue Item = Op.getOperand(Idx);
9482 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
9483 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
9485 return SDValue();
9488 // A vector full of immediates; various special cases are already
9489 // handled, so this is best done with a single constant-pool load.
9490 if (IsAllConstants)
9491 return SDValue();
9493 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
9494 return V;
9496 // See if we can use a vector load to get all of the elements.
9498 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
9499 if (SDValue LD =
9500 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
9501 return LD;
9504 // If this is a splat of pairs of 32-bit elements, we can use a narrower
9505 // build_vector and broadcast it.
9506 // TODO: We could probably generalize this more.
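// E.g. (v8f32 build_vector A, B, A, B, A, B, A, B) is built as a v4f32
// build_vector of { A, B, undef, undef }, bitcast to v2f64, broadcast of the
// 64-bit A/B pair, and a bitcast back to v8f32.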
9507 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
9508 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9509 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9510 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
9511 // Make sure all the even/odd operands match.
9512 for (unsigned i = 2; i != NumElems; ++i)
9513 if (Ops[i % 2] != Op.getOperand(i))
9514 return false;
9515 return true;
9517 if (CanSplat(Op, NumElems, Ops)) {
9518 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
9519 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
9520 // Create a new build vector and cast to v2i64/v2f64.
9521 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
9522 DAG.getBuildVector(NarrowVT, dl, Ops));
9523 // Broadcast from v2i64/v2f64 and cast to final VT.
9524 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
9525 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
9526 NewBV));
9530 // For AVX-length vectors, build the individual 128-bit pieces and use
9531 // shuffles to put them in place.
9532 if (VT.getSizeInBits() > 128) {
9533 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
9535 // Build both the lower and upper subvector.
9536 SDValue Lower =
9537 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
9538 SDValue Upper = DAG.getBuildVector(
9539 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
9541 // Recreate the wider vector with the lower and upper part.
9542 return concatSubVectors(Lower, Upper, VT, NumElems, DAG, dl,
9543 VT.getSizeInBits() / 2);
9546 // Let legalizer expand 2-wide build_vectors.
9547 if (EVTBits == 64) {
9548 if (NumNonZero == 1) {
9549 // One half is zero or undef.
9550 unsigned Idx = countTrailingZeros(NonZeros);
9551 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
9552 Op.getOperand(Idx));
9553 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
9555 return SDValue();
9558 // If element VT is < 32 bits, convert it to inserts into a zero vector.
9559 if (EVTBits == 8 && NumElems == 16)
9560 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
9561 DAG, Subtarget))
9562 return V;
9564 if (EVTBits == 16 && NumElems == 8)
9565 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
9566 DAG, Subtarget))
9567 return V;
9569 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
9570 if (EVTBits == 32 && NumElems == 4)
9571 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
9572 return V;
9574 // If element VT is == 32 bits, turn it into a number of shuffles.
9575 if (NumElems == 4 && NumZero > 0) {
9576 SmallVector<SDValue, 8> Ops(NumElems);
9577 for (unsigned i = 0; i < 4; ++i) {
9578 bool isZero = !(NonZeros & (1ULL << i));
9579 if (isZero)
9580 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
9581 else
9582 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9585 for (unsigned i = 0; i < 2; ++i) {
9586 switch ((NonZeros >> (i*2)) & 0x3) {
9587 default: llvm_unreachable("Unexpected NonZero count");
9588 case 0:
9589 Ops[i] = Ops[i*2]; // Must be a zero vector.
9590 break;
9591 case 1:
9592 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
9593 break;
9594 case 2:
9595 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9596 break;
9597 case 3:
9598 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9599 break;
9603 bool Reverse1 = (NonZeros & 0x3) == 2;
9604 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
9605 int MaskVec[] = {
9606 Reverse1 ? 1 : 0,
9607 Reverse1 ? 0 : 1,
9608 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9609 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
9611 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9614 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9616 // Check for a build vector built mostly from a shuffle plus a few inserts.
9617 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9618 return Sh;
9620 // For SSE 4.1, use insertps to put the high elements into the low element.
9621 if (Subtarget.hasSSE41()) {
9622 SDValue Result;
9623 if (!Op.getOperand(0).isUndef())
9624 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9625 else
9626 Result = DAG.getUNDEF(VT);
9628 for (unsigned i = 1; i < NumElems; ++i) {
9629 if (Op.getOperand(i).isUndef()) continue;
9630 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9631 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9633 return Result;
9636 // Otherwise, expand into a number of unpckl*; start by extending each of
9637 // our (non-undef) elements to the full vector width with the element in the
9638 // bottom slot of the vector (which generates no code for SSE).
9639 SmallVector<SDValue, 8> Ops(NumElems);
9640 for (unsigned i = 0; i < NumElems; ++i) {
9641 if (!Op.getOperand(i).isUndef())
9642 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9643 else
9644 Ops[i] = DAG.getUNDEF(VT);
9647 // Next, we iteratively mix elements, e.g. for v4f32:
9648 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9649 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9650 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
9651 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9652 // Generate scaled UNPCKL shuffle mask.
9653 SmallVector<int, 16> Mask;
9654 for(unsigned i = 0; i != Scale; ++i)
9655 Mask.push_back(i);
9656 for (unsigned i = 0; i != Scale; ++i)
9657 Mask.push_back(NumElems+i);
9658 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9660 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9661 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9663 return Ops[0];
9666 // 256-bit AVX can use the vinsertf128 instruction
9667 // to create 256-bit vectors from two other 128-bit ones.
9668 // TODO: Detect subvector broadcast here instead of DAG combine?
9669 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9670 const X86Subtarget &Subtarget) {
9671 SDLoc dl(Op);
9672 MVT ResVT = Op.getSimpleValueType();
9674 assert((ResVT.is256BitVector() ||
9675 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9677 unsigned NumOperands = Op.getNumOperands();
9678 unsigned NumZero = 0;
9679 unsigned NumNonZero = 0;
9680 unsigned NonZeros = 0;
9681 for (unsigned i = 0; i != NumOperands; ++i) {
9682 SDValue SubVec = Op.getOperand(i);
9683 if (SubVec.isUndef())
9684 continue;
9685 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9686 ++NumZero;
9687 else {
9688 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9689 NonZeros |= 1 << i;
9690 ++NumNonZero;
9694 // If we have more than 2 non-zeros, build each half separately.
9695 if (NumNonZero > 2) {
9696 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
9697 ResVT.getVectorNumElements()/2);
9698 ArrayRef<SDUse> Ops = Op->ops();
9699 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9700 Ops.slice(0, NumOperands/2));
9701 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9702 Ops.slice(NumOperands/2));
9703 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9706 // Otherwise, build it up through insert_subvectors.
9707 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9708 : DAG.getUNDEF(ResVT);
9710 MVT SubVT = Op.getOperand(0).getSimpleValueType();
9711 unsigned NumSubElems = SubVT.getVectorNumElements();
9712 for (unsigned i = 0; i != NumOperands; ++i) {
9713 if ((NonZeros & (1 << i)) == 0)
9714 continue;
9716 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9717 Op.getOperand(i),
9718 DAG.getIntPtrConstant(i * NumSubElems, dl));
9721 return Vec;
9724 // Lower a CONCAT_VECTORS of vXi1 operands, either by inserting the non-zero
9725 // subvectors into a zero/undef vector of the result type, by splitting wide
9726 // concats, or by leaving the node as-is when KUNPCK makes it legal.
9727 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
9728 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
9729 const X86Subtarget &Subtarget,
9730 SelectionDAG & DAG) {
9731 SDLoc dl(Op);
9732 MVT ResVT = Op.getSimpleValueType();
9733 unsigned NumOperands = Op.getNumOperands();
9735 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
9736 "Unexpected number of operands in CONCAT_VECTORS");
9738 unsigned NumZero = 0;
9739 unsigned NumNonZero = 0;
9740 uint64_t NonZeros = 0;
9741 for (unsigned i = 0; i != NumOperands; ++i) {
9742 SDValue SubVec = Op.getOperand(i);
9743 if (SubVec.isUndef())
9744 continue;
9745 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9746 ++NumZero;
9747 else {
9748 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9749 NonZeros |= (uint64_t)1 << i;
9750 ++NumNonZero;
9755 // If there are zero or one non-zeros we can handle this very simply.
9756 if (NumNonZero <= 1) {
9757 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9758 : DAG.getUNDEF(ResVT);
9759 if (!NumNonZero)
9760 return Vec;
9761 unsigned Idx = countTrailingZeros(NonZeros);
9762 SDValue SubVec = Op.getOperand(Idx);
9763 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9764 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
9765 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
9768 if (NumOperands > 2) {
9769 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
9770 ResVT.getVectorNumElements()/2);
9771 ArrayRef<SDUse> Ops = Op->ops();
9772 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9773 Ops.slice(0, NumOperands/2));
9774 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9775 Ops.slice(NumOperands/2));
9776 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9779 assert(NumNonZero == 2 && "Simple cases not handled?");
9781 if (ResVT.getVectorNumElements() >= 16)
9782 return Op; // The operation is legal with KUNPCK
9784 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
9785 DAG.getUNDEF(ResVT), Op.getOperand(0),
9786 DAG.getIntPtrConstant(0, dl));
9787 unsigned NumElems = ResVT.getVectorNumElements();
9788 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
9789 DAG.getIntPtrConstant(NumElems/2, dl));
9792 static SDValue LowerCONCAT_VECTORS(SDValue Op,
9793 const X86Subtarget &Subtarget,
9794 SelectionDAG &DAG) {
9795 MVT VT = Op.getSimpleValueType();
9796 if (VT.getVectorElementType() == MVT::i1)
9797 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
9799 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
9800 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
9801 Op.getNumOperands() == 4)));
9803 // AVX can use the vinsertf128 instruction to create 256-bit vectors
9804 // from two other 128-bit ones.
9806 // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
9807 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
9810 //===----------------------------------------------------------------------===//
9811 // Vector shuffle lowering
9813 // This is an experimental code path for lowering vector shuffles on x86. It is
9814 // designed to handle arbitrary vector shuffles and blends, gracefully
9815 // degrading performance as necessary. It works hard to recognize idiomatic
9816 // shuffles and lower them to optimal instruction patterns without leaving
9817 // a framework that allows reasonably efficient handling of all vector shuffle
9818 // patterns.
9819 //===----------------------------------------------------------------------===//
9821 /// Tiny helper function to identify a no-op mask.
9823 /// This is a somewhat boring predicate function. It checks whether the mask
9824 /// array input, which is assumed to be a single-input shuffle mask of the kind
9825 /// used by the X86 shuffle instructions (not a fully general
9826 /// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and
9827 /// in-place shuffle elements are no-ops.
9828 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
9829 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9830 assert(Mask[i] >= -1 && "Out of bound mask element!");
9831 if (Mask[i] >= 0 && Mask[i] != i)
9832 return false;
9834 return true;
9837 /// Test whether there are elements crossing 128-bit lanes in this
9838 /// shuffle mask.
9840 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
9841 /// and we routinely test for these.
9842 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
9843 int LaneSize = 128 / VT.getScalarSizeInBits();
9844 int Size = Mask.size();
9845 for (int i = 0; i < Size; ++i)
9846 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
9847 return true;
9848 return false;
9851 /// Test whether a shuffle mask is equivalent within each sub-lane.
9853 /// This checks a shuffle mask to see if it is performing the same
9854 /// lane-relative shuffle in each sub-lane. This trivially implies
9855 /// that it is also not lane-crossing. It may however involve a blend from the
9856 /// same lane of a second vector.
9858 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
9859 /// non-trivial to compute in the face of undef lanes. The representation is
9860 /// suitable for use with existing 128-bit shuffles as entries from the second
9861 /// vector have been remapped to [LaneSize, 2*LaneSize).
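/// For example, the v8f32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats the
/// lane-relative pattern <0, 5, 2, 7> in each 128-bit lane.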
9862 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
9863 ArrayRef<int> Mask,
9864 SmallVectorImpl<int> &RepeatedMask) {
9865 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9866 RepeatedMask.assign(LaneSize, -1);
9867 int Size = Mask.size();
9868 for (int i = 0; i < Size; ++i) {
9869 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
9870 if (Mask[i] < 0)
9871 continue;
9872 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9873 // This entry crosses lanes, so there is no way to model this shuffle.
9874 return false;
9876 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9877 // Adjust second vector indices to start at LaneSize instead of Size.
9878 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
9879 : Mask[i] % LaneSize + LaneSize;
9880 if (RepeatedMask[i % LaneSize] < 0)
9881 // This is the first non-undef entry in this slot of a 128-bit lane.
9882 RepeatedMask[i % LaneSize] = LocalM;
9883 else if (RepeatedMask[i % LaneSize] != LocalM)
9884 // Found a mismatch with the repeated mask.
9885 return false;
9887 return true;
9890 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
9891 static bool
9892 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9893 SmallVectorImpl<int> &RepeatedMask) {
9894 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9897 static bool
9898 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
9899 SmallVector<int, 32> RepeatedMask;
9900 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9903 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
9904 static bool
9905 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9906 SmallVectorImpl<int> &RepeatedMask) {
9907 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
9910 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9911 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9912 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
9913 ArrayRef<int> Mask,
9914 SmallVectorImpl<int> &RepeatedMask) {
9915 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9916 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
9917 int Size = Mask.size();
9918 for (int i = 0; i < Size; ++i) {
9919 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
9920 if (Mask[i] == SM_SentinelUndef)
9921 continue;
9922 if (Mask[i] == SM_SentinelZero) {
9923 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
9924 return false;
9925 RepeatedMask[i % LaneSize] = SM_SentinelZero;
9926 continue;
9928 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9929 // This entry crosses lanes, so there is no way to model this shuffle.
9930 return false;
9932 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9933 // Adjust second vector indices to start at LaneSize instead of Size.
9934 int LocalM =
9935 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
9936 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
9937 // This is the first non-undef entry in this slot of a 128-bit lane.
9938 RepeatedMask[i % LaneSize] = LocalM;
9939 else if (RepeatedMask[i % LaneSize] != LocalM)
9940 // Found a mismatch with the repeated mask.
9941 return false;
9943 return true;
9946 /// Checks whether a shuffle mask is equivalent to an explicit list of
9947 /// arguments.
9949 /// This is a fast way to test a shuffle mask against a fixed pattern:
9951 ///   if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
9953 /// It returns true if the mask is exactly as wide as ExpectedMask, and each
9954 /// element of the mask is either -1 (signifying undef) or the value given in
9955 /// ExpectedMask.
9956 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
9957 ArrayRef<int> ExpectedMask) {
9958 if (Mask.size() != ExpectedMask.size())
9959 return false;
9961 int Size = Mask.size();
9963 // If the values are build vectors, we can look through them to find
9964 // equivalent inputs that make the shuffles equivalent.
9965 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
9966 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
9968 for (int i = 0; i < Size; ++i) {
9969 assert(Mask[i] >= -1 && "Out of bound mask element!");
9970 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
9971 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
9972 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
9973 if (!MaskBV || !ExpectedBV ||
9974 MaskBV->getOperand(Mask[i] % Size) !=
9975 ExpectedBV->getOperand(ExpectedMask[i] % Size))
9976 return false;
9980 return true;
9983 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
9985 /// The masks must be exactly the same width.
9987 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
9988 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
9990 /// SM_SentinelZero is accepted as a valid negative index but must match in both masks.
9991 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
9992 ArrayRef<int> ExpectedMask) {
9993 int Size = Mask.size();
9994 if (Size != (int)ExpectedMask.size())
9995 return false;
9997 for (int i = 0; i < Size; ++i)
9998 if (Mask[i] == SM_SentinelUndef)
9999 continue;
10000 else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
10001 return false;
10002 else if (Mask[i] != ExpectedMask[i])
10003 return false;
10005 return true;
10008 // Merges a general DAG shuffle mask and zeroable bit mask into a target shuffle
10009 // mask.
10010 static SmallVector<int, 64> createTargetShuffleMask(ArrayRef<int> Mask,
10011 const APInt &Zeroable) {
10012 int NumElts = Mask.size();
10013 assert(NumElts == (int)Zeroable.getBitWidth() && "Mismatch mask sizes");
10015 SmallVector<int, 64> TargetMask(NumElts, SM_SentinelUndef);
10016 for (int i = 0; i != NumElts; ++i) {
10017 int M = Mask[i];
10018 if (M == SM_SentinelUndef)
10019 continue;
10020 assert(0 <= M && M < (2 * NumElts) && "Out of range shuffle index");
10021 TargetMask[i] = (Zeroable[i] ? SM_SentinelZero : M);
10023 return TargetMask;
10026 // Attempt to create a shuffle mask from a VSELECT condition mask.
10027 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10028 SDValue Cond) {
10029 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10030 return false;
10032 unsigned Size = Cond.getValueType().getVectorNumElements();
10033 Mask.resize(Size, SM_SentinelUndef);
10035 for (int i = 0; i != (int)Size; ++i) {
10036 SDValue CondElt = Cond.getOperand(i);
10037 Mask[i] = i;
10038 // Arbitrarily choose from the 2nd operand if the select condition element
10039 // is undef.
10040 // TODO: Can we do better by matching patterns such as even/odd?
10041 if (CondElt.isUndef() || isNullConstant(CondElt))
10042 Mask[i] += Size;
10045 return true;
10048 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
10049 // instructions.
10050 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10051 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10052 return false;
10054 SmallVector<int, 8> Unpcklwd;
10055 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10056 /* Unary = */ false);
10057 SmallVector<int, 8> Unpckhwd;
10058 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10059 /* Unary = */ false);
10060 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10061 isTargetShuffleEquivalent(Mask, Unpckhwd));
10062 return IsUnpackwdMask;
10065 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10066 // Create 128-bit vector type based on mask size.
10067 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10068 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10070 // We can't assume a canonical shuffle mask, so try the commuted version too.
10071 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10072 ShuffleVectorSDNode::commuteMask(CommutedMask);
10074 // Match any of unary/binary or low/high.
10075 for (unsigned i = 0; i != 4; ++i) {
10076 SmallVector<int, 16> UnpackMask;
10077 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10078 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10079 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10080 return true;
10082 return false;
10085 /// Return true if a shuffle mask chooses elements identically in its top and
10086 /// bottom halves. For example, any splat mask has the same top and bottom
10087 /// halves. If an element is undefined in only one half of the mask, the halves
10088 /// are not considered identical.
10089 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10090 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10091 unsigned HalfSize = Mask.size() / 2;
10092 for (unsigned i = 0; i != HalfSize; ++i) {
10093 if (Mask[i] != Mask[i + HalfSize])
10094 return false;
10096 return true;
10099 /// Get a 4-lane 8-bit shuffle immediate for a mask.
10101 /// This helper function produces an 8-bit shuffle immediate corresponding to
10102 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
10103 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
10104 /// example.
10106 /// NB: We rely heavily on "undef" masks preserving the input lane.
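/// For example, the mask <3, 2, 1, 0> encodes to 0b00011011 == 0x1B, and an
/// undef element defaults to selecting its own lane.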
10107 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10108 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10109 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10110 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10111 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10112 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10114 unsigned Imm = 0;
10115 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10116 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10117 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10118 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10119 return Imm;
10122 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10123 SelectionDAG &DAG) {
10124 return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10127 /// Compute whether each element of a shuffle is zeroable.
10129 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
10130 /// Either it is an undef element in the shuffle mask, the element of the input
10131 /// referenced is undef, or the element of the input referenced is known to be
10132 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
10133 /// as many lanes with this technique as possible to simplify the remaining
10134 /// shuffle.
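/// For example, if V2 is an all-zeros build_vector, every mask element that
/// selects from V2 (i.e. Mask[i] >= Size) is zeroable.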
10135 static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
10136 SDValue V1, SDValue V2) {
10137 APInt Zeroable(Mask.size(), 0);
10138 V1 = peekThroughBitcasts(V1);
10139 V2 = peekThroughBitcasts(V2);
10141 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
10142 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
10144 int VectorSizeInBits = V1.getValueSizeInBits();
10145 int ScalarSizeInBits = VectorSizeInBits / Mask.size();
10146 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
10148 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10149 int M = Mask[i];
10150 // Handle the easy cases.
10151 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
10152 Zeroable.setBit(i);
10153 continue;
10156 // Determine shuffle input and normalize the mask.
10157 SDValue V = M < Size ? V1 : V2;
10158 M %= Size;
10160 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
10161 if (V.getOpcode() != ISD::BUILD_VECTOR)
10162 continue;
10164 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
10165 // the (larger) source element must be UNDEF/ZERO.
10166 if ((Size % V.getNumOperands()) == 0) {
10167 int Scale = Size / V->getNumOperands();
10168 SDValue Op = V.getOperand(M / Scale);
10169 if (Op.isUndef() || X86::isZeroNode(Op))
10170 Zeroable.setBit(i);
10171 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
10172 APInt Val = Cst->getAPIntValue();
10173 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10174 Val = Val.getLoBits(ScalarSizeInBits);
10175 if (Val == 0)
10176 Zeroable.setBit(i);
10177 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
10178 APInt Val = Cst->getValueAPF().bitcastToAPInt();
10179 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10180 Val = Val.getLoBits(ScalarSizeInBits);
10181 if (Val == 0)
10182 Zeroable.setBit(i);
10184 continue;
10187 // If the BUILD_VECTOR has more elements, then all the (smaller) source
10188 // elements must be UNDEF or ZERO.
10189 if ((V.getNumOperands() % Size) == 0) {
10190 int Scale = V->getNumOperands() / Size;
10191 bool AllZeroable = true;
10192 for (int j = 0; j < Scale; ++j) {
10193 SDValue Op = V.getOperand((M * Scale) + j);
10194 AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
10196 if (AllZeroable)
10197 Zeroable.setBit(i);
10198 continue;
10202 return Zeroable;
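// Example of the zeroable computation (illustrative only): for a v4i32
// shuffle with Mask = <0, 5, 2, 7> where V2 is an all-zeros build vector,
// elements 1 and 3 come from the zero input, so the returned APInt has bits
// 1 and 3 set. Later lowerings (e.g. lowerShuffleAsBitMask below) use this
// to turn a blend-with-zero into a plain AND.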
10205 // The shuffle result has the form:
10206 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in ascending order.
10207 // Each element of Zeroable corresponds to a particular element of Mask,
10208 // as described in the computeZeroableShuffleElements function.
10210 // The function looks for a sub-mask whose nonzero elements are in
10211 // increasing order. If such a sub-mask exists, the function returns true.
10212 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10213 ArrayRef<int> Mask, const EVT &VectorType,
10214 bool &IsZeroSideLeft) {
10215 int NextElement = -1;
10216 // Check if the Mask's nonzero elements are in increasing order.
10217 for (int i = 0, e = Mask.size(); i < e; i++) {
10219 // The mask's zeroable elements must be built from actual zeros, not undefs.
10219 assert(Mask[i] >= -1 && "Out of bound mask element!");
10220 if (Mask[i] < 0)
10221 return false;
10222 if (Zeroable[i])
10223 continue;
10224 // Find the lowest non zero element
10225 if (NextElement < 0) {
10226 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10227 IsZeroSideLeft = NextElement != 0;
10229 // Exit if the mask's non zero elements are not in increasing order.
10230 if (NextElement != Mask[i])
10231 return false;
10232 NextElement++;
10234 return true;
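// Illustrative example: for 8-element vectors where V1 is all zeros, a mask
// whose non-zeroable elements read 8, 9, 10, 11 from left to right (V2's
// leading elements in order, interleaved with zeroable positions that still
// reference real zero elements) passes this check with IsZeroSideLeft == true;
// lowerShuffleToEXPAND below then expands V2 into the non-zero positions with
// a masked VEXPAND.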
10237 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
10238 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10239 ArrayRef<int> Mask, SDValue V1,
10240 SDValue V2, const APInt &Zeroable,
10241 const X86Subtarget &Subtarget,
10242 SelectionDAG &DAG) {
10243 int Size = Mask.size();
10244 int LaneSize = 128 / VT.getScalarSizeInBits();
10245 const int NumBytes = VT.getSizeInBits() / 8;
10246 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10248 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10249 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10250 (Subtarget.hasBWI() && VT.is512BitVector()));
10252 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10253 // Sign bit set in i8 mask means zero element.
10254 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10256 SDValue V;
10257 for (int i = 0; i < NumBytes; ++i) {
10258 int M = Mask[i / NumEltBytes];
10259 if (M < 0) {
10260 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10261 continue;
10263 if (Zeroable[i / NumEltBytes]) {
10264 PSHUFBMask[i] = ZeroMask;
10265 continue;
10268 // We can only use a single input of V1 or V2.
10269 SDValue SrcV = (M >= Size ? V2 : V1);
10270 if (V && V != SrcV)
10271 return SDValue();
10272 V = SrcV;
10273 M %= Size;
10275 // PSHUFB can't cross lanes, ensure this doesn't happen.
10276 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10277 return SDValue();
10279 M = M % LaneSize;
10280 M = M * NumEltBytes + (i % NumEltBytes);
10281 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10283 assert(V && "Failed to find a source input");
10285 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10286 return DAG.getBitcast(
10287 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10288 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
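// Worked example (illustrative): a v8i16 shuffle of V1 with mask
// <3, 2, 1, 0, 7, 6, 5, 4> stays within its 128-bit lane, so it becomes a
// single PSHUFB with the byte mask
//   {6,7, 4,5, 2,3, 0,1, 14,15, 12,13, 10,11, 8,9},
// i.e. each word index is scaled to its pair of byte indices.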
10291 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10292 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10293 const SDLoc &dl);
10295 // X86 has a dedicated shuffle pattern that can be lowered to VEXPAND
10296 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10297 const APInt &Zeroable,
10298 ArrayRef<int> Mask, SDValue &V1,
10299 SDValue &V2, SelectionDAG &DAG,
10300 const X86Subtarget &Subtarget) {
10301 bool IsLeftZeroSide = true;
10302 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10303 IsLeftZeroSide))
10304 return SDValue();
10305 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10306 MVT IntegerType =
10307 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10308 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10309 unsigned NumElts = VT.getVectorNumElements();
10310 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10311 "Unexpected number of vector elements");
10312 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10313 Subtarget, DAG, DL);
10314 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10315 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10316 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10319 static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10320 unsigned &UnpackOpcode, bool IsUnary,
10321 ArrayRef<int> TargetMask,
10322 const SDLoc &DL, SelectionDAG &DAG,
10323 const X86Subtarget &Subtarget) {
10324 int NumElts = VT.getVectorNumElements();
10326 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10327 for (int i = 0; i != NumElts; i += 2) {
10328 int M1 = TargetMask[i + 0];
10329 int M2 = TargetMask[i + 1];
10330 Undef1 &= (SM_SentinelUndef == M1);
10331 Undef2 &= (SM_SentinelUndef == M2);
10332 Zero1 &= isUndefOrZero(M1);
10333 Zero2 &= isUndefOrZero(M2);
10335 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10336 "Zeroable shuffle detected");
10338 // Attempt to match the target mask against the unpack lo/hi mask patterns.
10339 SmallVector<int, 64> Unpckl, Unpckh;
10340 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10341 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10342 UnpackOpcode = X86ISD::UNPCKL;
10343 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10344 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10345 return true;
10348 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10349 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10350 UnpackOpcode = X86ISD::UNPCKH;
10351 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10352 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10353 return true;
10356 // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
10357 if (IsUnary && (Zero1 || Zero2)) {
10358 // Don't bother if we can blend instead.
10359 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10360 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10361 return false;
10363 bool MatchLo = true, MatchHi = true;
10364 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10365 int M = TargetMask[i];
10367 // Ignore if the input is known to be zero or the index is undef.
10368 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10369 (M == SM_SentinelUndef))
10370 continue;
10372 MatchLo &= (M == Unpckl[i]);
10373 MatchHi &= (M == Unpckh[i]);
10376 if (MatchLo || MatchHi) {
10377 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10378 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10379 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10380 return true;
10384 // If a binary shuffle, commute and try again.
10385 if (!IsUnary) {
10386 ShuffleVectorSDNode::commuteMask(Unpckl);
10387 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10388 UnpackOpcode = X86ISD::UNPCKL;
10389 std::swap(V1, V2);
10390 return true;
10393 ShuffleVectorSDNode::commuteMask(Unpckh);
10394 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10395 UnpackOpcode = X86ISD::UNPCKH;
10396 std::swap(V1, V2);
10397 return true;
10401 return false;
10404 // X86 has dedicated unpack instructions that can handle specific blend
10405 // operations: UNPCKH and UNPCKL.
10406 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10407 ArrayRef<int> Mask, SDValue V1, SDValue V2,
10408 SelectionDAG &DAG) {
10409 SmallVector<int, 8> Unpckl;
10410 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10411 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10412 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10414 SmallVector<int, 8> Unpckh;
10415 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10416 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10417 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10419 // Commute and try again.
10420 ShuffleVectorSDNode::commuteMask(Unpckl);
10421 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10422 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10424 ShuffleVectorSDNode::commuteMask(Unpckh);
10425 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10426 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10428 return SDValue();
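// Example masks (for illustration): on v4i32 the unpack-low pattern is
// <0, 4, 1, 5> and the unpack-high pattern is <2, 6, 3, 7>, so a shuffle with
// mask <0, 4, 1, 5> lowers directly to UNPCKL(V1, V2), while the commuted
// mask <4, 0, 5, 1> lowers to UNPCKL(V2, V1).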
10431 static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10432 int Delta) {
10433 int Size = (int)Mask.size();
10434 int Split = Size / Delta;
10435 int TruncatedVectorStart = SwappedOps ? Size : 0;
10437 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10438 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10439 return false;
10441 // The rest of the mask should not refer to the truncated vector's elements.
10442 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10443 TruncatedVectorStart + Size))
10444 return false;
10446 return true;
10449 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10451 // An example is the following:
10453 // t0: ch = EntryToken
10454 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10455 // t25: v4i32 = truncate t2
10456 // t41: v8i16 = bitcast t25
10457 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10458 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10459 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10460 // t18: v2i64 = bitcast t51
10462 // Without avx512vl, this is lowered to:
10464 // vpmovqd %zmm0, %ymm0
10465 // vpshufb {{.*#+}} xmm0 =
10466 // xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10468 // But when avx512vl is available, one can just use a single vpmovdw
10469 // instruction.
10470 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10471 MVT VT, SDValue V1, SDValue V2,
10472 SelectionDAG &DAG,
10473 const X86Subtarget &Subtarget) {
10474 if (VT != MVT::v16i8 && VT != MVT::v8i16)
10475 return SDValue();
10477 if (Mask.size() != VT.getVectorNumElements())
10478 return SDValue();
10480 bool SwappedOps = false;
10482 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10483 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10484 return SDValue();
10486 std::swap(V1, V2);
10487 SwappedOps = true;
10490 // Look for:
10492 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10493 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10495 // and similar ones.
10496 if (V1.getOpcode() != ISD::BITCAST)
10497 return SDValue();
10498 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
10499 return SDValue();
10501 SDValue Src = V1.getOperand(0).getOperand(0);
10502 MVT SrcVT = Src.getSimpleValueType();
10504 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
10505 // are only available with avx512vl.
10506 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
10507 return SDValue();
10509 // Word-to-byte down-conversion is only available with avx512bw. The case with
10510 // 256-bit output doesn't contain a shuffle and is therefore not handled here.
10511 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
10512 !Subtarget.hasBWI())
10513 return SDValue();
10515 // The first half/quarter of the mask should refer to every second/fourth
10516 // element of the vector truncated and bitcasted.
10517 if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
10518 !matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
10519 return SDValue();
10521 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
10524 // X86 has dedicated pack instructions that can handle specific truncation
10525 // operations: PACKSS and PACKUS.
10526 static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
10527 SDValue &V2, unsigned &PackOpcode,
10528 ArrayRef<int> TargetMask,
10529 SelectionDAG &DAG,
10530 const X86Subtarget &Subtarget) {
10531 unsigned NumElts = VT.getVectorNumElements();
10532 unsigned BitSize = VT.getScalarSizeInBits();
10533 MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
10534 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
10536 auto MatchPACK = [&](SDValue N1, SDValue N2) {
10537 SDValue VV1 = DAG.getBitcast(PackVT, N1);
10538 SDValue VV2 = DAG.getBitcast(PackVT, N2);
10539 if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
10540 APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
10541 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
10542 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
10543 V1 = VV1;
10544 V2 = VV2;
10545 SrcVT = PackVT;
10546 PackOpcode = X86ISD::PACKUS;
10547 return true;
10550 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
10551 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
10552 V1 = VV1;
10553 V2 = VV2;
10554 SrcVT = PackVT;
10555 PackOpcode = X86ISD::PACKSS;
10556 return true;
10558 return false;
10561 // Try binary shuffle.
10562 SmallVector<int, 32> BinaryMask;
10563 createPackShuffleMask(VT, BinaryMask, false);
10564 if (isTargetShuffleEquivalent(TargetMask, BinaryMask))
10565 if (MatchPACK(V1, V2))
10566 return true;
10568 // Try unary shuffle.
10569 SmallVector<int, 32> UnaryMask;
10570 createPackShuffleMask(VT, UnaryMask, true);
10571 if (isTargetShuffleEquivalent(TargetMask, UnaryMask))
10572 if (MatchPACK(V1, V1))
10573 return true;
10575 return false;
10578 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10579 SDValue V1, SDValue V2, SelectionDAG &DAG,
10580 const X86Subtarget &Subtarget) {
10581 MVT PackVT;
10582 unsigned PackOpcode;
10583 if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10584 Subtarget))
10585 return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
10586 DAG.getBitcast(PackVT, V2));
10588 return SDValue();
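// Example (illustrative): a v16i8 shuffle with the binary pack mask
// <0, 2, 4, ..., 14, 16, 18, ..., 30> asks for the low byte of every i16
// element of the two (bitcast v8i16) inputs. If the upper bytes are known
// zero this becomes PACKUS; if the i16 values are already sign-extended
// bytes it becomes PACKSS instead.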
10591 /// Try to emit a bitmask instruction for a shuffle.
10593 /// This handles cases where we can model a blend exactly as a bitmask due to
10594 /// one of the inputs being zeroable.
10595 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10596 SDValue V2, ArrayRef<int> Mask,
10597 const APInt &Zeroable,
10598 const X86Subtarget &Subtarget,
10599 SelectionDAG &DAG) {
10600 MVT MaskVT = VT;
10601 MVT EltVT = VT.getVectorElementType();
10602 SDValue Zero, AllOnes;
10603 // Use f64 if i64 isn't legal.
10604 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10605 EltVT = MVT::f64;
10606 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10609 MVT LogicVT = VT;
10610 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10611 Zero = DAG.getConstantFP(0.0, DL, EltVT);
10612 AllOnes = DAG.getConstantFP(
10613 APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
10614 LogicVT =
10615 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10616 } else {
10617 Zero = DAG.getConstant(0, DL, EltVT);
10618 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10621 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10622 SDValue V;
10623 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10624 if (Zeroable[i])
10625 continue;
10626 if (Mask[i] % Size != i)
10627 return SDValue(); // Not a blend.
10628 if (!V)
10629 V = Mask[i] < Size ? V1 : V2;
10630 else if (V != (Mask[i] < Size ? V1 : V2))
10631 return SDValue(); // Can only let one input through the mask.
10633 VMaskOps[i] = AllOnes;
10635 if (!V)
10636 return SDValue(); // No non-zeroable elements!
10638 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10639 VMask = DAG.getBitcast(LogicVT, VMask);
10640 V = DAG.getBitcast(LogicVT, V);
10641 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10642 return DAG.getBitcast(VT, And);
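// Example (illustrative): a v4i32 shuffle <0, z, 2, z> where the 'z' lanes
// are zeroable keeps only elements 0 and 2 of V1, so it is emitted as
// AND(V1, <-1, 0, -1, 0>) rather than a real blend.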
10645 /// Try to emit a blend instruction for a shuffle using bit math.
10647 /// This is used as a fallback approach when first class blend instructions are
10648 /// unavailable. Currently it is only suitable for integer vectors, but could
10649 /// be generalized for floating point vectors if desirable.
10650 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10651 SDValue V2, ArrayRef<int> Mask,
10652 SelectionDAG &DAG) {
10653 assert(VT.isInteger() && "Only supports integer vector types!");
10654 MVT EltVT = VT.getVectorElementType();
10655 SDValue Zero = DAG.getConstant(0, DL, EltVT);
10656 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10657 SmallVector<SDValue, 16> MaskOps;
10658 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10659 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10660 return SDValue(); // Shuffled input!
10661 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10664 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10665 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
10666 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
10667 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
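// Example (illustrative): for a v8i16 blend mask <0, 9, 2, 11, 4, 13, 6, 15>
// this builds M = <-1, 0, -1, 0, -1, 0, -1, 0> and produces
// (V1 & M) | (V2 & ~M), the classic pre-SSE4.1 bit-select idiom.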
10670 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10671 SDValue PreservedSrc,
10672 const X86Subtarget &Subtarget,
10673 SelectionDAG &DAG);
10675 static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
10676 MutableArrayRef<int> TargetMask,
10677 bool &ForceV1Zero, bool &ForceV2Zero,
10678 uint64_t &BlendMask) {
10679 bool V1IsZeroOrUndef =
10680 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10681 bool V2IsZeroOrUndef =
10682 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10684 BlendMask = 0;
10685 ForceV1Zero = false, ForceV2Zero = false;
10686 assert(TargetMask.size() <= 64 && "Shuffle mask too big for blend mask");
10688 // Attempt to generate the binary blend mask. If an input is zero then
10689 // we can use any lane.
10690 // TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
10691 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
10692 int M = TargetMask[i];
10693 if (M == SM_SentinelUndef)
10694 continue;
10695 if (M == i)
10696 continue;
10697 if (M == i + Size) {
10698 BlendMask |= 1ull << i;
10699 continue;
10701 if (M == SM_SentinelZero) {
10702 if (V1IsZeroOrUndef) {
10703 ForceV1Zero = true;
10704 TargetMask[i] = i;
10705 continue;
10707 if (V2IsZeroOrUndef) {
10708 ForceV2Zero = true;
10709 BlendMask |= 1ull << i;
10710 TargetMask[i] = i + Size;
10711 continue;
10714 return false;
10716 return true;
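// Example (illustrative): for a v4i32 target mask <0, 5, 2, 7>, elements 1
// and 3 come from V2, so BlendMask ends up as 0b1010; an SM_SentinelZero
// element is folded into whichever input is already zero/undef via
// ForceV1Zero/ForceV2Zero.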
10719 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
10720 int Scale) {
10721 uint64_t ScaledMask = 0;
10722 for (int i = 0; i != Size; ++i)
10723 if (BlendMask & (1ull << i))
10724 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
10725 return ScaledMask;
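// Worked example (illustrative): scaling BlendMask = 0b0101 with Size = 4 and
// Scale = 4 widens each selected element into a run of four lanes, giving
// 0x0F0F; this is how a blend mask over wide elements can be re-expressed
// over finer-grained lanes (e.g. i32 lanes down to i8 lanes).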
10728 /// Try to emit a blend instruction for a shuffle.
10730 /// This doesn't do any checks for the availability of instructions for blending
10731 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
10732 /// be matched in the backend with the type given. What it does check for is
10733 /// that the shuffle mask is a blend, or convertible into a blend with zero.
10734 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
10735 SDValue V2, ArrayRef<int> Original,
10736 const APInt &Zeroable,
10737 const X86Subtarget &Subtarget,
10738 SelectionDAG &DAG) {
10739 SmallVector<int, 64> Mask = createTargetShuffleMask(Original, Zeroable);
10741 uint64_t BlendMask = 0;
10742 bool ForceV1Zero = false, ForceV2Zero = false;
10743 if (!matchVectorShuffleAsBlend(V1, V2, Mask, ForceV1Zero, ForceV2Zero,
10744 BlendMask))
10745 return SDValue();
10747 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
10748 if (ForceV1Zero)
10749 V1 = getZeroVector(VT, Subtarget, DAG, DL);
10750 if (ForceV2Zero)
10751 V2 = getZeroVector(VT, Subtarget, DAG, DL);
10753 switch (VT.SimpleTy) {
10754 case MVT::v4i64:
10755 case MVT::v8i32:
10756 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
10757 LLVM_FALLTHROUGH;
10758 case MVT::v4f64:
10759 case MVT::v8f32:
10760 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
10761 LLVM_FALLTHROUGH;
10762 case MVT::v2f64:
10763 case MVT::v2i64:
10764 case MVT::v4f32:
10765 case MVT::v4i32:
10766 case MVT::v8i16:
10767 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
10768 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
10769 DAG.getConstant(BlendMask, DL, MVT::i8));
10770 case MVT::v16i16: {
10771 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
10772 SmallVector<int, 8> RepeatedMask;
10773 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
10774 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
10775 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
10776 BlendMask = 0;
10777 for (int i = 0; i < 8; ++i)
10778 if (RepeatedMask[i] >= 8)
10779 BlendMask |= 1ull << i;
10780 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10781 DAG.getConstant(BlendMask, DL, MVT::i8));
10783 // Use PBLENDW for lower/upper lanes and then blend lanes.
10784 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
10785 // merge to VSELECT where useful.
10786 uint64_t LoMask = BlendMask & 0xFF;
10787 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
10788 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
10789 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10790 DAG.getConstant(LoMask, DL, MVT::i8));
10791 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10792 DAG.getConstant(HiMask, DL, MVT::i8));
10793 return DAG.getVectorShuffle(
10794 MVT::v16i16, DL, Lo, Hi,
10795 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
10797 LLVM_FALLTHROUGH;
10799 case MVT::v32i8:
10800 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
10801 LLVM_FALLTHROUGH;
10802 case MVT::v16i8: {
10803 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
10805 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
10806 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10807 Subtarget, DAG))
10808 return Masked;
10810 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
10811 MVT IntegerType =
10812 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10813 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10814 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10817 // Scale the blend by the number of bytes per element.
10818 int Scale = VT.getScalarSizeInBits() / 8;
10820 // This form of blend is always done on bytes. Compute the byte vector
10821 // type.
10822 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10824 // x86 allows load folding with blendvb from the 2nd source operand. But
10825 // we are still using LLVM select here (see comment below), so that's V1.
10826 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
10827 // allow that load-folding possibility.
10828 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
10829 ShuffleVectorSDNode::commuteMask(Mask);
10830 std::swap(V1, V2);
10833 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
10834 // mix of LLVM's code generator and the x86 backend. We tell the code
10835 // generator that boolean values in the elements of an x86 vector register
10836 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
10837 // mapping a select to operand #1, and 'false' mapping to operand #2. The
10838 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
10839 // of the element (the remaining are ignored) and 0 in that high bit would
10840 // mean operand #1 while 1 in the high bit would mean operand #2. So while
10841 // the LLVM model for boolean values in vector elements gets the relevant
10842 // bit set, it is set backwards and over constrained relative to x86's
10843 // actual model.
10844 SmallVector<SDValue, 32> VSELECTMask;
10845 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10846 for (int j = 0; j < Scale; ++j)
10847 VSELECTMask.push_back(
10848 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
10849 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
10850 MVT::i8));
10852 V1 = DAG.getBitcast(BlendVT, V1);
10853 V2 = DAG.getBitcast(BlendVT, V2);
10854 return DAG.getBitcast(
10856 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
10857 V1, V2));
10859 case MVT::v16f32:
10860 case MVT::v8f64:
10861 case MVT::v8i64:
10862 case MVT::v16i32:
10863 case MVT::v32i16:
10864 case MVT::v64i8: {
10865 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
10866 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
10867 if (!OptForSize) {
10868 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10869 Subtarget, DAG))
10870 return Masked;
10873 // Otherwise load an immediate into a GPR, cast to k-register, and use a
10874 // masked move.
10875 MVT IntegerType =
10876 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10877 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10878 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10880 default:
10881 llvm_unreachable("Not a supported integer vector type!");
10885 /// Try to lower as a blend of elements from two inputs followed by
10886 /// a single-input permutation.
10888 /// This matches the pattern where we can blend elements from two inputs and
10889 /// then reduce the shuffle to a single-input permutation.
10890 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
10891 SDValue V1, SDValue V2,
10892 ArrayRef<int> Mask,
10893 SelectionDAG &DAG,
10894 bool ImmBlends = false) {
10895 // We build up the blend mask while checking whether a blend is a viable way
10896 // to reduce the shuffle.
10897 SmallVector<int, 32> BlendMask(Mask.size(), -1);
10898 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
10900 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10901 if (Mask[i] < 0)
10902 continue;
10904 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
10906 if (BlendMask[Mask[i] % Size] < 0)
10907 BlendMask[Mask[i] % Size] = Mask[i];
10908 else if (BlendMask[Mask[i] % Size] != Mask[i])
10909 return SDValue(); // Can't blend in the needed input!
10911 PermuteMask[i] = Mask[i] % Size;
10914 // If only immediate blends, then bail if the blend mask can't be widened to
10915 // i16.
10916 unsigned EltSize = VT.getScalarSizeInBits();
10917 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
10918 return SDValue();
10920 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
10921 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
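// Worked example (illustrative): for a v4i32 mask <6, 1, 4, 3> the loop
// builds BlendMask = <4, 1, 6, 3> (a per-position choice of V1 or V2, i.e. a
// legal immediate blend) and PermuteMask = <2, 1, 0, 3>, so the shuffle
// becomes blend(V1, V2) followed by a single-input permute.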
10924 /// Try to lower as an unpack of elements from two inputs followed by
10925 /// a single-input permutation.
10927 /// This matches the pattern where we can unpack elements from two inputs and
10928 /// then reduce the shuffle to a single-input (wider) permutation.
10929 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
10930 SDValue V1, SDValue V2,
10931 ArrayRef<int> Mask,
10932 SelectionDAG &DAG) {
10933 int NumElts = Mask.size();
10934 int NumLanes = VT.getSizeInBits() / 128;
10935 int NumLaneElts = NumElts / NumLanes;
10936 int NumHalfLaneElts = NumLaneElts / 2;
10938 bool MatchLo = true, MatchHi = true;
10939 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
10941 // Determine UNPCKL/UNPCKH type and operand order.
10942 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10943 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
10944 int M = Mask[Lane + Elt];
10945 if (M < 0)
10946 continue;
10948 SDValue &Op = Ops[Elt & 1];
10949 if (M < NumElts && (Op.isUndef() || Op == V1))
10950 Op = V1;
10951 else if (NumElts <= M && (Op.isUndef() || Op == V2))
10952 Op = V2;
10953 else
10954 return SDValue();
10956 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
10957 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
10958 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
10959 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
10960 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
10961 if (!MatchLo && !MatchHi)
10962 return SDValue();
10965 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
10967 // Now check that each pair of elts comes from the same unpack pair
10968 // and set the permute mask based on each pair.
10969 // TODO - Investigate cases where we permute individual elements.
10970 SmallVector<int, 32> PermuteMask(NumElts, -1);
10971 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10972 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
10973 int M0 = Mask[Lane + Elt + 0];
10974 int M1 = Mask[Lane + Elt + 1];
10975 if (0 <= M0 && 0 <= M1 &&
10976 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
10977 return SDValue();
10978 if (0 <= M0)
10979 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
10980 if (0 <= M1)
10981 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
10985 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10986 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
10987 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
10990 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
10991 /// permuting the elements of the result in place.
10992 static SDValue lowerShuffleAsByteRotateAndPermute(
10993 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10994 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
10995 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
10996 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
10997 (VT.is512BitVector() && !Subtarget.hasBWI()))
10998 return SDValue();
11000 // We don't currently support lane crossing permutes.
11001 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11002 return SDValue();
11004 int Scale = VT.getScalarSizeInBits() / 8;
11005 int NumLanes = VT.getSizeInBits() / 128;
11006 int NumElts = VT.getVectorNumElements();
11007 int NumEltsPerLane = NumElts / NumLanes;
11009 // Determine range of mask elts.
11010 bool Blend1 = true;
11011 bool Blend2 = true;
11012 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11013 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11014 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11015 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11016 int M = Mask[Lane + Elt];
11017 if (M < 0)
11018 continue;
11019 if (M < NumElts) {
11020 Blend1 &= (M == (Lane + Elt));
11021 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11022 M = M % NumEltsPerLane;
11023 Range1.first = std::min(Range1.first, M);
11024 Range1.second = std::max(Range1.second, M);
11025 } else {
11026 M -= NumElts;
11027 Blend2 &= (M == (Lane + Elt));
11028 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11029 M = M % NumEltsPerLane;
11030 Range2.first = std::min(Range2.first, M);
11031 Range2.second = std::max(Range2.second, M);
11036 // Bail if we don't need both elements.
11037 // TODO - it might be worth doing this for unary shuffles if the permute
11038 // can be widened.
11039 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11040 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11041 return SDValue();
11043 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11044 return SDValue();
11046 // Rotate the 2 ops so we can access both ranges, then permute the result.
11047 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11048 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11049 SDValue Rotate = DAG.getBitcast(
11050 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11051 DAG.getBitcast(ByteVT, Lo),
11052 DAG.getConstant(Scale * RotAmt, DL, MVT::i8)));
11053 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11054 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11055 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11056 int M = Mask[Lane + Elt];
11057 if (M < 0)
11058 continue;
11059 if (M < NumElts)
11060 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11061 else
11062 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11065 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11068 // Check if the ranges are small enough to rotate from either direction.
11069 if (Range2.second < Range1.first)
11070 return RotateAndPermute(V1, V2, Range1.first, 0);
11071 if (Range1.second < Range2.first)
11072 return RotateAndPermute(V2, V1, Range2.first, NumElts);
11073 return SDValue();
11076 /// Generic routine to decompose a shuffle and blend into independent
11077 /// blends and permutes.
11079 /// This matches the extremely common pattern for handling combined
11080 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11081 /// operations. It will try to pick the best arrangement of shuffles and
11082 /// blends.
11083 static SDValue lowerShuffleAsDecomposedShuffleBlend(
11084 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11085 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11086 // Shuffle the input elements into the desired positions in V1 and V2 and
11087 // blend them together.
11088 SmallVector<int, 32> V1Mask(Mask.size(), -1);
11089 SmallVector<int, 32> V2Mask(Mask.size(), -1);
11090 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11091 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11092 if (Mask[i] >= 0 && Mask[i] < Size) {
11093 V1Mask[i] = Mask[i];
11094 BlendMask[i] = i;
11095 } else if (Mask[i] >= Size) {
11096 V2Mask[i] = Mask[i] - Size;
11097 BlendMask[i] = i + Size;
11100 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11101 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11102 // the shuffle may be able to fold with a load or other benefit. However, when
11103 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11104 // pre-shuffle first is a better strategy.
11105 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11106 // Only prefer immediate blends to unpack/rotate.
11107 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11108 DAG, true))
11109 return BlendPerm;
11110 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11111 DAG))
11112 return UnpackPerm;
11113 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11114 DL, VT, V1, V2, Mask, Subtarget, DAG))
11115 return RotatePerm;
11116 // Unpack/rotate failed - try again with variable blends.
11117 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11118 DAG))
11119 return BlendPerm;
11122 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11123 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11124 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
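// Example decomposition (illustrative): a v4i32 mask <7, 1, 6, 2> becomes
// V1Mask = <-1, 1, -1, 2>, V2Mask = <3, -1, 2, -1> and BlendMask = <4, 1, 6, 3>,
// i.e. each input is pre-shuffled into its final positions and the results
// are then blended element by element.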
11127 /// Try to lower a vector shuffle as a rotation.
11129 /// This is used for support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11130 static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11131 int NumElts = Mask.size();
11133 // We need to detect various ways of spelling a rotation:
11134 // [11, 12, 13, 14, 15, 0, 1, 2]
11135 // [-1, 12, 13, 14, -1, -1, 1, -1]
11136 // [-1, -1, -1, -1, -1, -1, 1, 2]
11137 // [ 3, 4, 5, 6, 7, 8, 9, 10]
11138 // [-1, 4, 5, 6, -1, -1, 9, -1]
11139 // [-1, 4, 5, 6, -1, -1, -1, -1]
11140 int Rotation = 0;
11141 SDValue Lo, Hi;
11142 for (int i = 0; i < NumElts; ++i) {
11143 int M = Mask[i];
11144 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11145 "Unexpected mask index.");
11146 if (M < 0)
11147 continue;
11149 // Determine where a rotated vector would have started.
11150 int StartIdx = i - (M % NumElts);
11151 if (StartIdx == 0)
11152 // The identity rotation isn't interesting, stop.
11153 return -1;
11155 // If we found the tail of a vector the rotation must be the missing
11156 // front. If we found the head of a vector, it must be how much of the
11157 // head.
11158 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11160 if (Rotation == 0)
11161 Rotation = CandidateRotation;
11162 else if (Rotation != CandidateRotation)
11163 // The rotations don't match, so we can't match this mask.
11164 return -1;
11166 // Compute which value this mask is pointing at.
11167 SDValue MaskV = M < NumElts ? V1 : V2;
11169 // Compute which of the two target values this index should be assigned
11170 // to. This reflects whether the high elements are remaining or the low
11171 // elements are remaining.
11172 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11174 // Either set up this value if we've not encountered it before, or check
11175 // that it remains consistent.
11176 if (!TargetV)
11177 TargetV = MaskV;
11178 else if (TargetV != MaskV)
11179 // This may be a rotation, but it pulls from the inputs in some
11180 // unsupported interleaving.
11181 return -1;
11184 // Check that we successfully analyzed the mask, and normalize the results.
11185 assert(Rotation != 0 && "Failed to locate a viable rotation!");
11186 assert((Lo || Hi) && "Failed to find a rotated input vector!");
11187 if (!Lo)
11188 Lo = Hi;
11189 else if (!Hi)
11190 Hi = Lo;
11192 V1 = Lo;
11193 V2 = Hi;
11195 return Rotation;
11198 /// Try to lower a vector shuffle as a byte rotation.
11200 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11201 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11202 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11203 /// try to generically lower a vector shuffle through such a pattern. It
11204 /// does not check for the profitability of lowering either as PALIGNR or
11205 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11206 /// This matches shuffle vectors that look like:
11208 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11210 /// Essentially it concatenates V1 and V2, shifts right by some number of
11211 /// elements, and takes the low elements as the result. Note that while this is
11212 /// specified as a *right shift* because x86 is little-endian, it is a *left
11213 /// rotate* of the vector lanes.
11214 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11215 ArrayRef<int> Mask) {
11216 // Don't accept any shuffles with zero elements.
11217 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11218 return -1;
11220 // PALIGNR works on 128-bit lanes.
11221 SmallVector<int, 16> RepeatedMask;
11222 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11223 return -1;
11225 int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11226 if (Rotation <= 0)
11227 return -1;
11229 // PALIGNR rotates bytes, so we need to scale the
11230 // rotation based on how many bytes are in the vector lane.
11231 int NumElts = RepeatedMask.size();
11232 int Scale = 16 / NumElts;
11233 return Rotation * Scale;
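// Worked example (illustrative): for a v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2]
// matchShuffleAsRotate reports an element rotation of 3, which this routine
// scales by the two bytes per element to a PALIGNR immediate of 6.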
11236 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11237 SDValue V2, ArrayRef<int> Mask,
11238 const X86Subtarget &Subtarget,
11239 SelectionDAG &DAG) {
11240 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11242 SDValue Lo = V1, Hi = V2;
11243 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11244 if (ByteRotation <= 0)
11245 return SDValue();
11247 // Cast the inputs to i8 vector of correct length to match PALIGNR or
11248 // PSLLDQ/PSRLDQ.
11249 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11250 Lo = DAG.getBitcast(ByteVT, Lo);
11251 Hi = DAG.getBitcast(ByteVT, Hi);
11253 // SSSE3 targets can use the palignr instruction.
11254 if (Subtarget.hasSSSE3()) {
11255 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11256 "512-bit PALIGNR requires BWI instructions");
11257 return DAG.getBitcast(
11258 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11259 DAG.getConstant(ByteRotation, DL, MVT::i8)));
11262 assert(VT.is128BitVector() &&
11263 "Rotate-based lowering only supports 128-bit lowering!");
11264 assert(Mask.size() <= 16 &&
11265 "Can shuffle at most 16 bytes in a 128-bit vector!");
11266 assert(ByteVT == MVT::v16i8 &&
11267 "SSE2 rotate lowering only needed for v16i8!");
11269 // Default SSE2 implementation
11270 int LoByteShift = 16 - ByteRotation;
11271 int HiByteShift = ByteRotation;
11273 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11274 DAG.getConstant(LoByteShift, DL, MVT::i8));
11275 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11276 DAG.getConstant(HiByteShift, DL, MVT::i8));
11277 return DAG.getBitcast(VT,
11278 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11281 /// Try to lower a vector shuffle as a dword/qword rotation.
11283 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11284 /// rotation of the concatenation of two vectors; this routine will
11285 /// try to generically lower a vector shuffle through such a pattern.
11287 /// Essentially it concatenates V1 and V2, shifts right by some number of
11288 /// elements, and takes the low elements as the result. Note that while this is
11289 /// specified as a *right shift* because x86 is little-endian, it is a *left
11290 /// rotate* of the vector lanes.
11291 static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11292 SDValue V2, ArrayRef<int> Mask,
11293 const X86Subtarget &Subtarget,
11294 SelectionDAG &DAG) {
11295 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11296 "Only 32-bit and 64-bit elements are supported!");
11298 // 128/256-bit vectors are only supported with VLX.
11299 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11300 && "VLX required for 128/256-bit vectors");
11302 SDValue Lo = V1, Hi = V2;
11303 int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11304 if (Rotation <= 0)
11305 return SDValue();
11307 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11308 DAG.getConstant(Rotation, DL, MVT::i8));
11311 /// Try to lower a vector shuffle as a byte shift sequence.
11312 static SDValue lowerVectorShuffleAsByteShiftMask(
11313 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11314 const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11315 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11316 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11318 // We need a shuffle that has zeros at one/both ends and a sequential
11319 // shuffle from one source within.
11320 unsigned ZeroLo = Zeroable.countTrailingOnes();
11321 unsigned ZeroHi = Zeroable.countLeadingOnes();
11322 if (!ZeroLo && !ZeroHi)
11323 return SDValue();
11325 unsigned NumElts = Mask.size();
11326 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11327 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11328 return SDValue();
11330 unsigned Scale = VT.getScalarSizeInBits() / 8;
11331 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11332 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11333 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11334 return SDValue();
11336 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11337 Res = DAG.getBitcast(MVT::v16i8, Res);
11339 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11340 // inner sequential set of elements, possibly offset:
11341 // 01234567 --> zzzzzz01 --> 1zzzzzzz
11342 // 01234567 --> 4567zzzz --> zzzzz456
11343 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11344 if (ZeroLo == 0) {
11345 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11346 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11347 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11348 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11349 DAG.getConstant(Scale * ZeroHi, DL, MVT::i8));
11350 } else if (ZeroHi == 0) {
11351 unsigned Shift = Mask[ZeroLo] % NumElts;
11352 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11353 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11354 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11355 DAG.getConstant(Scale * ZeroLo, DL, MVT::i8));
11356 } else if (!Subtarget.hasSSSE3()) {
11357 // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11358 // by performing 3 byte shifts. Shuffle combining can kick in above that.
11359 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11360 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11361 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11362 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11363 Shift += Mask[ZeroLo] % NumElts;
11364 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11365 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11366 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11367 DAG.getConstant(Scale * ZeroLo, DL, MVT::i8));
11368 } else
11369 return SDValue();
11371 return DAG.getBitcast(VT, Res);
11374 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11376 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11377 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11378 /// matches elements from one of the input vectors shuffled to the left or
11379 /// right with zeroable elements 'shifted in'. It handles both the strictly
11380 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11381 /// quad word lane.
11383 /// PSHL : (little-endian) left bit shift.
11384 /// [ zz, 0, zz, 2 ]
11385 /// [ -1, 4, zz, -1 ]
11386 /// PSRL : (little-endian) right bit shift.
11387 /// [ 1, zz, 3, zz]
11388 /// [ -1, -1, 7, zz]
11389 /// PSLLDQ : (little-endian) left byte shift
11390 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
11391 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
11392 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
11393 /// PSRLDQ : (little-endian) right byte shift
11394 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
11395 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
11396 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
11397 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11398 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11399 int MaskOffset, const APInt &Zeroable,
11400 const X86Subtarget &Subtarget) {
11401 int Size = Mask.size();
11402 unsigned SizeInBits = Size * ScalarSizeInBits;
11404 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11405 for (int i = 0; i < Size; i += Scale)
11406 for (int j = 0; j < Shift; ++j)
11407 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11408 return false;
11410 return true;
11413 auto MatchShift = [&](int Shift, int Scale, bool Left) {
11414 for (int i = 0; i != Size; i += Scale) {
11415 unsigned Pos = Left ? i + Shift : i;
11416 unsigned Low = Left ? i : i + Shift;
11417 unsigned Len = Scale - Shift;
11418 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11419 return -1;
11422 int ShiftEltBits = ScalarSizeInBits * Scale;
11423 bool ByteShift = ShiftEltBits > 64;
11424 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11425 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11426 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11428 // Normalize the scale for byte shifts to still produce an i64 element
11429 // type.
11430 Scale = ByteShift ? Scale / 2 : Scale;
11432 // We need to round trip through the appropriate type for the shift.
11433 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11434 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11435 : MVT::getVectorVT(ShiftSVT, Size / Scale);
11436 return (int)ShiftAmt;
11439 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11440 // keep doubling the size of the integer elements up to that. We can
11441 // then shift the elements of the integer vector by whole multiples of
11442 // their width within the elements of the larger integer vector. Test each
11443 // multiple to see if we can find a match with the moved element indices
11444 // and that the shifted in elements are all zeroable.
11445 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11446 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11447 for (int Shift = 1; Shift != Scale; ++Shift)
11448 for (bool Left : {true, false})
11449 if (CheckZeros(Shift, Scale, Left)) {
11450 int ShiftAmt = MatchShift(Shift, Scale, Left);
11451 if (0 < ShiftAmt)
11452 return ShiftAmt;
11455 // no match
11456 return -1;
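// Worked example (illustrative): for a v4i32 mask <zz, 0, zz, 2> (the PSHL
// pattern in the comment above), Scale = 2 and Shift = 1 match with a left
// shift, so Opcode is X86ISD::VSHLI with ShiftVT = v2i64 and a shift amount
// of 32 bits, i.e. a PSLLQ by 32 that shifts each dword up within its qword
// and shifts in zeros.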
11459 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11460 SDValue V2, ArrayRef<int> Mask,
11461 const APInt &Zeroable,
11462 const X86Subtarget &Subtarget,
11463 SelectionDAG &DAG) {
11464 int Size = Mask.size();
11465 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11467 MVT ShiftVT;
11468 SDValue V = V1;
11469 unsigned Opcode;
11471 // Try to match shuffle against V1 shift.
11472 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11473 Mask, 0, Zeroable, Subtarget);
11475 // If V1 failed, try to match shuffle against V2 shift.
11476 if (ShiftAmt < 0) {
11477 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11478 Mask, Size, Zeroable, Subtarget);
11479 V = V2;
11482 if (ShiftAmt < 0)
11483 return SDValue();
11485 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11486 "Illegal integer vector type");
11487 V = DAG.getBitcast(ShiftVT, V);
11488 V = DAG.getNode(Opcode, DL, ShiftVT, V,
11489 DAG.getConstant(ShiftAmt, DL, MVT::i8));
11490 return DAG.getBitcast(VT, V);
11493 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11494 // Remainder of lower half result is zero and upper half is all undef.
11495 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11496 ArrayRef<int> Mask, uint64_t &BitLen,
11497 uint64_t &BitIdx, const APInt &Zeroable) {
11498 int Size = Mask.size();
11499 int HalfSize = Size / 2;
11500 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11501 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
11503 // Upper half must be undefined.
11504 if (!isUndefUpperHalf(Mask))
11505 return false;
11507 // Determine the extraction length from the part of the
11508 // lower half that isn't zeroable.
11509 int Len = HalfSize;
11510 for (; Len > 0; --Len)
11511 if (!Zeroable[Len - 1])
11512 break;
11513 assert(Len > 0 && "Zeroable shuffle mask");
11515 // Attempt to match first Len sequential elements from the lower half.
11516 SDValue Src;
11517 int Idx = -1;
11518 for (int i = 0; i != Len; ++i) {
11519 int M = Mask[i];
11520 if (M == SM_SentinelUndef)
11521 continue;
11522 SDValue &V = (M < Size ? V1 : V2);
11523 M = M % Size;
11525 // The extracted elements must start at a valid index and all mask
11526 // elements must be in the lower half.
11527 if (i > M || M >= HalfSize)
11528 return false;
11530 if (Idx < 0 || (Src == V && Idx == (M - i))) {
11531 Src = V;
11532 Idx = M - i;
11533 continue;
11535 return false;
11538 if (!Src || Idx < 0)
11539 return false;
11541 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11542 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11543 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11544 V1 = Src;
11545 return true;
11548 // INSERTQ: Extract lowest Len elements from lower half of second source and
11549 // insert over first source, starting at Idx.
11550 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
11551 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11552 ArrayRef<int> Mask, uint64_t &BitLen,
11553 uint64_t &BitIdx) {
11554 int Size = Mask.size();
11555 int HalfSize = Size / 2;
11556 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11558 // Upper half must be undefined.
11559 if (!isUndefUpperHalf(Mask))
11560 return false;
11562 for (int Idx = 0; Idx != HalfSize; ++Idx) {
11563 SDValue Base;
11565 // Attempt to match first source from mask before insertion point.
11566 if (isUndefInRange(Mask, 0, Idx)) {
11567 /* EMPTY */
11568 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11569 Base = V1;
11570 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11571 Base = V2;
11572 } else {
11573 continue;
11576 // Extend the extraction length looking to match both the insertion of
11577 // the second source and the remaining elements of the first.
11578 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11579 SDValue Insert;
11580 int Len = Hi - Idx;
11582 // Match insertion.
11583 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11584 Insert = V1;
11585 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11586 Insert = V2;
11587 } else {
11588 continue;
11591 // Match the remaining elements of the lower half.
11592 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11593 /* EMPTY */
11594 } else if ((!Base || (Base == V1)) &&
11595 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11596 Base = V1;
11597 } else if ((!Base || (Base == V2)) &&
11598 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11599 Size + Hi)) {
11600 Base = V2;
11601 } else {
11602 continue;
11605 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11606 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11607 V1 = Base;
11608 V2 = Insert;
11609 return true;
11613 return false;
11616 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11617 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11618 SDValue V2, ArrayRef<int> Mask,
11619 const APInt &Zeroable, SelectionDAG &DAG) {
11620 uint64_t BitLen, BitIdx;
11621 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11622 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11623 DAG.getConstant(BitLen, DL, MVT::i8),
11624 DAG.getConstant(BitIdx, DL, MVT::i8));
11626 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11627 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11628 V2 ? V2 : DAG.getUNDEF(VT),
11629 DAG.getConstant(BitLen, DL, MVT::i8),
11630 DAG.getConstant(BitIdx, DL, MVT::i8));
11632 return SDValue();
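// Example (illustrative, assuming the zeroable elements come from a zero
// vector): a v8i16 shuffle whose lower half is <2, 3, zero, zero> and whose
// upper half is undef extracts elements 2 and 3 into the low end and zeros
// the rest of the lower half, so it matches EXTRQI with BitLen = 32 and
// BitIdx = 32.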
11635 /// Lower a vector shuffle as a zero or any extension.
11637 /// Given a specific number of elements, element bit width, and extension
11638 /// stride, produce either a zero or any extension based on the available
11639 /// features of the subtarget. The extended elements are consecutive and
11640 /// can start from an offset element index in the input; to
11641 /// avoid excess shuffling, the offset must either be in the bottom lane
11642 /// or at the start of a higher lane. All extended elements must be from
11643 /// the same lane.
11644 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11645 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11646 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11647 assert(Scale > 1 && "Need a scale to extend.");
11648 int EltBits = VT.getScalarSizeInBits();
11649 int NumElements = VT.getVectorNumElements();
11650 int NumEltsPerLane = 128 / EltBits;
11651 int OffsetLane = Offset / NumEltsPerLane;
11652 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11653 "Only 8, 16, and 32 bit elements can be extended.");
11654 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11655 assert(0 <= Offset && "Extension offset must be non-negative.");
11656 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11657 "Extension offset must be in the first lane or start an upper lane.");
11659 // Check that an index is in the same lane as the base offset.
11660 auto SafeOffset = [&](int Idx) {
11661 return OffsetLane == (Idx / NumEltsPerLane);
11664 // Shift along an input so that the offset base moves to the first element.
11665 auto ShuffleOffset = [&](SDValue V) {
11666 if (!Offset)
11667 return V;
11669 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11670 for (int i = 0; i * Scale < NumElements; ++i) {
11671 int SrcIdx = i + Offset;
11672 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11674 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11677 // Found a valid zext mask! Try various lowering strategies based on the
11678 // input type and available ISA extensions.
11679 // TODO: Add AnyExt support.
11680 if (Subtarget.hasSSE41()) {
11681 // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
11682 // PUNPCK will catch this in a later shuffle match.
11683 if (Offset && Scale == 2 && VT.is128BitVector())
11684 return SDValue();
11685 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11686 NumElements / Scale);
11687 InputV = ShuffleOffset(InputV);
11688 InputV = getExtendInVec(ISD::ZERO_EXTEND, DL, ExtVT, InputV, DAG);
11689 return DAG.getBitcast(VT, InputV);
11692 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11694 // For any-extends we can cheat for larger element sizes and use shuffle
11695 // instructions that can fold with a load and/or copy.
11696 if (AnyExt && EltBits == 32) {
11697 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11698 -1};
11699 return DAG.getBitcast(
11700 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11701 DAG.getBitcast(MVT::v4i32, InputV),
11702 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11704 if (AnyExt && EltBits == 16 && Scale > 2) {
11705 int PSHUFDMask[4] = {Offset / 2, -1,
11706 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
11707 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11708 DAG.getBitcast(MVT::v4i32, InputV),
11709 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
11710 int PSHUFWMask[4] = {1, -1, -1, -1};
11711 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
11712 return DAG.getBitcast(
11713 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
11714 DAG.getBitcast(MVT::v8i16, InputV),
11715 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
11718 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
11719 // to 64-bits.
11720 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
11721 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
11722 assert(VT.is128BitVector() && "Unexpected vector width!");
11724 int LoIdx = Offset * EltBits;
11725 SDValue Lo = DAG.getBitcast(
11726 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11727 DAG.getConstant(EltBits, DL, MVT::i8),
11728 DAG.getConstant(LoIdx, DL, MVT::i8)));
11730 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
11731 return DAG.getBitcast(VT, Lo);
11733 int HiIdx = (Offset + 1) * EltBits;
11734 SDValue Hi = DAG.getBitcast(
11735 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11736 DAG.getConstant(EltBits, DL, MVT::i8),
11737 DAG.getConstant(HiIdx, DL, MVT::i8)));
11738 return DAG.getBitcast(VT,
11739 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
11742 // If this would require more than 2 unpack instructions to expand, use
11743 // pshufb when available. We can only use more than 2 unpack instructions
11744 // when zero extending i8 elements which also makes it easier to use pshufb.
11745 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
11746 assert(NumElements == 16 && "Unexpected byte vector width!");
11747 SDValue PSHUFBMask[16];
11748 for (int i = 0; i < 16; ++i) {
11749 int Idx = Offset + (i / Scale);
11750 PSHUFBMask[i] = DAG.getConstant(
11751 (i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
11753 InputV = DAG.getBitcast(MVT::v16i8, InputV);
11754 return DAG.getBitcast(
11755 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
11756 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
11759 // If we are extending from an offset, ensure we start on a boundary that
11760 // we can unpack from.
11761 int AlignToUnpack = Offset % (NumElements / Scale);
11762 if (AlignToUnpack) {
11763 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11764 for (int i = AlignToUnpack; i < NumElements; ++i)
11765 ShMask[i - AlignToUnpack] = i;
11766 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
11767 Offset -= AlignToUnpack;
11770 // Otherwise emit a sequence of unpacks.
11771 do {
11772 unsigned UnpackLoHi = X86ISD::UNPCKL;
11773 if (Offset >= (NumElements / 2)) {
11774 UnpackLoHi = X86ISD::UNPCKH;
11775 Offset -= (NumElements / 2);
11778 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
11779 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
11780 : getZeroVector(InputVT, Subtarget, DAG, DL);
11781 InputV = DAG.getBitcast(InputVT, InputV);
11782 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
11783 Scale /= 2;
11784 EltBits *= 2;
11785 NumElements /= 2;
11786 } while (Scale > 1);
11787 return DAG.getBitcast(VT, InputV);
11790 /// Try to lower a vector shuffle as a zero extension on any microarch.
11792 /// This routine will try to do everything in its power to cleverly lower
11793 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
11794 /// check for the profitability of this lowering; it tries to aggressively
11795 /// match this pattern. It will use all of the micro-architectural details it
11796 /// can to emit an efficient lowering. It handles both blends with all-zero
11797 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
11798 /// masking out later).
11800 /// The reason we have dedicated lowering for zext-style shuffles is that they
11801 /// are both incredibly common and often quite performance sensitive.
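/// For example (illustrative), a v4i32 mask { 0, Z, 1, Z }, where Z denotes a
/// zeroable element, is a zero extension of the low two i32 elements to i64
/// (Scale = 2) and can be lowered with PMOVZXDQ on SSE4.1 targets.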
11802 static SDValue lowerShuffleAsZeroOrAnyExtend(
11803 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11804 const APInt &Zeroable, const X86Subtarget &Subtarget,
11805 SelectionDAG &DAG) {
11806 int Bits = VT.getSizeInBits();
11807 int NumLanes = Bits / 128;
11808 int NumElements = VT.getVectorNumElements();
11809 int NumEltsPerLane = NumElements / NumLanes;
11810 assert(VT.getScalarSizeInBits() <= 32 &&
11811 "Exceeds 32-bit integer zero extension limit");
11812 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
11814 // Define a helper function to check a particular ext-scale and lower to it if
11815 // valid.
11816 auto Lower = [&](int Scale) -> SDValue {
11817 SDValue InputV;
11818 bool AnyExt = true;
11819 int Offset = 0;
11820 int Matches = 0;
11821 for (int i = 0; i < NumElements; ++i) {
11822 int M = Mask[i];
11823 if (M < 0)
11824 continue; // Valid anywhere but doesn't tell us anything.
11825 if (i % Scale != 0) {
11826 // Each of the extended elements need to be zeroable.
11827 if (!Zeroable[i])
11828 return SDValue();
11830 // We no longer are in the anyext case.
11831 AnyExt = false;
11832 continue;
11835 // Each of the base elements needs to be consecutive indices into the
11836 // same input vector.
11837 SDValue V = M < NumElements ? V1 : V2;
11838 M = M % NumElements;
11839 if (!InputV) {
11840 InputV = V;
11841 Offset = M - (i / Scale);
11842 } else if (InputV != V)
11843 return SDValue(); // Flip-flopping inputs.
11845 // Offset must start in the lowest 128-bit lane or at the start of an
11846 // upper lane.
11847 // FIXME: Is it ever worth allowing a negative base offset?
11848 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
11849 (Offset % NumEltsPerLane) == 0))
11850 return SDValue();
11852 // If we are offsetting, all referenced entries must come from the same
11853 // lane.
11854 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
11855 return SDValue();
11857 if ((M % NumElements) != (Offset + (i / Scale)))
11858 return SDValue(); // Non-consecutive strided elements.
11859 Matches++;
11862 // If we fail to find an input, we have a zero-shuffle which should always
11863 // have already been handled.
11864 // FIXME: Maybe handle this here in case during blending we end up with one?
11865 if (!InputV)
11866 return SDValue();
11868 // If we are offsetting, don't extend if we only match a single input; we
11869 // can always do better by using a basic PSHUF or PUNPCK.
11870 if (Offset != 0 && Matches < 2)
11871 return SDValue();
11873 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
11874 InputV, Mask, Subtarget, DAG);
11877 // The widest scale possible for extending is to a 64-bit integer.
11878 assert(Bits % 64 == 0 &&
11879 "The number of bits in a vector must be divisible by 64 on x86!");
11880 int NumExtElements = Bits / 64;
11882 // Each iteration, try extending the elements half as much, but into twice as
11883 // many elements.
11884 for (; NumExtElements < NumElements; NumExtElements *= 2) {
11885 assert(NumElements % NumExtElements == 0 &&
11886 "The input vector size must be divisible by the extended size.");
11887 if (SDValue V = Lower(NumElements / NumExtElements))
11888 return V;
11891 // General extends failed, but 128-bit vectors may be able to use MOVQ.
11892 if (Bits != 128)
11893 return SDValue();
11895 // Returns one of the source operands if the shuffle can be reduced to a
11896 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
11897 auto CanZExtLowHalf = [&]() {
11898 for (int i = NumElements / 2; i != NumElements; ++i)
11899 if (!Zeroable[i])
11900 return SDValue();
11901 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
11902 return V1;
11903 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
11904 return V2;
11905 return SDValue();
11908 if (SDValue V = CanZExtLowHalf()) {
11909 V = DAG.getBitcast(MVT::v2i64, V);
11910 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
11911 return DAG.getBitcast(VT, V);
11914 // No viable ext lowering found.
11915 return SDValue();
11918 /// Try to get a scalar value for a specific element of a vector.
11920 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
11921 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
11922 SelectionDAG &DAG) {
11923 MVT VT = V.getSimpleValueType();
11924 MVT EltVT = VT.getVectorElementType();
11925 V = peekThroughBitcasts(V);
11927 // If the bitcasts shift the element size, we can't extract an equivalent
11928 // element from it.
11929 MVT NewVT = V.getSimpleValueType();
11930 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
11931 return SDValue();
11933 if (V.getOpcode() == ISD::BUILD_VECTOR ||
11934 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
11935 // Ensure the scalar operand is the same size as the destination.
11936 // FIXME: Add support for scalar truncation where possible.
11937 SDValue S = V.getOperand(Idx);
11938 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
11939 return DAG.getBitcast(EltVT, S);
11942 return SDValue();
11945 /// Helper to test for a load that can be folded with x86 shuffles.
11947 /// This is particularly important because the set of instructions varies
11948 /// significantly based on whether the operand is a load or not.
11949 static bool isShuffleFoldableLoad(SDValue V) {
11950 V = peekThroughBitcasts(V);
11951 return ISD::isNON_EXTLoad(V.getNode());
11954 /// Try to lower insertion of a single element into a zero vector.
11956 /// This is a common pattern for which we have especially efficient lowerings
11957 /// across all subtarget feature sets.
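/// For instance, a v4i32 mask { 4, Z, Z, Z }, where Z denotes a zeroable
/// element, inserts the low element of V2 into an otherwise zero vector and
/// maps onto the zero-extending move pattern (X86ISD::VZEXT_MOVL) rather
/// than a full blend.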
11958 static SDValue lowerShuffleAsElementInsertion(
11959 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11960 const APInt &Zeroable, const X86Subtarget &Subtarget,
11961 SelectionDAG &DAG) {
11962 MVT ExtVT = VT;
11963 MVT EltVT = VT.getVectorElementType();
11965 int V2Index =
11966 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
11967 Mask.begin();
11968 bool IsV1Zeroable = true;
11969 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11970 if (i != V2Index && !Zeroable[i]) {
11971 IsV1Zeroable = false;
11972 break;
11975 // Check for a single input from a SCALAR_TO_VECTOR node.
11976 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
11977 // all the smarts here sunk into that routine. However, the current
11978 // lowering of BUILD_VECTOR makes that nearly impossible until the old
11979 // vector shuffle lowering is dead.
11980 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
11981 DAG);
11982 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
11983 // We need to zext the scalar if it is smaller than an i32.
11984 V2S = DAG.getBitcast(EltVT, V2S);
11985 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
11986 // Using zext to expand a narrow element won't work for non-zero
11987 // insertions.
11988 if (!IsV1Zeroable)
11989 return SDValue();
11991 // Zero-extend directly to i32.
11992 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
11993 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
11995 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
11996 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
11997 EltVT == MVT::i16) {
11998 // Either not inserting from the low element of the input or the input
11999 // element size is too small to use VZEXT_MOVL to clear the high bits.
12000 return SDValue();
12003 if (!IsV1Zeroable) {
12004 // If V1 can't be treated as a zero vector we have fewer options to lower
12005 // this. We can't support integer vectors or non-zero targets cheaply, and
12006 // the V1 elements can't be permuted in any way.
12007 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12008 if (!VT.isFloatingPoint() || V2Index != 0)
12009 return SDValue();
12010 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12011 V1Mask[V2Index] = -1;
12012 if (!isNoopShuffleMask(V1Mask))
12013 return SDValue();
12014 if (!VT.is128BitVector())
12015 return SDValue();
12017 // Otherwise, use MOVSD or MOVSS.
12018 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12019 "Only two types of floating point element types to handle!");
12020 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12021 ExtVT, V1, V2);
12024 // This lowering only works for the low element with floating point vectors.
12025 if (VT.isFloatingPoint() && V2Index != 0)
12026 return SDValue();
12028 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12029 if (ExtVT != VT)
12030 V2 = DAG.getBitcast(VT, V2);
12032 if (V2Index != 0) {
12033 // If we have 4 or fewer lanes we can cheaply shuffle the element into
12034 // the desired position. Otherwise it is more efficient to do a vector
12035 // shift left. We know that we can do a vector shift left because all
12036 // the inputs are zero.
12037 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12038 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12039 V2Shuffle[V2Index] = 0;
12040 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12041 } else {
12042 V2 = DAG.getBitcast(MVT::v16i8, V2);
12043 V2 = DAG.getNode(
12044 X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12045 DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12046 V2 = DAG.getBitcast(VT, V2);
12049 return V2;
12052 /// Try to lower a broadcast of a single, truncated integer element coming
12053 /// from a scalar_to_vector/build_vector node \p V0 with larger elements.
12055 /// This assumes we have AVX2.
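/// For example (illustrative), broadcasting byte 0 of a v4i32 build_vector
/// into a v16i8 result truncates the i32 scalar to i8 before the broadcast
/// (VPBROADCASTB); if a non-zero byte within the wider element is requested,
/// the scalar is shifted right first so the truncate keeps the right bits.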
12056 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12057 int BroadcastIdx,
12058 const X86Subtarget &Subtarget,
12059 SelectionDAG &DAG) {
12060 assert(Subtarget.hasAVX2() &&
12061 "We can only lower integer broadcasts with AVX2!");
12063 EVT EltVT = VT.getVectorElementType();
12064 EVT V0VT = V0.getValueType();
12066 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12067 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12069 EVT V0EltVT = V0VT.getVectorElementType();
12070 if (!V0EltVT.isInteger())
12071 return SDValue();
12073 const unsigned EltSize = EltVT.getSizeInBits();
12074 const unsigned V0EltSize = V0EltVT.getSizeInBits();
12076 // This is only a truncation if the original element type is larger.
12077 if (V0EltSize <= EltSize)
12078 return SDValue();
12080 assert(((V0EltSize % EltSize) == 0) &&
12081 "Scalar type sizes must all be powers of 2 on x86!");
12083 const unsigned V0Opc = V0.getOpcode();
12084 const unsigned Scale = V0EltSize / EltSize;
12085 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12087 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12088 V0Opc != ISD::BUILD_VECTOR)
12089 return SDValue();
12091 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12093 // If we're extracting non-least-significant bits, shift so we can truncate.
12094 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12095 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12096 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12097 if (const int OffsetIdx = BroadcastIdx % Scale)
12098 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12099 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12101 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12102 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12105 /// Test whether this can be lowered with a single SHUFPS instruction.
12107 /// This is used to disable more specialized lowerings when the shufps lowering
12108 /// will happen to be efficient.
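/// For example, { 0, 1, 4, 5 } can be done with one SHUFPS because each half
/// of the result reads from only one input, whereas { 0, 4, 1, 5 } mixes both
/// inputs in the low half and therefore cannot.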
12109 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12110 // This routine only handles 128-bit shufps.
12111 assert(Mask.size() == 4 && "Unsupported mask size!");
12112 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12113 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12114 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12115 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12117 // To lower with a single SHUFPS we need the low half and the high half
12118 // to each use only a single input.
12119 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12120 return false;
12121 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12122 return false;
12124 return true;
12127 /// If we are extracting two 128-bit halves of a vector and shuffling the
12128 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12129 /// multi-shuffle lowering.
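/// For example (illustrative), shuffling the two v4f32 halves extracted from
/// a v8f32 value X with mask { 0, 5, 2, 7 } can instead be done as a single
/// lane-crossing shuffle of X with mask { 0, 5, 2, 7, u, u, u, u } (a VPERMPS
/// style operation), followed by a free ymm -> xmm extract of the low half.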
12130 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12131 SDValue N1, ArrayRef<int> Mask,
12132 SelectionDAG &DAG) {
12133 EVT VT = N0.getValueType();
12134 assert((VT.is128BitVector() &&
12135 (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12136 "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12138 // Check that both sources are extracts of the same source vector.
12139 if (!N0.hasOneUse() || !N1.hasOneUse() ||
12140 N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12141 N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12142 N0.getOperand(0) != N1.getOperand(0))
12143 return SDValue();
12145 SDValue WideVec = N0.getOperand(0);
12146 EVT WideVT = WideVec.getValueType();
12147 if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
12148 !isa<ConstantSDNode>(N1.getOperand(1)))
12149 return SDValue();
12151 // Match extracts of each half of the wide source vector. Commute the shuffle
12152 // if the extract of the low half is N1.
12153 unsigned NumElts = VT.getVectorNumElements();
12154 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
12155 const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12156 const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12157 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12158 ShuffleVectorSDNode::commuteMask(NewMask);
12159 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12160 return SDValue();
12162 // Final bailout: if the mask is simple, we are better off using an extract
12163 // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12164 // because that avoids a constant load from memory.
12165 if (NumElts == 4 &&
12166 (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
12167 return SDValue();
12169 // Extend the shuffle mask with undef elements.
12170 NewMask.append(NumElts, -1);
12172 // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12173 SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12174 NewMask);
12175 // This is free: ymm -> xmm.
12176 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12177 DAG.getIntPtrConstant(0, DL));
12180 /// Try to lower broadcast of a single element.
12182 /// For convenience, this code also bundles all of the subtarget feature set
12183 /// filtering. While a little annoying to re-dispatch on type here, there isn't
12184 /// a convenient way to factor it out.
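/// For example (illustrative), a v4f32 splat of element 0 can become
/// VBROADCASTSS (from a foldable load with AVX, or from a register with
/// AVX2), a v2f64 splat can use MOVDDUP from SSE3 onwards, and integer
/// splats are only attempted here when AVX2 is available.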
12185 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12186 SDValue V2, ArrayRef<int> Mask,
12187 const X86Subtarget &Subtarget,
12188 SelectionDAG &DAG) {
12189 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12190 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
12191 (Subtarget.hasAVX2() && VT.isInteger())))
12192 return SDValue();
12194 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12195 // we can only broadcast from a register with AVX2.
12196 unsigned NumElts = Mask.size();
12197 unsigned NumEltBits = VT.getScalarSizeInBits();
12198 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12199 ? X86ISD::MOVDDUP
12200 : X86ISD::VBROADCAST;
12201 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12203 // Check that the mask is a broadcast.
12204 int BroadcastIdx = -1;
12205 for (int i = 0; i != (int)NumElts; ++i) {
12206 SmallVector<int, 8> BroadcastMask(NumElts, i);
12207 if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
12208 BroadcastIdx = i;
12209 break;
12213 if (BroadcastIdx < 0)
12214 return SDValue();
12215 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12216 "a sorted mask where the broadcast "
12217 "comes from V1.");
12219 // Go up the chain of (vector) values to find a scalar load that we can
12220 // combine with the broadcast.
12221 int BitOffset = BroadcastIdx * NumEltBits;
12222 SDValue V = V1;
12223 for (;;) {
12224 switch (V.getOpcode()) {
12225 case ISD::BITCAST: {
12226 V = V.getOperand(0);
12227 continue;
12229 case ISD::CONCAT_VECTORS: {
12230 int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12231 int OpIdx = BitOffset / OpBitWidth;
12232 V = V.getOperand(OpIdx);
12233 BitOffset %= OpBitWidth;
12234 continue;
12236 case ISD::INSERT_SUBVECTOR: {
12237 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12238 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
12239 if (!ConstantIdx)
12240 break;
12242 int EltBitWidth = VOuter.getScalarValueSizeInBits();
12243 int Idx = (int)ConstantIdx->getZExtValue();
12244 int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12245 int BeginOffset = Idx * EltBitWidth;
12246 int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12247 if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12248 BitOffset -= BeginOffset;
12249 V = VInner;
12250 } else {
12251 V = VOuter;
12253 continue;
12256 break;
12258 assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12259 BroadcastIdx = BitOffset / NumEltBits;
12261 // Do we need to bitcast the source to retrieve the original broadcast index?
12262 bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12264 // Check if this is a broadcast of a scalar. We special case lowering
12265 // for scalars so that we can more effectively fold with loads.
12266 // If the original value has a larger element type than the shuffle, the
12267 // broadcast element is in essence truncated. Make that explicit to ease
12268 // folding.
12269 if (BitCastSrc && VT.isInteger())
12270 if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12271 DL, VT, V, BroadcastIdx, Subtarget, DAG))
12272 return TruncBroadcast;
12274 MVT BroadcastVT = VT;
12276 // Also check the simpler case, where we can directly reuse the scalar.
12277 if (!BitCastSrc &&
12278 ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12279 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12280 V = V.getOperand(BroadcastIdx);
12282 // If we can't broadcast from a register, check that the input is a load.
12283 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12284 return SDValue();
12285 } else if (MayFoldLoad(V) && !cast<LoadSDNode>(V)->isVolatile()) {
12286 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12287 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
12288 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
12289 Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
12290 ? X86ISD::MOVDDUP
12291 : Opcode;
12294 // If we are broadcasting a load that is only used by the shuffle
12295 // then we can reduce the vector load to the broadcasted scalar load.
12296 LoadSDNode *Ld = cast<LoadSDNode>(V);
12297 SDValue BaseAddr = Ld->getOperand(1);
12298 EVT SVT = BroadcastVT.getScalarType();
12299 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12300 assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12301 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
12302 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12303 DAG.getMachineFunction().getMachineMemOperand(
12304 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12305 DAG.makeEquivalentMemoryOrdering(Ld, V);
12306 } else if (!BroadcastFromReg) {
12307 // We can't broadcast from a vector register.
12308 return SDValue();
12309 } else if (BitOffset != 0) {
12310 // We can only broadcast from the zero-element of a vector register,
12311 // but it can be advantageous to broadcast from the zero-element of a
12312 // subvector.
12313 if (!VT.is256BitVector() && !VT.is512BitVector())
12314 return SDValue();
12316 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12317 if (VT == MVT::v4f64 || VT == MVT::v4i64)
12318 return SDValue();
12320 // Only broadcast the zero-element of a 128-bit subvector.
12321 if ((BitOffset % 128) != 0)
12322 return SDValue();
12324 assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12325 "Unexpected bit-offset");
12326 assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12327 "Unexpected vector size");
12328 unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12329 V = extract128BitVector(V, ExtractIdx, DAG, DL);
12332 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
12333 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
12334 DAG.getBitcast(MVT::f64, V));
12336 // Bitcast back to the same scalar type as BroadcastVT.
12337 if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
12338 assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
12339 "Unexpected vector element size");
12340 MVT ExtVT;
12341 if (V.getValueType().isVector()) {
12342 unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12343 ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
12344 } else {
12345 ExtVT = BroadcastVT.getScalarType();
12347 V = DAG.getBitcast(ExtVT, V);
12350 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12351 if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
12352 V = DAG.getBitcast(MVT::f64, V);
12353 unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
12354 BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
12357 // We only support broadcasting from 128-bit vectors to minimize the
12358 // number of patterns we need to deal with in isel. So extract down to
12359 // 128-bits, removing as many bitcasts as possible.
12360 if (V.getValueSizeInBits() > 128) {
12361 MVT ExtVT = V.getSimpleValueType().getScalarType();
12362 ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
12363 V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12364 V = DAG.getBitcast(ExtVT, V);
12367 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12370 // Check whether we can use INSERTPS to perform the shuffle. We only use
12371 // INSERTPS when the V1 elements are already in the correct locations
12372 // because otherwise we can always just use two SHUFPS instructions, which
12373 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12374 // perform INSERTPS if a single V1 element is out of place and all V2
12375 // elements are zeroable.
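// For example, the v4f32 mask { 0, 1, 6, 3 } with nothing zeroable inserts
// element 2 of V2 into lane 2 of V1, giving an INSERTPS immediate of 0xA0
// (source element 2, destination lane 2, empty zero mask).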
12376 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12377 unsigned &InsertPSMask,
12378 const APInt &Zeroable,
12379 ArrayRef<int> Mask, SelectionDAG &DAG) {
12380 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12381 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12382 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12384 // Attempt to match INSERTPS with one element from VA or VB being
12385 // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12386 // are updated.
12387 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12388 ArrayRef<int> CandidateMask) {
12389 unsigned ZMask = 0;
12390 int VADstIndex = -1;
12391 int VBDstIndex = -1;
12392 bool VAUsedInPlace = false;
12394 for (int i = 0; i < 4; ++i) {
12395 // Synthesize a zero mask from the zeroable elements (includes undefs).
12396 if (Zeroable[i]) {
12397 ZMask |= 1 << i;
12398 continue;
12401 // Flag if we use any VA inputs in place.
12402 if (i == CandidateMask[i]) {
12403 VAUsedInPlace = true;
12404 continue;
12407 // We can only insert a single non-zeroable element.
12408 if (VADstIndex >= 0 || VBDstIndex >= 0)
12409 return false;
12411 if (CandidateMask[i] < 4) {
12412 // VA input out of place for insertion.
12413 VADstIndex = i;
12414 } else {
12415 // VB input for insertion.
12416 VBDstIndex = i;
12420 // Don't bother if we have no (non-zeroable) element for insertion.
12421 if (VADstIndex < 0 && VBDstIndex < 0)
12422 return false;
12424 // Determine element insertion src/dst indices. The src index is from the
12425 // start of the inserted vector, not the start of the concatenated vector.
12426 unsigned VBSrcIndex = 0;
12427 if (VADstIndex >= 0) {
12428 // If we have a VA input out of place, we use VA as the V2 element
12429 // insertion and don't use the original V2 at all.
12430 VBSrcIndex = CandidateMask[VADstIndex];
12431 VBDstIndex = VADstIndex;
12432 VB = VA;
12433 } else {
12434 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12437 // If no V1 inputs are used in place, then the result is created only from
12438 // the zero mask and the V2 insertion - so remove V1 dependency.
12439 if (!VAUsedInPlace)
12440 VA = DAG.getUNDEF(MVT::v4f32);
12442 // Update V1, V2 and InsertPSMask accordingly.
12443 V1 = VA;
12444 V2 = VB;
12446 // Insert the V2 element into the desired position.
12447 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12448 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12449 return true;
12452 if (matchAsInsertPS(V1, V2, Mask))
12453 return true;
12455 // Commute and try again.
12456 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
12457 ShuffleVectorSDNode::commuteMask(CommutedMask);
12458 if (matchAsInsertPS(V2, V1, CommutedMask))
12459 return true;
12461 return false;
12464 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12465 ArrayRef<int> Mask, const APInt &Zeroable,
12466 SelectionDAG &DAG) {
12467 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12468 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12470 // Attempt to match the insertps pattern.
12471 unsigned InsertPSMask;
12472 if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12473 return SDValue();
12475 // Insert the V2 element into the desired position.
12476 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12477 DAG.getConstant(InsertPSMask, DL, MVT::i8));
12480 /// Try to lower a shuffle as a permute of the inputs followed by an
12481 /// UNPCK instruction.
12483 /// This specifically targets cases where we end up alternating between
12484 /// the two inputs, and so can permute them into something that feeds a single
12485 /// UNPCK instruction. Note that this routine only targets integer vectors
12486 /// because for floating point vectors we have a generalized SHUFPS lowering
12487 /// strategy that handles everything that doesn't *exactly* match an unpack,
12488 /// making this clever lowering unnecessary.
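/// For example (illustrative), the v8i16 mask { 0, 8, 2, 10, 4, 12, 6, 14 }
/// can be handled by first permuting each input so that words 0, 2, 4 and 6
/// occupy its low half and then issuing a single PUNPCKLWD.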
12489 static SDValue lowerShuffleAsPermuteAndUnpack(
12490 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12491 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12492 assert(!VT.isFloatingPoint() &&
12493 "This routine only supports integer vectors.");
12494 assert(VT.is128BitVector() &&
12495 "This routine only works on 128-bit vectors.");
12496 assert(!V2.isUndef() &&
12497 "This routine should only be used when blending two inputs.");
12498 assert(Mask.size() >= 2 && "Single element masks are invalid.");
12500 int Size = Mask.size();
12502 int NumLoInputs =
12503 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
12504 int NumHiInputs =
12505 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
12507 bool UnpackLo = NumLoInputs >= NumHiInputs;
12509 auto TryUnpack = [&](int ScalarSize, int Scale) {
12510 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
12511 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
12513 for (int i = 0; i < Size; ++i) {
12514 if (Mask[i] < 0)
12515 continue;
12517 // Each element of the unpack contains Scale elements from this mask.
12518 int UnpackIdx = i / Scale;
12520 // We only handle the case where V1 feeds the first slots of the unpack.
12521 // We rely on canonicalization to ensure this is the case.
12522 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
12523 return SDValue();
12525 // Setup the mask for this input. The indexing is tricky as we have to
12526 // handle the unpack stride.
12527 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
12528 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
12529 Mask[i] % Size;
12532 // If we will have to shuffle both inputs to use the unpack, check whether
12533 // we can just unpack first and shuffle the result. If so, skip this unpack.
12534 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
12535 !isNoopShuffleMask(V2Mask))
12536 return SDValue();
12538 // Shuffle the inputs into place.
12539 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
12540 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
12542 // Cast the inputs to the type we will use to unpack them.
12543 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
12544 V1 = DAG.getBitcast(UnpackVT, V1);
12545 V2 = DAG.getBitcast(UnpackVT, V2);
12547 // Unpack the inputs and cast the result back to the desired type.
12548 return DAG.getBitcast(
12549 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
12550 UnpackVT, V1, V2));
12553 // We try each unpack from the largest to the smallest to find one
12554 // that fits this mask.
12555 int OrigScalarSize = VT.getScalarSizeInBits();
12556 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
12557 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
12558 return Unpack;
12560 // If we're shuffling with a zero vector then we're better off not doing
12561 // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
12562 if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
12563 ISD::isBuildVectorAllZeros(V2.getNode()))
12564 return SDValue();
12566 // If none of the unpack-rooted lowerings worked (or were profitable) try an
12567 // initial unpack.
12568 if (NumLoInputs == 0 || NumHiInputs == 0) {
12569 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
12570 "We have to have *some* inputs!");
12571 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
12573 // FIXME: We could consider the total complexity of the permute of each
12574 // possible unpacking. Or at the least we should consider how many
12575 // half-crossings are created.
12576 // FIXME: We could consider commuting the unpacks.
12578 SmallVector<int, 32> PermMask((unsigned)Size, -1);
12579 for (int i = 0; i < Size; ++i) {
12580 if (Mask[i] < 0)
12581 continue;
12583 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
12585 PermMask[i] =
12586 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
12588 return DAG.getVectorShuffle(
12589 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
12590 DL, VT, V1, V2),
12591 DAG.getUNDEF(VT), PermMask);
12594 return SDValue();
12597 /// Handle lowering of 2-lane 64-bit floating point shuffles.
12599 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
12600 /// support for floating point shuffles but not integer shuffles. These
12601 /// instructions will incur a domain crossing penalty on some chips though so
12602 /// it is better to avoid lowering through this for integer vectors where
12603 /// possible.
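/// For example, a single-input mask { 1, 1 } that is not matched as a
/// broadcast becomes SHUFPD (VPERMILPD with AVX) with immediate 3, and a
/// two-input mask { 0, 3 } that is not caught by the MOVSD/blend/unpack
/// paths falls through to SHUFPD with immediate 2, taking the low element
/// from V1 and the high element from V2.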
12604 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12605 const APInt &Zeroable, SDValue V1, SDValue V2,
12606 const X86Subtarget &Subtarget,
12607 SelectionDAG &DAG) {
12608 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12609 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12610 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12612 if (V2.isUndef()) {
12613 // Check for being able to broadcast a single element.
12614 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
12615 Mask, Subtarget, DAG))
12616 return Broadcast;
12618 // Straight shuffle of a single input vector. Simulate this by using the
12619 // single input as both of the "inputs" to this instruction.
12620 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
12622 if (Subtarget.hasAVX()) {
12623 // If we have AVX, we can use VPERMILPD which will allow folding a load
12624 // into the shuffle.
12625 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
12626 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
12629 return DAG.getNode(
12630 X86ISD::SHUFP, DL, MVT::v2f64,
12631 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12632 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12633 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
12635 assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12636 assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12637 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12638 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12640 if (Subtarget.hasAVX2())
12641 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12642 return Extract;
12644 // When loading a scalar and then shuffling it into a vector we can often do
12645 // the insertion cheaply.
12646 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12647 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12648 return Insertion;
12649 // Try inverting the insertion since for v2 masks it is easy to do and we
12650 // can't reliably sort the mask one way or the other.
12651 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
12652 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
12653 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12654 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12655 return Insertion;
12657 // Try to use one of the special instruction patterns to handle two common
12658 // blend patterns if a zero-blend above didn't work.
12659 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
12660 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
12661 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
12662 // We can either use a special instruction to load over the low double or
12663 // to move just the low double.
12664 return DAG.getNode(
12665 X86ISD::MOVSD, DL, MVT::v2f64, V2,
12666 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
12668 if (Subtarget.hasSSE41())
12669 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
12670 Zeroable, Subtarget, DAG))
12671 return Blend;
12673 // Use dedicated unpack instructions for masks that match their pattern.
12674 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
12675 return V;
12677 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
12678 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
12679 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
12682 /// Handle lowering of 2-lane 64-bit integer shuffles.
12684 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
12685 /// the integer unit to minimize domain crossing penalties. However, for blends
12686 /// it falls back to the floating point shuffle operation with appropriate bit
12687 /// casting.
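/// For example, a single-input mask { 1, 1 } that is not lowered as a
/// broadcast is rewritten as the v4i32 mask { 2, 3, 2, 3 } and emitted as
/// PSHUFD with immediate 0xEE, staying entirely in the integer domain.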
12688 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12689 const APInt &Zeroable, SDValue V1, SDValue V2,
12690 const X86Subtarget &Subtarget,
12691 SelectionDAG &DAG) {
12692 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12693 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12694 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12696 if (V2.isUndef()) {
12697 // Check for being able to broadcast a single element.
12698 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
12699 Mask, Subtarget, DAG))
12700 return Broadcast;
12702 // Straight shuffle of a single input vector. For everything from SSE2
12703 // onward this has a single fast instruction with no scary immediates.
12704 // We have to map the mask as it is actually a v4i32 shuffle instruction.
12705 V1 = DAG.getBitcast(MVT::v4i32, V1);
12706 int WidenedMask[4] = {
12707 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
12708 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
12709 return DAG.getBitcast(
12710 MVT::v2i64,
12711 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
12712 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
12714 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
12715 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
12716 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12717 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12719 if (Subtarget.hasAVX2())
12720 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12721 return Extract;
12723 // Try to use shift instructions.
12724 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
12725 Zeroable, Subtarget, DAG))
12726 return Shift;
12728 // When loading a scalar and then shuffling it into a vector we can often do
12729 // the insertion cheaply.
12730 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12731 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12732 return Insertion;
12733 // Try inverting the insertion since for v2 masks it is easy to do and we
12734 // can't reliably sort the mask one way or the other.
12735 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
12736 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12737 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12738 return Insertion;
12740 // We have different paths for blend lowering, but they all must use the
12741 // *exact* same predicate.
12742 bool IsBlendSupported = Subtarget.hasSSE41();
12743 if (IsBlendSupported)
12744 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
12745 Zeroable, Subtarget, DAG))
12746 return Blend;
12748 // Use dedicated unpack instructions for masks that match their pattern.
12749 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
12750 return V;
12752 // Try to use byte rotation instructions.
12753 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
12754 if (Subtarget.hasSSSE3()) {
12755 if (Subtarget.hasVLX())
12756 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
12757 Subtarget, DAG))
12758 return Rotate;
12760 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
12761 Subtarget, DAG))
12762 return Rotate;
12765 // If we have direct support for blends, we should lower by decomposing into
12766 // a permute. That will be faster than the domain cross.
12767 if (IsBlendSupported)
12768 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
12769 Subtarget, DAG);
12771 // We implement this with SHUFPD which is pretty lame because it will likely
12772 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
12773 // However, all the alternatives are still more cycles and newer chips don't
12774 // have this problem. It would be really nice if x86 had better shuffles here.
12775 V1 = DAG.getBitcast(MVT::v2f64, V1);
12776 V2 = DAG.getBitcast(MVT::v2f64, V2);
12777 return DAG.getBitcast(MVT::v2i64,
12778 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
12781 /// Lower a vector shuffle using the SHUFPS instruction.
12783 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
12784 /// It makes no assumptions about whether this is the *best* lowering; it simply
12785 /// uses it.
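/// For example (illustrative), the mask { 0, 1, 6, 7 } needs a single SHUFPS
/// (immediate 0xE4) since each half of the result uses one input, whereas a
/// mask such as { 0, 1, 2, 7 } first blends V2[3] next to a V1 element with
/// one SHUFPS and then places everything with a second SHUFPS.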
12786 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
12787 ArrayRef<int> Mask, SDValue V1,
12788 SDValue V2, SelectionDAG &DAG) {
12789 SDValue LowV = V1, HighV = V2;
12790 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
12792 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12794 if (NumV2Elements == 1) {
12795 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
12797 // Compute the index adjacent to V2Index and in the same half by toggling
12798 // the low bit.
12799 int V2AdjIndex = V2Index ^ 1;
12801 if (Mask[V2AdjIndex] < 0) {
12802 // Handles all the cases where we have a single V2 element and an undef.
12803 // This will only ever happen in the high lanes because we commute the
12804 // vector otherwise.
12805 if (V2Index < 2)
12806 std::swap(LowV, HighV);
12807 NewMask[V2Index] -= 4;
12808 } else {
12809 // Handle the case where the V2 element ends up adjacent to a V1 element.
12810 // To make this work, blend them together as the first step.
12811 int V1Index = V2AdjIndex;
12812 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
12813 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
12814 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12816 // Now proceed to reconstruct the final blend as we have the necessary
12817 // high or low half formed.
12818 if (V2Index < 2) {
12819 LowV = V2;
12820 HighV = V1;
12821 } else {
12822 HighV = V2;
12824 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
12825 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
12827 } else if (NumV2Elements == 2) {
12828 if (Mask[0] < 4 && Mask[1] < 4) {
12829 // Handle the easy case where we have V1 in the low lanes and V2 in the
12830 // high lanes.
12831 NewMask[2] -= 4;
12832 NewMask[3] -= 4;
12833 } else if (Mask[2] < 4 && Mask[3] < 4) {
12834 // We also handle the reversed case because this utility may get called
12835 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
12836 // arrange things in the right direction.
12837 NewMask[0] -= 4;
12838 NewMask[1] -= 4;
12839 HighV = V1;
12840 LowV = V2;
12841 } else {
12842 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
12843 // trying to place elements directly, just blend them and set up the final
12844 // shuffle to place them.
12846 // The first two blend mask elements are for V1, the second two are for
12847 // V2.
12848 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
12849 Mask[2] < 4 ? Mask[2] : Mask[3],
12850 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
12851 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
12852 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
12853 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12855 // Now we do a normal shuffle of V1 by giving V1 as both operands to
12856 // a blend.
12857 LowV = HighV = V1;
12858 NewMask[0] = Mask[0] < 4 ? 0 : 2;
12859 NewMask[1] = Mask[0] < 4 ? 2 : 0;
12860 NewMask[2] = Mask[2] < 4 ? 1 : 3;
12861 NewMask[3] = Mask[2] < 4 ? 3 : 1;
12864 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
12865 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
12868 /// Lower 4-lane 32-bit floating point shuffles.
12870 /// Uses instructions exclusively from the floating point unit to minimize
12871 /// domain crossing penalties, as these are sufficient to implement all v4f32
12872 /// shuffles.
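/// For example, the single-input mask { 0, 0, 2, 2 } becomes MOVSLDUP on
/// SSE3+ targets, while the two-input mask { 0, 1, 4, 5 } uses MOVLHPS on
/// plain SSE1 and otherwise falls through to a single SHUFPS with
/// immediate 0x44.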
12873 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12874 const APInt &Zeroable, SDValue V1, SDValue V2,
12875 const X86Subtarget &Subtarget,
12876 SelectionDAG &DAG) {
12877 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12878 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12879 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12881 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12883 if (NumV2Elements == 0) {
12884 // Check for being able to broadcast a single element.
12885 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
12886 Mask, Subtarget, DAG))
12887 return Broadcast;
12889 // Use even/odd duplicate instructions for masks that match their pattern.
12890 if (Subtarget.hasSSE3()) {
12891 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
12892 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
12893 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
12894 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
12897 if (Subtarget.hasAVX()) {
12898 // If we have AVX, we can use VPERMILPS which will allow folding a load
12899 // into the shuffle.
12900 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
12901 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12904 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
12905 // in SSE1 because otherwise they are widened to v2f64 and never get here.
12906 if (!Subtarget.hasSSE2()) {
12907 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
12908 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
12909 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
12910 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
12913 // Otherwise, use a straight shuffle of a single input vector. We pass the
12914 // input vector to both operands to simulate this with a SHUFPS.
12915 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
12916 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12919 if (Subtarget.hasAVX2())
12920 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12921 return Extract;
12923 // There are special ways we can lower some single-element blends. However, we
12924 // have custom ways we can lower more complex single-element blends below that
12925 // we defer to if both this and BLENDPS fail to match, so restrict this to
12926 // when the V2 input is targeting element 0 of the mask -- that is the fast
12927 // case here.
12928 if (NumV2Elements == 1 && Mask[0] >= 4)
12929 if (SDValue V = lowerShuffleAsElementInsertion(
12930 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
12931 return V;
12933 if (Subtarget.hasSSE41()) {
12934 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
12935 Zeroable, Subtarget, DAG))
12936 return Blend;
12938 // Use INSERTPS if we can complete the shuffle efficiently.
12939 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
12940 return V;
12942 if (!isSingleSHUFPSMask(Mask))
12943 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
12944 V2, Mask, DAG))
12945 return BlendPerm;
12948 // Use low/high mov instructions. These are only valid in SSE1 because
12949 // otherwise they are widened to v2f64 and never get here.
12950 if (!Subtarget.hasSSE2()) {
12951 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
12952 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
12953 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
12954 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
12957 // Use dedicated unpack instructions for masks that match their pattern.
12958 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
12959 return V;
12961 // Otherwise fall back to a SHUFPS lowering strategy.
12962 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
12965 /// Lower 4-lane i32 vector shuffles.
12967 /// We try to handle these with integer-domain shuffles where we can, but for
12968 /// blends we use the floating point domain blend instructions.
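/// For example (illustrative), the mask { 0, Z, 1, Z }, where Z denotes a
/// zeroable element, is caught by the zext path first (e.g. PMOVZXDQ on
/// SSE4.1), while the single-input mask { 0, 0, 1, 1 } with nothing zeroable
/// becomes PSHUFD with immediate 0x50.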
12969 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12970 const APInt &Zeroable, SDValue V1, SDValue V2,
12971 const X86Subtarget &Subtarget,
12972 SelectionDAG &DAG) {
12973 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
12974 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
12975 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12977 // Whenever we can lower this as a zext, that instruction is strictly faster
12978 // than any alternative. It also allows us to fold memory operands into the
12979 // shuffle in many cases.
12980 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
12981 Zeroable, Subtarget, DAG))
12982 return ZExt;
12984 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12986 if (NumV2Elements == 0) {
12987 // Check for being able to broadcast a single element.
12988 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
12989 Mask, Subtarget, DAG))
12990 return Broadcast;
12992 // Straight shuffle of a single input vector. For everything from SSE2
12993 // onward this has a single fast instruction with no scary immediates.
12994 // We coerce the shuffle pattern to be compatible with UNPCK instructions
12995 // but we aren't actually going to use the UNPCK instruction because doing
12996 // so prevents folding a load into this instruction or making a copy.
12997 const int UnpackLoMask[] = {0, 0, 1, 1};
12998 const int UnpackHiMask[] = {2, 2, 3, 3};
12999 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
13000 Mask = UnpackLoMask;
13001 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
13002 Mask = UnpackHiMask;
13004 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13005 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13008 if (Subtarget.hasAVX2())
13009 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13010 return Extract;
13012 // Try to use shift instructions.
13013 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
13014 Zeroable, Subtarget, DAG))
13015 return Shift;
13017 // There are special ways we can lower some single-element blends.
13018 if (NumV2Elements == 1)
13019 if (SDValue V = lowerShuffleAsElementInsertion(
13020 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13021 return V;
13023 // We have different paths for blend lowering, but they all must use the
13024 // *exact* same predicate.
13025 bool IsBlendSupported = Subtarget.hasSSE41();
13026 if (IsBlendSupported)
13027 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13028 Zeroable, Subtarget, DAG))
13029 return Blend;
13031 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13032 Zeroable, Subtarget, DAG))
13033 return Masked;
13035 // Use dedicated unpack instructions for masks that match their pattern.
13036 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13037 return V;
13039 // Try to use byte rotation instructions.
13040   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13041 if (Subtarget.hasSSSE3()) {
13042 if (Subtarget.hasVLX())
13043 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
13044 Subtarget, DAG))
13045 return Rotate;
13047 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13048 Subtarget, DAG))
13049 return Rotate;
13052 // Assume that a single SHUFPS is faster than an alternative sequence of
13053 // multiple instructions (even if the CPU has a domain penalty).
13054 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13055 if (!isSingleSHUFPSMask(Mask)) {
13056     // If we have direct support for blends, we should lower by decomposing into
13057     // permutes and a blend. That will be faster than the domain cross.
13058 if (IsBlendSupported)
13059 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
13060 Subtarget, DAG);
13062 // Try to lower by permuting the inputs into an unpack instruction.
13063 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13064 Mask, Subtarget, DAG))
13065 return Unpack;
13068 // We implement this with SHUFPS because it can blend from two vectors.
13069 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13070 // up the inputs, bypassing domain shift penalties that we would incur if we
13071 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13072 // relevant.
13073 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13074 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13075 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13076 return DAG.getBitcast(MVT::v4i32, ShufPS);
13079 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13080 /// shuffle lowering, and the most complex part.
13082 /// The lowering strategy is to try to form pairs of input lanes which are
13083 /// targeted at the same half of the final vector, and then use a dword shuffle
13084 /// to place them onto the right half, and finally unpack the paired lanes into
13085 /// their final position.
13087 /// The exact breakdown of how to form these dword pairs and align them on the
13088 /// correct sides is really tricky. See the comments within the function for
13089 /// more of the details.
13091 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13092 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13093 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13094 /// vector, form the analogous 128-bit 8-element Mask.
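/// As a rough end-to-end illustration (assuming the generic path below is
/// taken): the single-input mask <2, 3, 6, 7, 0, 1, 4, 5> already has its
/// words paired into dwords, so the pairing half-shuffles are no-ops; one
/// PSHUFD then moves dword {6,7} into the low half and {0,1} into the high
/// half, and a final PSHUFLW/PSHUFHW puts each pair into its final slots.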
13095 static SDValue lowerV8I16GeneralSingleInputShuffle(
13096 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13097 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13098 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13099 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13101 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13102 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13103 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13105 // Attempt to directly match PSHUFLW or PSHUFHW.
13106 if (isUndefOrInRange(LoMask, 0, 4) &&
13107 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13108 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13109 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13111 if (isUndefOrInRange(HiMask, 4, 8) &&
13112 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13113 for (int i = 0; i != 4; ++i)
13114 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13115 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13116 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
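// For example: <2, 1, 0, 3, 4, 5, 6, 7> is a pure PSHUFLW (immediate 0xC6),
// while <0, 1, 2, 3, 7, 6, 5, 4> becomes PSHUFHW with the high mask shifted
// down to <3, 2, 1, 0>, i.e. immediate 0x1B.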
13119 SmallVector<int, 4> LoInputs;
13120 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13121 array_pod_sort(LoInputs.begin(), LoInputs.end());
13122 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13123 SmallVector<int, 4> HiInputs;
13124 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13125 array_pod_sort(HiInputs.begin(), HiInputs.end());
13126 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13127 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13128 int NumHToL = LoInputs.size() - NumLToL;
13129 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13130 int NumHToH = HiInputs.size() - NumLToH;
13131 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13132 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13133 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13134 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13136   // If we are shuffling values from only one half, check how many different DWORD
13137 // pairs we need to create. If only 1 or 2 then we can perform this as a
13138 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
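// For example: <1, 0, 3, 2, 1, 0, 3, 2> only needs the dword pairs (1,0) and
// (3,2), so it can be emitted as PSHUFLW <1, 0, 3, 2> (immediate 0xB1)
// followed by PSHUFD <0, 1, 0, 1> (immediate 0x44).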
13139 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13140 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13141 V = DAG.getNode(ShufWOp, DL, VT, V,
13142 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13143 V = DAG.getBitcast(PSHUFDVT, V);
13144 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13145 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13146 return DAG.getBitcast(VT, V);
13149 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13150 int PSHUFDMask[4] = { -1, -1, -1, -1 };
13151 SmallVector<std::pair<int, int>, 4> DWordPairs;
13152 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13154 // Collect the different DWORD pairs.
13155 for (int DWord = 0; DWord != 4; ++DWord) {
13156 int M0 = Mask[2 * DWord + 0];
13157 int M1 = Mask[2 * DWord + 1];
13158 M0 = (M0 >= 0 ? M0 % 4 : M0);
13159 M1 = (M1 >= 0 ? M1 % 4 : M1);
13160 if (M0 < 0 && M1 < 0)
13161 continue;
13163 bool Match = false;
13164 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13165 auto &DWordPair = DWordPairs[j];
13166 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13167 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13168 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13169 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13170 PSHUFDMask[DWord] = DOffset + j;
13171 Match = true;
13172 break;
13175 if (!Match) {
13176 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13177 DWordPairs.push_back(std::make_pair(M0, M1));
13181 if (DWordPairs.size() <= 2) {
13182 DWordPairs.resize(2, std::make_pair(-1, -1));
13183 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13184 DWordPairs[1].first, DWordPairs[1].second};
13185 if ((NumHToL + NumHToH) == 0)
13186 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13187 if ((NumLToL + NumLToH) == 0)
13188 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13192 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13193 // such inputs we can swap two of the dwords across the half mark and end up
13194 // with <=2 inputs to each half in each half. Once there, we can fall through
13195 // to the generic code below. For example:
13197 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13198 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13200 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13201 // and an existing 2-into-2 on the other half. In this case we may have to
13202 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13203 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13204 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13205 // because any other situation (including a 3-into-1 or 1-into-3 in the other
13206 // half than the one we target for fixing) will be fixed when we re-enter this
13207   // path. We will also combine any resulting sequence of PSHUFD instructions
13208   // into a single instruction. Here is an example of the tricky case:
13210 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13211 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13213 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13215 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13216 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13218 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13219 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13221 // The result is fine to be handled by the generic logic.
13222 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13223 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13224 int AOffset, int BOffset) {
13225 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13226 "Must call this with A having 3 or 1 inputs from the A half.");
13227 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13228 "Must call this with B having 1 or 3 inputs from the B half.");
13229 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13230 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13232 bool ThreeAInputs = AToAInputs.size() == 3;
13234 // Compute the index of dword with only one word among the three inputs in
13235 // a half by taking the sum of the half with three inputs and subtracting
13236 // the sum of the actual three inputs. The difference is the remaining
13237 // slot.
13238 int ADWord, BDWord;
13239 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13240 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13241 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13242 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13243 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13244 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13245 int TripleNonInputIdx =
13246 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13247 TripleDWord = TripleNonInputIdx / 2;
13249 // We use xor with one to compute the adjacent DWord to whichever one the
13250 // OneInput is in.
13251 OneInputDWord = (OneInput / 2) ^ 1;
13253 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13254 // and BToA inputs. If there is also such a problem with the BToB and AToB
13255 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13256 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13257 // is essential that we don't *create* a 3<-1 as then we might oscillate.
13258 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13259       // Compute how many inputs will be flipped by swapping these DWords. We need
13260       // to balance this to ensure we don't form a 3-1 shuffle in the other half.
13263 int NumFlippedAToBInputs =
13264 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13265 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13266 int NumFlippedBToBInputs =
13267 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13268 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13269 if ((NumFlippedAToBInputs == 1 &&
13270 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13271 (NumFlippedBToBInputs == 1 &&
13272 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13273 // We choose whether to fix the A half or B half based on whether that
13274 // half has zero flipped inputs. At zero, we may not be able to fix it
13275 // with that half. We also bias towards fixing the B half because that
13276 // will more commonly be the high half, and we have to bias one way.
13277 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13278 ArrayRef<int> Inputs) {
13279 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13280 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13281 // Determine whether the free index is in the flipped dword or the
13282 // unflipped dword based on where the pinned index is. We use this bit
13283 // in an xor to conditionally select the adjacent dword.
13284 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13285 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13286 if (IsFixIdxInput == IsFixFreeIdxInput)
13287 FixFreeIdx += 1;
13288 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13289 assert(IsFixIdxInput != IsFixFreeIdxInput &&
13290 "We need to be changing the number of flipped inputs!");
13291 int PSHUFHalfMask[] = {0, 1, 2, 3};
13292 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13293 V = DAG.getNode(
13294 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13295 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13296 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13298 for (int &M : Mask)
13299 if (M >= 0 && M == FixIdx)
13300 M = FixFreeIdx;
13301 else if (M >= 0 && M == FixFreeIdx)
13302 M = FixIdx;
13304 if (NumFlippedBToBInputs != 0) {
13305 int BPinnedIdx =
13306 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13307 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13308 } else {
13309 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13310 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13311 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13316 int PSHUFDMask[] = {0, 1, 2, 3};
13317 PSHUFDMask[ADWord] = BDWord;
13318 PSHUFDMask[BDWord] = ADWord;
13319 V = DAG.getBitcast(
13321 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13322 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13324 // Adjust the mask to match the new locations of A and B.
13325 for (int &M : Mask)
13326 if (M >= 0 && M/2 == ADWord)
13327 M = 2 * BDWord + M % 2;
13328 else if (M >= 0 && M/2 == BDWord)
13329 M = 2 * ADWord + M % 2;
13331 // Recurse back into this routine to re-compute state now that this isn't
13332 // a 3 and 1 problem.
13333 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13335 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13336 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13337 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13338 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13340 // At this point there are at most two inputs to the low and high halves from
13341 // each half. That means the inputs can always be grouped into dwords and
13342 // those dwords can then be moved to the correct half with a dword shuffle.
13343 // We use at most one low and one high word shuffle to collect these paired
13344 // inputs into dwords, and finally a dword shuffle to place them.
13345 int PSHUFLMask[4] = {-1, -1, -1, -1};
13346 int PSHUFHMask[4] = {-1, -1, -1, -1};
13347 int PSHUFDMask[4] = {-1, -1, -1, -1};
13349 // First fix the masks for all the inputs that are staying in their
13350 // original halves. This will then dictate the targets of the cross-half
13351 // shuffles.
13352 auto fixInPlaceInputs =
13353 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13354 MutableArrayRef<int> SourceHalfMask,
13355 MutableArrayRef<int> HalfMask, int HalfOffset) {
13356 if (InPlaceInputs.empty())
13357 return;
13358 if (InPlaceInputs.size() == 1) {
13359 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13360 InPlaceInputs[0] - HalfOffset;
13361 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13362 return;
13364 if (IncomingInputs.empty()) {
13365 // Just fix all of the in place inputs.
13366 for (int Input : InPlaceInputs) {
13367 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13368 PSHUFDMask[Input / 2] = Input / 2;
13370 return;
13373 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13374 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13375 InPlaceInputs[0] - HalfOffset;
13376 // Put the second input next to the first so that they are packed into
13377 // a dword. We find the adjacent index by toggling the low bit.
13378 int AdjIndex = InPlaceInputs[0] ^ 1;
13379 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13380 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13381 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13383 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13384 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13386 // Now gather the cross-half inputs and place them into a free dword of
13387 // their target half.
13388 // FIXME: This operation could almost certainly be simplified dramatically to
13389 // look more like the 3-1 fixing operation.
13390 auto moveInputsToRightHalf = [&PSHUFDMask](
13391 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13392 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13393 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13394 int DestOffset) {
13395 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13396 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13398 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13399 int Word) {
13400 int LowWord = Word & ~1;
13401 int HighWord = Word | 1;
13402 return isWordClobbered(SourceHalfMask, LowWord) ||
13403 isWordClobbered(SourceHalfMask, HighWord);
13406 if (IncomingInputs.empty())
13407 return;
13409 if (ExistingInputs.empty()) {
13410 // Map any dwords with inputs from them into the right half.
13411 for (int Input : IncomingInputs) {
13412 // If the source half mask maps over the inputs, turn those into
13413 // swaps and use the swapped lane.
13414 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13415 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13416 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13417 Input - SourceOffset;
13418 // We have to swap the uses in our half mask in one sweep.
13419 for (int &M : HalfMask)
13420 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13421 M = Input;
13422 else if (M == Input)
13423 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13424 } else {
13425 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13426 Input - SourceOffset &&
13427 "Previous placement doesn't match!");
13429 // Note that this correctly re-maps both when we do a swap and when
13430 // we observe the other side of the swap above. We rely on that to
13431 // avoid swapping the members of the input list directly.
13432 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13435 // Map the input's dword into the correct half.
13436 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13437 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13438 else
13439 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13440 Input / 2 &&
13441 "Previous placement doesn't match!");
13444 // And just directly shift any other-half mask elements to be same-half
13445 // as we will have mirrored the dword containing the element into the
13446 // same position within that half.
13447 for (int &M : HalfMask)
13448 if (M >= SourceOffset && M < SourceOffset + 4) {
13449 M = M - SourceOffset + DestOffset;
13450 assert(M >= 0 && "This should never wrap below zero!");
13452 return;
13455 // Ensure we have the input in a viable dword of its current half. This
13456 // is particularly tricky because the original position may be clobbered
13457 // by inputs being moved and *staying* in that half.
13458 if (IncomingInputs.size() == 1) {
13459 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13460 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13461 SourceOffset;
13462 SourceHalfMask[InputFixed - SourceOffset] =
13463 IncomingInputs[0] - SourceOffset;
13464 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13465 InputFixed);
13466 IncomingInputs[0] = InputFixed;
13468 } else if (IncomingInputs.size() == 2) {
13469 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13470 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13471 // We have two non-adjacent or clobbered inputs we need to extract from
13472 // the source half. To do this, we need to map them into some adjacent
13473 // dword slot in the source mask.
13474 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13475 IncomingInputs[1] - SourceOffset};
13477 // If there is a free slot in the source half mask adjacent to one of
13478 // the inputs, place the other input in it. We use (Index XOR 1) to
13479 // compute an adjacent index.
13480 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13481 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13482 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13483 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13484 InputsFixed[1] = InputsFixed[0] ^ 1;
13485 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13486 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13487 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13488 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13489 InputsFixed[0] = InputsFixed[1] ^ 1;
13490 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13491 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13492 // The two inputs are in the same DWord but it is clobbered and the
13493 // adjacent DWord isn't used at all. Move both inputs to the free
13494 // slot.
13495 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13496 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13497 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13498 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
13499 } else {
13500 // The only way we hit this point is if there is no clobbering
13501 // (because there are no off-half inputs to this half) and there is no
13502 // free slot adjacent to one of the inputs. In this case, we have to
13503 // swap an input with a non-input.
13504 for (int i = 0; i < 4; ++i)
13505 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13506 "We can't handle any clobbers here!");
13507 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13508 "Cannot have adjacent inputs here!");
13510 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13511 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13513 // We also have to update the final source mask in this case because
13514 // it may need to undo the above swap.
13515 for (int &M : FinalSourceHalfMask)
13516 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13517 M = InputsFixed[1] + SourceOffset;
13518 else if (M == InputsFixed[1] + SourceOffset)
13519 M = (InputsFixed[0] ^ 1) + SourceOffset;
13521 InputsFixed[1] = InputsFixed[0] ^ 1;
13524 // Point everything at the fixed inputs.
13525 for (int &M : HalfMask)
13526 if (M == IncomingInputs[0])
13527 M = InputsFixed[0] + SourceOffset;
13528 else if (M == IncomingInputs[1])
13529 M = InputsFixed[1] + SourceOffset;
13531 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13532 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
13534 } else {
13535 llvm_unreachable("Unhandled input size!");
13538 // Now hoist the DWord down to the right half.
13539 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13540 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13541 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13542 for (int &M : HalfMask)
13543 for (int Input : IncomingInputs)
13544 if (M == Input)
13545 M = FreeDWord * 2 + Input % 2;
13547 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13548 /*SourceOffset*/ 4, /*DestOffset*/ 0);
13549 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13550 /*SourceOffset*/ 0, /*DestOffset*/ 4);
13552 // Now enact all the shuffles we've computed to move the inputs into their
13553 // target half.
13554 if (!isNoopShuffleMask(PSHUFLMask))
13555 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13556 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13557 if (!isNoopShuffleMask(PSHUFHMask))
13558 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13559 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13560 if (!isNoopShuffleMask(PSHUFDMask))
13561 V = DAG.getBitcast(
13563 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13564 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13566 // At this point, each half should contain all its inputs, and we can then
13567 // just shuffle them into their final position.
13568 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13569 "Failed to lift all the high half inputs to the low mask!");
13570 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13571 "Failed to lift all the low half inputs to the high mask!");
13573 // Do a half shuffle for the low mask.
13574 if (!isNoopShuffleMask(LoMask))
13575 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13576 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13578 // Do a half shuffle with the high mask after shifting its values down.
13579 for (int &M : HiMask)
13580 if (M >= 0)
13581 M -= 4;
13582 if (!isNoopShuffleMask(HiMask))
13583 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13584 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13586 return V;
13589 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13590 /// blend if only one input is used.
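/// For example (v16i8, illustrative): for a mask of the form <0, 17, 2, 19,
/// ...>, V1's PSHUFB control becomes <0, 0x80, 2, 0x80, ...> and V2's becomes
/// <0x80, 1, 0x80, 3, ...>, and the two results are ORed together; if only
/// one control ends up non-zeroing, the OR is skipped entirely.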
13591 static SDValue lowerShuffleAsBlendOfPSHUFBs(
13592 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13593 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13594 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13595 "Lane crossing shuffle masks not supported");
13597 int NumBytes = VT.getSizeInBits() / 8;
13598 int Size = Mask.size();
13599 int Scale = NumBytes / Size;
13601 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13602 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13603 V1InUse = false;
13604 V2InUse = false;
13606 for (int i = 0; i < NumBytes; ++i) {
13607 int M = Mask[i / Scale];
13608 if (M < 0)
13609 continue;
13611 const int ZeroMask = 0x80;
13612 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13613 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13614 if (Zeroable[i / Scale])
13615 V1Idx = V2Idx = ZeroMask;
13617 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13618 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13619 V1InUse |= (ZeroMask != V1Idx);
13620 V2InUse |= (ZeroMask != V2Idx);
13623 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
13624 if (V1InUse)
13625 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13626 DAG.getBuildVector(ShufVT, DL, V1Mask));
13627 if (V2InUse)
13628 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13629 DAG.getBuildVector(ShufVT, DL, V2Mask));
13631 // If we need shuffled inputs from both, blend the two.
13632 SDValue V;
13633 if (V1InUse && V2InUse)
13634 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
13635 else
13636 V = V1InUse ? V1 : V2;
13638 // Cast the result back to the correct type.
13639 return DAG.getBitcast(VT, V);
13642 /// Generic lowering of 8-lane i16 shuffles.
13644 /// This handles both single-input shuffles and combined shuffle/blends with
13645 /// two inputs. The single input shuffles are immediately delegated to
13646 /// a dedicated lowering routine.
13648 /// The blends are lowered in one of three fundamental ways. If there are few
13649 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
13650 /// of the input is significantly cheaper when lowered as an interleaving of
13651 /// the two inputs, try to interleave them. Otherwise, blend the low and high
13652 /// halves of the inputs separately (making them have relatively few inputs)
13653 /// and then concatenate them.
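/// For example: <0, 9, 2, 11, 4, 13, 6, 15> keeps every word in place and is
/// a straight blend (a single PBLENDW on SSE4.1), while <0, 8, 1, 9, 2, 10,
/// 3, 11> is exactly the PUNPCKLWD interleaving of the two inputs.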
13654 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13655 const APInt &Zeroable, SDValue V1, SDValue V2,
13656 const X86Subtarget &Subtarget,
13657 SelectionDAG &DAG) {
13658 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13659 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13660 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13662 // Whenever we can lower this as a zext, that instruction is strictly faster
13663 // than any alternative.
13664 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13665 Zeroable, Subtarget, DAG))
13666 return ZExt;
13668 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13670 if (NumV2Inputs == 0) {
13671 // Check for being able to broadcast a single element.
13672 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13673 Mask, Subtarget, DAG))
13674 return Broadcast;
13676 // Try to use shift instructions.
13677 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
13678 Zeroable, Subtarget, DAG))
13679 return Shift;
13681 // Use dedicated unpack instructions for masks that match their pattern.
13682 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13683 return V;
13685 // Use dedicated pack instructions for masks that match their pattern.
13686 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13687 Subtarget))
13688 return V;
13690 // Try to use byte rotation instructions.
13691 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13692 Subtarget, DAG))
13693 return Rotate;
13695 // Make a copy of the mask so it can be modified.
13696 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
13697 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
13698 Subtarget, DAG);
13701 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
13702 "All single-input shuffles should be canonicalized to be V1-input "
13703 "shuffles.");
13705 // Try to use shift instructions.
13706 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
13707 Zeroable, Subtarget, DAG))
13708 return Shift;
13710 // See if we can use SSE4A Extraction / Insertion.
13711 if (Subtarget.hasSSE4A())
13712 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
13713 Zeroable, DAG))
13714 return V;
13716 // There are special ways we can lower some single-element blends.
13717 if (NumV2Inputs == 1)
13718 if (SDValue V = lowerShuffleAsElementInsertion(
13719 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13720 return V;
13722 // We have different paths for blend lowering, but they all must use the
13723 // *exact* same predicate.
13724 bool IsBlendSupported = Subtarget.hasSSE41();
13725 if (IsBlendSupported)
13726 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
13727 Zeroable, Subtarget, DAG))
13728 return Blend;
13730 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
13731 Zeroable, Subtarget, DAG))
13732 return Masked;
13734 // Use dedicated unpack instructions for masks that match their pattern.
13735 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13736 return V;
13738 // Use dedicated pack instructions for masks that match their pattern.
13739 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13740 Subtarget))
13741 return V;
13743 // Try to use byte rotation instructions.
13744 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
13745 Subtarget, DAG))
13746 return Rotate;
13748 if (SDValue BitBlend =
13749 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
13750 return BitBlend;
13752 // Try to use byte shift instructions to mask.
13753 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
13754 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13755 return V;
13757 // Try to lower by permuting the inputs into an unpack instruction.
13758 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
13759 Mask, Subtarget, DAG))
13760 return Unpack;
13762 // If we can't directly blend but can use PSHUFB, that will be better as it
13763 // can both shuffle and set up the inefficient blend.
13764 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
13765 bool V1InUse, V2InUse;
13766 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
13767 Zeroable, DAG, V1InUse, V2InUse);
13770   // We can always bit-blend if we have to, so the fallback strategy is to
13771 // decompose into single-input permutes and blends.
13772 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
13773 Mask, Subtarget, DAG);
13776 /// Check whether a compaction lowering can be done by dropping even
13777 /// elements and compute how many times even elements must be dropped.
13779 /// This handles shuffles which take every Nth element where N is a power of
13780 /// two. Example shuffle masks:
13782 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
13783 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
13784 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
13785 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
13786 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
13787 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
13789 /// Any of these lanes can of course be undef.
13791 /// This routine only supports N <= 3.
13792 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
13793 /// for larger N.
13795 /// \returns N above, or the number of times even elements must be dropped if
13796 /// there is such a number. Otherwise returns zero.
13797 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
13798 bool IsSingleInput) {
13799 // The modulus for the shuffle vector entries is based on whether this is
13800 // a single input or not.
13801 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
13802 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
13803 "We should only be called with masks with a power-of-2 size!");
13805 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
13807 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
13808 // and 2^3 simultaneously. This is because we may have ambiguity with
13809 // partially undef inputs.
13810 bool ViableForN[3] = {true, true, true};
13812 for (int i = 0, e = Mask.size(); i < e; ++i) {
13813 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
13814 // want.
13815 if (Mask[i] < 0)
13816 continue;
13818 bool IsAnyViable = false;
13819 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
13820 if (ViableForN[j]) {
13821 uint64_t N = j + 1;
13823 // The shuffle mask must be equal to (i * 2^N) % M.
13824 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
13825 IsAnyViable = true;
13826 else
13827 ViableForN[j] = false;
13829 // Early exit if we exhaust the possible powers of two.
13830 if (!IsAnyViable)
13831 break;
13834 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
13835 if (ViableForN[j])
13836 return j + 1;
13838 // Return 0 as there is no viable power of two.
13839 return 0;
13842 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
13843 ArrayRef<int> Mask, SDValue V1,
13844 SDValue V2, SelectionDAG &DAG) {
13845 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
13846 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
13848 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
13849 if (V2.isUndef())
13850 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
13852 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
13855 /// Generic lowering of v16i8 shuffles.
13857 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
13858 /// detect any complexity reducing interleaving. If that doesn't help, it uses
13859 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
13860 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
13861 /// back together.
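/// For example, when none of the cheaper paths below apply to a single-input
/// shuffle, the fallback widens the bytes into two v8i16 halves (an AND with
/// 0x00FF when only even byte lanes are used, otherwise PUNPCKLBW/PUNPCKHBW
/// against zero), shuffles each half as v8i16, and PACKUSWB glues them back
/// together.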
13862 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13863 const APInt &Zeroable, SDValue V1, SDValue V2,
13864 const X86Subtarget &Subtarget,
13865 SelectionDAG &DAG) {
13866 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
13867 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
13868 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
13870 // Try to use shift instructions.
13871 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
13872 Zeroable, Subtarget, DAG))
13873 return Shift;
13875 // Try to use byte rotation instructions.
13876 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
13877 Subtarget, DAG))
13878 return Rotate;
13880 // Use dedicated pack instructions for masks that match their pattern.
13881 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
13882 Subtarget))
13883 return V;
13885 // Try to use a zext lowering.
13886 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
13887 Zeroable, Subtarget, DAG))
13888 return ZExt;
13890 // See if we can use SSE4A Extraction / Insertion.
13891 if (Subtarget.hasSSE4A())
13892 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
13893 Zeroable, DAG))
13894 return V;
13896 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
13898 // For single-input shuffles, there are some nicer lowering tricks we can use.
13899 if (NumV2Elements == 0) {
13900 // Check for being able to broadcast a single element.
13901 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
13902 Mask, Subtarget, DAG))
13903 return Broadcast;
13905 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
13906 return V;
13908 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
13909 // Notably, this handles splat and partial-splat shuffles more efficiently.
13910 // However, it only makes sense if the pre-duplication shuffle simplifies
13911 // things significantly. Currently, this means we need to be able to
13912 // express the pre-duplication shuffle as an i16 shuffle.
13914 // FIXME: We should check for other patterns which can be widened into an
13915 // i16 shuffle as well.
13916 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
13917 for (int i = 0; i < 16; i += 2)
13918 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
13919 return false;
13921 return true;
13923 auto tryToWidenViaDuplication = [&]() -> SDValue {
13924 if (!canWidenViaDuplication(Mask))
13925 return SDValue();
13926 SmallVector<int, 4> LoInputs;
13927 copy_if(Mask, std::back_inserter(LoInputs),
13928 [](int M) { return M >= 0 && M < 8; });
13929 array_pod_sort(LoInputs.begin(), LoInputs.end());
13930 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
13931 LoInputs.end());
13932 SmallVector<int, 4> HiInputs;
13933 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
13934 array_pod_sort(HiInputs.begin(), HiInputs.end());
13935 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
13936 HiInputs.end());
13938 bool TargetLo = LoInputs.size() >= HiInputs.size();
13939 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
13940 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
13942 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
13943 SmallDenseMap<int, int, 8> LaneMap;
13944 for (int I : InPlaceInputs) {
13945 PreDupI16Shuffle[I/2] = I/2;
13946 LaneMap[I] = I;
13948 int j = TargetLo ? 0 : 4, je = j + 4;
13949 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
13950 // Check if j is already a shuffle of this input. This happens when
13951 // there are two adjacent bytes after we move the low one.
13952 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
13953 // If we haven't yet mapped the input, search for a slot into which
13954 // we can map it.
13955 while (j < je && PreDupI16Shuffle[j] >= 0)
13956 ++j;
13958 if (j == je)
13959 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
13960 return SDValue();
13962 // Map this input with the i16 shuffle.
13963 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
13966 // Update the lane map based on the mapping we ended up with.
13967 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
13969 V1 = DAG.getBitcast(
13970 MVT::v16i8,
13971 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
13972 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
13974 // Unpack the bytes to form the i16s that will be shuffled into place.
13975 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13976 MVT::v16i8, V1, V1);
13978 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
13979 for (int i = 0; i < 16; ++i)
13980 if (Mask[i] >= 0) {
13981 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
13982 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
13983 if (PostDupI16Shuffle[i / 2] < 0)
13984 PostDupI16Shuffle[i / 2] = MappedMask;
13985 else
13986 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
13987 "Conflicting entries in the original shuffle!");
13989 return DAG.getBitcast(
13990 MVT::v16i8,
13991 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
13992 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
13994 if (SDValue V = tryToWidenViaDuplication())
13995 return V;
13998 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
13999 Zeroable, Subtarget, DAG))
14000 return Masked;
14002 // Use dedicated unpack instructions for masks that match their pattern.
14003 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14004 return V;
14006 // Try to use byte shift instructions to mask.
14007 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14008 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14009 return V;
14011 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14012 // with PSHUFB. It is important to do this before we attempt to generate any
14013 // blends but after all of the single-input lowerings. If the single input
14014 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14015 // want to preserve that and we can DAG combine any longer sequences into
14016 // a PSHUFB in the end. But once we start blending from multiple inputs,
14017 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14018 // and there are *very* few patterns that would actually be faster than the
14019 // PSHUFB approach because of its ability to zero lanes.
14021 // FIXME: The only exceptions to the above are blends which are exact
14022 // interleavings with direct instructions supporting them. We currently don't
14023 // handle those well here.
14024 if (Subtarget.hasSSSE3()) {
14025 bool V1InUse = false;
14026 bool V2InUse = false;
14028 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14029 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14031 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14032 // do so. This avoids using them to handle blends-with-zero which is
14033 // important as a single pshufb is significantly faster for that.
14034 if (V1InUse && V2InUse) {
14035 if (Subtarget.hasSSE41())
14036 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14037 Zeroable, Subtarget, DAG))
14038 return Blend;
14040 // We can use an unpack to do the blending rather than an or in some
14041       // cases. Even though the OR may be (very slightly) more efficient, we
14042       // prefer this lowering because there are common cases where part of
14043 // the complexity of the shuffles goes away when we do the final blend as
14044 // an unpack.
14045 // FIXME: It might be worth trying to detect if the unpack-feeding
14046 // shuffles will both be pshufb, in which case we shouldn't bother with
14047 // this.
14048 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14049 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14050 return Unpack;
14052 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14053 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14054 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14056 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14057 // PALIGNR will be cheaper than the second PSHUFB+OR.
14058 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14059 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14060 return V;
14063 return PSHUFB;
14066 // There are special ways we can lower some single-element blends.
14067 if (NumV2Elements == 1)
14068 if (SDValue V = lowerShuffleAsElementInsertion(
14069 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14070 return V;
14072 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14073 return Blend;
14075 // Check whether a compaction lowering can be done. This handles shuffles
14076 // which take every Nth element for some even N. See the helper function for
14077 // details.
14079 // We special case these as they can be particularly efficiently handled with
14080   // the PACKUSWB instruction on x86 and they show up in common patterns of
14081 // rearranging bytes to truncate wide elements.
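// For example: with NumEvenDrops == 1 each input is ANDed with 0x00FF per i16
// lane and a single PACKUSWB produces the result; with NumEvenDrops == 2 the
// mask is 0x000000FF per i32 lane and the PACKUSWB step is applied twice.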
14082 bool IsSingleInput = V2.isUndef();
14083 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
14084 // NumEvenDrops is the power of two stride of the elements. Another way of
14085 // thinking about it is that we need to drop the even elements this many
14086 // times to get the original input.
14088 // First we need to zero all the dropped bytes.
14089 assert(NumEvenDrops <= 3 &&
14090 "No support for dropping even elements more than 3 times.");
14091 // We use the mask type to pick which bytes are preserved based on how many
14092 // elements are dropped.
14093 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
14094 SDValue ByteClearMask = DAG.getBitcast(
14095 MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
14096 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14097 if (!IsSingleInput)
14098 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14100 // Now pack things back together.
14101 V1 = DAG.getBitcast(MVT::v8i16, V1);
14102 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14103 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14104 for (int i = 1; i < NumEvenDrops; ++i) {
14105 Result = DAG.getBitcast(MVT::v8i16, Result);
14106 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14109 return Result;
14112 // Handle multi-input cases by blending single-input shuffles.
14113 if (NumV2Elements > 0)
14114 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14115 Subtarget, DAG);
14117 // The fallback path for single-input shuffles widens this into two v8i16
14118 // vectors with unpacks, shuffles those, and then pulls them back together
14119 // with a pack.
14120 SDValue V = V1;
14122 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14123 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14124 for (int i = 0; i < 16; ++i)
14125 if (Mask[i] >= 0)
14126 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14128 SDValue VLoHalf, VHiHalf;
14129 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14130 // them out and avoid using UNPCK{L,H} to extract the elements of V as
14131 // i16s.
14132 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14133 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14134 // Use a mask to drop the high bytes.
14135 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14136 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14137 DAG.getConstant(0x00FF, DL, MVT::v8i16));
14139 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14140 VHiHalf = DAG.getUNDEF(MVT::v8i16);
14142 // Squash the masks to point directly into VLoHalf.
14143 for (int &M : LoBlendMask)
14144 if (M >= 0)
14145 M /= 2;
14146 for (int &M : HiBlendMask)
14147 if (M >= 0)
14148 M /= 2;
14149 } else {
14150 // Otherwise just unpack the low half of V into VLoHalf and the high half into
14151 // VHiHalf so that we can blend them as i16s.
14152 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14154 VLoHalf = DAG.getBitcast(
14155 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14156 VHiHalf = DAG.getBitcast(
14157 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14160 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14161 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14163 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14166 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14168 /// This routine breaks down the specific type of 128-bit shuffle and
14169 /// dispatches to the lowering routines accordingly.
14170 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14171 MVT VT, SDValue V1, SDValue V2,
14172 const APInt &Zeroable,
14173 const X86Subtarget &Subtarget,
14174 SelectionDAG &DAG) {
14175 switch (VT.SimpleTy) {
14176 case MVT::v2i64:
14177 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14178 case MVT::v2f64:
14179 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14180 case MVT::v4i32:
14181 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14182 case MVT::v4f32:
14183 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14184 case MVT::v8i16:
14185 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14186 case MVT::v16i8:
14187 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14189 default:
14190 llvm_unreachable("Unimplemented!");
14194 /// Generic routine to split vector shuffle into half-sized shuffles.
14196 /// This routine just extracts two subvectors, shuffles them independently, and
14197 /// then concatenates them back together. This should work effectively with all
14198 /// AVX vector shuffle types.
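/// For example (v8f32, illustrative): the mask <0, 8, 1, 9, 6, 14, 7, 15>
/// splits into an UNPCKLPS of the two low 128-bit halves and an UNPCKHPS of
/// the two high halves, concatenated back together with CONCAT_VECTORS.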
14199 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14200 SDValue V2, ArrayRef<int> Mask,
14201 SelectionDAG &DAG) {
14202 assert(VT.getSizeInBits() >= 256 &&
14203 "Only for 256-bit or wider vector shuffles!");
14204 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14205 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14207 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14208 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14210 int NumElements = VT.getVectorNumElements();
14211 int SplitNumElements = NumElements / 2;
14212 MVT ScalarVT = VT.getVectorElementType();
14213 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
14215 // Rather than splitting build-vectors, just build two narrower build
14216 // vectors. This helps shuffling with splats and zeros.
14217 auto SplitVector = [&](SDValue V) {
14218 V = peekThroughBitcasts(V);
14220 MVT OrigVT = V.getSimpleValueType();
14221 int OrigNumElements = OrigVT.getVectorNumElements();
14222 int OrigSplitNumElements = OrigNumElements / 2;
14223 MVT OrigScalarVT = OrigVT.getVectorElementType();
14224 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14226 SDValue LoV, HiV;
14228 auto *BV = dyn_cast<BuildVectorSDNode>(V);
14229 if (!BV) {
14230 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14231 DAG.getIntPtrConstant(0, DL));
14232 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14233 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
14234 } else {
14236 SmallVector<SDValue, 16> LoOps, HiOps;
14237 for (int i = 0; i < OrigSplitNumElements; ++i) {
14238 LoOps.push_back(BV->getOperand(i));
14239 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14241 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14242 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14244 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14245 DAG.getBitcast(SplitVT, HiV));
14248 SDValue LoV1, HiV1, LoV2, HiV2;
14249 std::tie(LoV1, HiV1) = SplitVector(V1);
14250 std::tie(LoV2, HiV2) = SplitVector(V2);
14252 // Now create two 4-way blends of these half-width vectors.
14253 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14254 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14255 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14256 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14257 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14258 for (int i = 0; i < SplitNumElements; ++i) {
14259 int M = HalfMask[i];
14260 if (M >= NumElements) {
14261 if (M >= NumElements + SplitNumElements)
14262 UseHiV2 = true;
14263 else
14264 UseLoV2 = true;
14265 V2BlendMask[i] = M - NumElements;
14266 BlendMask[i] = SplitNumElements + i;
14267 } else if (M >= 0) {
14268 if (M >= SplitNumElements)
14269 UseHiV1 = true;
14270 else
14271 UseLoV1 = true;
14272 V1BlendMask[i] = M;
14273 BlendMask[i] = i;
14277 // Because the lowering happens after all combining takes place, we need to
14278 // manually combine these blend masks as much as possible so that we create
14279 // a minimal number of high-level vector shuffle nodes.
14281 // First try just blending the halves of V1 or V2.
14282 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14283 return DAG.getUNDEF(SplitVT);
14284 if (!UseLoV2 && !UseHiV2)
14285 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14286 if (!UseLoV1 && !UseHiV1)
14287 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14289 SDValue V1Blend, V2Blend;
14290 if (UseLoV1 && UseHiV1) {
14291 V1Blend =
14292 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14293 } else {
14294 // We only use half of V1 so map the usage down into the final blend mask.
14295 V1Blend = UseLoV1 ? LoV1 : HiV1;
14296 for (int i = 0; i < SplitNumElements; ++i)
14297 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14298 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14300 if (UseLoV2 && UseHiV2) {
14301 V2Blend =
14302 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14303 } else {
14304 // We only use half of V2 so map the usage down into the final blend mask.
14305 V2Blend = UseLoV2 ? LoV2 : HiV2;
14306 for (int i = 0; i < SplitNumElements; ++i)
14307 if (BlendMask[i] >= SplitNumElements)
14308 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14310 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14312 SDValue Lo = HalfBlend(LoMask);
14313 SDValue Hi = HalfBlend(HiMask);
14314 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14317 /// Either split a vector in halves or decompose the shuffles and the
14318 /// blend.
14320 /// This is provided as a good fallback for many lowerings of non-single-input
14321 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14322 /// between splitting the shuffle into 128-bit components and stitching those
14323 /// back together vs. extracting the single-input shuffles and blending those
14324 /// results.
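/// For example, a v8i32 mask such as <0, 8, 0, 8, 0, 8, 0, 8> repeats a
/// single element from each input, so it is lowered as two broadcasts plus a
/// blend rather than being split.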
14325 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14326 SDValue V2, ArrayRef<int> Mask,
14327 const X86Subtarget &Subtarget,
14328 SelectionDAG &DAG) {
14329 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14330 "shuffles as it could then recurse on itself.");
14331 int Size = Mask.size();
14333 // If this can be modeled as a broadcast of two elements followed by a blend,
14334 // prefer that lowering. This is especially important because broadcasts can
14335 // often fold with memory operands.
14336 auto DoBothBroadcast = [&] {
14337 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14338 for (int M : Mask)
14339 if (M >= Size) {
14340 if (V2BroadcastIdx < 0)
14341 V2BroadcastIdx = M - Size;
14342 else if (M - Size != V2BroadcastIdx)
14343 return false;
14344 } else if (M >= 0) {
14345 if (V1BroadcastIdx < 0)
14346 V1BroadcastIdx = M;
14347 else if (M != V1BroadcastIdx)
14348 return false;
14350 return true;
14352 if (DoBothBroadcast())
14353 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14354 Subtarget, DAG);
14356 // If the inputs all stem from a single 128-bit lane of each input, then we
14357 // split them rather than blending because the split will decompose to
14358 // unusually few instructions.
14359 int LaneCount = VT.getSizeInBits() / 128;
14360 int LaneSize = Size / LaneCount;
14361 SmallBitVector LaneInputs[2];
14362 LaneInputs[0].resize(LaneCount, false);
14363 LaneInputs[1].resize(LaneCount, false);
14364 for (int i = 0; i < Size; ++i)
14365 if (Mask[i] >= 0)
14366 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14367 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14368 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14370 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14371 // that the decomposed single-input shuffles don't end up here.
14372 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14373 DAG);
14376 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14377 /// a lane permutation followed by a per-lane permutation.
14379 /// This is mainly for cases where we can have non-repeating permutes
14380 /// in each lane.
14382 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14383 /// we should investigate merging them.
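///
/// Illustrative example (single-input v4f64 shuffle): for the mask
/// <2, 3, 1, 0> this builds the lane permutation mask <2, 3, 0, 1> (swap the
/// 128-bit lanes) followed by the per-lane permutation mask <0, 1, 3, 2>.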
14384 static SDValue lowerShuffleAsLanePermuteAndPermute(
14385 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14386 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14387 int NumElts = VT.getVectorNumElements();
14388 int NumLanes = VT.getSizeInBits() / 128;
14389 int NumEltsPerLane = NumElts / NumLanes;
14391 SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14392 SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14394 for (int i = 0; i != NumElts; ++i) {
14395 int M = Mask[i];
14396 if (M < 0)
14397 continue;
14399 // Ensure that each lane comes from a single source lane.
14400 int SrcLane = M / NumEltsPerLane;
14401 int DstLane = i / NumEltsPerLane;
14402 if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14403 return SDValue();
14404 SrcLaneMask[DstLane] = SrcLane;
14406 PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14409 // Make sure we set all elements of the lane mask, to avoid undef propagation.
14410 SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14411 for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14412 int SrcLane = SrcLaneMask[DstLane];
14413 if (0 <= SrcLane)
14414 for (int j = 0; j != NumEltsPerLane; ++j) {
14415 LaneMask[(DstLane * NumEltsPerLane) + j] =
14416 (SrcLane * NumEltsPerLane) + j;
14420 // If we're only shuffling a single lowest lane and the rest are identity
14421 // then don't bother.
14422 // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14423 int NumIdentityLanes = 0;
14424 bool OnlyShuffleLowestLane = true;
14425 for (int i = 0; i != NumLanes; ++i) {
14426 if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14427 i * NumEltsPerLane))
14428 NumIdentityLanes++;
14429 else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14430 OnlyShuffleLowestLane = false;
14432 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14433 return SDValue();
14435 SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14436 return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14439 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14440 /// a permutation and blend of those lanes.
14442 /// This essentially blends the out-of-lane inputs to each lane into the lane
14443 /// from a permuted copy of the vector. This lowering strategy results in four
14444 /// instructions in the worst case for a single-input cross lane shuffle which
14445 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
14446 /// of. Special cases for each particular shuffle pattern should be handled
14447 /// prior to trying this lowering.
14448 static SDValue lowerShuffleAsLanePermuteAndBlend(
14449 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14450 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14451 // FIXME: This should probably be generalized for 512-bit vectors as well.
14452 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14453 int Size = Mask.size();
14454 int LaneSize = Size / 2;
14456 // If there are only inputs from one 128-bit lane, splitting will in fact be
14457 // less expensive. The flags track whether the given lane contains an element
14458 // that crosses to another lane.
14459 if (!Subtarget.hasAVX2()) {
14460 bool LaneCrossing[2] = {false, false};
14461 for (int i = 0; i < Size; ++i)
14462 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
14463 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14464 if (!LaneCrossing[0] || !LaneCrossing[1])
14465 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14466 } else {
14467 bool LaneUsed[2] = {false, false};
14468 for (int i = 0; i < Size; ++i)
14469 if (Mask[i] >= 0)
14470 LaneUsed[(Mask[i] / LaneSize)] = true;
14471 if (!LaneUsed[0] || !LaneUsed[1])
14472 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14475 assert(V2.isUndef() &&
14476 "This last part of this routine only works on single input shuffles");
14478 SmallVector<int, 32> FlippedBlendMask(Size);
14479 for (int i = 0; i < Size; ++i)
14480 FlippedBlendMask[i] =
14481 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
14482 ? Mask[i]
14483 : Mask[i] % LaneSize +
14484 (i / LaneSize) * LaneSize + Size);
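// E.g. (illustrative single-input v4f64 case): for Mask <2, 0, 1, 3> the
// cross-lane elements in slots 0 and 2 are redirected into the flipped copy,
// giving FlippedBlendMask <4, 0, 7, 3>, so every element of the final
// shuffle below is an in-lane blend.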
14486 // Flip the vector, and blend the results which should now be in-lane.
14487 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14488 SDValue Flipped = DAG.getBitcast(PVT, V1);
14489 Flipped = DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT),
14490 { 2, 3, 0, 1 });
14491 Flipped = DAG.getBitcast(VT, Flipped);
14492 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
14495 /// Handle lowering 2-lane 128-bit shuffles.
14496 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14497 SDValue V2, ArrayRef<int> Mask,
14498 const APInt &Zeroable,
14499 const X86Subtarget &Subtarget,
14500 SelectionDAG &DAG) {
14501 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14502 if (Subtarget.hasAVX2() && V2.isUndef())
14503 return SDValue();
14505 SmallVector<int, 4> WidenedMask;
14506 if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
14507 return SDValue();
14509 bool IsLowZero = (Zeroable & 0x3) == 0x3;
14510 bool IsHighZero = (Zeroable & 0xc) == 0xc;
14512 // Try to use an insert into a zero vector.
14513 if (WidenedMask[0] == 0 && IsHighZero) {
14514 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14515 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14516 DAG.getIntPtrConstant(0, DL));
14517 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14518 getZeroVector(VT, Subtarget, DAG, DL), LoV,
14519 DAG.getIntPtrConstant(0, DL));
14522 // TODO: If minimizing size and one of the inputs is a zero vector and
14523 // the zero vector has only one use, we could use a VPERM2X128 to save the
14524 // instruction bytes needed to explicitly generate the zero vector.
14526 // Blends are faster and handle all the non-lane-crossing cases.
14527 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14528 Subtarget, DAG))
14529 return Blend;
14531 // If either input operand is a zero vector, use VPERM2X128 because its mask
14532 // allows us to replace the zero input with an implicit zero.
14533 if (!IsLowZero && !IsHighZero) {
14534 // Check for patterns which can be matched with a single insert of a 128-bit
14535 // subvector.
14536 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
14537 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
14539 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14540 // this will likely become vinsertf128 which can't fold a 256-bit memop.
14541 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14542 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14543 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14544 OnlyUsesV1 ? V1 : V2,
14545 DAG.getIntPtrConstant(0, DL));
14546 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14547 DAG.getIntPtrConstant(2, DL));
14551 // Try to use SHUF128 if possible.
14552 if (Subtarget.hasVLX()) {
14553 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14554 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14555 ((WidenedMask[1] % 2) << 1);
14556 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14557 DAG.getConstant(PermMask, DL, MVT::i8));
14562 // Otherwise form a 128-bit permutation. After accounting for undefs,
14563 // convert the 64-bit shuffle mask selection values into 128-bit
14564 // selection bits by dividing the indexes by 2 and shifting into positions
14565 // defined by a vperm2*128 instruction's immediate control byte.
14567 // The immediate permute control byte looks like this:
14568 // [1:0] - select 128 bits from sources for low half of destination
14569 // [2] - ignore
14570 // [3] - zero low half of destination
14571 // [5:4] - select 128 bits from sources for high half of destination
14572 // [6] - ignore
14573 // [7] - zero high half of destination
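//
// E.g. (illustrative): WidenedMask <1, 2> selects the high half of V1 for the
// low destination half and the low half of V2 for the high destination half,
// giving PermMask 0x21; a zeroable low half would instead set the 0x08 bit.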
14575 assert((WidenedMask[0] >= 0 || IsLowZero) &&
14576 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14578 unsigned PermMask = 0;
14579 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
14580 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
14582 // Check the immediate mask and replace unused sources with undef.
14583 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14584 V1 = DAG.getUNDEF(VT);
14585 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14586 V2 = DAG.getUNDEF(VT);
14588 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14589 DAG.getConstant(PermMask, DL, MVT::i8));
14592 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
14593 /// shuffling each lane.
14595 /// This attempts to create a repeated lane shuffle where each lane uses one
14596 /// or two of the lanes of the inputs. The lanes of the input vectors are
14597 /// shuffled in one or two independent shuffles to get the lanes into the
14598 /// position needed by the final shuffle.
14599 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14600 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14601 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14602 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14604 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14605 return SDValue();
14607 int Size = Mask.size();
14608 int NumLanes = VT.getSizeInBits() / 128;
14609 int LaneSize = 128 / VT.getScalarSizeInBits();
14610 SmallVector<int, 16> RepeatMask(LaneSize, -1);
14611 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14613 // First pass will try to fill in the RepeatMask from lanes that need two
14614 // sources.
14615 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14616 int Srcs[2] = { -1, -1 };
14617 SmallVector<int, 16> InLaneMask(LaneSize, -1);
14618 for (int i = 0; i != LaneSize; ++i) {
14619 int M = Mask[(Lane * LaneSize) + i];
14620 if (M < 0)
14621 continue;
14622 // Determine which of the possible input lanes (NumLanes from each source)
14623 // this element comes from. Assign that as one of the sources for this
14624 // lane. We can assign up to 2 sources for this lane. If we run out of
14625 // sources we can't do anything.
14626 int LaneSrc = M / LaneSize;
14627 int Src;
14628 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14629 Src = 0;
14630 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14631 Src = 1;
14632 else
14633 return SDValue();
14635 Srcs[Src] = LaneSrc;
14636 InLaneMask[i] = (M % LaneSize) + Src * Size;
14639 // If this lane has two sources, see if it fits with the repeat mask so far.
14640 if (Srcs[1] < 0)
14641 continue;
14643 LaneSrcs[Lane][0] = Srcs[0];
14644 LaneSrcs[Lane][1] = Srcs[1];
14646 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14647 assert(M1.size() == M2.size() && "Unexpected mask size");
14648 for (int i = 0, e = M1.size(); i != e; ++i)
14649 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14650 return false;
14651 return true;
14654 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14655 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14656 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14657 int M = Mask[i];
14658 if (M < 0)
14659 continue;
14660 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14661 "Unexpected mask element");
14662 MergedMask[i] = M;
14666 if (MatchMasks(InLaneMask, RepeatMask)) {
14667 // Merge this lane mask into the final repeat mask.
14668 MergeMasks(InLaneMask, RepeatMask);
14669 continue;
14672 // Didn't find a match. Swap the operands and try again.
14673 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
14674 ShuffleVectorSDNode::commuteMask(InLaneMask);
14676 if (MatchMasks(InLaneMask, RepeatMask)) {
14677 // Merge this lane mask into the final repeat mask.
14678 MergeMasks(InLaneMask, RepeatMask);
14679 continue;
14682 // Couldn't find a match with the operands in either order.
14683 return SDValue();
14686 // Now handle any lanes with only one source.
14687 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14688 // If this lane has already been processed, skip it.
14689 if (LaneSrcs[Lane][0] >= 0)
14690 continue;
14692 for (int i = 0; i != LaneSize; ++i) {
14693 int M = Mask[(Lane * LaneSize) + i];
14694 if (M < 0)
14695 continue;
14697 // If RepeatMask isn't defined yet we can define it ourselves.
14698 if (RepeatMask[i] < 0)
14699 RepeatMask[i] = M % LaneSize;
14701 if (RepeatMask[i] < Size) {
14702 if (RepeatMask[i] != M % LaneSize)
14703 return SDValue();
14704 LaneSrcs[Lane][0] = M / LaneSize;
14705 } else {
14706 if (RepeatMask[i] != ((M % LaneSize) + Size))
14707 return SDValue();
14708 LaneSrcs[Lane][1] = M / LaneSize;
14712 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
14713 return SDValue();
14716 SmallVector<int, 16> NewMask(Size, -1);
14717 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14718 int Src = LaneSrcs[Lane][0];
14719 for (int i = 0; i != LaneSize; ++i) {
14720 int M = -1;
14721 if (Src >= 0)
14722 M = Src * LaneSize + i;
14723 NewMask[Lane * LaneSize + i] = M;
14726 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
14727 // Ensure we didn't get back the shuffle we started with.
14728 // FIXME: This is a hack to make up for some splat handling code in
14729 // getVectorShuffle.
14730 if (isa<ShuffleVectorSDNode>(NewV1) &&
14731 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
14732 return SDValue();
14734 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14735 int Src = LaneSrcs[Lane][1];
14736 for (int i = 0; i != LaneSize; ++i) {
14737 int M = -1;
14738 if (Src >= 0)
14739 M = Src * LaneSize + i;
14740 NewMask[Lane * LaneSize + i] = M;
14743 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
14744 // Ensure we didn't get back the shuffle we started with.
14745 // FIXME: This is a hack to make up for some splat handling code in
14746 // getVectorShuffle.
14747 if (isa<ShuffleVectorSDNode>(NewV2) &&
14748 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
14749 return SDValue();
14751 for (int i = 0; i != Size; ++i) {
14752 NewMask[i] = RepeatMask[i % LaneSize];
14753 if (NewMask[i] < 0)
14754 continue;
14756 NewMask[i] += (i / LaneSize) * LaneSize;
14758 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
14761 /// If the input shuffle mask results in a vector that is undefined in all upper
14762 /// or lower half elements and that mask accesses only 2 halves of the
14763 /// shuffle's operands, return true. A mask of half the width with mask indexes
14764 /// adjusted to access the extracted halves of the original shuffle operands is
14765 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
14766 /// lower half of each input operand is accessed.
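///
/// Illustrative example: for the 8-element mask <0, 12, 3, 14, u, u, u, u>
/// the upper half is undef, HalfIdx1 becomes 0 (lower half of V1), HalfIdx2
/// becomes 3 (upper half of V2) and HalfMask becomes <0, 4, 3, 6>.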
14767 static bool
14768 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
14769 int &HalfIdx1, int &HalfIdx2) {
14770 assert((Mask.size() == HalfMask.size() * 2) &&
14771 "Expected input mask to be twice as long as output");
14773 // Exactly one half of the result must be undef to allow narrowing.
14774 bool UndefLower = isUndefLowerHalf(Mask);
14775 bool UndefUpper = isUndefUpperHalf(Mask);
14776 if (UndefLower == UndefUpper)
14777 return false;
14779 unsigned HalfNumElts = HalfMask.size();
14780 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
14781 HalfIdx1 = -1;
14782 HalfIdx2 = -1;
14783 for (unsigned i = 0; i != HalfNumElts; ++i) {
14784 int M = Mask[i + MaskIndexOffset];
14785 if (M < 0) {
14786 HalfMask[i] = M;
14787 continue;
14790 // Determine which of the 4 half vectors this element is from.
14791 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
14792 int HalfIdx = M / HalfNumElts;
14794 // Determine the element index into its half vector source.
14795 int HalfElt = M % HalfNumElts;
14797 // We can shuffle with up to 2 half vectors, set the new 'half'
14798 // shuffle mask accordingly.
14799 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
14800 HalfMask[i] = HalfElt;
14801 HalfIdx1 = HalfIdx;
14802 continue;
14804 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
14805 HalfMask[i] = HalfElt + HalfNumElts;
14806 HalfIdx2 = HalfIdx;
14807 continue;
14810 // Too many half vectors referenced.
14811 return false;
14814 return true;
14817 /// Given the output values from getHalfShuffleMask(), create a half width
14818 /// shuffle of extracted vectors followed by an insert back to full width.
14819 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
14820 ArrayRef<int> HalfMask, int HalfIdx1,
14821 int HalfIdx2, bool UndefLower,
14822 SelectionDAG &DAG) {
14823 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
14824 assert(V1.getValueType().isSimple() && "Expecting only simple types");
14826 MVT VT = V1.getSimpleValueType();
14827 unsigned NumElts = VT.getVectorNumElements();
14828 unsigned HalfNumElts = NumElts / 2;
14829 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
14831 auto getHalfVector = [&](int HalfIdx) {
14832 if (HalfIdx < 0)
14833 return DAG.getUNDEF(HalfVT);
14834 SDValue V = (HalfIdx < 2 ? V1 : V2);
14835 HalfIdx = (HalfIdx % 2) * HalfNumElts;
14836 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
14837 DAG.getIntPtrConstant(HalfIdx, DL));
14840 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
14841 SDValue Half1 = getHalfVector(HalfIdx1);
14842 SDValue Half2 = getHalfVector(HalfIdx2);
14843 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
14844 unsigned Offset = UndefLower ? HalfNumElts : 0;
14845 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
14846 DAG.getIntPtrConstant(Offset, DL));
14849 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
14850 /// This allows for fast cases such as subvector extraction/insertion
14851 /// or shuffling smaller vector types which can lower more efficiently.
14852 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
14853 SDValue V2, ArrayRef<int> Mask,
14854 const X86Subtarget &Subtarget,
14855 SelectionDAG &DAG) {
14856 assert((VT.is256BitVector() || VT.is512BitVector()) &&
14857 "Expected 256-bit or 512-bit vector");
14859 bool UndefLower = isUndefLowerHalf(Mask);
14860 if (!UndefLower && !isUndefUpperHalf(Mask))
14861 return SDValue();
14863 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
14864 "Completely undef shuffle mask should have been simplified already");
14866 // Upper half is undef and lower half is whole upper subvector.
14867 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
14868 unsigned NumElts = VT.getVectorNumElements();
14869 unsigned HalfNumElts = NumElts / 2;
14870 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
14871 if (!UndefLower &&
14872 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
14873 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
14874 DAG.getIntPtrConstant(HalfNumElts, DL));
14875 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
14876 DAG.getIntPtrConstant(0, DL));
14879 // Lower half is undef and upper half is whole lower subvector.
14880 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
14881 if (UndefLower &&
14882 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
14883 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
14884 DAG.getIntPtrConstant(0, DL));
14885 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
14886 DAG.getIntPtrConstant(HalfNumElts, DL));
14889 int HalfIdx1, HalfIdx2;
14890 SmallVector<int, 8> HalfMask(HalfNumElts);
14891 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
14892 return SDValue();
14894 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
14896 // Only shuffle the halves of the inputs when useful.
14897 unsigned NumLowerHalves =
14898 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
14899 unsigned NumUpperHalves =
14900 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
14901 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
14903 // Determine the larger pattern of undef/halves, then decide if it's worth
14904 // splitting the shuffle based on subtarget capabilities and types.
14905 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
14906 if (!UndefLower) {
14907 // XXXXuuuu: no insert is needed.
14908 // Always extract lowers when setting lower - these are all free subreg ops.
14909 if (NumUpperHalves == 0)
14910 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
14911 UndefLower, DAG);
14913 if (NumUpperHalves == 1) {
14914 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
14915 if (Subtarget.hasAVX2()) {
14916 // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
14917 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
14918 !is128BitUnpackShuffleMask(HalfMask) &&
14919 (!isSingleSHUFPSMask(HalfMask) ||
14920 Subtarget.hasFastVariableShuffle()))
14921 return SDValue();
14922 // If this is a unary shuffle (assume that the 2nd operand is
14923 // canonicalized to undef), then we can use vpermpd. Otherwise, we
14924 // are better off extracting the upper half of 1 operand and using a
14925 // narrow shuffle.
14926 if (EltWidth == 64 && V2.isUndef())
14927 return SDValue();
14929 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
14930 if (Subtarget.hasAVX512() && VT.is512BitVector())
14931 return SDValue();
14932 // Extract + narrow shuffle is better than the wide alternative.
14933 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
14934 UndefLower, DAG);
14937 // Don't extract both uppers, instead shuffle and then extract.
14938 assert(NumUpperHalves == 2 && "Half vector count went wrong");
14939 return SDValue();
14942 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
14943 if (NumUpperHalves == 0) {
14944 // AVX2 has efficient 64-bit element cross-lane shuffles.
14945 // TODO: Refine to account for unary shuffle, splat, and other masks?
14946 if (Subtarget.hasAVX2() && EltWidth == 64)
14947 return SDValue();
14948 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
14949 if (Subtarget.hasAVX512() && VT.is512BitVector())
14950 return SDValue();
14951 // Narrow shuffle + insert is better than the wide alternative.
14952 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
14953 UndefLower, DAG);
14956 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
14957 return SDValue();
14960 /// Test whether the specified input (0 or 1) is in-place blended by the
14961 /// given mask.
14963 /// This returns true if the elements from a particular input are already in the
14964 /// slot required by the given mask and require no permutation.
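///
/// E.g. for a 4-element mask <0, 5, 2, 7> both inputs are in place, whereas
/// for <1, 5, 2, 7> input 0 is not, since element 1 of V1 would have to move
/// into slot 0.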
14965 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
14966 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
14967 int Size = Mask.size();
14968 for (int i = 0; i < Size; ++i)
14969 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
14970 return false;
14972 return true;
14975 /// Handle case where shuffle sources are coming from the same 128-bit lane and
14976 /// every lane can be represented as the same repeating mask - allowing us to
14977 /// shuffle the sources with the repeating shuffle and then permute the result
14978 /// to the destination lanes.
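///
/// Illustrative example (AVX2 v8i32 shuffle, so 64-bit sub-lanes are allowed):
/// the cross-lane mask <5, 4, 7, 6, 1, 0, 3, 2> becomes the in-lane repeating
/// shuffle <1, 0, 3, 2, 5, 4, 7, 6> followed by the sub-lane permutation
/// <4, 5, 6, 7, 0, 1, 2, 3>.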
14979 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
14980 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14981 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14982 int NumElts = VT.getVectorNumElements();
14983 int NumLanes = VT.getSizeInBits() / 128;
14984 int NumLaneElts = NumElts / NumLanes;
14986 // On AVX2 we may be able to just shuffle the lowest elements and then
14987 // broadcast the result.
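// E.g. (illustrative v8i32 case): the mask <1, 0, 1, 0, 1, 0, 1, 0> first
// shuffles <1, 0> into the lowest 64 bits and then broadcasts that 64-bit
// element with the mask <0, 1, 0, 1, 0, 1, 0, 1>.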
14988 if (Subtarget.hasAVX2()) {
14989 for (unsigned BroadcastSize : {16, 32, 64}) {
14990 if (BroadcastSize <= VT.getScalarSizeInBits())
14991 continue;
14992 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
14994 // Attempt to match a repeating pattern every NumBroadcastElts,
14995 // accounting for UNDEFs, where the pattern references only the lowest
14996 // 128-bit lane of the inputs.
14997 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
14998 for (int i = 0; i != NumElts; i += NumBroadcastElts)
14999 for (int j = 0; j != NumBroadcastElts; ++j) {
15000 int M = Mask[i + j];
15001 if (M < 0)
15002 continue;
15003 int &R = RepeatMask[j];
15004 if (0 != ((M % NumElts) / NumLaneElts))
15005 return false;
15006 if (0 <= R && R != M)
15007 return false;
15008 R = M;
15010 return true;
15013 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15014 if (!FindRepeatingBroadcastMask(RepeatMask))
15015 continue;
15017 // Shuffle the (lowest) repeated elements in place for broadcast.
15018 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15020 // Shuffle the actual broadcast.
15021 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15022 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15023 for (int j = 0; j != NumBroadcastElts; ++j)
15024 BroadcastMask[i + j] = j;
15025 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15026 BroadcastMask);
15030 // Bail if the shuffle mask doesn't cross 128-bit lanes.
15031 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15032 return SDValue();
15034 // Bail if we already have a repeated lane shuffle mask.
15035 SmallVector<int, 8> RepeatedShuffleMask;
15036 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15037 return SDValue();
15039 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15040 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15041 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15042 int NumSubLanes = NumLanes * SubLaneScale;
15043 int NumSubLaneElts = NumLaneElts / SubLaneScale;
15045 // Check that all the sources are coming from the same lane and see if we can
15046 // form a repeating shuffle mask (local to each sub-lane). At the same time,
15047 // determine the source sub-lane for each destination sub-lane.
15048 int TopSrcSubLane = -1;
15049 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15050 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15051 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15052 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
15054 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15055 // Extract the sub-lane mask, check that it all comes from the same lane
15056 // and normalize the mask entries to come from the first lane.
15057 int SrcLane = -1;
15058 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15059 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15060 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15061 if (M < 0)
15062 continue;
15063 int Lane = (M % NumElts) / NumLaneElts;
15064 if ((0 <= SrcLane) && (SrcLane != Lane))
15065 return SDValue();
15066 SrcLane = Lane;
15067 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15068 SubLaneMask[Elt] = LocalM;
15071 // Whole sub-lane is UNDEF.
15072 if (SrcLane < 0)
15073 continue;
15075 // Attempt to match against the candidate repeated sub-lane masks.
15076 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15077 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15078 for (int i = 0; i != NumSubLaneElts; ++i) {
15079 if (M1[i] < 0 || M2[i] < 0)
15080 continue;
15081 if (M1[i] != M2[i])
15082 return false;
15084 return true;
15087 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15088 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15089 continue;
15091 // Merge the sub-lane mask into the matching repeated sub-lane mask.
15092 for (int i = 0; i != NumSubLaneElts; ++i) {
15093 int M = SubLaneMask[i];
15094 if (M < 0)
15095 continue;
15096 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15097 "Unexpected mask element");
15098 RepeatedSubLaneMask[i] = M;
15101 // Track the topmost source sub-lane - by setting the remaining to UNDEF
15102 // we can greatly simplify shuffle matching.
15103 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15104 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15105 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15106 break;
15109 // Bail if we failed to find a matching repeated sub-lane mask.
15110 if (Dst2SrcSubLanes[DstSubLane] < 0)
15111 return SDValue();
15113 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15114 "Unexpected source lane");
15116 // Create a repeating shuffle mask for the entire vector.
15117 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15118 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15119 int Lane = SubLane / SubLaneScale;
15120 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15121 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15122 int M = RepeatedSubLaneMask[Elt];
15123 if (M < 0)
15124 continue;
15125 int Idx = (SubLane * NumSubLaneElts) + Elt;
15126 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15129 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15131 // Shuffle each source sub-lane to its destination.
15132 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15133 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15134 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15135 if (SrcSubLane < 0)
15136 continue;
15137 for (int j = 0; j != NumSubLaneElts; ++j)
15138 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15141 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15142 SubLaneMask);
15145 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15146 unsigned &ShuffleImm, ArrayRef<int> Mask) {
15147 int NumElts = VT.getVectorNumElements();
15148 assert(VT.getScalarSizeInBits() == 64 &&
15149 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15150 "Unexpected data type for VSHUFPD");
15152 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ...
15153 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ...
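// E.g. (illustrative v4f64 case): the mask <0, 5, 2, 7> matches directly and
// yields ShuffleImm 0b1010, while <4, 1, 6, 3> only matches after commuting
// V1 and V2 (same immediate).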
15154 ShuffleImm = 0;
15155 bool ShufpdMask = true;
15156 bool CommutableMask = true;
15157 for (int i = 0; i < NumElts; ++i) {
15158 if (Mask[i] == SM_SentinelUndef)
15159 continue;
15160 if (Mask[i] < 0)
15161 return false;
15162 int Val = (i & 6) + NumElts * (i & 1);
15163 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15164 if (Mask[i] < Val || Mask[i] > Val + 1)
15165 ShufpdMask = false;
15166 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15167 CommutableMask = false;
15168 ShuffleImm |= (Mask[i] % 2) << i;
15171 if (ShufpdMask)
15172 return true;
15173 if (CommutableMask) {
15174 std::swap(V1, V2);
15175 return true;
15178 return false;
15181 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
15182 ArrayRef<int> Mask, SDValue V1,
15183 SDValue V2, SelectionDAG &DAG) {
15184 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15185 "Unexpected data type for VSHUFPD");
15187 unsigned Immediate = 0;
15188 if (!matchShuffleWithSHUFPD(VT, V1, V2, Immediate, Mask))
15189 return SDValue();
15191 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15192 DAG.getConstant(Immediate, DL, MVT::i8));
15195 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15197 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15198 /// isn't available.
15199 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15200 const APInt &Zeroable, SDValue V1, SDValue V2,
15201 const X86Subtarget &Subtarget,
15202 SelectionDAG &DAG) {
15203 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15204 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15205 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15207 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15208 Subtarget, DAG))
15209 return V;
15211 if (V2.isUndef()) {
15212 // Check for being able to broadcast a single element.
15213 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15214 Mask, Subtarget, DAG))
15215 return Broadcast;
15217 // Use low duplicate instructions for masks that match their pattern.
15218 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15219 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15221 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15222 // Non-half-crossing single input shuffles can be lowered with an
15223 // interleaved permutation.
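// E.g. (illustrative): the in-lane mask <1, 0, 3, 2> swaps the two doubles
// within each 128-bit lane and yields VPERMILPMask 0b0101.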
15224 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15225 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15226 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15227 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
15230 // With AVX2 we have direct support for this permutation.
15231 if (Subtarget.hasAVX2())
15232 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15233 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15235 // Try to create an in-lane repeating shuffle mask and then shuffle the
15236 // results into the target lanes.
15237 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15238 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15239 return V;
15241 // Try to permute the lanes and then use a per-lane permute.
15242 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15243 Mask, DAG, Subtarget))
15244 return V;
15246 // Otherwise, fall back.
15247 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask, DAG,
15248 Subtarget);
15251 // Use dedicated unpack instructions for masks that match their pattern.
15252 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15253 return V;
15255 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15256 Zeroable, Subtarget, DAG))
15257 return Blend;
15259 // Check if the blend happens to exactly fit that of SHUFPD.
15260 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
15261 return Op;
15263 // If we have one input in place, then we can permute the other input and
15264 // blend the result.
15265 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15266 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15267 Subtarget, DAG);
15269 // Try to create an in-lane repeating shuffle mask and then shuffle the
15270 // results into the target lanes.
15271 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15272 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15273 return V;
15275 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15276 // shuffle. However, if we have AVX2 and either input is already in place,
15277 // we will be able to shuffle the other input even across lanes in a single
15278 // instruction, so skip this pattern.
15279 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15280 isShuffleMaskInputInPlace(1, Mask))))
15281 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15282 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15283 return V;
15285 // If we have VLX support, we can use VEXPAND.
15286 if (Subtarget.hasVLX())
15287 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15288 DAG, Subtarget))
15289 return V;
15291 // If we have AVX2 then we always want to lower with a blend because at v4 we
15292 // can fully permute the elements.
15293 if (Subtarget.hasAVX2())
15294 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15295 Subtarget, DAG);
15297 // Otherwise fall back on generic lowering.
15298 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15299 Subtarget, DAG);
15302 /// Handle lowering of 4-lane 64-bit integer shuffles.
15304 /// This routine is only called when we have AVX2 and thus a reasonable
15305 /// instruction set for v4i64 shuffling.
15306 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15307 const APInt &Zeroable, SDValue V1, SDValue V2,
15308 const X86Subtarget &Subtarget,
15309 SelectionDAG &DAG) {
15310 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15311 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15312 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15313 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15315 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15316 Subtarget, DAG))
15317 return V;
15319 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15320 Zeroable, Subtarget, DAG))
15321 return Blend;
15323 // Check for being able to broadcast a single element.
15324 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15325 Subtarget, DAG))
15326 return Broadcast;
15328 if (V2.isUndef()) {
15329 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
15330 // can use lower latency instructions that will operate on both lanes.
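// E.g. (illustrative): the v4i64 mask <1, 0, 3, 2> has the repeated 128-bit
// lane mask <1, 0>, which scales to the v8i32 PSHUFD mask <2, 3, 0, 1>
// (immediate 0x4E).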
15331 SmallVector<int, 2> RepeatedMask;
15332 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15333 SmallVector<int, 4> PSHUFDMask;
15334 scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15335 return DAG.getBitcast(
15336 MVT::v4i64,
15337 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15338 DAG.getBitcast(MVT::v8i32, V1),
15339 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15342 // AVX2 provides a direct instruction for permuting a single input across
15343 // lanes.
15344 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15345 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15348 // Try to use shift instructions.
15349 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15350 Zeroable, Subtarget, DAG))
15351 return Shift;
15353 // If we have VLX support, we can use VALIGN or VEXPAND.
15354 if (Subtarget.hasVLX()) {
15355 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15356 Subtarget, DAG))
15357 return Rotate;
15359 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15360 DAG, Subtarget))
15361 return V;
15364 // Try to use PALIGNR.
15365 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15366 Subtarget, DAG))
15367 return Rotate;
15369 // Use dedicated unpack instructions for masks that match their pattern.
15370 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15371 return V;
15373 // If we have one input in place, then we can permute the other input and
15374 // blend the result.
15375 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15376 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15377 Subtarget, DAG);
15379 // Try to create an in-lane repeating shuffle mask and then shuffle the
15380 // results into the target lanes.
15381 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15382 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15383 return V;
15385 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15386 // shuffle. However, if we have AVX2 and either input is already in place,
15387 // we will be able to shuffle the other input even across lanes in a single
15388 // instruction, so skip this pattern.
15389 if (!isShuffleMaskInputInPlace(0, Mask) &&
15390 !isShuffleMaskInputInPlace(1, Mask))
15391 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15392 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15393 return Result;
15395 // Otherwise fall back on generic blend lowering.
15396 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15397 Subtarget, DAG);
15400 /// Handle lowering of 8-lane 32-bit floating point shuffles.
15402 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15403 /// isn't available.
15404 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15405 const APInt &Zeroable, SDValue V1, SDValue V2,
15406 const X86Subtarget &Subtarget,
15407 SelectionDAG &DAG) {
15408 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15409 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15410 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15412 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15413 Zeroable, Subtarget, DAG))
15414 return Blend;
15416 // Check for being able to broadcast a single element.
15417 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15418 Subtarget, DAG))
15419 return Broadcast;
15421 // If the shuffle mask is repeated in each 128-bit lane, we have many more
15422 // options to efficiently lower the shuffle.
15423 SmallVector<int, 4> RepeatedMask;
15424 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15425 assert(RepeatedMask.size() == 4 &&
15426 "Repeated masks must be half the mask width!");
15428 // Use even/odd duplicate instructions for masks that match their pattern.
15429 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
15430 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15431 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
15432 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15434 if (V2.isUndef())
15435 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15436 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15438 // Use dedicated unpack instructions for masks that match their pattern.
15439 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15440 return V;
15442 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15443 // have already handled any direct blends.
15444 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15447 // Try to create an in-lane repeating shuffle mask and then shuffle the
15448 // results into the target lanes.
15449 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15450 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15451 return V;
15453 // If we have a single input shuffle with different shuffle patterns in the
15454 // two 128-bit lanes use the variable mask to VPERMILPS.
15455 if (V2.isUndef()) {
15456 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15457 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
15458 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15460 if (Subtarget.hasAVX2())
15461 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
15463 // Otherwise, fall back.
15464 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
15465 DAG, Subtarget);
15468 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15469 // shuffle.
15470 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15471 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15472 return Result;
15474 // If we have VLX support, we can use VEXPAND.
15475 if (Subtarget.hasVLX())
15476 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
15477 DAG, Subtarget))
15478 return V;
15480 // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern, try
15481 // to split, since after the split we get more efficient code using
15482 // vpunpcklwd and vpunpckhwd than with vblend.
15483 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
15484 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15485 Subtarget, DAG))
15486 return V;
15488 // If we have AVX2 then we always want to lower with a blend because at v8 we
15489 // can fully permute the elements.
15490 if (Subtarget.hasAVX2())
15491 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
15492 Subtarget, DAG);
15494 // Otherwise fall back on generic lowering.
15495 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15496 Subtarget, DAG);
15499 /// Handle lowering of 8-lane 32-bit integer shuffles.
15501 /// This routine is only called when we have AVX2 and thus a reasonable
15502 /// instruction set for v8i32 shuffling.
15503 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15504 const APInt &Zeroable, SDValue V1, SDValue V2,
15505 const X86Subtarget &Subtarget,
15506 SelectionDAG &DAG) {
15507 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15508 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15509 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15510 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
15512 // Whenever we can lower this as a zext, that instruction is strictly faster
15513 // than any alternative. It also allows us to fold memory operands into the
15514 // shuffle in many cases.
15515 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15516 Zeroable, Subtarget, DAG))
15517 return ZExt;
15519 // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern, try
15520 // to split, since after the split we get more efficient code than vblend by
15521 // using vpunpcklwd and vpunpckhwd.
15522 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
15523 !Subtarget.hasAVX512())
15524 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
15525 Subtarget, DAG))
15526 return V;
15528 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
15529 Zeroable, Subtarget, DAG))
15530 return Blend;
15532 // Check for being able to broadcast a single element.
15533 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
15534 Subtarget, DAG))
15535 return Broadcast;
15537 // If the shuffle mask is repeated in each 128-bit lane we can use more
15538 // efficient instructions that mirror the shuffles across the two 128-bit
15539 // lanes.
15540 SmallVector<int, 4> RepeatedMask;
15541 bool Is128BitLaneRepeatedShuffle =
15542 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
15543 if (Is128BitLaneRepeatedShuffle) {
15544 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
15545 if (V2.isUndef())
15546 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
15547 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15549 // Use dedicated unpack instructions for masks that match their pattern.
15550 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
15551 return V;
15554 // Try to use shift instructions.
15555 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
15556 Zeroable, Subtarget, DAG))
15557 return Shift;
15559 // If we have VLX support, we can use VALIGN or EXPAND.
15560 if (Subtarget.hasVLX()) {
15561 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
15562 Subtarget, DAG))
15563 return Rotate;
15565 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
15566 DAG, Subtarget))
15567 return V;
15570 // Try to use byte rotation instructions.
15571 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
15572 Subtarget, DAG))
15573 return Rotate;
15575 // Try to create an in-lane repeating shuffle mask and then shuffle the
15576 // results into the target lanes.
15577 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15578 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15579 return V;
15581 // If the shuffle patterns aren't repeated but it is a single input, directly
15582 // generate a cross-lane VPERMD instruction.
15583 if (V2.isUndef()) {
15584 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15585 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
15588 // Assume that a single SHUFPS is faster than an alternative sequence of
15589 // multiple instructions (even if the CPU has a domain penalty).
15590 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
15591 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
15592 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
15593 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
15594 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
15595 CastV1, CastV2, DAG);
15596 return DAG.getBitcast(MVT::v8i32, ShufPS);
15599 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15600 // shuffle.
15601 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15602 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15603 return Result;
15605 // Otherwise fall back on generic blend lowering.
15606 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
15607 Subtarget, DAG);
15610 /// Handle lowering of 16-lane 16-bit integer shuffles.
15612 /// This routine is only called when we have AVX2 and thus a reasonable
15613 /// instruction set for v16i16 shuffling.
15614 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15615 const APInt &Zeroable, SDValue V1, SDValue V2,
15616 const X86Subtarget &Subtarget,
15617 SelectionDAG &DAG) {
15618 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15619 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15620 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
15621 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
15623 // Whenever we can lower this as a zext, that instruction is strictly faster
15624 // than any alternative. It also allows us to fold memory operands into the
15625 // shuffle in many cases.
15626 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
15627 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
15628 return ZExt;
15630 // Check for being able to broadcast a single element.
15631 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
15632 Subtarget, DAG))
15633 return Broadcast;
15635 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
15636 Zeroable, Subtarget, DAG))
15637 return Blend;
15639 // Use dedicated unpack instructions for masks that match their pattern.
15640 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
15641 return V;
15643 // Use dedicated pack instructions for masks that match their pattern.
15644 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
15645 Subtarget))
15646 return V;
15648 // Try to use shift instructions.
15649 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
15650 Zeroable, Subtarget, DAG))
15651 return Shift;
15653 // Try to use byte rotation instructions.
15654 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
15655 Subtarget, DAG))
15656 return Rotate;
15658 // Try to create an in-lane repeating shuffle mask and then shuffle the
15659 // results into the target lanes.
15660 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15661 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
15662 return V;
15664 if (V2.isUndef()) {
15665 // There are no generalized cross-lane shuffle operations available on i16
15666 // element types.
15667 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
15668 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15669 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
15670 return V;
15672 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2, Mask,
15673 DAG, Subtarget);
15676 SmallVector<int, 8> RepeatedMask;
15677 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
15678 // As this is a single-input shuffle, the repeated mask should be
15679 // a strictly valid v8i16 mask that we can pass through to the v8i16
15680 // lowering to handle even the v16 case.
15681 return lowerV8I16GeneralSingleInputShuffle(
15682 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
15686 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
15687 Zeroable, Subtarget, DAG))
15688 return PSHUFB;
15690 // AVX512BWVL can lower to VPERMW.
15691 if (Subtarget.hasBWI() && Subtarget.hasVLX())
15692 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
15694 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15695 // shuffle.
15696 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15697 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
15698 return Result;
15700 // Try to permute the lanes and then use a per-lane permute.
15701 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15702 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
15703 return V;
15705 // Otherwise fall back on generic lowering.
15706 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
15707 Subtarget, DAG);
15710 /// Handle lowering of 32-lane 8-bit integer shuffles.
15712 /// This routine is only called when we have AVX2 and thus a reasonable
15713 /// instruction set for v32i8 shuffling.
15714 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15715 const APInt &Zeroable, SDValue V1, SDValue V2,
15716 const X86Subtarget &Subtarget,
15717 SelectionDAG &DAG) {
15718 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
15719 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
15720 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
15721 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
15723 // Whenever we can lower this as a zext, that instruction is strictly faster
15724 // than any alternative. It also allows us to fold memory operands into the
15725 // shuffle in many cases.
15726 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
15727 Zeroable, Subtarget, DAG))
15728 return ZExt;
15730 // Check for being able to broadcast a single element.
15731 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
15732 Subtarget, DAG))
15733 return Broadcast;
15735 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
15736 Zeroable, Subtarget, DAG))
15737 return Blend;
15739 // Use dedicated unpack instructions for masks that match their pattern.
15740 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
15741 return V;
15743 // Use dedicated pack instructions for masks that match their pattern.
15744 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
15745 Subtarget))
15746 return V;
15748 // Try to use shift instructions.
15749 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
15750 Zeroable, Subtarget, DAG))
15751 return Shift;
15753 // Try to use byte rotation instructions.
15754 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
15755 Subtarget, DAG))
15756 return Rotate;
15758 // Try to create an in-lane repeating shuffle mask and then shuffle the
15759 // results into the target lanes.
15760 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15761 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
15762 return V;
15764 // There are no generalized cross-lane shuffle operations available on i8
15765 // element types.
15766 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
15767 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15768 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
15769 return V;
15771 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask, DAG,
15772 Subtarget);
15775 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
15776 Zeroable, Subtarget, DAG))
15777 return PSHUFB;
15779 // AVX512VBMIVL can lower to VPERMB.
15780 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
15781 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
15783 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15784 // shuffle.
15785 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15786 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
15787 return Result;
15789 // Try to permute the lanes and then use a per-lane permute.
15790 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15791 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
15792 return V;
15794 // Otherwise fall back on generic lowering.
15795 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
15796 Subtarget, DAG);
15799 /// High-level routine to lower various 256-bit x86 vector shuffles.
15801 /// This routine either breaks down the specific type of a 256-bit x86 vector
15802 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
15803 /// together based on the available instructions.
15804 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
15805 SDValue V1, SDValue V2, const APInt &Zeroable,
15806 const X86Subtarget &Subtarget,
15807 SelectionDAG &DAG) {
15808 // If we have a single input to the zero element, insert that into V1 if we
15809 // can do so cheaply.
15810 int NumElts = VT.getVectorNumElements();
15811 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
15813 if (NumV2Elements == 1 && Mask[0] >= NumElts)
15814 if (SDValue Insertion = lowerShuffleAsElementInsertion(
15815 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
15816 return Insertion;
15818 // Handle special cases where the lower or upper half is UNDEF.
15819 if (SDValue V =
15820 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
15821 return V;
15823 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
15824 // can check for those subtargets here and avoid much of the subtarget
15825 // querying in the per-vector-type lowering routines. With AVX1 we have
15826 // essentially *zero* ability to manipulate a 256-bit vector with integer
15827 // types. Since we'll use floating point types there eventually, just
15828 // immediately cast everything to a float and operate entirely in that domain.
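// e.g. a v8i32 shuffle without AVX2 is bitcast to v8f32, shuffled as floats,
// and bitcast back to v8i32 below.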
15829 if (VT.isInteger() && !Subtarget.hasAVX2()) {
15830 int ElementBits = VT.getScalarSizeInBits();
15831 if (ElementBits < 32) {
15832 // No floating point type available; if we can't use the bit operations
15833 // for masking/blending then decompose into 128-bit vectors.
15834 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
15835 Subtarget, DAG))
15836 return V;
15837 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
15838 return V;
15839 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15842 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
15843 VT.getVectorNumElements());
15844 V1 = DAG.getBitcast(FpVT, V1);
15845 V2 = DAG.getBitcast(FpVT, V2);
15846 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
15849 switch (VT.SimpleTy) {
15850 case MVT::v4f64:
15851 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15852 case MVT::v4i64:
15853 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15854 case MVT::v8f32:
15855 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15856 case MVT::v8i32:
15857 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15858 case MVT::v16i16:
15859 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15860 case MVT::v32i8:
15861 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15863 default:
15864 llvm_unreachable("Not a valid 256-bit x86 vector type!");
15868 /// Try to lower a vector shuffle as 128-bit shuffles.
15869 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
15870 const APInt &Zeroable, SDValue V1, SDValue V2,
15871 const X86Subtarget &Subtarget,
15872 SelectionDAG &DAG) {
15873 assert(VT.getScalarSizeInBits() == 64 &&
15874 "Unexpected element type size for 128bit shuffle.");
15876 // Handling a 256-bit vector requires VLX, and lowerV2X128VectorShuffle() is
15877 // most probably the better solution for that case.
15878 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
15880 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
15881 SmallVector<int, 4> WidenedMask;
15882 if (!canWidenShuffleElements(Mask, WidenedMask))
15883 return SDValue();
15885 // Try to use an insert into a zero vector.
15886 if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
15887 (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
15888 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
15889 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
15890 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
15891 DAG.getIntPtrConstant(0, DL));
15892 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
15893 getZeroVector(VT, Subtarget, DAG, DL), LoV,
15894 DAG.getIntPtrConstant(0, DL));
15897 // Check for patterns which can be matched with a single insert of a 256-bit
15898 // subvector.
15899 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
15900 {0, 1, 2, 3, 0, 1, 2, 3});
15901 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
15902 {0, 1, 2, 3, 8, 9, 10, 11})) {
15903 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
15904 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
15905 OnlyUsesV1 ? V1 : V2,
15906 DAG.getIntPtrConstant(0, DL));
15907 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
15908 DAG.getIntPtrConstant(4, DL));
15911 assert(WidenedMask.size() == 4);
15913 // See if this is an insertion of the lower 128-bits of V2 into V1.
15914 bool IsInsert = true;
15915 int V2Index = -1;
15916 for (int i = 0; i < 4; ++i) {
15917 assert(WidenedMask[i] >= -1);
15918 if (WidenedMask[i] < 0)
15919 continue;
15921 // Make sure all V1 subvectors are in place.
15922 if (WidenedMask[i] < 4) {
15923 if (WidenedMask[i] != i) {
15924 IsInsert = false;
15925 break;
15927 } else {
15928 // Make sure we only have a single V2 index and it's the lowest 128 bits.
15929 if (V2Index >= 0 || WidenedMask[i] != 4) {
15930 IsInsert = false;
15931 break;
15933 V2Index = i;
15936 if (IsInsert && V2Index >= 0) {
15937 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15938 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
15939 DAG.getIntPtrConstant(0, DL));
15940 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
15943 // Try to lower to vshuf64x2/vshuf32x4.
15944 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
15945 unsigned PermMask = 0;
15946 // Ensure elements come from the same Op.
15947 for (int i = 0; i < 4; ++i) {
15948 assert(WidenedMask[i] >= -1);
15949 if (WidenedMask[i] < 0)
15950 continue;
15952 SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
15953 unsigned OpIndex = i / 2;
15954 if (Ops[OpIndex].isUndef())
15955 Ops[OpIndex] = Op;
15956 else if (Ops[OpIndex] != Op)
15957 return SDValue();
15959 // Convert the 128-bit shuffle mask selection values into 128-bit selection
15960 // bits defined by a vshuf64x2 instruction's immediate control byte.
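// e.g. WidenedMask {0,2,5,7} takes 128-bit lanes 0 and 2 from V1 followed by
// lanes 1 and 3 from V2, giving PermMask 0xD8.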
15961 PermMask |= (WidenedMask[i] % 4) << (i * 2);
15964 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
15965 DAG.getConstant(PermMask, DL, MVT::i8));
15968 /// Handle lowering of 8-lane 64-bit floating point shuffles.
15969 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15970 const APInt &Zeroable, SDValue V1, SDValue V2,
15971 const X86Subtarget &Subtarget,
15972 SelectionDAG &DAG) {
15973 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
15974 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
15975 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15977 if (V2.isUndef()) {
15978 // Use low duplicate instructions for masks that match their pattern.
15979 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
15980 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
15982 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
15983 // Non-half-crossing single input shuffles can be lowered with an
15984 // interleaved permutation.
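// Each immediate bit picks the high (set) or low (clear) element of the
// corresponding 64-bit pair; e.g. the pair-wise swap {1,0,3,2,5,4,7,6}
// yields the immediate 0x55 and {1,1,3,3,5,5,7,7} yields 0xFF.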
15985 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15986 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
15987 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
15988 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
15989 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
15990 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
15993 SmallVector<int, 4> RepeatedMask;
15994 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
15995 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
15996 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15999 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16000 V2, Subtarget, DAG))
16001 return Shuf128;
16003 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16004 return Unpck;
16006 // Check if the blend happens to exactly fit that of SHUFPD.
16007 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
16008 return Op;
16010 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16011 DAG, Subtarget))
16012 return V;
16014 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16015 Zeroable, Subtarget, DAG))
16016 return Blend;
16018 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16021 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16022 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16023 const APInt &Zeroable, SDValue V1, SDValue V2,
16024 const X86Subtarget &Subtarget,
16025 SelectionDAG &DAG) {
16026 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16027 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16028 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16030 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16031 // options to efficiently lower the shuffle.
16032 SmallVector<int, 4> RepeatedMask;
16033 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16034 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16036 // Use even/odd duplicate instructions for masks that match their pattern.
16037 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16038 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16039 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16040 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16042 if (V2.isUndef())
16043 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16044 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16046 // Use dedicated unpack instructions for masks that match their pattern.
16047 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16048 return V;
16050 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16051 Zeroable, Subtarget, DAG))
16052 return Blend;
16054 // Otherwise, fall back to a SHUFPS sequence.
16055 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16058 // If we have a single-input shuffle with different shuffle patterns in the
16059 // 128-bit lanes that doesn't cross lanes, use a variable mask VPERMILPS.
16060 if (V2.isUndef() &&
16061 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16062 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16063 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16066 // If we have AVX512F support, we can use VEXPAND.
16067 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16068 V1, V2, DAG, Subtarget))
16069 return V;
16071 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16074 /// Handle lowering of 8-lane 64-bit integer shuffles.
16075 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16076 const APInt &Zeroable, SDValue V1, SDValue V2,
16077 const X86Subtarget &Subtarget,
16078 SelectionDAG &DAG) {
16079 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16080 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16081 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16083 if (V2.isUndef()) {
16084 // When the shuffle is mirrored across the 128-bit lanes of the unit, we
16085 // can use lower-latency instructions that will operate on all four
16086 // 128-bit lanes.
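// e.g. a repeated 2-element mask {1,0} is scaled to the dword mask {2,3,0,1},
// i.e. PSHUFD immediate 0x4E applied in every 128-bit lane.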
16087 SmallVector<int, 2> Repeated128Mask;
16088 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16089 SmallVector<int, 4> PSHUFDMask;
16090 scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16091 return DAG.getBitcast(
16092 MVT::v8i64,
16093 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16094 DAG.getBitcast(MVT::v16i32, V1),
16095 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16098 SmallVector<int, 4> Repeated256Mask;
16099 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16100 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16101 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16104 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16105 V2, Subtarget, DAG))
16106 return Shuf128;
16108 // Try to use shift instructions.
16109 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16110 Zeroable, Subtarget, DAG))
16111 return Shift;
16113 // Try to use VALIGN.
16114 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16115 Subtarget, DAG))
16116 return Rotate;
16118 // Try to use PALIGNR.
16119 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16120 Subtarget, DAG))
16121 return Rotate;
16123 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16124 return Unpck;
16125 // If we have AVX512F support, we can use VEXPAND.
16126 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16127 DAG, Subtarget))
16128 return V;
16130 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16131 Zeroable, Subtarget, DAG))
16132 return Blend;
16134 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16137 /// Handle lowering of 16-lane 32-bit integer shuffles.
16138 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16139 const APInt &Zeroable, SDValue V1, SDValue V2,
16140 const X86Subtarget &Subtarget,
16141 SelectionDAG &DAG) {
16142 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16143 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16144 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16146 // Whenever we can lower this as a zext, that instruction is strictly faster
16147 // than any alternative. It also allows us to fold memory operands into the
16148 // shuffle in many cases.
16149 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16150 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16151 return ZExt;
16153 // If the shuffle mask is repeated in each 128-bit lane we can use more
16154 // efficient instructions that mirror the shuffles across the four 128-bit
16155 // lanes.
16156 SmallVector<int, 4> RepeatedMask;
16157 bool Is128BitLaneRepeatedShuffle =
16158 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16159 if (Is128BitLaneRepeatedShuffle) {
16160 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16161 if (V2.isUndef())
16162 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16163 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16165 // Use dedicated unpack instructions for masks that match their pattern.
16166 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16167 return V;
16170 // Try to use shift instructions.
16171 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16172 Zeroable, Subtarget, DAG))
16173 return Shift;
16175 // Try to use VALIGN.
16176 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
16177 Subtarget, DAG))
16178 return Rotate;
16180 // Try to use byte rotation instructions.
16181 if (Subtarget.hasBWI())
16182 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16183 Subtarget, DAG))
16184 return Rotate;
16186 // Assume that a single SHUFPS is faster than using a permv shuffle.
16187 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16188 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16189 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16190 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16191 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16192 CastV1, CastV2, DAG);
16193 return DAG.getBitcast(MVT::v16i32, ShufPS);
16195 // If we have AVX512F support, we can use VEXPAND.
16196 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16197 DAG, Subtarget))
16198 return V;
16200 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16201 Zeroable, Subtarget, DAG))
16202 return Blend;
16203 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16206 /// Handle lowering of 32-lane 16-bit integer shuffles.
16207 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16208 const APInt &Zeroable, SDValue V1, SDValue V2,
16209 const X86Subtarget &Subtarget,
16210 SelectionDAG &DAG) {
16211 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16212 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16213 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16214 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16216 // Whenever we can lower this as a zext, that instruction is strictly faster
16217 // than any alternative. It also allows us to fold memory operands into the
16218 // shuffle in many cases.
16219 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16220 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16221 return ZExt;
16223 // Use dedicated unpack instructions for masks that match their pattern.
16224 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16225 return V;
16227 // Try to use shift instructions.
16228 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16229 Zeroable, Subtarget, DAG))
16230 return Shift;
16232 // Try to use byte rotation instructions.
16233 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16234 Subtarget, DAG))
16235 return Rotate;
16237 if (V2.isUndef()) {
16238 SmallVector<int, 8> RepeatedMask;
16239 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16240 // As this is a single-input shuffle, the repeated mask should be
16241 // a strictly valid v8i16 mask that we can pass through to the v8i16
16242 // lowering to handle even the v32 case.
16243 return lowerV8I16GeneralSingleInputShuffle(
16244 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16248 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16249 Zeroable, Subtarget, DAG))
16250 return Blend;
16252 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16253 Zeroable, Subtarget, DAG))
16254 return PSHUFB;
16256 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16259 /// Handle lowering of 64-lane 8-bit integer shuffles.
16260 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16261 const APInt &Zeroable, SDValue V1, SDValue V2,
16262 const X86Subtarget &Subtarget,
16263 SelectionDAG &DAG) {
16264 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16265 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16266 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16267 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16269 // Whenever we can lower this as a zext, that instruction is strictly faster
16270 // than any alternative. It also allows us to fold memory operands into the
16271 // shuffle in many cases.
16272 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16273 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16274 return ZExt;
16276 // Use dedicated unpack instructions for masks that match their pattern.
16277 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16278 return V;
16280 // Use dedicated pack instructions for masks that match their pattern.
16281 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16282 Subtarget))
16283 return V;
16285 // Try to use shift instructions.
16286 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16287 Zeroable, Subtarget, DAG))
16288 return Shift;
16290 // Try to use byte rotation instructions.
16291 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16292 Subtarget, DAG))
16293 return Rotate;
16295 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16296 Zeroable, Subtarget, DAG))
16297 return PSHUFB;
16299 // VBMI can use VPERMV/VPERMV3 byte shuffles.
16300 if (Subtarget.hasVBMI())
16301 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16303 // Try to create an in-lane repeating shuffle mask and then shuffle the
16304 // results into the target lanes.
16305 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16306 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16307 return V;
16309 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16310 Zeroable, Subtarget, DAG))
16311 return Blend;
16313 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16314 // shuffle.
16315 if (!V2.isUndef())
16316 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16317 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16318 return Result;
16320 // FIXME: Implement direct support for this type!
16321 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16324 /// High-level routine to lower various 512-bit x86 vector shuffles.
16326 /// This routine either breaks down the specific type of a 512-bit x86 vector
16327 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
16328 /// together based on the available instructions.
16329 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16330 MVT VT, SDValue V1, SDValue V2,
16331 const APInt &Zeroable,
16332 const X86Subtarget &Subtarget,
16333 SelectionDAG &DAG) {
16334 assert(Subtarget.hasAVX512() &&
16335 "Cannot lower 512-bit vectors w/ basic ISA!");
16337 // If we have a single input to the zero element, insert that into V1 if we
16338 // can do so cheaply.
16339 int NumElts = Mask.size();
16340 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16342 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16343 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16344 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16345 return Insertion;
16347 // Handle special cases where the lower or upper half is UNDEF.
16348 if (SDValue V =
16349 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16350 return V;
16352 // Check for being able to broadcast a single element.
16353 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16354 Subtarget, DAG))
16355 return Broadcast;
16357 // Dispatch to each element type for lowering. If we don't have support for
16358 // specific element type shuffles at 512 bits, immediately split them and
16359 // lower them. Each lowering routine of a given type is allowed to assume that
16360 // the requisite ISA extensions for that element type are available.
16361 switch (VT.SimpleTy) {
16362 case MVT::v8f64:
16363 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16364 case MVT::v16f32:
16365 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16366 case MVT::v8i64:
16367 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16368 case MVT::v16i32:
16369 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16370 case MVT::v32i16:
16371 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16372 case MVT::v64i8:
16373 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16375 default:
16376 llvm_unreachable("Not a valid 512-bit x86 vector type!");
16380 // Determine if this shuffle can be implemented with a KSHIFT instruction.
16381 // Returns the shift amount if possible or -1 if not. This is a simplified
16382 // version of matchShuffleAsShift.
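// e.g. with the low two elements zeroable and Mask[2..15] == {0,1,...,13},
// this returns 2 with Opcode set to X86ISD::KSHIFTL.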
16383 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
16384 int MaskOffset, const APInt &Zeroable) {
16385 int Size = Mask.size();
16387 auto CheckZeros = [&](int Shift, bool Left) {
16388 for (int j = 0; j < Shift; ++j)
16389 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
16390 return false;
16392 return true;
16395 auto MatchShift = [&](int Shift, bool Left) {
16396 unsigned Pos = Left ? Shift : 0;
16397 unsigned Low = Left ? 0 : Shift;
16398 unsigned Len = Size - Shift;
16399 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
16402 for (int Shift = 1; Shift != Size; ++Shift)
16403 for (bool Left : {true, false})
16404 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
16405 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
16406 return Shift;
16409 return -1;
16413 // Lower vXi1 vector shuffles.
16414 // There is no dedicated instruction on AVX-512 that shuffles the masks.
16415 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
16416 // vector, shuffle it, and then truncate it back.
16417 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16418 MVT VT, SDValue V1, SDValue V2,
16419 const APInt &Zeroable,
16420 const X86Subtarget &Subtarget,
16421 SelectionDAG &DAG) {
16422 assert(Subtarget.hasAVX512() &&
16423 "Cannot lower 512-bit vectors w/o basic ISA!");
16425 unsigned NumElts = Mask.size();
16427 // Try to recognize shuffles that are just padding a subvector with zeros.
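// e.g. a v8i1 shuffle that keeps elements {0,1,2,3} in place and whose upper
// four elements are zeroable becomes an extract of the low v4i1 inserted into
// a zero vector.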
16428 unsigned SubvecElts = 0;
16429 for (int i = 0; i != (int)NumElts; ++i) {
16430 if (Mask[i] >= 0 && Mask[i] != i)
16431 break;
16433 ++SubvecElts;
16435 assert(SubvecElts != NumElts && "Identity shuffle?");
16437 // Clip to a power of 2.
16438 SubvecElts = PowerOf2Floor(SubvecElts);
16440 // Make sure the number of zeroable bits in the top at least covers the bits
16441 // not covered by the subvector.
16442 if (Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
16443 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
16444 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
16445 V1, DAG.getIntPtrConstant(0, DL));
16446 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16447 getZeroVector(VT, Subtarget, DAG, DL),
16448 Extract, DAG.getIntPtrConstant(0, DL));
16451 // Try to match KSHIFTs.
16452 // TODO: Support narrower than legal shifts by widening and extracting.
16453 if (NumElts >= 16 || (Subtarget.hasDQI() && NumElts == 8)) {
16454 unsigned Offset = 0;
16455 for (SDValue V : { V1, V2 }) {
16456 unsigned Opcode;
16457 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
16458 if (ShiftAmt >= 0)
16459 return DAG.getNode(Opcode, DL, VT, V,
16460 DAG.getConstant(ShiftAmt, DL, MVT::i8));
16461 Offset += NumElts; // Increment for next iteration.
16466 MVT ExtVT;
16467 switch (VT.SimpleTy) {
16468 default:
16469 llvm_unreachable("Expected a vector of i1 elements");
16470 case MVT::v2i1:
16471 ExtVT = MVT::v2i64;
16472 break;
16473 case MVT::v4i1:
16474 ExtVT = MVT::v4i32;
16475 break;
16476 case MVT::v8i1:
16477 // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
16478 // shuffle.
16479 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
16480 break;
16481 case MVT::v16i1:
16482 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16483 // 256-bit operation available.
16484 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
16485 break;
16486 case MVT::v32i1:
16487 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16488 // 256-bit operation available.
16489 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
16490 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
16491 break;
16492 case MVT::v64i1:
16493 ExtVT = MVT::v64i8;
16494 break;
16497 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
16498 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
16500 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
16501 // The i1 elements were sign-extended, so testing each element for being negative recovers the mask.
16502 int NumElems = VT.getVectorNumElements();
16503 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
16504 (Subtarget.hasDQI() && (NumElems < 32)))
16505 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
16506 Shuffle, ISD::SETGT);
16508 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
16511 /// Helper function that returns true if the shuffle mask should be
16512 /// commuted to improve canonicalization.
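/// e.g. the v4i32 mask {4,5,6,1} takes three elements from V2 and one from V1,
/// so commuting it (to {0,1,2,5}) lets the lowering assume most elements come
/// from V1.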
16513 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
16514 int NumElements = Mask.size();
16516 int NumV1Elements = 0, NumV2Elements = 0;
16517 for (int M : Mask)
16518 if (M < 0)
16519 continue;
16520 else if (M < NumElements)
16521 ++NumV1Elements;
16522 else
16523 ++NumV2Elements;
16525 // Commute the shuffle as needed such that more elements come from V1 than
16526 // V2. This allows us to match the shuffle pattern strictly on how many
16527 // elements come from V1 without handling the symmetric cases.
16528 if (NumV2Elements > NumV1Elements)
16529 return true;
16531 assert(NumV1Elements > 0 && "No V1 indices");
16533 if (NumV2Elements == 0)
16534 return false;
16536 // When the number of V1 and V2 elements are the same, try to minimize the
16537 // number of uses of V2 in the low half of the vector. When that is tied,
16538 // ensure that the sum of indices for V1 is equal to or lower than the sum
16539 // of indices for V2. When those are equal, try to ensure that the number of odd
16540 // indices for V1 is lower than the number of odd indices for V2.
16541 if (NumV1Elements == NumV2Elements) {
16542 int LowV1Elements = 0, LowV2Elements = 0;
16543 for (int M : Mask.slice(0, NumElements / 2))
16544 if (M >= NumElements)
16545 ++LowV2Elements;
16546 else if (M >= 0)
16547 ++LowV1Elements;
16548 if (LowV2Elements > LowV1Elements)
16549 return true;
16550 if (LowV2Elements == LowV1Elements) {
16551 int SumV1Indices = 0, SumV2Indices = 0;
16552 for (int i = 0, Size = Mask.size(); i < Size; ++i)
16553 if (Mask[i] >= NumElements)
16554 SumV2Indices += i;
16555 else if (Mask[i] >= 0)
16556 SumV1Indices += i;
16557 if (SumV2Indices < SumV1Indices)
16558 return true;
16559 if (SumV2Indices == SumV1Indices) {
16560 int NumV1OddIndices = 0, NumV2OddIndices = 0;
16561 for (int i = 0, Size = Mask.size(); i < Size; ++i)
16562 if (Mask[i] >= NumElements)
16563 NumV2OddIndices += i % 2;
16564 else if (Mask[i] >= 0)
16565 NumV1OddIndices += i % 2;
16566 if (NumV2OddIndices < NumV1OddIndices)
16567 return true;
16572 return false;
16575 /// Top-level lowering for x86 vector shuffles.
16577 /// This handles decomposition, canonicalization, and lowering of all x86
16578 /// vector shuffles. Most of the specific lowering strategies are encapsulated
16579 /// above in helper routines. The canonicalization attempts to widen shuffles
16580 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
16581 /// s.t. only one of the two inputs needs to be tested, etc.
16582 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
16583 SelectionDAG &DAG) {
16584 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
16585 ArrayRef<int> Mask = SVOp->getMask();
16586 SDValue V1 = Op.getOperand(0);
16587 SDValue V2 = Op.getOperand(1);
16588 MVT VT = Op.getSimpleValueType();
16589 int NumElements = VT.getVectorNumElements();
16590 SDLoc DL(Op);
16591 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
16593 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
16594 "Can't lower MMX shuffles");
16596 bool V1IsUndef = V1.isUndef();
16597 bool V2IsUndef = V2.isUndef();
16598 if (V1IsUndef && V2IsUndef)
16599 return DAG.getUNDEF(VT);
16601 // When we create a shuffle node we put the UNDEF node as the second operand,
16602 // but in some cases the first operand may be transformed to UNDEF.
16603 // In this case we should just commute the node.
16604 if (V1IsUndef)
16605 return DAG.getCommutedVectorShuffle(*SVOp);
16607 // Check for non-undef masks pointing at an undef vector and make the masks
16608 // undef as well. This makes it easier to match the shuffle based solely on
16609 // the mask.
16610 if (V2IsUndef &&
16611 any_of(Mask, [NumElements](int M) { return M >= NumElements; })) {
16612 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
16613 for (int &M : NewMask)
16614 if (M >= NumElements)
16615 M = -1;
16616 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
16619 // Check for illegal shuffle mask element index values.
16620 int MaskUpperLimit = Mask.size() * (V2IsUndef ? 1 : 2); (void)MaskUpperLimit;
16621 assert(llvm::all_of(Mask,
16622 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
16623 "Out of bounds shuffle index");
16625 // We actually see shuffles that are entirely re-arrangements of a set of
16626 // zero inputs. This mostly happens while decomposing complex shuffles into
16627 // simple ones. Directly lower these as a buildvector of zeros.
16628 APInt Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
16629 if (Zeroable.isAllOnesValue())
16630 return getZeroVector(VT, Subtarget, DAG, DL);
16632 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
16634 // Create an alternative mask with info about zeroable elements.
16635 // Here we do not set undef elements as zeroable.
16636 SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
16637 if (V2IsZero) {
16638 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
16639 for (int i = 0; i != NumElements; ++i)
16640 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
16641 ZeroableMask[i] = SM_SentinelZero;
16644 // Try to collapse shuffles into using a vector type with fewer elements but
16645 // wider element types. We cap this to not form integers or floating point
16646 // elements wider than 64 bits, but it might be interesting to form i128
16647 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
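// e.g. a v4i32 shuffle with mask {0,1,4,5} is widened to a v2i64 shuffle with
// mask {0,2}.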
16648 SmallVector<int, 16> WidenedMask;
16649 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
16650 canWidenShuffleElements(ZeroableMask, WidenedMask)) {
16651 // Shuffle mask widening should not interfere with a broadcast opportunity
16652 // by obfuscating the operands with bitcasts.
16653 // TODO: Avoid lowering directly from this top-level function: make this
16654 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
16655 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16656 Subtarget, DAG))
16657 return Broadcast;
16659 MVT NewEltVT = VT.isFloatingPoint()
16660 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
16661 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
16662 int NewNumElts = NumElements / 2;
16663 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
16664 // Make sure that the new vector type is legal. For example, v2f64 isn't
16665 // legal on SSE1.
16666 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
16667 if (V2IsZero) {
16668 // Modify the new Mask to take all zeros from the all-zero vector.
16669 // Choose indices that are blend-friendly.
16670 bool UsedZeroVector = false;
16671 assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
16672 "V2's non-undef elements are used?!");
16673 for (int i = 0; i != NewNumElts; ++i)
16674 if (WidenedMask[i] == SM_SentinelZero) {
16675 WidenedMask[i] = i + NewNumElts;
16676 UsedZeroVector = true;
16678 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
16679 // some elements to be undef.
16680 if (UsedZeroVector)
16681 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
16683 V1 = DAG.getBitcast(NewVT, V1);
16684 V2 = DAG.getBitcast(NewVT, V2);
16685 return DAG.getBitcast(
16686 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
16690 // Commute the shuffle if it will improve canonicalization.
16691 if (canonicalizeShuffleMaskWithCommute(Mask))
16692 return DAG.getCommutedVectorShuffle(*SVOp);
16694 if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
16695 return V;
16697 // For each vector width, delegate to a specialized lowering routine.
16698 if (VT.is128BitVector())
16699 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16701 if (VT.is256BitVector())
16702 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16704 if (VT.is512BitVector())
16705 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16707 if (Is1BitVector)
16708 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16710 llvm_unreachable("Unimplemented!");
16713 /// Try to lower a VSELECT instruction to a vector shuffle.
16714 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
16715 const X86Subtarget &Subtarget,
16716 SelectionDAG &DAG) {
16717 SDValue Cond = Op.getOperand(0);
16718 SDValue LHS = Op.getOperand(1);
16719 SDValue RHS = Op.getOperand(2);
16720 MVT VT = Op.getSimpleValueType();
16722 // Only non-legal VSELECTs reach this lowering; convert those into generic
16723 // shuffles and re-use the shuffle lowering path for blends.
16724 SmallVector<int, 32> Mask;
16725 if (createShuffleMaskFromVSELECT(Mask, Cond))
16726 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
16728 return SDValue();
16731 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
16732 SDValue Cond = Op.getOperand(0);
16733 SDValue LHS = Op.getOperand(1);
16734 SDValue RHS = Op.getOperand(2);
16736 // A vselect where all conditions and data are constants can be optimized into
16737 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
16738 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
16739 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
16740 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
16741 return SDValue();
16743 // Try to lower this to a blend-style vector shuffle. This can handle all
16744 // constant condition cases.
16745 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
16746 return BlendOp;
16748 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
16749 // with patterns on the mask registers on AVX-512.
16750 MVT CondVT = Cond.getSimpleValueType();
16751 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
16752 if (CondEltSize == 1)
16753 return Op;
16755 // Variable blends are only legal from SSE4.1 onward.
16756 if (!Subtarget.hasSSE41())
16757 return SDValue();
16759 SDLoc dl(Op);
16760 MVT VT = Op.getSimpleValueType();
16761 unsigned EltSize = VT.getScalarSizeInBits();
16762 unsigned NumElts = VT.getVectorNumElements();
16764 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
16765 // into an i1 condition so that we can use the mask-based 512-bit blend
16766 // instructions.
16767 if (VT.getSizeInBits() == 512) {
16768 // Build a mask by testing the condition against zero.
16769 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
16770 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
16771 DAG.getConstant(0, dl, CondVT),
16772 ISD::SETNE);
16773 // Now return a new VSELECT using the mask.
16774 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
16777 // SEXT/TRUNC cases where the mask doesn't match the destination size.
16778 if (CondEltSize != EltSize) {
16779 // If we don't have a sign splat, rely on the expansion.
16780 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
16781 return SDValue();
16783 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
16784 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
16785 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
16786 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
16789 // Only some types will be legal on some subtargets. If we can emit a legal
16790 // VSELECT-matching blend, return Op; but if we need to expand, return
16791 // a null value.
16792 switch (VT.SimpleTy) {
16793 default:
16794 // Most of the vector types have blends past SSE4.1.
16795 return Op;
16797 case MVT::v32i8:
16798 // The byte blends for AVX vectors were introduced only in AVX2.
16799 if (Subtarget.hasAVX2())
16800 return Op;
16802 return SDValue();
16804 case MVT::v8i16:
16805 case MVT::v16i16: {
16806 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
16807 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
16808 Cond = DAG.getBitcast(CastVT, Cond);
16809 LHS = DAG.getBitcast(CastVT, LHS);
16810 RHS = DAG.getBitcast(CastVT, RHS);
16811 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
16812 return DAG.getBitcast(VT, Select);
16817 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
16818 MVT VT = Op.getSimpleValueType();
16819 SDLoc dl(Op);
16821 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
16822 return SDValue();
16824 if (VT.getSizeInBits() == 8) {
16825 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
16826 Op.getOperand(0), Op.getOperand(1));
16827 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
16830 if (VT == MVT::f32) {
16831 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
16832 // the result back to FR32 register. It's only worth matching if the
16833 // result has a single use which is a store or a bitcast to i32. And in
16834 // the case of a store, it's not worth it if the index is a constant 0,
16835 // because a MOVSSmr can be used instead, which is smaller and faster.
16836 if (!Op.hasOneUse())
16837 return SDValue();
16838 SDNode *User = *Op.getNode()->use_begin();
16839 if ((User->getOpcode() != ISD::STORE ||
16840 isNullConstant(Op.getOperand(1))) &&
16841 (User->getOpcode() != ISD::BITCAST ||
16842 User->getValueType(0) != MVT::i32))
16843 return SDValue();
16844 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
16845 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
16846 Op.getOperand(1));
16847 return DAG.getBitcast(MVT::f32, Extract);
16850 if (VT == MVT::i32 || VT == MVT::i64) {
16851 // ExtractPS/pextrq work with a constant index.
16852 if (isa<ConstantSDNode>(Op.getOperand(1)))
16853 return Op;
16856 return SDValue();
16859 /// Extract one bit from a mask vector, like v16i1 or v8i1.
16860 /// AVX-512 feature.
16861 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
16862 const X86Subtarget &Subtarget) {
16863 SDValue Vec = Op.getOperand(0);
16864 SDLoc dl(Vec);
16865 MVT VecVT = Vec.getSimpleValueType();
16866 SDValue Idx = Op.getOperand(1);
16867 MVT EltVT = Op.getSimpleValueType();
16869 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
16870 "Unexpected vector type in ExtractBitFromMaskVector");
16872 // A variable index can't be handled in mask registers,
16873 // so extend the vector to VR512/VR128.
16874 if (!isa<ConstantSDNode>(Idx)) {
16875 unsigned NumElts = VecVT.getVectorNumElements();
16876 // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
16877 // than extending to 128/256 bits.
16878 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
16879 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
16880 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
16881 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
16882 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
16885 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
16886 if (IdxVal == 0) // the operation is legal
16887 return Op;
16889 // Extend to natively supported kshift.
16890 unsigned NumElems = VecVT.getVectorNumElements();
16891 MVT WideVecVT = VecVT;
16892 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
16893 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
16894 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
16895 DAG.getUNDEF(WideVecVT), Vec,
16896 DAG.getIntPtrConstant(0, dl));
16899 // Use kshiftr instruction to move to the lower element.
16900 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
16901 DAG.getConstant(IdxVal, dl, MVT::i8));
16903 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
16904 DAG.getIntPtrConstant(0, dl));
16907 SDValue
16908 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
16909 SelectionDAG &DAG) const {
16910 SDLoc dl(Op);
16911 SDValue Vec = Op.getOperand(0);
16912 MVT VecVT = Vec.getSimpleValueType();
16913 SDValue Idx = Op.getOperand(1);
16915 if (VecVT.getVectorElementType() == MVT::i1)
16916 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
16918 if (!isa<ConstantSDNode>(Idx)) {
16919 // It's more profitable to go through memory (1 cycle throughput)
16920 // than using a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
16921 // The IACA tool was used to get the performance estimates
16922 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
16924 // example : extractelement <16 x i8> %a, i32 %i
16926 // Block Throughput: 3.00 Cycles
16927 // Throughput Bottleneck: Port5
16929 // | Num Of | Ports pressure in cycles | |
16930 // | Uops | 0 - DV | 5 | 6 | 7 | |
16931 // ---------------------------------------------
16932 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
16933 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
16934 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
16935 // Total Num Of Uops: 4
16938 // Block Throughput: 1.00 Cycles
16939 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
16941 // | | Ports pressure in cycles | |
16942 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
16943 // ---------------------------------------------------------
16944 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
16945 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
16946 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
16947 // Total Num Of Uops: 4
16949 return SDValue();
16952 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
16954 // If this is a 256-bit vector result, first extract the 128-bit vector and
16955 // then extract the element from the 128-bit vector.
16956 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
16957 // Get the 128-bit vector.
16958 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
16959 MVT EltVT = VecVT.getVectorElementType();
16961 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
16962 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
16964 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
16965 // this can be done with a mask.
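// e.g. element 11 of a v16i32 lives in the 128-bit chunk holding elements
// 8..11, at index 11 & 3 == 3 within that chunk.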
16966 IdxVal &= ElemsPerChunk - 1;
16967 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
16968 DAG.getConstant(IdxVal, dl, MVT::i32));
16971 assert(VecVT.is128BitVector() && "Unexpected vector length");
16973 MVT VT = Op.getSimpleValueType();
16975 if (VT.getSizeInBits() == 16) {
16976 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
16977 // we're going to zero extend the register or fold the store (SSE41 only).
16978 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
16979 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
16980 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
16981 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
16982 DAG.getBitcast(MVT::v4i32, Vec), Idx));
16984 // Transform it so it matches pextrw, which produces a 32-bit result.
16985 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
16986 Op.getOperand(0), Op.getOperand(1));
16987 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
16990 if (Subtarget.hasSSE41())
16991 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
16992 return Res;
16994 // TODO: We only extract a single element from v16i8; we can probably afford
16995 // to be more aggressive here before using the default approach of spilling to
16996 // stack.
16997 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
16998 // Extract either the lowest i32 or any i16, and extract the sub-byte.
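// e.g. byte 3 comes from dword 0 shifted right by 24 bits; byte 6 comes from
// word 3 with no shift.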
16999 int DWordIdx = IdxVal / 4;
17000 if (DWordIdx == 0) {
17001 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17002 DAG.getBitcast(MVT::v4i32, Vec),
17003 DAG.getIntPtrConstant(DWordIdx, dl));
17004 int ShiftVal = (IdxVal % 4) * 8;
17005 if (ShiftVal != 0)
17006 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17007 DAG.getConstant(ShiftVal, dl, MVT::i8));
17008 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17011 int WordIdx = IdxVal / 2;
17012 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17013 DAG.getBitcast(MVT::v8i16, Vec),
17014 DAG.getIntPtrConstant(WordIdx, dl));
17015 int ShiftVal = (IdxVal % 2) * 8;
17016 if (ShiftVal != 0)
17017 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17018 DAG.getConstant(ShiftVal, dl, MVT::i8));
17019 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17022 if (VT.getSizeInBits() == 32) {
17023 if (IdxVal == 0)
17024 return Op;
17026 // SHUFPS the element to the lowest double word, then movss.
17027 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17028 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17029 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17030 DAG.getIntPtrConstant(0, dl));
17033 if (VT.getSizeInBits() == 64) {
17034 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17035 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17036 // to match extract_elt for f64.
17037 if (IdxVal == 0)
17038 return Op;
17040 // UNPCKHPD the element to the lowest double word, then movsd.
17041 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
17042 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
17043 int Mask[2] = { 1, -1 };
17044 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17045 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17046 DAG.getIntPtrConstant(0, dl));
17049 return SDValue();
17052 /// Insert one bit into a mask vector, like v16i1 or v8i1.
17053 /// AVX-512 feature.
17054 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17055 const X86Subtarget &Subtarget) {
17056 SDLoc dl(Op);
17057 SDValue Vec = Op.getOperand(0);
17058 SDValue Elt = Op.getOperand(1);
17059 SDValue Idx = Op.getOperand(2);
17060 MVT VecVT = Vec.getSimpleValueType();
17062 if (!isa<ConstantSDNode>(Idx)) {
17063 // Non-constant index. Extend the source and destination,
17064 // insert the element, and then truncate the result.
17065 unsigned NumElts = VecVT.getVectorNumElements();
17066 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17067 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17068 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17069 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17070 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17071 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17074 // Copy into a k-register, extract to v1i1 and insert_subvector.
17075 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17077 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
17078 Op.getOperand(2));
17081 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17082 SelectionDAG &DAG) const {
17083 MVT VT = Op.getSimpleValueType();
17084 MVT EltVT = VT.getVectorElementType();
17085 unsigned NumElts = VT.getVectorNumElements();
17087 if (EltVT == MVT::i1)
17088 return InsertBitToMaskVector(Op, DAG, Subtarget);
17090 SDLoc dl(Op);
17091 SDValue N0 = Op.getOperand(0);
17092 SDValue N1 = Op.getOperand(1);
17093 SDValue N2 = Op.getOperand(2);
17094 if (!isa<ConstantSDNode>(N2))
17095 return SDValue();
17096 auto *N2C = cast<ConstantSDNode>(N2);
17097 unsigned IdxVal = N2C->getZExtValue();
17099 bool IsZeroElt = X86::isZeroNode(N1);
17100 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17102 // If we are inserting an element, see if we can do this more efficiently with
17103 // a blend shuffle against a rematerializable vector rather than a costly integer
17104 // insertion.
17105 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17106 16 <= EltVT.getSizeInBits()) {
17107 SmallVector<int, 8> BlendMask;
17108 for (unsigned i = 0; i != NumElts; ++i)
17109 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17110 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17111 : getOnesVector(VT, DAG, dl);
17112 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
17115 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17116 // into that, and then insert the subvector back into the result.
17117 if (VT.is256BitVector() || VT.is512BitVector()) {
17118 // With a 256-bit vector, we can insert into the zero element efficiently
17119 // using a blend if we have AVX or AVX2 and the right data type.
17120 if (VT.is256BitVector() && IdxVal == 0) {
17121 // TODO: It is worthwhile to cast integer to floating point and back
17122 // and incur a domain crossing penalty if that's what we'll end up
17123 // doing anyway after extracting to a 128-bit vector.
17124 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17125 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17126 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17127 N2 = DAG.getIntPtrConstant(1, dl);
17128 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
17132 // Get the desired 128-bit vector chunk.
17133 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17135 // Insert the element into the desired chunk.
17136 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17137 assert(isPowerOf2_32(NumEltsIn128));
17138 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
17139 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17141 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17142 DAG.getConstant(IdxIn128, dl, MVT::i32));
17144 // Insert the changed part back into the bigger vector
17145 return insert128BitVector(N0, V, IdxVal, DAG, dl);
17147 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17149 // This will be just movd/movq/movss/movsd.
17150 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17151 (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17152 EltVT == MVT::i64)) {
17153 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17154 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17157 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17158 // argument. SSE41 required for pinsrb.
17159 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17160 unsigned Opc;
17161 if (VT == MVT::v8i16) {
17162 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17163 Opc = X86ISD::PINSRW;
17164 } else {
17165 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17166 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17167 Opc = X86ISD::PINSRB;
17170 if (N1.getValueType() != MVT::i32)
17171 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17172 if (N2.getValueType() != MVT::i32)
17173 N2 = DAG.getIntPtrConstant(IdxVal, dl);
17174 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17177 if (Subtarget.hasSSE41()) {
17178 if (EltVT == MVT::f32) {
17179 // Bits [7:6] of the constant are the source select. This will always be
17180 // zero here. The DAG Combiner may combine an extract_elt index into
17181 // these bits. For example (insert (extract, 3), 2) could be matched by
17182 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17183 // Bits [5:4] of the constant are the destination select. This is the
17184 // value of the incoming immediate.
17185 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17186 // combine either bitwise AND or insert of float 0.0 to set these bits.
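// For example, an insert at IdxVal = 2 uses the immediate (2 << 4) == 0x20 built
// below: source select 0, destination select 2, zero mask 0 (no lanes cleared).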
17188 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17189 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17190 // If this is an insertion of 32-bits into the low 32-bits of
17191 // a vector, we prefer to generate a blend with immediate rather
17192 // than an insertps. Blends are simpler operations in hardware and so
17193 // will always have equal or better performance than insertps.
17194 // But if optimizing for size and there's a load folding opportunity,
17195 // generate insertps because blendps does not have a 32-bit memory
17196 // operand form.
17197 N2 = DAG.getIntPtrConstant(1, dl);
17198 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17199 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
17201 N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
17202 // Create this as a scalar to vector.
17203 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17204 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
17207 // PINSR* works with constant index.
17208 if (EltVT == MVT::i32 || EltVT == MVT::i64)
17209 return Op;
17212 return SDValue();
17215 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17216 SelectionDAG &DAG) {
17217 SDLoc dl(Op);
17218 MVT OpVT = Op.getSimpleValueType();
17220 // It's always cheaper to replace an xor+movd with xorps, and it simplifies
17221 // further combines.
17222 if (X86::isZeroNode(Op.getOperand(0)))
17223 return getZeroVector(OpVT, Subtarget, DAG, dl);
17225 // If this is a 256-bit vector result, first insert into a 128-bit
17226 // vector and then insert into the 256-bit vector.
17227 if (!OpVT.is128BitVector()) {
17228 // Insert into a 128-bit vector.
17229 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17230 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17231 OpVT.getVectorNumElements() / SizeFactor);
17233 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17235 // Insert the 128-bit vector.
17236 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17238 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17239 "Expected an SSE type!");
17241 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17242 if (OpVT == MVT::v4i32)
17243 return Op;
17245 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17246 return DAG.getBitcast(
17247 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17250 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
17251 // simple superregister reference or explicit instructions to insert
17252 // the upper bits of a vector.
17253 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17254 SelectionDAG &DAG) {
17255 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17257 return insert1BitVector(Op, DAG, Subtarget);
17260 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17261 SelectionDAG &DAG) {
17262 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17263 "Only vXi1 extract_subvectors need custom lowering");
17265 SDLoc dl(Op);
17266 SDValue Vec = Op.getOperand(0);
17267 SDValue Idx = Op.getOperand(1);
17269 if (!isa<ConstantSDNode>(Idx))
17270 return SDValue();
17272 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17273 if (IdxVal == 0) // the operation is legal
17274 return Op;
17276 MVT VecVT = Vec.getSimpleValueType();
17277 unsigned NumElems = VecVT.getVectorNumElements();
17279 // Extend to natively supported kshift.
17280 MVT WideVecVT = VecVT;
17281 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17282 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17283 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17284 DAG.getUNDEF(WideVecVT), Vec,
17285 DAG.getIntPtrConstant(0, dl));
17288 // Shift to the LSB.
17289 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17290 DAG.getConstant(IdxVal, dl, MVT::i8));
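// E.g. extracting a v2i1 subvector at IdxVal = 4 from a v8i1 mask shifts the
// k-register right by 4 so the wanted bits sit at positions [1:0], making the
// index-0 extract below legal.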
17292 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
17293 DAG.getIntPtrConstant(0, dl));
17296 // Returns the appropriate wrapper opcode for a global reference.
17297 unsigned X86TargetLowering::getGlobalWrapperKind(
17298 const GlobalValue *GV, const unsigned char OpFlags) const {
17299 // References to absolute symbols are never PC-relative.
17300 if (GV && GV->isAbsoluteSymbolRef())
17301 return X86ISD::Wrapper;
17303 CodeModel::Model M = getTargetMachine().getCodeModel();
17304 if (Subtarget.isPICStyleRIPRel() &&
17305 (M == CodeModel::Small || M == CodeModel::Kernel))
17306 return X86ISD::WrapperRIP;
17308 // GOTPCREL references must always use RIP.
17309 if (OpFlags == X86II::MO_GOTPCREL)
17310 return X86ISD::WrapperRIP;
17312 return X86ISD::Wrapper;
17315 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
17316 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
17317 // one of the above-mentioned nodes. It has to be wrapped because otherwise
17318 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
17319 // be used to form an addressing mode. These wrapped nodes will be selected
17320 // into MOV32ri.
17321 SDValue
17322 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
17323 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
17325 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17326 // global base reg.
17327 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17329 auto PtrVT = getPointerTy(DAG.getDataLayout());
17330 SDValue Result = DAG.getTargetConstantPool(
17331 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
17332 SDLoc DL(CP);
17333 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17334 // With PIC, the address is actually $g + Offset.
17335 if (OpFlag) {
17336 Result =
17337 DAG.getNode(ISD::ADD, DL, PtrVT,
17338 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17341 return Result;
17344 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
17345 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
17347 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17348 // global base reg.
17349 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17351 auto PtrVT = getPointerTy(DAG.getDataLayout());
17352 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
17353 SDLoc DL(JT);
17354 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17356 // With PIC, the address is actually $g + Offset.
17357 if (OpFlag)
17358 Result =
17359 DAG.getNode(ISD::ADD, DL, PtrVT,
17360 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17362 return Result;
17365 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
17366 SelectionDAG &DAG) const {
17367 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17370 SDValue
17371 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
17372 // Create the TargetBlockAddressAddress node.
17373 unsigned char OpFlags =
17374 Subtarget.classifyBlockAddressReference();
17375 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
17376 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
17377 SDLoc dl(Op);
17378 auto PtrVT = getPointerTy(DAG.getDataLayout());
17379 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
17380 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
17382 // With PIC, the address is actually $g + Offset.
17383 if (isGlobalRelativeToPICBase(OpFlags)) {
17384 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17385 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17388 return Result;
17391 /// Creates target global address or external symbol nodes for calls or
17392 /// other uses.
17393 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
17394 bool ForCall) const {
17395 // Unpack the global address or external symbol.
17396 const SDLoc &dl = SDLoc(Op);
17397 const GlobalValue *GV = nullptr;
17398 int64_t Offset = 0;
17399 const char *ExternalSym = nullptr;
17400 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
17401 GV = G->getGlobal();
17402 Offset = G->getOffset();
17403 } else {
17404 const auto *ES = cast<ExternalSymbolSDNode>(Op);
17405 ExternalSym = ES->getSymbol();
17408 // Calculate some flags for address lowering.
17409 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
17410 unsigned char OpFlags;
17411 if (ForCall)
17412 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
17413 else
17414 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
17415 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
17416 bool NeedsLoad = isGlobalStubReference(OpFlags);
17418 CodeModel::Model M = DAG.getTarget().getCodeModel();
17419 auto PtrVT = getPointerTy(DAG.getDataLayout());
17420 SDValue Result;
17422 if (GV) {
17423 // Create a target global address if this is a global. If possible, fold the
17424 // offset into the global address reference. Otherwise, ADD it on later.
17425 int64_t GlobalOffset = 0;
17426 if (OpFlags == X86II::MO_NO_FLAG &&
17427 X86::isOffsetSuitableForCodeModel(Offset, M)) {
17428 std::swap(GlobalOffset, Offset);
17430 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
17431 } else {
17432 // If this is not a global address, this must be an external symbol.
17433 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
17436 // If this is a direct call, avoid the wrapper if we don't need to do any
17437 // loads or adds. This allows SDAG ISel to match direct calls.
17438 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
17439 return Result;
17441 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
17443 // With PIC, the address is actually $g + Offset.
17444 if (HasPICReg) {
17445 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17446 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17449 // For globals that require a load from a stub to get the address, emit the
17450 // load.
17451 if (NeedsLoad)
17452 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
17453 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
17455 // If there was a non-zero offset that we didn't fold, create an explicit
17456 // addition for it.
17457 if (Offset != 0)
17458 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
17459 DAG.getConstant(Offset, dl, PtrVT));
17461 return Result;
17464 SDValue
17465 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
17466 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17469 static SDValue
17470 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
17471 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
17472 unsigned char OperandFlags, bool LocalDynamic = false) {
17473 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17474 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17475 SDLoc dl(GA);
17476 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17477 GA->getValueType(0),
17478 GA->getOffset(),
17479 OperandFlags);
17481 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
17482 : X86ISD::TLSADDR;
17484 if (InFlag) {
17485 SDValue Ops[] = { Chain, TGA, *InFlag };
17486 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17487 } else {
17488 SDValue Ops[] = { Chain, TGA };
17489 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17492 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
17493 MFI.setAdjustsStack(true);
17494 MFI.setHasCalls(true);
17496 SDValue Flag = Chain.getValue(1);
17497 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
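// For reference: on ELF targets the TLSADDR / TLSBASEADDR node built above is
// eventually selected to the canonical call to __tls_get_addr (e.g. roughly
// "leaq sym@tlsgd(%rip), %rdi; call __tls_get_addr" on x86-64), and the
// CopyFromReg above then reads the resulting address out of ReturnReg (RAX/EAX).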
17500 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
17501 static SDValue
17502 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17503 const EVT PtrVT) {
17504 SDValue InFlag;
17505 SDLoc dl(GA); // ? function entry point might be better
17506 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17507 DAG.getNode(X86ISD::GlobalBaseReg,
17508 SDLoc(), PtrVT), InFlag);
17509 InFlag = Chain.getValue(1);
17511 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
17514 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
17515 static SDValue
17516 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17517 const EVT PtrVT) {
17518 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
17519 X86::RAX, X86II::MO_TLSGD);
17522 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
17523 SelectionDAG &DAG,
17524 const EVT PtrVT,
17525 bool is64Bit) {
17526 SDLoc dl(GA);
17528 // Get the start address of the TLS block for this module.
17529 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
17530 .getInfo<X86MachineFunctionInfo>();
17531 MFI->incNumLocalDynamicTLSAccesses();
17533 SDValue Base;
17534 if (is64Bit) {
17535 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
17536 X86II::MO_TLSLD, /*LocalDynamic=*/true);
17537 } else {
17538 SDValue InFlag;
17539 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17540 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
17541 InFlag = Chain.getValue(1);
17542 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
17543 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
17546 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
17547 // of Base.
17549 // Build x@dtpoff.
17550 unsigned char OperandFlags = X86II::MO_DTPOFF;
17551 unsigned WrapperKind = X86ISD::Wrapper;
17552 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17553 GA->getValueType(0),
17554 GA->getOffset(), OperandFlags);
17555 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
17557 // Add x@dtpoff with the base.
17558 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
17561 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
17562 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17563 const EVT PtrVT, TLSModel::Model model,
17564 bool is64Bit, bool isPIC) {
17565 SDLoc dl(GA);
17567 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
17568 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
17569 is64Bit ? 257 : 256));
17571 SDValue ThreadPointer =
17572 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
17573 MachinePointerInfo(Ptr));
17575 unsigned char OperandFlags = 0;
17576 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
17577 // initialexec.
17578 unsigned WrapperKind = X86ISD::Wrapper;
17579 if (model == TLSModel::LocalExec) {
17580 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
17581 } else if (model == TLSModel::InitialExec) {
17582 if (is64Bit) {
17583 OperandFlags = X86II::MO_GOTTPOFF;
17584 WrapperKind = X86ISD::WrapperRIP;
17585 } else {
17586 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
17588 } else {
17589 llvm_unreachable("Unexpected model");
17592 // emit "addl x@ntpoff,%eax" (local exec)
17593 // or "addl x@indntpoff,%eax" (initial exec)
17594 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
17595 SDValue TGA =
17596 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
17597 GA->getOffset(), OperandFlags);
17598 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
17600 if (model == TLSModel::InitialExec) {
17601 if (isPIC && !is64Bit) {
17602 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
17603 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
17604 Offset);
17607 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
17608 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
17611 // The address of the thread local variable is the sum of the thread
17612 // pointer and the offset of the variable.
17613 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
17616 SDValue
17617 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
17619 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
17621 if (DAG.getTarget().useEmulatedTLS())
17622 return LowerToTLSEmulatedModel(GA, DAG);
17624 const GlobalValue *GV = GA->getGlobal();
17625 auto PtrVT = getPointerTy(DAG.getDataLayout());
17626 bool PositionIndependent = isPositionIndependent();
17628 if (Subtarget.isTargetELF()) {
17629 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
17630 switch (model) {
17631 case TLSModel::GeneralDynamic:
17632 if (Subtarget.is64Bit())
17633 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
17634 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
17635 case TLSModel::LocalDynamic:
17636 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
17637 Subtarget.is64Bit());
17638 case TLSModel::InitialExec:
17639 case TLSModel::LocalExec:
17640 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
17641 PositionIndependent);
17643 llvm_unreachable("Unknown TLS model.");
17646 if (Subtarget.isTargetDarwin()) {
17647 // Darwin only has one model of TLS. Lower to that.
17648 unsigned char OpFlag = 0;
17649 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
17650 X86ISD::WrapperRIP : X86ISD::Wrapper;
17652 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17653 // global base reg.
17654 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
17655 if (PIC32)
17656 OpFlag = X86II::MO_TLVP_PIC_BASE;
17657 else
17658 OpFlag = X86II::MO_TLVP;
17659 SDLoc DL(Op);
17660 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
17661 GA->getValueType(0),
17662 GA->getOffset(), OpFlag);
17663 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
17665 // With PIC32, the address is actually $g + Offset.
17666 if (PIC32)
17667 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
17668 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
17669 Offset);
17671 // Lowering the machine ISD node will make sure everything ends up in the
17672 // right location.
17673 SDValue Chain = DAG.getEntryNode();
17674 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17675 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
17676 SDValue Args[] = { Chain, Offset };
17677 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
17678 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
17679 DAG.getIntPtrConstant(0, DL, true),
17680 Chain.getValue(1), DL);
17682 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
17683 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17684 MFI.setAdjustsStack(true);
17686 // And our return value (tls address) is in the standard call return value
17687 // location.
17688 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
17689 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
17692 if (Subtarget.isTargetKnownWindowsMSVC() ||
17693 Subtarget.isTargetWindowsItanium() ||
17694 Subtarget.isTargetWindowsGNU()) {
17695 // Just use the implicit TLS architecture
17696 // Need to generate something similar to:
17697 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
17698 // ; from TEB
17699 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
17700 // mov rcx, qword [rdx+rcx*8]
17701 // mov eax, .tls$:tlsvar
17702 // [rax+rcx] contains the address
17703 // Windows 64bit: gs:0x58
17704 // Windows 32bit: fs:__tls_array
17706 SDLoc dl(GA);
17707 SDValue Chain = DAG.getEntryNode();
17709 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
17710 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
17711 // use its literal value of 0x2C.
17712 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
17713 ? Type::getInt8PtrTy(*DAG.getContext(),
17714 256)
17715 : Type::getInt32PtrTy(*DAG.getContext(),
17716 257));
17718 SDValue TlsArray = Subtarget.is64Bit()
17719 ? DAG.getIntPtrConstant(0x58, dl)
17720 : (Subtarget.isTargetWindowsGNU()
17721 ? DAG.getIntPtrConstant(0x2C, dl)
17722 : DAG.getExternalSymbol("_tls_array", PtrVT));
17724 SDValue ThreadPointer =
17725 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
17727 SDValue res;
17728 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
17729 res = ThreadPointer;
17730 } else {
17731 // Load the _tls_index variable
17732 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
17733 if (Subtarget.is64Bit())
17734 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
17735 MachinePointerInfo(), MVT::i32);
17736 else
17737 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
17739 auto &DL = DAG.getDataLayout();
17740 SDValue Scale =
17741 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
17742 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
17744 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
17747 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
17749 // Get the offset of start of .tls section
17750 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17751 GA->getValueType(0),
17752 GA->getOffset(), X86II::MO_SECREL);
17753 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
17755 // The address of the thread local variable is the sum of the thread
17756 // pointer and the offset of the variable.
17757 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
17760 llvm_unreachable("TLS not implemented for this target.");
17763 /// Lower SRA_PARTS and friends, which return two i32 values
17764 /// and take a 2 x i32 value to shift plus a shift amount.
17765 /// TODO: Can this be moved to general expansion code?
17766 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
17767 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
17768 MVT VT = Op.getSimpleValueType();
17769 unsigned VTBits = VT.getSizeInBits();
17770 SDLoc dl(Op);
17771 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
17772 SDValue ShOpLo = Op.getOperand(0);
17773 SDValue ShOpHi = Op.getOperand(1);
17774 SDValue ShAmt = Op.getOperand(2);
17775 // ISD::FSHL and ISD::FSHR have defined overflow behavior, but the ISD::SHL and
17776 // ISD::SRA/SRL nodes don't. Insert an AND to be safe; it's optimized away
17777 // during isel.
17778 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
17779 DAG.getConstant(VTBits - 1, dl, MVT::i8));
17780 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
17781 DAG.getConstant(VTBits - 1, dl, MVT::i8))
17782 : DAG.getConstant(0, dl, VT);
17784 SDValue Tmp2, Tmp3;
17785 if (Op.getOpcode() == ISD::SHL_PARTS) {
17786 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
17787 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
17788 } else {
17789 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
17790 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
17793 // If the shift amount is larger than or equal to the width of a part we can't
17794 // rely on the results of shld/shrd. Insert a test and select the appropriate
17795 // values for large shift amounts.
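// Worked example (SHL_PARTS, i32 parts, ShAmt = 40): Tmp2 = fshl(Hi, Lo, 40) =
// (Hi << 8) | (Lo >> 24) and Tmp3 = Lo << 8. Since (40 & 32) != 0, the selects
// below pick Hi = Tmp3 = Lo << 8 and Lo = Tmp1 = 0, which is exactly
// (Hi:Lo) << 40.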
17796 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
17797 DAG.getConstant(VTBits, dl, MVT::i8));
17798 SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
17799 DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
17801 SDValue Hi, Lo;
17802 if (Op.getOpcode() == ISD::SHL_PARTS) {
17803 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
17804 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
17805 } else {
17806 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
17807 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
17810 return DAG.getMergeValues({ Lo, Hi }, dl);
17813 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
17814 SelectionDAG &DAG) {
17815 MVT VT = Op.getSimpleValueType();
17816 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
17817 "Unexpected funnel shift opcode!");
17819 SDLoc DL(Op);
17820 SDValue Op0 = Op.getOperand(0);
17821 SDValue Op1 = Op.getOperand(1);
17822 SDValue Amt = Op.getOperand(2);
17824 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
17826 if (VT.isVector()) {
17827 assert(Subtarget.hasVBMI2() && "Expected VBMI2");
17829 if (IsFSHR)
17830 std::swap(Op0, Op1);
17832 APInt APIntShiftAmt;
17833 if (isConstantSplat(Amt, APIntShiftAmt)) {
17834 uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
17835 return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
17836 Op0, Op1, DAG.getConstant(ShiftAmt, DL, MVT::i8));
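// For the constant-splat case above: e.g. a v8i16 funnel shift with a splat
// amount of 20 becomes VSHLD/VSHRD with an immediate of 20 % 16 == 4, matching
// the modulo semantics that ISD::FSHL/FSHR define for out-of-range amounts.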
17839 return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
17840 Op0, Op1, Amt);
17843 assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
17844 "Unexpected funnel shift type!");
17846 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
17847 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
17848 if (!OptForSize && Subtarget.isSHLDSlow())
17849 return SDValue();
17851 if (IsFSHR)
17852 std::swap(Op0, Op1);
17854 // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
17855 if (VT == MVT::i16)
17856 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
17857 DAG.getConstant(15, DL, Amt.getValueType()));
17859 unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
17860 return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
17863 // Try to use a packed vector operation to handle i64 on 32-bit targets when
17864 // AVX512DQ is enabled.
17865 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
17866 const X86Subtarget &Subtarget) {
17867 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
17868 Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
17869 SDValue Src = Op.getOperand(0);
17870 MVT SrcVT = Src.getSimpleValueType();
17871 MVT VT = Op.getSimpleValueType();
17873 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
17874 (VT != MVT::f32 && VT != MVT::f64))
17875 return SDValue();
17877 // Pack the i64 into a vector, do the operation and extract.
17879 // Use at least a 256-bit vector so the result is a full 128 bits in the f32 case.
17880 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
17881 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
17882 MVT VecVT = MVT::getVectorVT(VT, NumElts);
17884 SDLoc dl(Op);
17885 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
17886 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
17887 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
17888 DAG.getIntPtrConstant(0, dl));
17891 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
17892 const X86Subtarget &Subtarget) {
17893 switch (Opcode) {
17894 case ISD::SINT_TO_FP:
17895 // TODO: Handle wider types with AVX/AVX512.
17896 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
17897 return false;
17898 // CVTDQ2PS or (V)CVTDQ2PD
17899 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
17901 case ISD::UINT_TO_FP:
17902 // TODO: Handle wider types and i64 elements.
17903 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
17904 return false;
17905 // VCVTUDQ2PS or VCVTUDQ2PD
17906 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
17908 default:
17909 return false;
17913 /// Given a scalar cast operation that is extracted from a vector, try to
17914 /// vectorize the cast op followed by extraction. This will avoid an expensive
17915 /// round-trip between XMM and GPR.
17916 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
17917 const X86Subtarget &Subtarget) {
17918 // TODO: This could be enhanced to handle smaller integer types by peeking
17919 // through an extend.
17920 SDValue Extract = Cast.getOperand(0);
17921 MVT DestVT = Cast.getSimpleValueType();
17922 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
17923 !isa<ConstantSDNode>(Extract.getOperand(1)))
17924 return SDValue();
17926 // See if we have a 128-bit vector cast op for this type of cast.
17927 SDValue VecOp = Extract.getOperand(0);
17928 MVT FromVT = VecOp.getSimpleValueType();
17929 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
17930 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
17931 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
17932 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
17933 return SDValue();
17935 // If we are extracting from a non-zero element, first shuffle the source
17936 // vector to allow extracting from element zero.
17937 SDLoc DL(Cast);
17938 if (!isNullConstant(Extract.getOperand(1))) {
17939 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
17940 Mask[0] = Extract.getConstantOperandVal(1);
17941 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
17943 // If the source vector is wider than 128-bits, extract the low part. Do not
17944 // create an unnecessarily wide vector cast op.
17945 if (FromVT != Vec128VT)
17946 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
17948 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
17949 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
17950 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
17951 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
17952 DAG.getIntPtrConstant(0, DL));
17955 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
17956 SelectionDAG &DAG) const {
17957 SDValue Src = Op.getOperand(0);
17958 MVT SrcVT = Src.getSimpleValueType();
17959 MVT VT = Op.getSimpleValueType();
17960 SDLoc dl(Op);
17962 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
17963 return Extract;
17965 if (SrcVT.isVector()) {
17966 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
17967 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
17968 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
17969 DAG.getUNDEF(SrcVT)));
17971 return SDValue();
17974 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
17975 "Unknown SINT_TO_FP to lower!");
17977 // These are really Legal; return the operand so the caller accepts it as
17978 // Legal.
17979 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
17980 return Op;
17981 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
17982 return Op;
17984 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
17985 return V;
17987 SDValue ValueToStore = Op.getOperand(0);
17988 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
17989 !Subtarget.is64Bit())
17990 // Bitcasting to f64 here allows us to do a single 64-bit store from
17991 // an SSE register, avoiding the store forwarding penalty that would come
17992 // with two 32-bit stores.
17993 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
17995 unsigned Size = SrcVT.getSizeInBits()/8;
17996 MachineFunction &MF = DAG.getMachineFunction();
17997 auto PtrVT = getPointerTy(MF.getDataLayout());
17998 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
17999 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18000 SDValue Chain = DAG.getStore(
18001 DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18002 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18003 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18006 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18007 SDValue StackSlot,
18008 SelectionDAG &DAG) const {
18009 // Build the FILD
18010 SDLoc DL(Op);
18011 SDVTList Tys;
18012 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18013 if (useSSE)
18014 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18015 else
18016 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18018 unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18020 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18021 MachineMemOperand *LoadMMO;
18022 if (FI) {
18023 int SSFI = FI->getIndex();
18024 LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18025 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18026 MachineMemOperand::MOLoad, ByteSize, ByteSize);
18027 } else {
18028 LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18029 StackSlot = StackSlot.getOperand(1);
18031 SDValue FILDOps[] = {Chain, StackSlot};
18032 SDValue Result =
18033 DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18034 Tys, FILDOps, SrcVT, LoadMMO);
18036 if (useSSE) {
18037 Chain = Result.getValue(1);
18038 SDValue InFlag = Result.getValue(2);
18040 // FIXME: Currently the FST is glued to the FILD_FLAG. This
18041 // shouldn't be necessary except that RFP cannot be live across
18042 // multiple blocks. When stackifier is fixed, they can be uncoupled.
18043 MachineFunction &MF = DAG.getMachineFunction();
18044 unsigned SSFISize = Op.getValueSizeInBits() / 8;
18045 int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18046 auto PtrVT = getPointerTy(MF.getDataLayout());
18047 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18048 Tys = DAG.getVTList(MVT::Other);
18049 SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18050 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18051 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18052 MachineMemOperand::MOStore, SSFISize, SSFISize);
18054 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18055 Op.getValueType(), StoreMMO);
18056 Result = DAG.getLoad(
18057 Op.getValueType(), DL, Chain, StackSlot,
18058 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18061 return Result;
18064 /// 64-bit unsigned integer to double expansion.
18065 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18066 const X86Subtarget &Subtarget) {
18067 // This algorithm is not obvious. Here is what we're trying to output:
18068 /*
18069 movq %rax, %xmm0
18070 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18071 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18072 #ifdef __SSE3__
18073 haddpd %xmm0, %xmm0
18074 #else
18075 pshufd $0x4e, %xmm0, %xmm1
18076 addpd %xmm1, %xmm0
18077 #endif
18078 */
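// For illustration of the math: with the input split as x = Hi*2^32 + Lo, the
// unpack against c0 forms the doubles (2^52 + Lo) and (2^84 + Hi*2^32); after
// subtracting c1 = { 2^52, 2^84 } the two lanes hold Lo and Hi*2^32 exactly,
// and the horizontal add produces Lo + Hi*2^32 == x (rounded once, to double).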
18080 SDLoc dl(Op);
18081 LLVMContext *Context = DAG.getContext();
18083 // Build some magic constants.
18084 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18085 Constant *C0 = ConstantDataVector::get(*Context, CV0);
18086 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18087 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18089 SmallVector<Constant*,2> CV1;
18090 CV1.push_back(
18091 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18092 APInt(64, 0x4330000000000000ULL))));
18093 CV1.push_back(
18094 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18095 APInt(64, 0x4530000000000000ULL))));
18096 Constant *C1 = ConstantVector::get(CV1);
18097 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
18099 // Load the 64-bit value into an XMM register.
18100 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
18101 Op.getOperand(0));
18102 SDValue CLod0 =
18103 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18104 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18105 /* Alignment = */ 16);
18106 SDValue Unpck1 =
18107 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18109 SDValue CLod1 =
18110 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18111 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18112 /* Alignment = */ 16);
18113 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18114 // TODO: Are there any fast-math-flags to propagate here?
18115 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18116 SDValue Result;
18118 if (Subtarget.hasSSE3()) {
18119 // FIXME: The 'haddpd' instruction may be slower than 'shuffle + addsd'.
18120 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18121 } else {
18122 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18123 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
18126 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
18127 DAG.getIntPtrConstant(0, dl));
18130 /// 32-bit unsigned integer to float expansion.
18131 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
18132 const X86Subtarget &Subtarget) {
18133 SDLoc dl(Op);
18134 // FP constant to bias correct the final result.
18135 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
18136 MVT::f64);
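// 0x4330000000000000 is 2^52 as a double. OR'ing the zero-extended 32-bit
// value into its low mantissa bits below produces exactly (2^52 + x), so
// subtracting the bias again recovers x converted to double with no rounding.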
18138 // Load the 32-bit value into an XMM register.
18139 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
18140 Op.getOperand(0));
18142 // Zero out the upper parts of the register.
18143 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
18145 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18146 DAG.getBitcast(MVT::v2f64, Load),
18147 DAG.getIntPtrConstant(0, dl));
18149 // Or the load with the bias.
18150 SDValue Or = DAG.getNode(
18151 ISD::OR, dl, MVT::v2i64,
18152 DAG.getBitcast(MVT::v2i64,
18153 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
18154 DAG.getBitcast(MVT::v2i64,
18155 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
18156 Or =
18157 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18158 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
18160 // Subtract the bias.
18161 // TODO: Are there any fast-math-flags to propagate here?
18162 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
18164 // Handle final rounding.
18165 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
18168 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
18169 const X86Subtarget &Subtarget,
18170 const SDLoc &DL) {
18171 if (Op.getSimpleValueType() != MVT::v2f64)
18172 return SDValue();
18174 SDValue N0 = Op.getOperand(0);
18175 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
18177 // Legalize to v4i32 type.
18178 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
18179 DAG.getUNDEF(MVT::v2i32));
18181 if (Subtarget.hasAVX512())
18182 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
18184 // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
18185 // but using v2i32 to v2f64 with X86ISD::CVTSI2P.
18186 SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
18187 SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
18189 // Two to the power of half-word-size.
18190 SDValue TWOHW = DAG.getConstantFP(1 << 16, DL, MVT::v2f64);
18192 // Clear upper part of LO, lower HI.
18193 SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
18194 SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
18196 SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
18197 fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
18198 SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
18200 // Add the two halves.
18201 return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
18204 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
18205 const X86Subtarget &Subtarget) {
18206 // The algorithm is the following:
18207 // #ifdef __SSE4_1__
18208 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18209 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18210 // (uint4) 0x53000000, 0xaa);
18211 // #else
18212 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18213 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18214 // #endif
18215 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18216 // return (float4) lo + fhi;
18218 // We shouldn't use it when unsafe-fp-math is enabled though: we might later
18219 // reassociate the two FADDs, and if we do that, the algorithm fails
18220 // spectacularly (PR24512).
18221 // FIXME: If we ever have some kind of Machine FMF, this should be marked
18222 // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
18223 // there's also the MachineCombiner reassociations happening on Machine IR.
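// A quick check of the math: interpreted as floats, lo == 2^23 + (v & 0xffff)
// and hi == 2^39 + (v >> 16)*2^16, so fhi == (v >> 16)*2^16 - 2^23 and
// lo + fhi == v, with a single rounding step in the final add.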
18224 if (DAG.getTarget().Options.UnsafeFPMath)
18225 return SDValue();
18227 SDLoc DL(Op);
18228 SDValue V = Op->getOperand(0);
18229 MVT VecIntVT = V.getSimpleValueType();
18230 bool Is128 = VecIntVT == MVT::v4i32;
18231 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
18232 // If we convert to something else than the supported type, e.g., to v4f64,
18233 // abort early.
18234 if (VecFloatVT != Op->getSimpleValueType(0))
18235 return SDValue();
18237 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
18238 "Unsupported custom type");
18240 // In the #ifdef/#else code, we have in common:
18241 // - The vector of constants:
18242 // -- 0x4b000000
18243 // -- 0x53000000
18244 // - A shift:
18245 // -- v >> 16
18247 // Create the splat vector for 0x4b000000.
18248 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
18249 // Create the splat vector for 0x53000000.
18250 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
18252 // Create the right shift.
18253 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
18254 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
18256 SDValue Low, High;
18257 if (Subtarget.hasSSE41()) {
18258 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
18259 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18260 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
18261 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
18262 // Low will be bitcasted right away, so do not bother bitcasting back to its
18263 // original type.
18264 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
18265 VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
18266 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18267 // (uint4) 0x53000000, 0xaa);
18268 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
18269 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
18270 // High will be bitcasted right away, so do not bother bitcasting back to
18271 // its original type.
18272 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
18273 VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
18274 } else {
18275 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
18276 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18277 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
18278 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
18280 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18281 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
18284 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
18285 SDValue VecCstFAdd = DAG.getConstantFP(
18286 APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
18288 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18289 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
18290 // TODO: Are there any fast-math-flags to propagate here?
18291 SDValue FHigh =
18292 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
18293 // return (float4) lo + fhi;
18294 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
18295 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
18298 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
18299 const X86Subtarget &Subtarget) {
18300 SDValue N0 = Op.getOperand(0);
18301 MVT SrcVT = N0.getSimpleValueType();
18302 SDLoc dl(Op);
18304 switch (SrcVT.SimpleTy) {
18305 default:
18306 llvm_unreachable("Custom UINT_TO_FP is not supported!");
18307 case MVT::v2i32:
18308 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
18309 case MVT::v4i32:
18310 case MVT::v8i32:
18311 assert(!Subtarget.hasAVX512());
18312 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
18316 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
18317 SelectionDAG &DAG) const {
18318 SDValue N0 = Op.getOperand(0);
18319 SDLoc dl(Op);
18320 auto PtrVT = getPointerTy(DAG.getDataLayout());
18322 if (Op.getSimpleValueType().isVector())
18323 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
18325 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18326 return Extract;
18328 MVT SrcVT = N0.getSimpleValueType();
18329 MVT DstVT = Op.getSimpleValueType();
18331 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
18332 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
18333 // Conversions from unsigned i32 to f32/f64 are legal,
18334 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
18335 return Op;
18338 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18339 return V;
18341 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
18342 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
18343 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
18344 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
18345 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
18346 return SDValue();
18348 // Make a 64-bit buffer, and use it to build an FILD.
18349 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
18350 if (SrcVT == MVT::i32) {
18351 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
18352 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
18353 StackSlot, MachinePointerInfo());
18354 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
18355 OffsetSlot, MachinePointerInfo());
18356 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
18357 return Fild;
18360 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
18361 SDValue ValueToStore = Op.getOperand(0);
18362 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
18363 // Bitcasting to f64 here allows us to do a single 64-bit store from
18364 // an SSE register, avoiding the store forwarding penalty that would come
18365 // with two 32-bit stores.
18366 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18367 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18368 MachinePointerInfo());
18369 // For i64 source, we need to add the appropriate power of 2 if the input
18370 // was negative. This is the same as the optimization in
18371 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
18372 // we must be careful to do the computation in x87 extended precision, not
18373 // in SSE. (The generic code can't know it's OK to do this, or how to.)
18374 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
18375 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
18376 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18377 MachineMemOperand::MOLoad, 8, 8);
18379 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
18380 SDValue Ops[] = { Store, StackSlot };
18381 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
18382 MVT::i64, MMO);
18384 APInt FF(32, 0x5F800000ULL);
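// 0x5F800000 is 2^64 as an IEEE single. If the i64 input had its sign bit set,
// the signed FILD above produced (input - 2^64), so the fudge selected below is
// added back to correct the result; otherwise a zero offset selects 0.0f.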
18386 // Check whether the sign bit is set.
18387 SDValue SignSet = DAG.getSetCC(
18388 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
18389 Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
18391 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
18392 SDValue FudgePtr = DAG.getConstantPool(
18393 ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
18395 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
18396 SDValue Zero = DAG.getIntPtrConstant(0, dl);
18397 SDValue Four = DAG.getIntPtrConstant(4, dl);
18398 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
18399 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
18401 // Load the value out, extending it from f32 to f80.
18402 // FIXME: Avoid the extend by constructing the right constant pool?
18403 SDValue Fudge = DAG.getExtLoad(
18404 ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
18405 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
18406 /* Alignment = */ 4);
18407 // Extend everything to 80 bits to force it to be done on x87.
18408 // TODO: Are there any fast-math-flags to propagate here?
18409 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
18410 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
18411 DAG.getIntPtrConstant(0, dl));
18414 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
18415 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
18416 // just return an SDValue().
18417 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
18418 // to i16, i32 or i64, and we lower it to a legal sequence and return the
18419 // result.
18420 SDValue
18421 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
18422 bool IsSigned) const {
18423 SDLoc DL(Op);
18425 EVT DstTy = Op.getValueType();
18426 EVT TheVT = Op.getOperand(0).getValueType();
18427 auto PtrVT = getPointerTy(DAG.getDataLayout());
18429 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
18430 // f16 must be promoted before using the lowering in this routine.
18431 // fp128 does not use this lowering.
18432 return SDValue();
18435 // If using FIST to compute an unsigned i64, we'll need some fixup
18436 // to handle values above the maximum signed i64. A FIST is always
18437 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
18438 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
18440 if (!IsSigned && DstTy != MVT::i64) {
18441 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
18442 // The low 32 bits of the fist result will have the correct uint32 result.
18443 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
18444 DstTy = MVT::i64;
18447 assert(DstTy.getSimpleVT() <= MVT::i64 &&
18448 DstTy.getSimpleVT() >= MVT::i16 &&
18449 "Unknown FP_TO_INT to lower!");
18451 // We lower FP->int64 into FISTP64 followed by a load from a temporary
18452 // stack slot.
18453 MachineFunction &MF = DAG.getMachineFunction();
18454 unsigned MemSize = DstTy.getStoreSize();
18455 int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
18456 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18458 SDValue Chain = DAG.getEntryNode();
18459 SDValue Value = Op.getOperand(0);
18460 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
18462 if (UnsignedFixup) {
18464 // Conversion to unsigned i64 is implemented with a select,
18465 // depending on whether the source value fits in the range
18466 // of a signed i64. Let Thresh be the FP equivalent of
18467 // 0x8000000000000000ULL.
18469 // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
18470 // FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
18471 // Fist-to-mem64 FistSrc
18472 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
18473 // to XOR'ing the high 32 bits with Adjust.
18475 // Being a power of 2, Thresh is exactly representable in all FP formats.
18476 // For X87 we'd like to use the smallest FP type for this constant, but
18477 // for DAG type consistency we have to match the FP operand type.
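// Example: for Value == 2^63 + 2048 (representable in f64), Adjust becomes
// 0x8000000000000000 and FistSrc == 2048; the FIST then stores 2048 and the
// final XOR with Adjust yields 0x8000000000000800 == 2^63 + 2048.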
18479 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
18480 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
18481 bool LosesInfo = false;
18482 if (TheVT == MVT::f64)
18483 // The rounding mode is irrelevant as the conversion should be exact.
18484 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
18485 &LosesInfo);
18486 else if (TheVT == MVT::f80)
18487 Status = Thresh.convert(APFloat::x87DoubleExtended(),
18488 APFloat::rmNearestTiesToEven, &LosesInfo);
18490 assert(Status == APFloat::opOK && !LosesInfo &&
18491 "FP conversion should have been exact");
18493 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
18495 SDValue Cmp = DAG.getSetCC(DL,
18496 getSetCCResultType(DAG.getDataLayout(),
18497 *DAG.getContext(), TheVT),
18498 Value, ThreshVal, ISD::SETLT);
18499 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
18500 DAG.getConstant(0, DL, MVT::i64),
18501 DAG.getConstant(APInt::getSignMask(64),
18502 DL, MVT::i64));
18503 SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
18504 Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
18505 *DAG.getContext(), TheVT),
18506 Value, ThreshVal, ISD::SETLT);
18507 Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
18510 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
18512 // FIXME This causes a redundant load/store if the SSE-class value is already
18513 // in memory, such as if it is on the callstack.
18514 if (isScalarFPTypeInSSEReg(TheVT)) {
18515 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
18516 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
18517 SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
18518 SDValue Ops[] = { Chain, StackSlot };
18520 unsigned FLDSize = TheVT.getStoreSize();
18521 assert(FLDSize <= MemSize && "Stack slot not big enough");
18522 MachineMemOperand *MMO = MF.getMachineMemOperand(
18523 MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
18524 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
18525 Chain = Value.getValue(1);
18528 // Build the FP_TO_INT*_IN_MEM
18529 MachineMemOperand *MMO = MF.getMachineMemOperand(
18530 MPI, MachineMemOperand::MOStore, MemSize, MemSize);
18531 SDValue Ops[] = { Chain, Value, StackSlot };
18532 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
18533 DAG.getVTList(MVT::Other),
18534 Ops, DstTy, MMO);
18536 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
18538 // If we need an unsigned fixup, XOR the result with adjust.
18539 if (UnsignedFixup)
18540 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
18542 return Res;
18545 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
18546 const X86Subtarget &Subtarget) {
18547 MVT VT = Op.getSimpleValueType();
18548 SDValue In = Op.getOperand(0);
18549 MVT InVT = In.getSimpleValueType();
18550 SDLoc dl(Op);
18551 unsigned Opc = Op.getOpcode();
18553 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
18554 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
18555 "Unexpected extension opcode");
18556 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
18557 "Expected same number of elements");
18558 assert((VT.getVectorElementType() == MVT::i16 ||
18559 VT.getVectorElementType() == MVT::i32 ||
18560 VT.getVectorElementType() == MVT::i64) &&
18561 "Unexpected element type");
18562 assert((InVT.getVectorElementType() == MVT::i8 ||
18563 InVT.getVectorElementType() == MVT::i16 ||
18564 InVT.getVectorElementType() == MVT::i32) &&
18565 "Unexpected element type");
18567 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
18569 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
18570 if (InVT == MVT::v8i8) {
18571 if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
18572 return SDValue();
18574 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
18575 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
18576 return DAG.getNode(ExtendInVecOpc, dl, VT, In);
18579 if (Subtarget.hasInt256())
18580 return Op;
18582 // Optimize vectors in AVX mode:
18584 // v8i16 -> v8i32
18585 // Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
18586 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
18587 // Concat upper and lower parts.
18589 // v4i32 -> v4i64
18590 // Use vpmovzdq for 4 lower elements v4i32 -> v2i64.
18591 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
18592 // Concat upper and lower parts.
18595 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
18596 VT.getVectorNumElements() / 2);
18598 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
18600 // Short-circuit if we can determine that each 128-bit half is the same value.
18601 // Otherwise, this is difficult to match and optimize.
18602 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
18603 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
18604 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
18606 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
18607 SDValue Undef = DAG.getUNDEF(InVT);
18608 bool NeedZero = Opc == ISD::ZERO_EXTEND;
18609 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
18610 OpHi = DAG.getBitcast(HalfVT, OpHi);
18612 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
18615 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
18616 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
18617 const SDLoc &dl, SelectionDAG &DAG) {
18618 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
18619 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
18620 DAG.getIntPtrConstant(0, dl));
18621 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
18622 DAG.getIntPtrConstant(8, dl));
18623 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
18624 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
18625 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
18626 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
18629 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
18630 const X86Subtarget &Subtarget,
18631 SelectionDAG &DAG) {
18632 MVT VT = Op->getSimpleValueType(0);
18633 SDValue In = Op->getOperand(0);
18634 MVT InVT = In.getSimpleValueType();
18635 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
18636 SDLoc DL(Op);
18637 unsigned NumElts = VT.getVectorNumElements();
18639 // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
18640 // avoids a constant pool load.
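  // e.g. zext v8i1 -> v8i32 becomes (srl (sext v8i1 -> v8i32), 31): the
  // sign_extend yields all-ones/all-zeros lanes and the logical shift right
  // reduces them to 1/0.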
18641 if (VT.getVectorElementType() != MVT::i8) {
18642 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
18643 return DAG.getNode(ISD::SRL, DL, VT, Extend,
18644 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
18647 // Extend VT if BWI is not supported.
18648 MVT ExtVT = VT;
18649 if (!Subtarget.hasBWI()) {
18650 // If v16i32 is to be avoided, we'll need to split and concatenate.
18651 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
18652 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
18654 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
18657 // Widen to 512-bits if VLX is not supported.
18658 MVT WideVT = ExtVT;
18659 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
18660 NumElts *= 512 / ExtVT.getSizeInBits();
18661 InVT = MVT::getVectorVT(MVT::i1, NumElts);
18662 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
18663 In, DAG.getIntPtrConstant(0, DL));
18664 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
18665 NumElts);
18668 SDValue One = DAG.getConstant(1, DL, WideVT);
18669 SDValue Zero = DAG.getConstant(0, DL, WideVT);
18671 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
18673 // Truncate if we had to extend above.
18674 if (VT != ExtVT) {
18675 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
18676 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
18679 // Extract back to 128/256-bit if we widened.
18680 if (WideVT != VT)
18681 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
18682 DAG.getIntPtrConstant(0, DL));
18684 return SelectedVal;
18687 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
18688 SelectionDAG &DAG) {
18689 SDValue In = Op.getOperand(0);
18690 MVT SVT = In.getSimpleValueType();
18692 if (SVT.getVectorElementType() == MVT::i1)
18693 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
18695 assert(Subtarget.hasAVX() && "Expected AVX support");
18696 return LowerAVXExtend(Op, DAG, Subtarget);
18699 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
18700 /// It makes use of the fact that vectors with enough leading sign/zero bits
18701 /// prevent the PACKSS/PACKUS from saturating the results.
18702 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
18703 /// within each 128-bit lane.
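/// For example, a 256-bit v8i32 source truncating to v8i16 is lowered as
/// PACK(lo128(In), hi128(In)); wider sources recurse on their halves and
/// concatenate before packing again.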
18704 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
18705 const SDLoc &DL, SelectionDAG &DAG,
18706 const X86Subtarget &Subtarget) {
18707 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
18708 "Unexpected PACK opcode");
18709 assert(DstVT.isVector() && "VT not a vector?");
18711 // Requires SSE2 but AVX512 has fast vector truncate.
18712 if (!Subtarget.hasSSE2())
18713 return SDValue();
18715 EVT SrcVT = In.getValueType();
18717 // No truncation required, we might get here due to recursive calls.
18718 if (SrcVT == DstVT)
18719 return In;
18721 // We only support vector truncation to 64bits or greater from a
18722 // 128bits or greater source.
18723 unsigned DstSizeInBits = DstVT.getSizeInBits();
18724 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
18725 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
18726 return SDValue();
18728 unsigned NumElems = SrcVT.getVectorNumElements();
18729 if (!isPowerOf2_32(NumElems))
18730 return SDValue();
18732 LLVMContext &Ctx = *DAG.getContext();
18733 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
18734 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
18736 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
18738 // Pack to the largest type possible:
18739 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
18740 EVT InVT = MVT::i16, OutVT = MVT::i8;
18741 if (SrcVT.getScalarSizeInBits() > 16 &&
18742 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
18743 InVT = MVT::i32;
18744 OutVT = MVT::i16;
18747 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
18748 if (SrcVT.is128BitVector()) {
18749 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
18750 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
18751 In = DAG.getBitcast(InVT, In);
18752 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
18753 Res = extractSubVector(Res, 0, DAG, DL, 64);
18754 return DAG.getBitcast(DstVT, Res);
18757 // Extract lower/upper subvectors.
18758 unsigned NumSubElts = NumElems / 2;
18759 SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
18760 SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
18762 unsigned SubSizeInBits = SrcSizeInBits / 2;
18763 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
18764 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
18766 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
18767 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
18768 Lo = DAG.getBitcast(InVT, Lo);
18769 Hi = DAG.getBitcast(InVT, Hi);
18770 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
18771 return DAG.getBitcast(DstVT, Res);
18774 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
18775 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
18776 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
18777 Lo = DAG.getBitcast(InVT, Lo);
18778 Hi = DAG.getBitcast(InVT, Hi);
18779 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
18781 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
18782 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
18783 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
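    // e.g. with i16 output elements Scale is 4, so { 0, 2, 1, 3 } becomes
    // { 0..3, 8..11, 4..7, 12..15 }: the same v4i64-style swap of the two
    // middle 64-bit chunks, expressed directly on the i16 elements.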
18784 SmallVector<int, 64> Mask;
18785 int Scale = 64 / OutVT.getScalarSizeInBits();
18786 scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
18787 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
18789 if (DstVT.is256BitVector())
18790 return DAG.getBitcast(DstVT, Res);
18792 // If 512bit -> 128bit truncate another stage.
18793 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
18794 Res = DAG.getBitcast(PackedVT, Res);
18795 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
18798 // Recursively pack lower/upper subvectors, concat result and pack again.
18799 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
18800 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
18801 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
18802 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
18804 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
18805 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
18806 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
18809 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
18810 const X86Subtarget &Subtarget) {
18812 SDLoc DL(Op);
18813 MVT VT = Op.getSimpleValueType();
18814 SDValue In = Op.getOperand(0);
18815 MVT InVT = In.getSimpleValueType();
18817 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
18819 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
18820 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
18821 if (InVT.getScalarSizeInBits() <= 16) {
18822 if (Subtarget.hasBWI()) {
18823 // legal, will go to VPMOVB2M, VPMOVW2M
18824 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
18825 // We need to shift to get the lsb into sign position.
18826 // Shift packed bytes not supported natively, bitcast to word
18827 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
18828 In = DAG.getNode(ISD::SHL, DL, ExtVT,
18829 DAG.getBitcast(ExtVT, In),
18830 DAG.getConstant(ShiftInx, DL, ExtVT));
18831 In = DAG.getBitcast(InVT, In);
18833 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
18834 In, ISD::SETGT);
18836 // Use TESTD/Q, extended vector to packed dword/qword.
18837 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
18838 "Unexpected vector type.");
18839 unsigned NumElts = InVT.getVectorNumElements();
18840 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
18841 // We need to change to a wider element type that we have support for.
18842 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
18843 // For 16 element vectors we extend to v16i32 unless we are explicitly
18844 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
18845 // we need to split into two 8 element vectors which we can extend to v8i32,
18846 // truncate and concat the results. There's an additional complication if
18847 // the original type is v16i8. In that case we can't split the v16i8 so
18848 // first we pre-extend it to v16i16 which we can split to v8i16, then extend
18849 // to v8i32, truncate that to v8i1 and concat the two halves.
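    // In short, the v16i8 path is:
    //   v16i8 --sext--> v16i16 --split--> 2 x v8i16 --trunc--> 2 x v8i1
    //   --concat--> v16i1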
18850 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
18851 if (InVT == MVT::v16i8) {
18852 // First we need to sign extend up to 256-bits so we can split that.
18853 InVT = MVT::v16i16;
18854 In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
18856 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
18857 SDValue Hi = extract128BitVector(In, 8, DAG, DL);
18858 // We're split now, just emit two truncates and a concat. The two
18859 // truncates will trigger legalization to come back to this function.
18860 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
18861 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
18862 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
18864 // We either have 8 elements or we're allowed to use 512-bit vectors.
18865 // If we have VLX, we want to use the narrowest vector that can get the
18866 // job done so we use vXi32.
18867 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
18868 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
18869 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
18870 InVT = ExtVT;
18871 ShiftInx = InVT.getScalarSizeInBits() - 1;
18874 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
18875 // We need to shift to get the lsb into sign position.
18876 In = DAG.getNode(ISD::SHL, DL, InVT, In,
18877 DAG.getConstant(ShiftInx, DL, InVT));
18879 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
18880 if (Subtarget.hasDQI())
18881 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
18882 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
18885 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
18886 SDLoc DL(Op);
18887 MVT VT = Op.getSimpleValueType();
18888 SDValue In = Op.getOperand(0);
18889 MVT InVT = In.getSimpleValueType();
18890 unsigned InNumEltBits = InVT.getScalarSizeInBits();
18892 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
18893 "Invalid TRUNCATE operation");
18895 // If called by the legalizer just return.
18896 if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
18897 return SDValue();
18899 if (VT.getVectorElementType() == MVT::i1)
18900 return LowerTruncateVecI1(Op, DAG, Subtarget);
18902 // vpmovqb/w/d, vpmovdb/w, vpmovwb
18903 if (Subtarget.hasAVX512()) {
18904 // Word to byte only under BWI. Otherwise we have to promote to v16i32
18905 // and then truncate that. But we should only do that if we haven't been
18906 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
18907 // handled by isel patterns.
18908 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
18909 Subtarget.canExtendTo512DQ())
18910 return Op;
18913 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
18914 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
18916 // Truncate with PACKUS if we are truncating a vector with leading zero bits
18917 // that extend all the way to the packed/truncated value.
18918 // Pre-SSE41 we can only use PACKUSWB.
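  // e.g. for a v8i32 -> v8i16 truncation, SSE4.1's PACKUSDW only needs the top
  // 16 bits of each element known zero; pre-SSE4.1 only PACKUSWB is available,
  // so all but the low 8 bits must be known zero.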
18919 KnownBits Known = DAG.computeKnownBits(In);
18920 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
18921 if (SDValue V =
18922 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
18923 return V;
18925 // Truncate with PACKSS if we are truncating a vector with sign-bits that
18926 // extend all the way to the packed/truncated value.
18927 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
18928 if (SDValue V =
18929 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
18930 return V;
18932 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
18933 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
18934 if (Subtarget.hasInt256()) {
18935 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
18936 In = DAG.getBitcast(MVT::v8i32, In);
18937 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
18938 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
18939 DAG.getIntPtrConstant(0, DL));
18942 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
18943 DAG.getIntPtrConstant(0, DL));
18944 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
18945 DAG.getIntPtrConstant(2, DL));
18946 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
18947 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
18948 static const int ShufMask[] = {0, 2, 4, 6};
18949 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
18952 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
18953 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
18954 if (Subtarget.hasInt256()) {
18955 In = DAG.getBitcast(MVT::v32i8, In);
18957 // The PSHUFB mask:
18958 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
18959 -1, -1, -1, -1, -1, -1, -1, -1,
18960 16, 17, 20, 21, 24, 25, 28, 29,
18961 -1, -1, -1, -1, -1, -1, -1, -1 };
18962 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
18963 In = DAG.getBitcast(MVT::v4i64, In);
18965 static const int ShufMask2[] = {0, 2, -1, -1};
18966 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
18967 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
18968 DAG.getIntPtrConstant(0, DL));
18969 return DAG.getBitcast(VT, In);
18972 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
18973 DAG.getIntPtrConstant(0, DL));
18975 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
18976 DAG.getIntPtrConstant(4, DL));
18978 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
18979 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
18981 // The PSHUFB mask:
18982 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
18983 -1, -1, -1, -1, -1, -1, -1, -1};
18985 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
18986 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
18988 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
18989 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
18991 // The MOVLHPS Mask:
18992 static const int ShufMask2[] = {0, 1, 4, 5};
18993 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
18994 return DAG.getBitcast(MVT::v8i16, res);
18997 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
18998 // Use an AND to zero upper bits for PACKUS.
18999 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
19001 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19002 DAG.getIntPtrConstant(0, DL));
19003 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19004 DAG.getIntPtrConstant(8, DL));
19005 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
19008 // Handle truncation of V256 to V128 using shuffles.
19009 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19011 assert(Subtarget.hasAVX() && "256-bit vector without AVX!");
19013 unsigned NumElems = VT.getVectorNumElements();
19014 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
19016 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
19017 // Prepare truncation shuffle mask
19018 for (unsigned i = 0; i != NumElems; ++i)
19019 MaskVec[i] = i * 2;
19020 In = DAG.getBitcast(NVT, In);
19021 SDValue V = DAG.getVectorShuffle(NVT, DL, In, In, MaskVec);
19022 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
19023 DAG.getIntPtrConstant(0, DL));
19026 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
19027 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
19028 MVT VT = Op.getSimpleValueType();
19029 SDValue Src = Op.getOperand(0);
19030 MVT SrcVT = Src.getSimpleValueType();
19031 SDLoc dl(Op);
19033 if (VT.isVector()) {
19034 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
19035 MVT ResVT = MVT::v4i32;
19036 MVT TruncVT = MVT::v4i1;
19037 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
19038 if (!IsSigned && !Subtarget.hasVLX()) {
19039 // Widen to 512-bits.
19040 ResVT = MVT::v8i32;
19041 TruncVT = MVT::v8i1;
19042 Opc = ISD::FP_TO_UINT;
19043 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
19044 DAG.getUNDEF(MVT::v8f64),
19045 Src, DAG.getIntPtrConstant(0, dl));
19047 SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
19048 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
19049 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
19050 DAG.getIntPtrConstant(0, dl));
19053 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
19054 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
19055 return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
19056 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
19057 DAG.getUNDEF(MVT::v2f32)));
19060 return SDValue();
19063 assert(!VT.isVector());
19065 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
19067 if (!IsSigned && Subtarget.hasAVX512()) {
19068 // Conversions from f32/f64 should be legal.
19069 if (UseSSEReg)
19070 return Op;
19072 // Use default expansion.
19073 if (VT == MVT::i64)
19074 return SDValue();
19077 // Promote i16 to i32 if we can use a SSE operation.
19078 if (VT == MVT::i16 && UseSSEReg) {
19079 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
19080 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
19081 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19084 // If this is an FP_TO_SINT using SSEReg we're done.
19085 if (UseSSEReg && IsSigned)
19086 return Op;
19088 // Fall back to X87.
19089 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
19090 return V;
19092 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
19095 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
19096 SDLoc DL(Op);
19097 MVT VT = Op.getSimpleValueType();
19098 SDValue In = Op.getOperand(0);
19099 MVT SVT = In.getSimpleValueType();
19101 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
19103 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
19104 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
19105 In, DAG.getUNDEF(SVT)));
19108 /// Horizontal vector math instructions may be slower than normal math with
19109 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19110 /// implementation, and likely shuffle complexity of the alternate sequence.
19111 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19112 const X86Subtarget &Subtarget) {
19113 bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
19114 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19115 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19118 /// Depending on uarch and/or optimizing for size, we might prefer to use a
19119 /// vector operation in place of the typical scalar operation.
19120 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
19121 const X86Subtarget &Subtarget) {
19122 // If both operands have other uses, this is probably not profitable.
19123 SDValue LHS = Op.getOperand(0);
19124 SDValue RHS = Op.getOperand(1);
19125 if (!LHS.hasOneUse() && !RHS.hasOneUse())
19126 return Op;
19128 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
19129 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
19130 if (IsFP && !Subtarget.hasSSE3())
19131 return Op;
19132 if (!IsFP && !Subtarget.hasSSSE3())
19133 return Op;
19135 // Extract from a common vector.
19136 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19137 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19138 LHS.getOperand(0) != RHS.getOperand(0) ||
19139 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
19140 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
19141 !shouldUseHorizontalOp(true, DAG, Subtarget))
19142 return Op;
19144 // Allow commuted 'hadd' ops.
19145 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
19146 unsigned HOpcode;
19147 switch (Op.getOpcode()) {
19148 case ISD::ADD: HOpcode = X86ISD::HADD; break;
19149 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
19150 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
19151 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
19152 default:
19153 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
19155 unsigned LExtIndex = LHS.getConstantOperandVal(1);
19156 unsigned RExtIndex = RHS.getConstantOperandVal(1);
19157 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
19158 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
19159 std::swap(LExtIndex, RExtIndex);
19161 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
19162 return Op;
19164 SDValue X = LHS.getOperand(0);
19165 EVT VecVT = X.getValueType();
19166 unsigned BitWidth = VecVT.getSizeInBits();
19167 unsigned NumLanes = BitWidth / 128;
19168 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
19169 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
19170 "Not expecting illegal vector widths here");
19172 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
19173 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
19174 SDLoc DL(Op);
19175 if (BitWidth == 256 || BitWidth == 512) {
19176 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
19177 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
19178 LExtIndex %= NumEltsPerLane;
19181 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
19182 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
19183 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
19184 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
19185 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
19186 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
19187 DAG.getIntPtrConstant(LExtIndex / 2, DL));
19190 /// Depending on uarch and/or optimizing for size, we might prefer to use a
19191 /// vector operation in place of the typical scalar operation.
19192 static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
19193 const X86Subtarget &Subtarget) {
19194 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
19195 "Only expecting float/double");
19196 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
19199 /// The only differences between FABS and FNEG are the mask and the logic op.
19200 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
19201 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
19202 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
19203 "Wrong opcode for lowering FABS or FNEG.");
19205 bool IsFABS = (Op.getOpcode() == ISD::FABS);
19207 // If this is a FABS and it has an FNEG user, bail out to fold the combination
19208 // into an FNABS. We'll lower the FABS after that if it is still in use.
19209 if (IsFABS)
19210 for (SDNode *User : Op->uses())
19211 if (User->getOpcode() == ISD::FNEG)
19212 return Op;
19214 SDLoc dl(Op);
19215 MVT VT = Op.getSimpleValueType();
19217 bool IsF128 = (VT == MVT::f128);
19218 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19219 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19220 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19221 "Unexpected type in LowerFABSorFNEG");
19223 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
19224 // decide if we should generate a 16-byte constant mask when we only need 4 or
19225 // 8 bytes for the scalar case.
19227 // There are no scalar bitwise logical SSE/AVX instructions, so we
19228 // generate a 16-byte vector constant and logic op even for the scalar case.
19229 // Using a 16-byte mask allows folding the load of the mask with
19230 // the logic op, so it can save (~4 bytes) on code size.
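  // e.g. a scalar f32 fabs is built as FAND with a splatted 0x7FFFFFFF v4f32
  // constant (typically selected as andps), and fneg as FXOR with a splatted
  // 0x80000000 constant (xorps), with the scalar living in lane 0.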
19231 bool IsFakeVector = !VT.isVector() && !IsF128;
19232 MVT LogicVT = VT;
19233 if (IsFakeVector)
19234 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19236 unsigned EltBits = VT.getScalarSizeInBits();
19237 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
19238 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
19239 APInt::getSignMask(EltBits);
19240 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19241 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
19243 SDValue Op0 = Op.getOperand(0);
19244 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
19245 unsigned LogicOp = IsFABS ? X86ISD::FAND :
19246 IsFNABS ? X86ISD::FOR :
19247 X86ISD::FXOR;
19248 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
19250 if (VT.isVector() || IsF128)
19251 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19253 // For the scalar case extend to a 128-bit vector, perform the logic op,
19254 // and extract the scalar result back out.
19255 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
19256 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19257 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
19258 DAG.getIntPtrConstant(0, dl));
19261 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
19262 SDValue Mag = Op.getOperand(0);
19263 SDValue Sign = Op.getOperand(1);
19264 SDLoc dl(Op);
19266 // If the sign operand is smaller, extend it first.
19267 MVT VT = Op.getSimpleValueType();
19268 if (Sign.getSimpleValueType().bitsLT(VT))
19269 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
19271 // And if it is bigger, shrink it first.
19272 if (Sign.getSimpleValueType().bitsGT(VT))
19273 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
19275 // At this point the operands and the result should have the same
19276 // type, and that won't be f80 since that is not custom lowered.
19277 bool IsF128 = (VT == MVT::f128);
19278 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19279 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19280 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19281 "Unexpected type in LowerFCOPYSIGN");
19283 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19285 // Perform all scalar logic operations as 16-byte vectors because there are no
19286 // scalar FP logic instructions in SSE.
19287 // TODO: This isn't necessary. If we used scalar types, we might avoid some
19288 // unnecessary splats, but we might miss load folding opportunities. Should
19289 // this decision be based on OptimizeForSize?
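  // e.g. copysign(double Mag, double Sign) is built as
  //   (Mag & 0x7FF...F) | (Sign & 0x800...0)
  // with both scalars treated as lane 0 of a v2f64 and the result extracted
  // back out below.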
19290 bool IsFakeVector = !VT.isVector() && !IsF128;
19291 MVT LogicVT = VT;
19292 if (IsFakeVector)
19293 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19295 // The mask constants are automatically splatted for vector types.
19296 unsigned EltSizeInBits = VT.getScalarSizeInBits();
19297 SDValue SignMask = DAG.getConstantFP(
19298 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
19299 SDValue MagMask = DAG.getConstantFP(
19300 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
19302 // First, clear all bits but the sign bit from the second operand (sign).
19303 if (IsFakeVector)
19304 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
19305 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
19307 // Next, clear the sign bit from the first operand (magnitude).
19308 // TODO: If we had general constant folding for FP logic ops, this check
19309 // wouldn't be necessary.
19310 SDValue MagBits;
19311 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
19312 APFloat APF = Op0CN->getValueAPF();
19313 APF.clearSign();
19314 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
19315 } else {
19316 // If the magnitude operand wasn't a constant, we need to AND out the sign.
19317 if (IsFakeVector)
19318 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
19319 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
19322 // OR the magnitude value with the sign bit.
19323 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
19324 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
19325 DAG.getIntPtrConstant(0, dl));
19328 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
19329 SDValue N0 = Op.getOperand(0);
19330 SDLoc dl(Op);
19331 MVT VT = Op.getSimpleValueType();
19333 MVT OpVT = N0.getSimpleValueType();
19334 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
19335 "Unexpected type for FGETSIGN");
19337 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
19338 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
19339 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
19340 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
19341 Res = DAG.getZExtOrTrunc(Res, dl, VT);
19342 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
19343 return Res;
19346 /// Helper for creating a X86ISD::SETCC node.
19347 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
19348 SelectionDAG &DAG) {
19349 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19350 DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
19353 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
19354 /// style scalarized (associative) reduction patterns.
19355 static bool matchBitOpReduction(SDValue Op, ISD::NodeType BinOp,
19356 SmallVectorImpl<SDValue> &SrcOps) {
19357 SmallVector<SDValue, 8> Opnds;
19358 DenseMap<SDValue, APInt> SrcOpMap;
19359 EVT VT = MVT::Other;
19361 // Recognize a special case where a vector is cast into a wide integer to
19362 // test all 0s.
19363 assert(Op.getOpcode() == unsigned(BinOp) &&
19364 "Unexpected bit reduction opcode");
19365 Opnds.push_back(Op.getOperand(0));
19366 Opnds.push_back(Op.getOperand(1));
19368 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
19369 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
19370 // BFS traverse all BinOp operands.
19371 if (I->getOpcode() == unsigned(BinOp)) {
19372 Opnds.push_back(I->getOperand(0));
19373 Opnds.push_back(I->getOperand(1));
19374 // Re-evaluate the number of nodes to be traversed.
19375 e += 2; // 2 more nodes (LHS and RHS) are pushed.
19376 continue;
19379 // Quit if this is not an EXTRACT_VECTOR_ELT.
19380 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19381 return false;
19383 // Quit if the index is not a constant.
19384 SDValue Idx = I->getOperand(1);
19385 if (!isa<ConstantSDNode>(Idx))
19386 return false;
19388 SDValue Src = I->getOperand(0);
19389 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
19390 if (M == SrcOpMap.end()) {
19391 VT = Src.getValueType();
19392 // Quit if not the same type.
19393 if (SrcOpMap.begin() != SrcOpMap.end() &&
19394 VT != SrcOpMap.begin()->first.getValueType())
19395 return false;
19396 unsigned NumElts = VT.getVectorNumElements();
19397 APInt EltCount = APInt::getNullValue(NumElts);
19398 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
19399 SrcOps.push_back(Src);
19401 // Quit if element already used.
19402 unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
19403 if (M->second[CIdx])
19404 return false;
19405 M->second.setBit(CIdx);
19408 // Quit if not all elements are used.
19409 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
19410 E = SrcOpMap.end();
19411 I != E; ++I) {
19412 if (!I->second.isAllOnesValue())
19413 return false;
19416 return true;
19419 // Check whether an OR'd tree is PTEST-able.
19420 static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
19421 const X86Subtarget &Subtarget,
19422 SelectionDAG &DAG, SDValue &X86CC) {
19423 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
19425 if (!Subtarget.hasSSE41() || !Op->hasOneUse())
19426 return SDValue();
19428 SmallVector<SDValue, 8> VecIns;
19429 if (!matchBitOpReduction(Op, ISD::OR, VecIns))
19430 return SDValue();
19432 // Quit if not 128/256-bit vector.
19433 EVT VT = VecIns[0].getValueType();
19434 if (!VT.is128BitVector() && !VT.is256BitVector())
19435 return SDValue();
19437 SDLoc DL(Op);
19438 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
19440 // Cast all vectors into TestVT for PTEST.
19441 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
19442 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
19444 // If more than one full vector is evaluated, OR them first before PTEST.
19445 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
19446 // Each iteration will OR 2 nodes and append the result until there is only
19447 // 1 node left, i.e. the final OR'd value of all vectors.
19448 SDValue LHS = VecIns[Slot];
19449 SDValue RHS = VecIns[Slot + 1];
19450 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
19453 X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, DL,
19454 MVT::i8);
19455 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
19458 /// return true if \c Op has a use that doesn't just read flags.
19459 static bool hasNonFlagsUse(SDValue Op) {
19460 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
19461 ++UI) {
19462 SDNode *User = *UI;
19463 unsigned UOpNo = UI.getOperandNo();
19464 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
19465 // Look past the truncate.
19466 UOpNo = User->use_begin().getOperandNo();
19467 User = *User->use_begin();
19470 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
19471 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
19472 return true;
19474 return false;
19477 /// Emit nodes that will be selected as "test Op0,Op0", or something
19478 /// equivalent.
19479 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
19480 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
19481 // CF and OF aren't always set the way we want. Determine which
19482 // of these we need.
19483 bool NeedCF = false;
19484 bool NeedOF = false;
19485 switch (X86CC) {
19486 default: break;
19487 case X86::COND_A: case X86::COND_AE:
19488 case X86::COND_B: case X86::COND_BE:
19489 NeedCF = true;
19490 break;
19491 case X86::COND_G: case X86::COND_GE:
19492 case X86::COND_L: case X86::COND_LE:
19493 case X86::COND_O: case X86::COND_NO: {
19494 // Check if we really need to set the
19495 // Overflow flag. If NoSignedWrap is present
19496 // that is not actually needed.
19497 switch (Op->getOpcode()) {
19498 case ISD::ADD:
19499 case ISD::SUB:
19500 case ISD::MUL:
19501 case ISD::SHL:
19502 if (Op.getNode()->getFlags().hasNoSignedWrap())
19503 break;
19504 LLVM_FALLTHROUGH;
19505 default:
19506 NeedOF = true;
19507 break;
19509 break;
19512 // See if we can use the EFLAGS value from the operand instead of
19513 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
19514 // we prove that the arithmetic won't overflow, we can't use OF or CF.
19515 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
19516 // Emit a CMP with 0, which is the TEST pattern.
19517 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
19518 DAG.getConstant(0, dl, Op.getValueType()));
19520 unsigned Opcode = 0;
19521 unsigned NumOperands = 0;
19523 SDValue ArithOp = Op;
19525 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
19526 // which may be the result of a CAST. We use the variable 'Op', which is the
19527 // non-casted variable when we check for possible users.
19528 switch (ArithOp.getOpcode()) {
19529 case ISD::AND:
19530 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
19531 // because a TEST instruction will be better.
19532 if (!hasNonFlagsUse(Op))
19533 break;
19535 LLVM_FALLTHROUGH;
19536 case ISD::ADD:
19537 case ISD::SUB:
19538 case ISD::OR:
19539 case ISD::XOR:
19540 // Transform to an x86-specific ALU node with flags if there is a chance of
19541 // using an RMW op or only the flags are used. Otherwise, leave
19542 // the node alone and emit a 'test' instruction.
19543 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
19544 UE = Op.getNode()->use_end(); UI != UE; ++UI)
19545 if (UI->getOpcode() != ISD::CopyToReg &&
19546 UI->getOpcode() != ISD::SETCC &&
19547 UI->getOpcode() != ISD::STORE)
19548 goto default_case;
19550 // Otherwise use a regular EFLAGS-setting instruction.
19551 switch (ArithOp.getOpcode()) {
19552 default: llvm_unreachable("unexpected operator!");
19553 case ISD::ADD: Opcode = X86ISD::ADD; break;
19554 case ISD::SUB: Opcode = X86ISD::SUB; break;
19555 case ISD::XOR: Opcode = X86ISD::XOR; break;
19556 case ISD::AND: Opcode = X86ISD::AND; break;
19557 case ISD::OR: Opcode = X86ISD::OR; break;
19560 NumOperands = 2;
19561 break;
19562 case X86ISD::ADD:
19563 case X86ISD::SUB:
19564 case X86ISD::OR:
19565 case X86ISD::XOR:
19566 case X86ISD::AND:
19567 return SDValue(Op.getNode(), 1);
19568 default:
19569 default_case:
19570 break;
19573 if (Opcode == 0) {
19574 // Emit a CMP with 0, which is the TEST pattern.
19575 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
19576 DAG.getConstant(0, dl, Op.getValueType()));
19578 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
19579 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
19581 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
19582 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
19583 return SDValue(New.getNode(), 1);
19586 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
19587 /// equivalent.
19588 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
19589 const SDLoc &dl, SelectionDAG &DAG) const {
19590 if (isNullConstant(Op1))
19591 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
19593 EVT CmpVT = Op0.getValueType();
19595 if (CmpVT.isFloatingPoint())
19596 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
19598 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
19599 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
19601 // Only promote the compare up to I32 if it is a 16 bit operation
19602 // with an immediate. 16 bit immediates are to be avoided.
19603 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
19604 !DAG.getMachineFunction().getFunction().hasMinSize()) {
19605 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
19606 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
19607 // Don't do this if the immediate can fit in 8-bits.
19608 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
19609 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
19610 unsigned ExtendOp =
19611 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
19612 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
19613 // For equality comparisons try to use SIGN_EXTEND if the input was
19614 // truncate from something with enough sign bits.
19615 if (Op0.getOpcode() == ISD::TRUNCATE) {
19616 SDValue In = Op0.getOperand(0);
19617 unsigned EffBits =
19618 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
19619 if (EffBits <= 16)
19620 ExtendOp = ISD::SIGN_EXTEND;
19621 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
19622 SDValue In = Op1.getOperand(0);
19623 unsigned EffBits =
19624 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
19625 if (EffBits <= 16)
19626 ExtendOp = ISD::SIGN_EXTEND;
19630 CmpVT = MVT::i32;
19631 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
19632 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
19635 // Use SUB instead of CMP to enable CSE between SUB and CMP.
19636 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
19637 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
19638 return Sub.getValue(1);
19641 /// Convert a comparison if required by the subtarget.
19642 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
19643 SelectionDAG &DAG) const {
19644 // If the subtarget does not support the FUCOMI instruction, floating-point
19645 // comparisons have to be converted.
19646 if (Subtarget.hasCMov() ||
19647 Cmp.getOpcode() != X86ISD::CMP ||
19648 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
19649 !Cmp.getOperand(1).getValueType().isFloatingPoint())
19650 return Cmp;
19652 // The instruction selector will select an FUCOM instruction instead of
19653 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
19654 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
19655 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
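  // In assembly this corresponds roughly to: fucom(p) ; fnstsw %ax ; sahf,
  // after which the usual integer condition codes can be tested.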
19656 SDLoc dl(Cmp);
19657 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
19658 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
19659 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
19660 DAG.getConstant(8, dl, MVT::i8));
19661 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
19663 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
19664 assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
19665 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
19668 /// Check if replacement of SQRT with RSQRT should be disabled.
19669 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
19670 EVT VT = Op.getValueType();
19672 // We never want to use both SQRT and RSQRT instructions for the same input.
19673 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
19674 return false;
19676 if (VT.isVector())
19677 return Subtarget.hasFastVectorFSQRT();
19678 return Subtarget.hasFastScalarFSQRT();
19681 /// The minimum architected relative accuracy is 2^-12. We need one
19682 /// Newton-Raphson step to have a good float result (24 bits of precision).
19683 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
19684 SelectionDAG &DAG, int Enabled,
19685 int &RefinementSteps,
19686 bool &UseOneConstNR,
19687 bool Reciprocal) const {
19688 EVT VT = Op.getValueType();
19690 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
19691 // It is likely not profitable to do this for f64 because a double-precision
19692 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
19693 // instructions: convert to single, rsqrtss, convert back to double, refine
19694 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
19695 // along with FMA, this could be a throughput win.
19696 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
19697 // after legalize types.
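  // For reference, one Newton-Raphson step refines an estimate r of 1/sqrt(a)
  // as r' = r * (1.5 - 0.5 * a * r * r).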
19698 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
19699 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
19700 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
19701 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
19702 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
19703 if (RefinementSteps == ReciprocalEstimate::Unspecified)
19704 RefinementSteps = 1;
19706 UseOneConstNR = false;
19707 // There is no FRSQRT for 512-bits, but there is RSQRT14.
19708 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
19709 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
19711 return SDValue();
19714 /// The minimum architected relative accuracy is 2^-12. We need one
19715 /// Newton-Raphson step to have a good float result (24 bits of precision).
19716 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
19717 int Enabled,
19718 int &RefinementSteps) const {
19719 EVT VT = Op.getValueType();
19721 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
19722 // It is likely not profitable to do this for f64 because a double-precision
19723 // reciprocal estimate with refinement on x86 prior to FMA requires
19724 // 15 instructions: convert to single, rcpss, convert back to double, refine
19725 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
19726 // along with FMA, this could be a throughput win.
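  // For reference, one Newton-Raphson step refines an estimate r of 1/a as
  // r' = r * (2.0 - a * r).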
19728 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
19729 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
19730 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
19731 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
19732 // Enable estimate codegen with 1 refinement step for vector division.
19733 // Scalar division estimates are disabled because they break too much
19734 // real-world code. These defaults are intended to match GCC behavior.
19735 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
19736 return SDValue();
19738 if (RefinementSteps == ReciprocalEstimate::Unspecified)
19739 RefinementSteps = 1;
19741 // There is no FRCP for 512-bits, but there is RCP14.
19742 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
19743 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
19745 return SDValue();
19748 /// If we have at least two divisions that use the same divisor, convert to
19749 /// multiplication by a reciprocal. This may need to be adjusted for a given
19750 /// CPU if a division's cost is not at least twice the cost of a multiplication.
19751 /// This is because we still need one division to calculate the reciprocal and
19752 /// then we need two multiplies by that reciprocal as replacements for the
19753 /// original divisions.
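/// e.g. a/d and b/d become r = 1.0/d; a*r; b*r: one divide and two multiplies
/// instead of two divides, which only pays off if a divide costs at least
/// twice as much as a multiply.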
19754 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
19755 return 2;
19758 /// Result of 'and' is compared against zero. Change to a BT node if possible.
19759 /// Returns the BT node and the condition code needed to use it.
19760 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
19761 const SDLoc &dl, SelectionDAG &DAG,
19762 SDValue &X86CC) {
19763 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
19764 SDValue Op0 = And.getOperand(0);
19765 SDValue Op1 = And.getOperand(1);
19766 if (Op0.getOpcode() == ISD::TRUNCATE)
19767 Op0 = Op0.getOperand(0);
19768 if (Op1.getOpcode() == ISD::TRUNCATE)
19769 Op1 = Op1.getOperand(0);
19771 SDValue Src, BitNo;
19772 if (Op1.getOpcode() == ISD::SHL)
19773 std::swap(Op0, Op1);
19774 if (Op0.getOpcode() == ISD::SHL) {
19775 if (isOneConstant(Op0.getOperand(0))) {
19776 // If we looked past a truncate, check that it's only truncating away
19777 // known zeros.
19778 unsigned BitWidth = Op0.getValueSizeInBits();
19779 unsigned AndBitWidth = And.getValueSizeInBits();
19780 if (BitWidth > AndBitWidth) {
19781 KnownBits Known = DAG.computeKnownBits(Op0);
19782 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
19783 return SDValue();
19785 Src = Op1;
19786 BitNo = Op0.getOperand(1);
19788 } else if (Op1.getOpcode() == ISD::Constant) {
19789 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
19790 uint64_t AndRHSVal = AndRHS->getZExtValue();
19791 SDValue AndLHS = Op0;
19793 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
19794 Src = AndLHS.getOperand(0);
19795 BitNo = AndLHS.getOperand(1);
19796 } else {
19797 // Use BT if the immediate can't be encoded in a TEST instruction or we
19798 // are optimizing for size and the immediate won't fit in a byte.
19799 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
19800 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
19801 isPowerOf2_64(AndRHSVal)) {
19802 Src = AndLHS;
19803 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
19804 Src.getValueType());
19809 // No patterns found, give up.
19810 if (!Src.getNode())
19811 return SDValue();
19813 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
19814 // instruction. Since the shift amount is in-range-or-undefined, we know
19815 // that doing a bittest on the i32 value is ok. We extend to i32 because
19816 // the encoding for the i16 version is larger than the i32 version.
19817 // Also promote i16 to i32 for performance / code size reasons.
19818 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
19819 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
19821 // See if we can use the 32-bit instruction instead of the 64-bit one for a
19822 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
19823 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
19824 // known to be zero.
19825 if (Src.getValueType() == MVT::i64 &&
19826 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
19827 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
19829 // If the operand types disagree, extend the shift amount to match. Since
19830 // BT ignores high bits (like shifts) we can use anyextend.
19831 if (Src.getValueType() != BitNo.getValueType())
19832 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
19834 X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
19835 dl, MVT::i8);
19836 return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
19839 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
19840 /// CMPs.
19841 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
19842 SDValue &Op1) {
19843 unsigned SSECC;
19844 bool Swap = false;
19846 // SSE Condition code mapping:
19847 // 0 - EQ
19848 // 1 - LT
19849 // 2 - LE
19850 // 3 - UNORD
19851 // 4 - NEQ
19852 // 5 - NLT
19853 // 6 - NLE
19854 // 7 - ORD
19855 switch (SetCCOpcode) {
19856 default: llvm_unreachable("Unexpected SETCC condition");
19857 case ISD::SETOEQ:
19858 case ISD::SETEQ: SSECC = 0; break;
19859 case ISD::SETOGT:
19860 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
19861 case ISD::SETLT:
19862 case ISD::SETOLT: SSECC = 1; break;
19863 case ISD::SETOGE:
19864 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
19865 case ISD::SETLE:
19866 case ISD::SETOLE: SSECC = 2; break;
19867 case ISD::SETUO: SSECC = 3; break;
19868 case ISD::SETUNE:
19869 case ISD::SETNE: SSECC = 4; break;
19870 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
19871 case ISD::SETUGE: SSECC = 5; break;
19872 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
19873 case ISD::SETUGT: SSECC = 6; break;
19874 case ISD::SETO: SSECC = 7; break;
19875 case ISD::SETUEQ: SSECC = 8; break;
19876 case ISD::SETONE: SSECC = 12; break;
19878 if (Swap)
19879 std::swap(Op0, Op1);
19881 return SSECC;
19884 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
19885 /// concatenate the result back.
19886 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
19887 MVT VT = Op.getSimpleValueType();
19889 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
19890 "Unsupported value type for operation");
19892 unsigned NumElems = VT.getVectorNumElements();
19893 SDLoc dl(Op);
19894 SDValue CC = Op.getOperand(2);
19896 // Extract the LHS vectors
19897 SDValue LHS = Op.getOperand(0);
19898 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
19899 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
19901 // Extract the RHS vectors
19902 SDValue RHS = Op.getOperand(1);
19903 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
19904 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
19906 // Issue the operation on the smaller types and concatenate the result back
19907 MVT EltVT = VT.getVectorElementType();
19908 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19909 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
19910 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
19911 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
19914 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
19916 SDValue Op0 = Op.getOperand(0);
19917 SDValue Op1 = Op.getOperand(1);
19918 SDValue CC = Op.getOperand(2);
19919 MVT VT = Op.getSimpleValueType();
19920 SDLoc dl(Op);
19922 assert(VT.getVectorElementType() == MVT::i1 &&
19923 "Cannot set masked compare for this operation");
19925 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
19927 // If this is a seteq make sure any build vectors of all zeros are on the RHS.
19928 // This helps with vptestm matching.
19929 // TODO: Should we just canonicalize the setcc during DAG combine?
19930 if ((SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE) &&
19931 ISD::isBuildVectorAllZeros(Op0.getNode()))
19932 std::swap(Op0, Op1);
19934 // Prefer SETGT over SETLT.
19935 if (SetCCOpcode == ISD::SETLT) {
19936 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
19937 std::swap(Op0, Op1);
19940 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
19943 /// Given a buildvector constant, return a new vector constant with each element
19944 /// incremented or decremented. If incrementing or decrementing would result in
19945 /// unsigned overflow or underflow or this is not a simple vector constant,
19946 /// return an empty value.
19947 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
19948 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
19949 if (!BV)
19950 return SDValue();
19952 MVT VT = V.getSimpleValueType();
19953 MVT EltVT = VT.getVectorElementType();
19954 unsigned NumElts = VT.getVectorNumElements();
19955 SmallVector<SDValue, 8> NewVecC;
19956 SDLoc DL(V);
19957 for (unsigned i = 0; i < NumElts; ++i) {
19958 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
19959 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
19960 return SDValue();
19962 // Avoid overflow/underflow.
19963 const APInt &EltC = Elt->getAPIntValue();
19964 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
19965 return SDValue();
19967 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
19970 return DAG.getBuildVector(VT, DL, NewVecC);
19973 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
19974 /// Op0 u<= Op1:
19975 /// t = psubus Op0, Op1
19976 /// pcmpeq t, <0..0>
19977 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
19978 ISD::CondCode Cond, const SDLoc &dl,
19979 const X86Subtarget &Subtarget,
19980 SelectionDAG &DAG) {
19981 if (!Subtarget.hasSSE2())
19982 return SDValue();
19984 MVT VET = VT.getVectorElementType();
19985 if (VET != MVT::i8 && VET != MVT::i16)
19986 return SDValue();
19988 switch (Cond) {
19989 default:
19990 return SDValue();
19991 case ISD::SETULT: {
19992 // If the comparison is against a constant we can turn this into a
19993 // setule. With psubus, setule does not require a swap. This is
19994 // beneficial because the constant in the register is no longer
19995 // clobbered as the destination, so it can be hoisted out of a loop.
19996 // Only do this pre-AVX since vpcmp* is no longer destructive.
19997 if (Subtarget.hasAVX())
19998 return SDValue();
19999 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, false);
20000 if (!ULEOp1)
20001 return SDValue();
20002 Op1 = ULEOp1;
20003 break;
20005 case ISD::SETUGT: {
20006 // If the comparison is against a constant, we can turn this into a setuge.
20007 // This is beneficial because materializing a constant 0 for the PCMPEQ is
20008 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
20009 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
20010 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, true);
20011 if (!UGEOp1)
20012 return SDValue();
20013 Op1 = Op0;
20014 Op0 = UGEOp1;
20015 break;
20017 // Psubus is better than flip-sign because it requires no inversion.
20018 case ISD::SETUGE:
20019 std::swap(Op0, Op1);
20020 break;
20021 case ISD::SETULE:
20022 break;
20025 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
20026 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
20027 DAG.getConstant(0, dl, VT));
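// A scalar sketch of the identities this relies on (illustrative only; the
// helper name is made up and not part of the lowering, and lane values are
// modeled as plain unsigned integers in [0, 255]):
//
//   static inline unsigned usubsat8(unsigned a, unsigned b) { // one psubusb lane
//     return a > b ? a - b : 0u;
//   }
//   // a u<= b  <=>  usubsat8(a, b) == 0                       (SETULE)
//   // a u<  C  <=>  a u<= C - 1,             for C != 0       (SETULT, decrement C)
//   // a u>  C  <=>  usubsat8(C + 1, a) == 0, for C != 255     (SETUGT, increment C)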
20030 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
20031 SelectionDAG &DAG) {
20032 SDValue Op0 = Op.getOperand(0);
20033 SDValue Op1 = Op.getOperand(1);
20034 SDValue CC = Op.getOperand(2);
20035 MVT VT = Op.getSimpleValueType();
20036 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
20037 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
20038 SDLoc dl(Op);
20040 if (isFP) {
20041 #ifndef NDEBUG
20042 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
20043 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
20044 #endif
20046 unsigned Opc;
20047 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
20048 assert(VT.getVectorNumElements() <= 16);
20049 Opc = X86ISD::CMPM;
20050 } else {
20051 Opc = X86ISD::CMPP;
20052 // The SSE/AVX packed FP comparison nodes are defined with a
20053 // floating-point vector result that matches the operand type. This allows
20054 // them to work with an SSE1 target (integer vector types are not legal).
20055 VT = Op0.getSimpleValueType();
20058 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
20059 // emit two comparisons and a logic op to tie them together.
20060 SDValue Cmp;
20061 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
20062 if (SSECC >= 8 && !Subtarget.hasAVX()) {
20063 // LLVM predicate is SETUEQ or SETONE.
20064 unsigned CC0, CC1;
20065 unsigned CombineOpc;
20066 if (Cond == ISD::SETUEQ) {
20067 CC0 = 3; // UNORD
20068 CC1 = 0; // EQ
20069 CombineOpc = X86ISD::FOR;
20070 } else {
20071 assert(Cond == ISD::SETONE);
20072 CC0 = 7; // ORD
20073 CC1 = 4; // NEQ
20074 CombineOpc = X86ISD::FAND;
20077 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20078 DAG.getConstant(CC0, dl, MVT::i8));
20079 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20080 DAG.getConstant(CC1, dl, MVT::i8));
20081 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
20082 } else {
20083 // Handle all other FP comparisons here.
20084 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
20085 DAG.getConstant(SSECC, dl, MVT::i8));
20088 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
20089 // result type of SETCC. The bitcast is expected to be optimized away
20090 // during combining/isel.
20091 if (Opc == X86ISD::CMPP)
20092 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
20094 return Cmp;
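// A scalar sketch of the SETUEQ/SETONE expansion above (illustrative only;
// the helper names are made up and not part of the lowering):
//
//   static inline bool is_unord(double x, double y) { return x != x || y != y; }
//   // ueq: UNORD or EQ, combined with OR.
//   static inline bool ueq(double x, double y) { return is_unord(x, y) || x == y; }
//   // one: ORD and NEQ, combined with AND.
//   static inline bool one(double x, double y) { return !is_unord(x, y) && x != y; }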
20097 MVT VTOp0 = Op0.getSimpleValueType();
20098 assert(VTOp0 == Op1.getSimpleValueType() &&
20099 "Expected operands with same type!");
20100 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
20101 "Invalid number of packed elements for source and destination!");
20103 // This is being called by type legalization because v2i32 is marked custom
20104 // for result type legalization for v2f32.
20105 if (VTOp0 == MVT::v2i32)
20106 return SDValue();
20108 // The non-AVX512 code below works under the assumption that source and
20109 // destination types are the same.
20110 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
20111 "Value types for source and destination must be the same!");
20113 // The result is boolean, but operands are int/float
20114 if (VT.getVectorElementType() == MVT::i1) {
20115 // In the AVX-512 architecture setcc returns a mask with i1 elements,
20116 // but there is no compare instruction for i8 and i16 elements in KNL.
20117 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
20118 "Unexpected operand type");
20119 return LowerIntVSETCC_AVX512(Op, DAG);
20122 // Lower using XOP integer comparisons.
20123 if (VT.is128BitVector() && Subtarget.hasXOP()) {
20124 // Translate compare code to XOP PCOM compare mode.
20125 unsigned CmpMode = 0;
20126 switch (Cond) {
20127 default: llvm_unreachable("Unexpected SETCC condition");
20128 case ISD::SETULT:
20129 case ISD::SETLT: CmpMode = 0x00; break;
20130 case ISD::SETULE:
20131 case ISD::SETLE: CmpMode = 0x01; break;
20132 case ISD::SETUGT:
20133 case ISD::SETGT: CmpMode = 0x02; break;
20134 case ISD::SETUGE:
20135 case ISD::SETGE: CmpMode = 0x03; break;
20136 case ISD::SETEQ: CmpMode = 0x04; break;
20137 case ISD::SETNE: CmpMode = 0x05; break;
20140 // Are we comparing unsigned or signed integers?
20141 unsigned Opc =
20142 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
20144 return DAG.getNode(Opc, dl, VT, Op0, Op1,
20145 DAG.getConstant(CmpMode, dl, MVT::i8));
20148 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
20149 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
20150 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
20151 SDValue BC0 = peekThroughBitcasts(Op0);
20152 if (BC0.getOpcode() == ISD::AND) {
20153 APInt UndefElts;
20154 SmallVector<APInt, 64> EltBits;
20155 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
20156 VT.getScalarSizeInBits(), UndefElts,
20157 EltBits, false, false)) {
20158 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
20159 Cond = ISD::SETEQ;
20160 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
20166 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
20167 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
20168 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
20169 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
20170 if (C1 && C1->getAPIntValue().isPowerOf2()) {
20171 unsigned BitWidth = VT.getScalarSizeInBits();
20172 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
20174 SDValue Result = Op0.getOperand(0);
20175 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
20176 DAG.getConstant(ShiftAmt, dl, VT));
20177 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
20178 DAG.getConstant(BitWidth - 1, dl, VT));
20179 return Result;
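// A scalar sketch of the shift trick above (illustrative only; the helper
// name is made up, and __builtin_ctz plus the arithmetic right shift of a
// negative value are assumed GCC/Clang behavior):
//
//   static inline int eq_and_pow2(unsigned x, unsigned C) { // C is a power of 2
//     unsigned ShAmt = 31u - (unsigned)__builtin_ctz(C);    // BW - log2(C) - 1
//     return (int)(x << ShAmt) >> 31;     // all-ones if (x & C) == C, else 0
//   }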
20183 // Break 256-bit integer vector compare into smaller ones.
20184 if (VT.is256BitVector() && !Subtarget.hasInt256())
20185 return Lower256IntVSETCC(Op, DAG);
20187 // If this is a SETNE against the signed minimum value, change it to SETGT.
20188 // If this is a SETNE against the signed maximum value, change it to SETLT,
20189 // which will be swapped to SETGT.
20190 // Otherwise we use PCMPEQ+invert.
20191 APInt ConstValue;
20192 if (Cond == ISD::SETNE &&
20193 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
20194 if (ConstValue.isMinSignedValue())
20195 Cond = ISD::SETGT;
20196 else if (ConstValue.isMaxSignedValue())
20197 Cond = ISD::SETLT;
20200 // If both operands are known non-negative, then an unsigned compare is the
20201 // same as a signed compare and there's no need to flip signbits.
20202 // TODO: We could check for more general simplifications here since we're
20203 // computing known bits.
20204 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
20205 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
20207 // Special case: Use min/max operations for unsigned compares.
20208 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20209 if (ISD::isUnsignedIntSetCC(Cond) &&
20210 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
20211 TLI.isOperationLegal(ISD::UMIN, VT)) {
20212 // If we have a constant operand, increment/decrement it and change the
20213 // condition to avoid an invert.
20214 if (Cond == ISD::SETUGT &&
20215 ISD::matchUnaryPredicate(Op1, [](ConstantSDNode *C) {
20216 return !C->getAPIntValue().isMaxValue();
20217 })) {
20218 // X > C --> X >= (C+1) --> X == umax(X, C+1)
20219 Op1 = DAG.getNode(ISD::ADD, dl, VT, Op1, DAG.getConstant(1, dl, VT));
20220 Cond = ISD::SETUGE;
20222 if (Cond == ISD::SETULT &&
20223 ISD::matchUnaryPredicate(Op1, [](ConstantSDNode *C) {
20224 return !C->getAPIntValue().isNullValue();
20225 })) {
20226 // X < C --> X <= (C-1) --> X == umin(X, C-1)
20227 Op1 = DAG.getNode(ISD::SUB, dl, VT, Op1, DAG.getConstant(1, dl, VT));
20228 Cond = ISD::SETULE;
20230 bool Invert = false;
20231 unsigned Opc;
20232 switch (Cond) {
20233 default: llvm_unreachable("Unexpected condition code");
20234 case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
20235 case ISD::SETULE: Opc = ISD::UMIN; break;
20236 case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
20237 case ISD::SETUGE: Opc = ISD::UMAX; break;
20240 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20241 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
20243 // If the logical-not of the result is required, perform that now.
20244 if (Invert)
20245 Result = DAG.getNOT(dl, Result, VT);
20247 return Result;
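// A scalar sketch of the min/max rewrites above (illustrative only; the
// helper name is made up and not part of the lowering):
//
//   static inline bool ule_via_min(unsigned a, unsigned b) {
//     return a == (a < b ? a : b);          // a u<= b  <=>  a == umin(a, b)
//   }
//   // Likewise a u>= b  <=>  a == umax(a, b), and with constants:
//   // a u> C  <=>  a u>= C + 1  (C != max),  a u< C  <=>  a u<= C - 1  (C != 0).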
20250 // Try to use SUBUS and PCMPEQ.
20251 if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
20252 return V;
20254 // We are handling one of the integer comparisons here. Since SSE only has
20255 // GT and EQ comparisons for integers, swapping operands and multiple
20256 // operations may be required for some comparisons.
20257 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
20258 : X86ISD::PCMPGT;
20259 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
20260 Cond == ISD::SETGE || Cond == ISD::SETUGE;
20261 bool Invert = Cond == ISD::SETNE ||
20262 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
20264 if (Swap)
20265 std::swap(Op0, Op1);
20267 // Check that the operation in question is available (most are plain SSE2,
20268 // but PCMPGTQ and PCMPEQQ have different requirements).
20269 if (VT == MVT::v2i64) {
20270 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
20271 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
20273 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20274 // bits of the inputs before performing those operations. The lower
20275 // compare is always unsigned.
20276 SDValue SB;
20277 if (FlipSigns) {
20278 SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
20279 } else {
20280 SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
20282 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
20283 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
20285 // Cast everything to the right type.
20286 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20287 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20289 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
20290 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
20291 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
20293 // Create masks for only the low parts/high parts of the 64 bit integers.
20294 static const int MaskHi[] = { 1, 1, 3, 3 };
20295 static const int MaskLo[] = { 0, 0, 2, 2 };
20296 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
20297 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
20298 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
20300 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
20301 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
20303 if (Invert)
20304 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20306 return DAG.getBitcast(VT, Result);
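// A scalar sketch of the emulation above (illustrative only; the helper name
// is made up and <cstdint> is assumed):
//
//   static inline bool sgt64_via_halves(int64_t a, int64_t b) {
//     int32_t AHi = (int32_t)((uint64_t)a >> 32), BHi = (int32_t)((uint64_t)b >> 32);
//     uint32_t ALo = (uint32_t)a, BLo = (uint32_t)b;   // the low compare is unsigned
//     return AHi > BHi || (AHi == BHi && ALo > BLo);
//   }
//
// For the unsigned form, both half compares become unsigned, which is what the
// sign-bit XOR with 0x8000000080000000 achieves before the signed PCMPGT.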
20309 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
20310 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
20311 // pcmpeqd + pshufd + pand.
20312 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
20314 // First cast everything to the right type.
20315 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20316 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20318 // Do the compare.
20319 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
20321 // Make sure the lower and upper halves are both all-ones.
20322 static const int Mask[] = { 1, 0, 3, 2 };
20323 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
20324 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
20326 if (Invert)
20327 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20329 return DAG.getBitcast(VT, Result);
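// A scalar sketch of the synthesized 64-bit equality (illustrative only; the
// helper name is made up and not part of the lowering):
//
//   static inline bool eq64_via_halves(unsigned long long a, unsigned long long b) {
//     return (unsigned)(a & 0xFFFFFFFFu) == (unsigned)(b & 0xFFFFFFFFu) && // pcmpeqd, low halves
//            (unsigned)(a >> 32) == (unsigned)(b >> 32);                   // pcmpeqd, high halves
//   }
//
// The pshufd with mask {1, 0, 3, 2} swaps the 32-bit halves of each 64-bit
// element, so the pand combines each half's compare result with its partner.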
20333 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20334 // bits of the inputs before performing those operations.
20335 if (FlipSigns) {
20336 MVT EltVT = VT.getVectorElementType();
20337 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
20338 VT);
20339 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
20340 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
20343 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20345 // If the logical-not of the result is required, perform that now.
20346 if (Invert)
20347 Result = DAG.getNOT(dl, Result, VT);
20349 return Result;
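// A scalar sketch of the sign-flip trick above (illustrative only; the helper
// name is made up and not part of the lowering):
//
//   static inline bool ugt_via_sgt(unsigned a, unsigned b) {
//     // XORing the sign bit maps unsigned order onto signed order.
//     return (int)(a ^ 0x80000000u) > (int)(b ^ 0x80000000u);
//   }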
20352 // Try to select this as a KORTEST+SETCC if possible.
20353 static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
20354 const SDLoc &dl, SelectionDAG &DAG,
20355 const X86Subtarget &Subtarget,
20356 SDValue &X86CC) {
20357 // Only support equality comparisons.
20358 if (CC != ISD::SETEQ && CC != ISD::SETNE)
20359 return SDValue();
20361 // Must be a bitcast from vXi1.
20362 if (Op0.getOpcode() != ISD::BITCAST)
20363 return SDValue();
20365 Op0 = Op0.getOperand(0);
20366 MVT VT = Op0.getSimpleValueType();
20367 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
20368 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
20369 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
20370 return SDValue();
20372 X86::CondCode X86Cond;
20373 if (isNullConstant(Op1)) {
20374 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
20375 } else if (isAllOnesConstant(Op1)) {
20376 // C flag is set for all ones.
20377 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
20378 } else
20379 return SDValue();
20381 // If the input is an OR, we can combine its operands into the KORTEST.
20382 SDValue LHS = Op0;
20383 SDValue RHS = Op0;
20384 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
20385 LHS = Op0.getOperand(0);
20386 RHS = Op0.getOperand(1);
20389 X86CC = DAG.getConstant(X86Cond, dl, MVT::i8);
20390 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
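// A rough model of the KORTEST flags used above, with a v16i1 mask held in the
// low 16 bits of an unsigned value (illustrative only; the helper names are
// made up and not part of the lowering):
//
//   static inline bool kortest_zf(unsigned a, unsigned b) { // mask == 0
//     return ((a | b) & 0xFFFFu) == 0;
//   }
//   static inline bool kortest_cf(unsigned a, unsigned b) { // mask == all ones
//     return ((a | b) & 0xFFFFu) == 0xFFFFu;
//   }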
20393 /// Emit flags for the given setcc condition and operands. Also returns the
20394 /// corresponding X86 condition code constant in X86CC.
20395 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
20396 ISD::CondCode CC, const SDLoc &dl,
20397 SelectionDAG &DAG,
20398 SDValue &X86CC) const {
20399 // Optimize to BT if possible.
20400 // Lower (X & (1 << N)) == 0 to BT(X, N).
20401 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
20402 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
20403 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
20404 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20405 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
20406 return BT;
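// A scalar sketch of the BT rewrites above (illustrative only; the helper name
// is made up and not part of the lowering):
//
//   static inline bool test_bit(unsigned x, unsigned n) {
//     return (x >> n) & 1u;                  // CF after "bt x, n"
//   }
//   // (x & (1u << n)) != 0  <=>  test_bit(x, n)
//   // ((x >> n) & 1) != 0   <=>  test_bit(x, n)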
20409 // Try to use PTEST for a tree of ORs equality-compared with 0.
20410 // TODO: We could do AND tree with all 1s as well by using the C flag.
20411 if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
20412 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20413 if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
20414 return PTEST;
20417 // Try to lower using KORTEST.
20418 if (SDValue KORTEST = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
20419 return KORTEST;
20421 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
20422 // these.
20423 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
20424 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20425 // If the input is a setcc, then reuse the input setcc or use a new one with
20426 // the inverted condition.
20427 if (Op0.getOpcode() == X86ISD::SETCC) {
20428 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
20430 X86CC = Op0.getOperand(0);
20431 if (Invert) {
20432 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
20433 CCode = X86::GetOppositeBranchCondition(CCode);
20434 X86CC = DAG.getConstant(CCode, dl, MVT::i8);
20437 return Op0.getOperand(1);
20441 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
20442 X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
20443 if (CondCode == X86::COND_INVALID)
20444 return SDValue();
20446 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
20447 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
20448 X86CC = DAG.getConstant(CondCode, dl, MVT::i8);
20449 return EFLAGS;
20452 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
20454 MVT VT = Op.getSimpleValueType();
20456 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
20458 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
20459 SDValue Op0 = Op.getOperand(0);
20460 SDValue Op1 = Op.getOperand(1);
20461 SDLoc dl(Op);
20462 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
20464 SDValue X86CC;
20465 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
20466 if (!EFLAGS)
20467 return SDValue();
20469 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
20472 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
20473 SDValue LHS = Op.getOperand(0);
20474 SDValue RHS = Op.getOperand(1);
20475 SDValue Carry = Op.getOperand(2);
20476 SDValue Cond = Op.getOperand(3);
20477 SDLoc DL(Op);
20479 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
20480 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
20482 // Recreate the carry if needed.
20483 EVT CarryVT = Carry.getValueType();
20484 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
20485 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
20486 Carry, DAG.getConstant(NegOne, DL, CarryVT));
20488 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
20489 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
20490 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
20493 // This function returns three things: the arithmetic computation itself
20494 // (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
20495 // flag and the condition code define the case in which the arithmetic
20496 // computation overflows.
20497 static std::pair<SDValue, SDValue>
20498 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
20499 assert(Op.getResNo() == 0 && "Unexpected result number!");
20500 SDValue Value, Overflow;
20501 SDValue LHS = Op.getOperand(0);
20502 SDValue RHS = Op.getOperand(1);
20503 unsigned BaseOp = 0;
20504 SDLoc DL(Op);
20505 switch (Op.getOpcode()) {
20506 default: llvm_unreachable("Unknown ovf instruction!");
20507 case ISD::SADDO:
20508 BaseOp = X86ISD::ADD;
20509 Cond = X86::COND_O;
20510 break;
20511 case ISD::UADDO:
20512 BaseOp = X86ISD::ADD;
20513 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
20514 break;
20515 case ISD::SSUBO:
20516 BaseOp = X86ISD::SUB;
20517 Cond = X86::COND_O;
20518 break;
20519 case ISD::USUBO:
20520 BaseOp = X86ISD::SUB;
20521 Cond = X86::COND_B;
20522 break;
20523 case ISD::SMULO:
20524 BaseOp = X86ISD::SMUL;
20525 Cond = X86::COND_O;
20526 break;
20527 case ISD::UMULO:
20528 BaseOp = X86ISD::UMUL;
20529 Cond = X86::COND_O;
20530 break;
20533 if (BaseOp) {
20534 // Also sets EFLAGS.
20535 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20536 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
20537 Overflow = Value.getValue(1);
20540 return std::make_pair(Value, Overflow);
20543 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
20544 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
20545 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
20546 // looks for this combo and may remove the "setcc" instruction if the "setcc"
20547 // has only one use.
20548 SDLoc DL(Op);
20549 X86::CondCode Cond;
20550 SDValue Value, Overflow;
20551 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
20553 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
20554 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
20555 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
20558 /// Return true if opcode is a X86 logical comparison.
20559 static bool isX86LogicalCmp(SDValue Op) {
20560 unsigned Opc = Op.getOpcode();
20561 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
20562 Opc == X86ISD::SAHF)
20563 return true;
20564 if (Op.getResNo() == 1 &&
20565 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
20566 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
20567 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
20568 return true;
20570 return false;
20573 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
20574 if (V.getOpcode() != ISD::TRUNCATE)
20575 return false;
20577 SDValue VOp0 = V.getOperand(0);
20578 unsigned InBits = VOp0.getValueSizeInBits();
20579 unsigned Bits = V.getValueSizeInBits();
20580 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
20583 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
20584 bool AddTest = true;
20585 SDValue Cond = Op.getOperand(0);
20586 SDValue Op1 = Op.getOperand(1);
20587 SDValue Op2 = Op.getOperand(2);
20588 SDLoc DL(Op);
20589 MVT VT = Op1.getSimpleValueType();
20590 SDValue CC;
20592 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
20593 // are available or VBLENDV if AVX is available.
20594 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
20595 if (Cond.getOpcode() == ISD::SETCC &&
20596 ((Subtarget.hasSSE2() && VT == MVT::f64) ||
20597 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
20598 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
20599 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
20600 unsigned SSECC = translateX86FSETCC(
20601 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
20603 if (Subtarget.hasAVX512()) {
20604 SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
20605 CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
20606 assert(!VT.isVector() && "Not a scalar type?");
20607 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
20610 if (SSECC < 8 || Subtarget.hasAVX()) {
20611 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
20612 DAG.getConstant(SSECC, DL, MVT::i8));
20614 // If we have AVX, we can use a variable vector select (VBLENDV) instead
20615 // of 3 logic instructions for size savings and potentially speed.
20616 // Unfortunately, there is no scalar form of VBLENDV.
20618 // If either operand is a +0.0 constant, don't try this. We can expect to
20619 // optimize away at least one of the logic instructions later in that
20620 // case, so that sequence would be faster than a variable blend.
20622 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
20623 // uses XMM0 as the selection register. That may need just as many
20624 // instructions as the AND/ANDN/OR sequence due to register moves, so
20625 // don't bother.
20626 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
20627 !isNullFPConstant(Op2)) {
20628 // Convert to vectors, do a VSELECT, and convert back to scalar.
20629 // All of the conversions should be optimized away.
20630 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
20631 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
20632 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
20633 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
20635 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
20636 VCmp = DAG.getBitcast(VCmpVT, VCmp);
20638 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
20640 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
20641 VSel, DAG.getIntPtrConstant(0, DL));
20643 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
20644 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
20645 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
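// A scalar sketch of the CMP/AND/ANDN/OR select above, where the compare
// produces an all-ones or all-zeros mask (illustrative only; the helper name
// is made up and not part of the lowering):
//
//   static inline unsigned bit_select(unsigned Mask, unsigned A, unsigned B) {
//     return (Mask & A) | (~Mask & B);       // FAND, FANDN, FOR
//   }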
20649 // AVX512 fallback is to lower selects of scalar floats to masked moves.
20650 if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
20651 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
20652 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
20655 // For v64i1 without 64-bit support we need to split and rejoin.
20656 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
20657 assert(Subtarget.hasBWI() && "Expected BWI to be legal");
20658 SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
20659 SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
20660 SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
20661 SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
20662 SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
20663 SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
20664 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20667 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
20668 SDValue Op1Scalar;
20669 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
20670 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
20671 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
20672 Op1Scalar = Op1.getOperand(0);
20673 SDValue Op2Scalar;
20674 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
20675 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
20676 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
20677 Op2Scalar = Op2.getOperand(0);
20678 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
20679 SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
20680 Op1Scalar, Op2Scalar);
20681 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
20682 return DAG.getBitcast(VT, newSelect);
20683 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
20684 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
20685 DAG.getIntPtrConstant(0, DL));
20689 if (Cond.getOpcode() == ISD::SETCC) {
20690 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
20691 Cond = NewCond;
20692 // If the condition was updated, it's possible that the operands of the
20693 // select were also updated (for example, EmitTest has a RAUW). Refresh
20694 // the local references to the select operands in case they got stale.
20695 Op1 = Op.getOperand(1);
20696 Op2 = Op.getOperand(2);
20700 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
20701 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
20702 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
20703 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
20704 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
20705 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
20706 if (Cond.getOpcode() == X86ISD::SETCC &&
20707 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
20708 isNullConstant(Cond.getOperand(1).getOperand(1))) {
20709 SDValue Cmp = Cond.getOperand(1);
20710 unsigned CondCode =
20711 cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
20713 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
20714 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
20715 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
20716 SDValue CmpOp0 = Cmp.getOperand(0);
20718 // Apply further optimizations for special cases
20719 // (select (x != 0), -1, 0) -> neg & sbb
20720 // (select (x == 0), 0, -1) -> neg & sbb
20721 if (isNullConstant(Y) &&
20722 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
20723 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
20724 SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
20725 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20726 Zero = DAG.getConstant(0, DL, Op.getValueType());
20727 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
20730 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
20731 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
20732 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
20734 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20735 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
20736 SDValue Res = // Res = 0 or -1.
20737 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
20739 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
20740 Res = DAG.getNOT(DL, Res, Res.getValueType());
20742 if (!isNullConstant(Op2))
20743 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
20744 return Res;
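// A scalar sketch of the sign-bit/SBB rewrite above (illustrative only; the
// helper name is made up and not part of the lowering):
//
//   static inline unsigned sel_eqz_m1_y(unsigned x, unsigned y) {
//     unsigned Mask = 0u - (unsigned)(x == 0); // "cmp x, 1; sbb r, r": all ones iff x == 0
//     return Mask | y;                         // (x == 0) ? -1 : y
//   }
//
// The other forms listed above invert the mask and/or swap which operand
// survives, but follow the same pattern.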
20745 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
20746 Cmp.getOperand(0).getOpcode() == ISD::AND &&
20747 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
20748 SDValue CmpOp0 = Cmp.getOperand(0);
20749 SDValue Src1, Src2;
20750 // Returns true if Op2 is an XOR or OR operator and one of its operands
20751 // is equal to Op1, i.e. the pattern is
20752 // (a, a op b) || (b, a op b).
20753 auto isOrXorPattern = [&]() {
20754 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
20755 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
20756 Src1 =
20757 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
20758 Src2 = Op1;
20759 return true;
20761 return false;
20764 if (isOrXorPattern()) {
20765 SDValue Neg;
20766 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
20767 // We need a mask of all zeros or all ones with the same size as the
20768 // other operands.
20769 if (CmpSz > VT.getSizeInBits())
20770 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
20771 else if (CmpSz < VT.getSizeInBits())
20772 Neg = DAG.getNode(ISD::AND, DL, VT,
20773 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
20774 DAG.getConstant(1, DL, VT));
20775 else
20776 Neg = CmpOp0;
20777 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
20778 Neg); // -(and (x, 0x1))
20779 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
20780 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
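// A scalar sketch of the rewrite above (illustrative only; the helper name is
// made up and not part of the lowering):
//
//   static inline unsigned sel_and1_xor(unsigned x, unsigned y, unsigned z) {
//     unsigned Mask = 0u - (x & 1u);  // all ones iff the low bit of x is set
//     return (Mask & z) ^ y;          // ((x & 1) == 0) ? y : (z ^ y)
//   }
//
// The OR variant is identical with '^' replaced by '|'.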
20785 // Look past (and (setcc_carry (cmp ...)), 1).
20786 if (Cond.getOpcode() == ISD::AND &&
20787 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
20788 isOneConstant(Cond.getOperand(1)))
20789 Cond = Cond.getOperand(0);
20791 // If condition flag is set by a X86ISD::CMP, then use it as the condition
20792 // setting operand in place of the X86ISD::SETCC.
20793 unsigned CondOpcode = Cond.getOpcode();
20794 if (CondOpcode == X86ISD::SETCC ||
20795 CondOpcode == X86ISD::SETCC_CARRY) {
20796 CC = Cond.getOperand(0);
20798 SDValue Cmp = Cond.getOperand(1);
20799 MVT VT = Op.getSimpleValueType();
20801 bool IllegalFPCMov = false;
20802 if (VT.isFloatingPoint() && !VT.isVector() &&
20803 !isScalarFPTypeInSSEReg(VT)) // FPStack?
20804 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
20806 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
20807 Cmp.getOpcode() == X86ISD::BT) { // FIXME
20808 Cond = Cmp;
20809 AddTest = false;
20811 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
20812 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
20813 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
20814 SDValue Value;
20815 X86::CondCode X86Cond;
20816 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
20818 CC = DAG.getConstant(X86Cond, DL, MVT::i8);
20819 AddTest = false;
20822 if (AddTest) {
20823 // Look past the truncate if the high bits are known zero.
20824 if (isTruncWithZeroHighBitsInput(Cond, DAG))
20825 Cond = Cond.getOperand(0);
20827 // We know the result of AND is compared against zero. Try to match
20828 // it to BT.
20829 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
20830 SDValue BTCC;
20831 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
20832 CC = BTCC;
20833 Cond = BT;
20834 AddTest = false;
20839 if (AddTest) {
20840 CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
20841 Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
20842 X86::COND_NE, DL, DAG);
20845 // a < b ? -1 : 0 -> RES = ~setcc_carry
20846 // a < b ? 0 : -1 -> RES = setcc_carry
20847 // a >= b ? -1 : 0 -> RES = setcc_carry
20848 // a >= b ? 0 : -1 -> RES = ~setcc_carry
20849 if (Cond.getOpcode() == X86ISD::SUB) {
20850 Cond = ConvertCmpIfNecessary(Cond, DAG);
20851 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
20853 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
20854 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
20855 (isNullConstant(Op1) || isNullConstant(Op2))) {
20856 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
20857 DAG.getConstant(X86::COND_B, DL, MVT::i8),
20858 Cond);
20859 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
20860 return DAG.getNOT(DL, Res, Res.getValueType());
20861 return Res;
20865 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
20866 // widen the cmov and push the truncate through. This avoids introducing a new
20867 // branch during isel and doesn't add any extensions.
20868 if (Op.getValueType() == MVT::i8 &&
20869 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
20870 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
20871 if (T1.getValueType() == T2.getValueType() &&
20872 // Blacklist CopyFromReg to avoid partial register stalls.
20873 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
20874 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
20875 CC, Cond);
20876 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
20880 // Or finally, promote i8 cmovs if we have CMOV,
20881 // or i16 cmovs if it won't prevent folding a load.
20882 // FIXME: we should not limit promotion of i8 case to only when the CMOV is
20883 // legal, but EmitLoweredSelect() cannot deal with these extensions
20884 // being inserted between two CMOV's. (in i16 case too TBN)
20885 // https://bugs.llvm.org/show_bug.cgi?id=40974
20886 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
20887 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
20888 !MayFoldLoad(Op2))) {
20889 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
20890 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
20891 SDValue Ops[] = { Op2, Op1, CC, Cond };
20892 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
20893 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
20896 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
20897 // condition is true.
20898 SDValue Ops[] = { Op2, Op1, CC, Cond };
20899 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
20902 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
20903 const X86Subtarget &Subtarget,
20904 SelectionDAG &DAG) {
20905 MVT VT = Op->getSimpleValueType(0);
20906 SDValue In = Op->getOperand(0);
20907 MVT InVT = In.getSimpleValueType();
20908 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20909 MVT VTElt = VT.getVectorElementType();
20910 SDLoc dl(Op);
20912 unsigned NumElts = VT.getVectorNumElements();
20914 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
20915 MVT ExtVT = VT;
20916 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
20917 // If v16i32 is to be avoided, we'll need to split and concatenate.
20918 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20919 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
20921 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20924 // Widen to 512-bits if VLX is not supported.
20925 MVT WideVT = ExtVT;
20926 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20927 NumElts *= 512 / ExtVT.getSizeInBits();
20928 InVT = MVT::getVectorVT(MVT::i1, NumElts);
20929 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
20930 In, DAG.getIntPtrConstant(0, dl));
20931 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
20934 SDValue V;
20935 MVT WideEltVT = WideVT.getVectorElementType();
20936 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
20937 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
20938 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
20939 } else {
20940 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
20941 SDValue Zero = DAG.getConstant(0, dl, WideVT);
20942 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
20945 // Truncate if we had to extend i16/i8 above.
20946 if (VT != ExtVT) {
20947 WideVT = MVT::getVectorVT(VTElt, NumElts);
20948 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
20951 // Extract back to 128/256-bit if we widened.
20952 if (WideVT != VT)
20953 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
20954 DAG.getIntPtrConstant(0, dl));
20956 return V;
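// Per lane, the fallback select above computes the usual i1 sign-extension
// (illustrative only; the helper name is made up and not part of the lowering):
//
//   static inline int sext_i1(bool m) { return m ? -1 : 0; }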
20959 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20960 SelectionDAG &DAG) {
20961 SDValue In = Op->getOperand(0);
20962 MVT InVT = In.getSimpleValueType();
20964 if (InVT.getVectorElementType() == MVT::i1)
20965 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
20967 assert(Subtarget.hasAVX() && "Expected AVX support");
20968 return LowerAVXExtend(Op, DAG, Subtarget);
20971 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
20972 // For sign extend this needs to handle all vector sizes and SSE4.1 and
20973 // non-SSE4.1 targets. For zero extend this should only handle inputs of
20974 // MVT::v64i8 when BWI is not supported, but AVX512 is.
20975 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
20976 const X86Subtarget &Subtarget,
20977 SelectionDAG &DAG) {
20978 SDValue In = Op->getOperand(0);
20979 MVT VT = Op->getSimpleValueType(0);
20980 MVT InVT = In.getSimpleValueType();
20982 MVT SVT = VT.getVectorElementType();
20983 MVT InSVT = InVT.getVectorElementType();
20984 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
20986 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
20987 return SDValue();
20988 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
20989 return SDValue();
20990 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
20991 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
20992 !(VT.is512BitVector() && Subtarget.hasAVX512()))
20993 return SDValue();
20995 SDLoc dl(Op);
20996 unsigned Opc = Op.getOpcode();
20997 unsigned NumElts = VT.getVectorNumElements();
20999 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
21000 // For 512-bit vectors, we need 128-bits or 256-bits.
21001 if (InVT.getSizeInBits() > 128) {
21002 // Input needs to be at least the same number of elements as output, and
21003 // at least 128-bits.
21004 int InSize = InSVT.getSizeInBits() * NumElts;
21005 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
21006 InVT = In.getSimpleValueType();
21009 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
21010 // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
21011 // instructions still need to be handled here for 256/512-bit results.
21012 if (Subtarget.hasInt256()) {
21013 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
21015 if (InVT.getVectorNumElements() != NumElts)
21016 return DAG.getNode(Op.getOpcode(), dl, VT, In);
21018 // FIXME: Apparently we create inreg operations that could be regular
21019 // extends.
21020 unsigned ExtOpc =
21021 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
21022 : ISD::ZERO_EXTEND;
21023 return DAG.getNode(ExtOpc, dl, VT, In);
21026 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
21027 if (Subtarget.hasAVX()) {
21028 assert(VT.is256BitVector() && "256-bit vector expected");
21029 int HalfNumElts = NumElts / 2;
21030 MVT HalfVT = MVT::getVectorVT(SVT, HalfNumElts);
21032 unsigned NumSrcElts = InVT.getVectorNumElements();
21033 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
21034 for (int i = 0; i != HalfNumElts; ++i)
21035 HiMask[i] = HalfNumElts + i;
21037 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
21038 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
21039 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
21040 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
21043 // We should only get here for sign extend.
21044 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
21045 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
21047 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
21048 SDValue Curr = In;
21049 SDValue SignExt = Curr;
21051 // As SRAI is only available on i16/i32 types, we expand only up to i32
21052 // and handle i64 separately.
21053 if (InVT != MVT::v4i32) {
21054 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
21056 unsigned DestWidth = DestVT.getScalarSizeInBits();
21057 unsigned Scale = DestWidth / InSVT.getSizeInBits();
21059 unsigned InNumElts = InVT.getVectorNumElements();
21060 unsigned DestElts = DestVT.getVectorNumElements();
21062 // Build a shuffle mask that takes each input element and places it in the
21063 // MSBs of the new element size.
21064 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
21065 for (unsigned i = 0; i != DestElts; ++i)
21066 Mask[i * Scale + (Scale - 1)] = i;
21068 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
21069 Curr = DAG.getBitcast(DestVT, Curr);
21071 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
21072 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
21073 DAG.getConstant(SignExtShift, dl, MVT::i8));
21076 if (VT == MVT::v2i64) {
21077 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
21078 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
21079 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
21080 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
21081 SignExt = DAG.getBitcast(VT, SignExt);
21084 return SignExt;
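// A scalar sketch of the pre-SSE4.1 sign-extension above (illustrative only;
// the helper name is made up, and the arithmetic right shift of a negative
// value is assumed GCC/Clang behavior):
//
//   static inline int sext8to32(unsigned char x) {
//     return (int)((unsigned)x << 24) >> 24; // place the byte in the MSBs, then srai
//   }
//
// For i64 results, the upper 32 bits of each element are (0 s> x), i.e. the
// sign mask, interleaved above the low 32 bits by the final shuffle.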
21087 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
21088 SelectionDAG &DAG) {
21089 MVT VT = Op->getSimpleValueType(0);
21090 SDValue In = Op->getOperand(0);
21091 MVT InVT = In.getSimpleValueType();
21092 SDLoc dl(Op);
21094 if (InVT.getVectorElementType() == MVT::i1)
21095 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
21097 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
21098 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
21099 "Expected same number of elements");
21100 assert((VT.getVectorElementType() == MVT::i16 ||
21101 VT.getVectorElementType() == MVT::i32 ||
21102 VT.getVectorElementType() == MVT::i64) &&
21103 "Unexpected element type");
21104 assert((InVT.getVectorElementType() == MVT::i8 ||
21105 InVT.getVectorElementType() == MVT::i16 ||
21106 InVT.getVectorElementType() == MVT::i32) &&
21107 "Unexpected element type");
21109 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
21110 if (InVT == MVT::v8i8) {
21111 if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
21112 return SDValue();
21114 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
21115 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
21116 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
21119 if (Subtarget.hasInt256())
21120 return Op;
21122 // Optimize vectors in AVX mode:
21123 // sign extend v8i16 to v8i32 and
21124 // v4i32 to v4i64.
21126 // Divide the input vector into two parts;
21127 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
21128 // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
21129 // then concat the vectors back to the original VT.
21131 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
21132 VT.getVectorNumElements() / 2);
21134 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
21136 unsigned NumElems = InVT.getVectorNumElements();
21137 SmallVector<int,8> ShufMask(NumElems, -1);
21138 for (unsigned i = 0; i != NumElems/2; ++i)
21139 ShufMask[i] = i + NumElems/2;
21141 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
21142 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
21144 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
21147 /// Change a vector store into a pair of half-size vector stores.
21148 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
21149 SDValue StoredVal = Store->getValue();
21150 assert((StoredVal.getValueType().is256BitVector() ||
21151 StoredVal.getValueType().is512BitVector()) &&
21152 "Expecting 256/512-bit op");
21154 // Splitting volatile memory ops is not allowed unless the operation was not
21155 // legal to begin with. We are assuming the input op is legal (this transform
21156 // is only used for targets with AVX).
21157 if (Store->isVolatile())
21158 return SDValue();
21160 MVT StoreVT = StoredVal.getSimpleValueType();
21161 unsigned NumElems = StoreVT.getVectorNumElements();
21162 unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
21163 unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
21165 SDLoc DL(Store);
21166 SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
21167 SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
21168 SDValue Ptr0 = Store->getBasePtr();
21169 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
21170 unsigned Alignment = Store->getAlignment();
21171 SDValue Ch0 =
21172 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
21173 Alignment, Store->getMemOperand()->getFlags());
21174 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
21175 Store->getPointerInfo().getWithOffset(HalfAlign),
21176 MinAlign(Alignment, HalfAlign),
21177 Store->getMemOperand()->getFlags());
21178 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
21181 /// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
21182 /// type.
21183 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
21184 SelectionDAG &DAG) {
21185 SDValue StoredVal = Store->getValue();
21186 assert(StoreVT.is128BitVector() &&
21187 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
21188 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
21190 // Splitting volatile memory ops is not allowed unless the operation was not
21191 // legal to begin with. We are assuming the input op is legal (this transform
21192 // is only used for targets with AVX).
21193 if (Store->isVolatile())
21194 return SDValue();
21196 MVT StoreSVT = StoreVT.getScalarType();
21197 unsigned NumElems = StoreVT.getVectorNumElements();
21198 unsigned ScalarSize = StoreSVT.getStoreSize();
21199 unsigned Alignment = Store->getAlignment();
21201 SDLoc DL(Store);
21202 SmallVector<SDValue, 4> Stores;
21203 for (unsigned i = 0; i != NumElems; ++i) {
21204 unsigned Offset = i * ScalarSize;
21205 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
21206 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
21207 DAG.getIntPtrConstant(i, DL));
21208 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
21209 Store->getPointerInfo().getWithOffset(Offset),
21210 MinAlign(Alignment, Offset),
21211 Store->getMemOperand()->getFlags());
21212 Stores.push_back(Ch);
21214 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
21217 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
21218 SelectionDAG &DAG) {
21219 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
21220 SDLoc dl(St);
21221 SDValue StoredVal = St->getValue();
21223 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
21224 if (StoredVal.getValueType().isVector() &&
21225 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
21226 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
21227 "Unexpected VT");
21228 assert(!St->isTruncatingStore() && "Expected non-truncating store");
21229 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21230 "Expected AVX512F without AVX512DQI");
21232 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
21233 DAG.getUNDEF(MVT::v16i1), StoredVal,
21234 DAG.getIntPtrConstant(0, dl));
21235 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
21236 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
21238 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21239 St->getPointerInfo(), St->getAlignment(),
21240 St->getMemOperand()->getFlags());
21243 if (St->isTruncatingStore())
21244 return SDValue();
21246 // If this is a 256-bit store of concatenated ops, we are better off splitting
21247 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
21248 // and each half can execute independently. Some cores would split the op into
21249 // halves anyway, so the concat (vinsertf128) is purely an extra op.
21250 MVT StoreVT = StoredVal.getSimpleValueType();
21251 if (StoreVT.is256BitVector()) {
21252 SmallVector<SDValue, 4> CatOps;
21253 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
21254 return splitVectorStore(St, DAG);
21255 return SDValue();
21258 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
21259 "Unexpected VT");
21260 if (DAG.getTargetLoweringInfo().getTypeAction(*DAG.getContext(), StoreVT) !=
21261 TargetLowering::TypeWidenVector)
21262 return SDValue();
21264 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
21265 // and store it.
21266 MVT WideVT = MVT::getVectorVT(StoreVT.getVectorElementType(),
21267 StoreVT.getVectorNumElements() * 2);
21268 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
21269 DAG.getUNDEF(StoreVT));
21270 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
21271 MVT CastVT = MVT::getVectorVT(StVT, 2);
21272 StoredVal = DAG.getBitcast(CastVT, StoredVal);
21273 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
21274 DAG.getIntPtrConstant(0, dl));
21276 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21277 St->getPointerInfo(), St->getAlignment(),
21278 St->getMemOperand()->getFlags());
21281 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
21282 // may emit an illegal shuffle but the expansion is still better than scalar
21283 // code. We generate sext/sext_invec for SEXTLOADs if available, otherwise
21284 // we'll emit a shuffle and an arithmetic shift.
21285 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
21286 // TODO: It is possible to support ZExt by zeroing the undef values during
21287 // the shuffle phase or after the shuffle.
21288 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
21289 SelectionDAG &DAG) {
21290 MVT RegVT = Op.getSimpleValueType();
21291 assert(RegVT.isVector() && "We only custom lower vector loads.");
21292 assert(RegVT.isInteger() &&
21293 "We only custom lower integer vector loads.");
21295 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
21296 SDLoc dl(Ld);
21297 EVT MemVT = Ld->getMemoryVT();
21299 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
21300 if (RegVT.getVectorElementType() == MVT::i1) {
21301 assert(EVT(RegVT) == MemVT && "Expected non-extending load");
21302 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
21303 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21304 "Expected AVX512F without AVX512DQI");
21306 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
21307 Ld->getPointerInfo(), Ld->getAlignment(),
21308 Ld->getMemOperand()->getFlags());
21310 // Replace chain users with the new chain.
21311 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
21313 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
21314 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
21315 DAG.getBitcast(MVT::v16i1, Val),
21316 DAG.getIntPtrConstant(0, dl));
21317 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
21320 // Nothing useful we can do without SSE2 shuffles.
21321 assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
21323 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21324 unsigned RegSz = RegVT.getSizeInBits();
21326 ISD::LoadExtType Ext = Ld->getExtensionType();
21328 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
21329 && "Only anyext and sext are currently implemented.");
21330 assert(MemVT != RegVT && "Cannot extend to the same type");
21331 assert(MemVT.isVector() && "Must load a vector from memory");
21333 unsigned NumElems = RegVT.getVectorNumElements();
21334 unsigned MemSz = MemVT.getSizeInBits();
21335 assert(RegSz > MemSz && "Register size must be greater than the mem size");
21337 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
21338 // The only way in which we have a legal 256-bit vector result but not the
21339 // integer 256-bit operations needed to directly lower a sextload is if we
21340 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
21341 // a 128-bit vector and a normal sign_extend to 256-bits that should get
21342 // correctly legalized. We do this late to allow the canonical form of
21343 // sextload to persist throughout the rest of the DAG combiner -- it wants
21344 // to fold together any extensions it can, and so will fuse a sign_extend
21345 // of an sextload into a sextload targeting a wider value.
21346 SDValue Load;
21347 if (MemSz == 128) {
21348 // Just switch this to a normal load.
21349 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
21350 "it must be a legal 128-bit vector "
21351 "type!");
21352 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
21353 Ld->getPointerInfo(), Ld->getAlignment(),
21354 Ld->getMemOperand()->getFlags());
21355 } else {
21356 assert(MemSz < 128 &&
21357 "Can't extend a type wider than 128 bits to a 256 bit vector!");
21358 // Do an sext load to a 128-bit vector type. We want to use the same
21359 // number of elements, but elements half as wide. This will end up being
21360 // recursively lowered by this routine, but will succeed as we definitely
21361 // have all the necessary features if we're using AVX1.
21362 EVT HalfEltVT =
21363 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
21364 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
21365 Load =
21366 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
21367 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
21368 Ld->getMemOperand()->getFlags());
21371 // Replace chain users with the new chain.
21372 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
21374 // Finally, do a normal sign-extend to the desired register.
21375 SDValue SExt = DAG.getSExtOrTrunc(Load, dl, RegVT);
21376 return DAG.getMergeValues({SExt, Load.getValue(1)}, dl);
21379 // All sizes must be a power of two.
21380 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
21381 "Non-power-of-two elements are not custom lowered!");
21383 // Attempt to load the original value using scalar loads.
21384 // Find the largest scalar type that divides the total loaded size.
21385 MVT SclrLoadTy = MVT::i8;
21386 for (MVT Tp : MVT::integer_valuetypes()) {
21387 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
21388 SclrLoadTy = Tp;
21392 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
21393 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
21394 (64 <= MemSz))
21395 SclrLoadTy = MVT::f64;
21397 // Calculate the number of scalar loads that we need to perform
21398 // in order to load our vector from memory.
21399 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
21401 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
21402 "Can only lower sext loads with a single scalar load!");
21404 unsigned loadRegSize = RegSz;
21405 if (Ext == ISD::SEXTLOAD && RegSz >= 256)
21406 loadRegSize = 128;
21408 // If we don't have BWI we won't be able to create the shuffle needed for
21409 // v8i8->v8i64.
21410 if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
21411 MemVT == MVT::v8i8)
21412 loadRegSize = 128;
21414 // Represent our vector as a sequence of elements which are the
21415 // largest scalar that we can load.
21416 EVT LoadUnitVecVT = EVT::getVectorVT(
21417 *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
21419 // Represent the data using the same element type that is stored in
21420 // memory. In practice, we "widen" MemVT.
21421 EVT WideVecVT =
21422 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
21423 loadRegSize / MemVT.getScalarSizeInBits());
21425 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
21426 "Invalid vector type");
21428 // We can't shuffle using an illegal type.
21429 assert(TLI.isTypeLegal(WideVecVT) &&
21430 "We only lower types that form legal widened vector types");
21432 SmallVector<SDValue, 8> Chains;
21433 SDValue Ptr = Ld->getBasePtr();
21434 unsigned OffsetInc = SclrLoadTy.getSizeInBits() / 8;
21435 SDValue Increment = DAG.getConstant(OffsetInc, dl,
21436 TLI.getPointerTy(DAG.getDataLayout()));
21437 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
21439 unsigned Offset = 0;
21440 for (unsigned i = 0; i < NumLoads; ++i) {
21441 unsigned NewAlign = MinAlign(Ld->getAlignment(), Offset);
21443 // Perform a single load.
21444 SDValue ScalarLoad =
21445 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr,
21446 Ld->getPointerInfo().getWithOffset(Offset),
21447 NewAlign, Ld->getMemOperand()->getFlags());
21448 Chains.push_back(ScalarLoad.getValue(1));
21449 // Build the vector from the first element using SCALAR_TO_VECTOR in order
21450 // to avoid another round of DAG combining.
21451 if (i == 0)
21452 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
21453 else
21454 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
21455 ScalarLoad, DAG.getIntPtrConstant(i, dl));
21457 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
21458 Offset += OffsetInc;
21461 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
21463 // Bitcast the loaded value to a vector of the original element type, in
21464 // the size of the target vector type.
21465 SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
21466 unsigned SizeRatio = RegSz / MemSz;
21468 if (Ext == ISD::SEXTLOAD) {
21469 SDValue Sext = getExtendInVec(ISD::SIGN_EXTEND, dl, RegVT, SlicedVec, DAG);
21470 return DAG.getMergeValues({Sext, TF}, dl);
21473 if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
21474 MemVT == MVT::v8i8) {
21475 SDValue Sext = getExtendInVec(ISD::ZERO_EXTEND, dl, RegVT, SlicedVec, DAG);
21476 return DAG.getMergeValues({Sext, TF}, dl);
21479 // Redistribute the loaded elements into the different locations.
21480 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
21481 for (unsigned i = 0; i != NumElems; ++i)
21482 ShuffleVec[i * SizeRatio] = i;
21484 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
21485 DAG.getUNDEF(WideVecVT), ShuffleVec);
21487 // Bitcast to the requested type.
21488 Shuff = DAG.getBitcast(RegVT, Shuff);
21489 return DAG.getMergeValues({Shuff, TF}, dl);
21492 /// Return true if the node is an ISD::AND or ISD::OR of two X86ISD::SETCC
21493 /// nodes, each of which has no other use apart from the AND / OR.
21494 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
21495 Opc = Op.getOpcode();
21496 if (Opc != ISD::OR && Opc != ISD::AND)
21497 return false;
21498 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21499 Op.getOperand(0).hasOneUse() &&
21500 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
21501 Op.getOperand(1).hasOneUse());
21504 /// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, and the
21505 /// SETCC node has a single use.
21506 static bool isXor1OfSetCC(SDValue Op) {
21507 if (Op.getOpcode() != ISD::XOR)
21508 return false;
21509 if (isOneConstant(Op.getOperand(1)))
21510 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21511 Op.getOperand(0).hasOneUse();
21512 return false;
21515 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
21516 bool addTest = true;
21517 SDValue Chain = Op.getOperand(0);
21518 SDValue Cond = Op.getOperand(1);
21519 SDValue Dest = Op.getOperand(2);
21520 SDLoc dl(Op);
21521 SDValue CC;
21522 bool Inverted = false;
21524 if (Cond.getOpcode() == ISD::SETCC) {
21525 // Check for setcc([su]{add,sub,mul}o == 0).
21526 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
21527 isNullConstant(Cond.getOperand(1)) &&
21528 Cond.getOperand(0).getResNo() == 1 &&
21529 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
21530 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
21531 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
21532 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
21533 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
21534 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
21535 Inverted = true;
21536 Cond = Cond.getOperand(0);
21537 } else {
21538 if (SDValue NewCond = LowerSETCC(Cond, DAG))
21539 Cond = NewCond;
21542 #if 0
21543 // FIXME: LowerXALUO doesn't handle these!!
21544 else if (Cond.getOpcode() == X86ISD::ADD ||
21545 Cond.getOpcode() == X86ISD::SUB ||
21546 Cond.getOpcode() == X86ISD::SMUL ||
21547 Cond.getOpcode() == X86ISD::UMUL)
21548 Cond = LowerXALUO(Cond, DAG);
21549 #endif
21551 // Look past (and (setcc_carry (cmp ...)), 1).
21552 if (Cond.getOpcode() == ISD::AND &&
21553 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
21554 isOneConstant(Cond.getOperand(1)))
21555 Cond = Cond.getOperand(0);
21557 // If the condition flag is set by an X86ISD::CMP, then use it as the
21558 // condition-setting operand in place of the X86ISD::SETCC.
21559 unsigned CondOpcode = Cond.getOpcode();
21560 if (CondOpcode == X86ISD::SETCC ||
21561 CondOpcode == X86ISD::SETCC_CARRY) {
21562 CC = Cond.getOperand(0);
21564 SDValue Cmp = Cond.getOperand(1);
21565 unsigned Opc = Cmp.getOpcode();
21566 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
21567 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
21568 Cond = Cmp;
21569 addTest = false;
21570 } else {
21571 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
21572 default: break;
21573 case X86::COND_O:
21574 case X86::COND_B:
21575 // These can only come from an arithmetic instruction with overflow,
21576 // e.g. SADDO, UADDO.
21577 Cond = Cond.getOperand(1);
21578 addTest = false;
21579 break;
21583 CondOpcode = Cond.getOpcode();
21584 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
21585 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
21586 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
21587 SDValue Value;
21588 X86::CondCode X86Cond;
21589 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
21591 if (Inverted)
21592 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
21594 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
21595 addTest = false;
21596 } else {
21597 unsigned CondOpc;
21598 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
21599 SDValue Cmp = Cond.getOperand(0).getOperand(1);
21600 if (CondOpc == ISD::OR) {
21601 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
21602 // two branches instead of an explicit OR instruction with a
21603 // separate test.
21604 if (Cmp == Cond.getOperand(1).getOperand(1) &&
21605 isX86LogicalCmp(Cmp)) {
21606 CC = Cond.getOperand(0).getOperand(0);
21607 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21608 Chain, Dest, CC, Cmp);
21609 CC = Cond.getOperand(1).getOperand(0);
21610 Cond = Cmp;
21611 addTest = false;
21613 } else { // ISD::AND
21614 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
21615 // two branches instead of an explicit AND instruction with a
21616 // separate test. However, we only do this if this block doesn't
21617 // have a fall-through edge, because this requires an explicit
21618 // jmp when the condition is false.
21619 if (Cmp == Cond.getOperand(1).getOperand(1) &&
21620 isX86LogicalCmp(Cmp) &&
21621 Op.getNode()->hasOneUse()) {
21622 X86::CondCode CCode =
21623 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
21624 CCode = X86::GetOppositeBranchCondition(CCode);
21625 CC = DAG.getConstant(CCode, dl, MVT::i8);
21626 SDNode *User = *Op.getNode()->use_begin();
21627 // Look for an unconditional branch following this conditional branch.
21628 // We need this because we need to reverse the successors in order
21629 // to implement FCMP_OEQ.
21630 if (User->getOpcode() == ISD::BR) {
21631 SDValue FalseBB = User->getOperand(1);
21632 SDNode *NewBR =
21633 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
21634 assert(NewBR == User);
21635 (void)NewBR;
21636 Dest = FalseBB;
21638 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21639 Chain, Dest, CC, Cmp);
21640 X86::CondCode CCode =
21641 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
21642 CCode = X86::GetOppositeBranchCondition(CCode);
21643 CC = DAG.getConstant(CCode, dl, MVT::i8);
21644 Cond = Cmp;
21645 addTest = false;
21649 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
21650 // Recognize the "xorb (setcc), 1" pattern. The xor inverts the condition.
21651 // It should be transformed by the DAG combiner except when the condition
21652 // is set by an arithmetic-with-overflow node.
21653 X86::CondCode CCode =
21654 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
21655 CCode = X86::GetOppositeBranchCondition(CCode);
21656 CC = DAG.getConstant(CCode, dl, MVT::i8);
21657 Cond = Cond.getOperand(0).getOperand(1);
21658 addTest = false;
21659 } else if (Cond.getOpcode() == ISD::SETCC &&
21660 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
21661 // For FCMP_OEQ, we can emit
21662 // two branches instead of an explicit AND instruction with a
21663 // separate test. However, we only do this if this block doesn't
21664 // have a fall-through edge, because this requires an explicit
21665 // jmp when the condition is false.
21666 if (Op.getNode()->hasOneUse()) {
21667 SDNode *User = *Op.getNode()->use_begin();
21668 // Look for an unconditional branch following this conditional branch.
21669 // We need this because we need to reverse the successors in order
21670 // to implement FCMP_OEQ.
21671 if (User->getOpcode() == ISD::BR) {
21672 SDValue FalseBB = User->getOperand(1);
21673 SDNode *NewBR =
21674 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
21675 assert(NewBR == User);
21676 (void)NewBR;
21677 Dest = FalseBB;
21679 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
21680 Cond.getOperand(0), Cond.getOperand(1));
21681 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
21682 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
21683 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21684 Chain, Dest, CC, Cmp);
21685 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
21686 Cond = Cmp;
21687 addTest = false;
21690 } else if (Cond.getOpcode() == ISD::SETCC &&
21691 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
21692 // For FCMP_UNE, we can emit
21693 // two branches instead of an explicit OR instruction with a
21694 // separate test.
21695 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
21696 Cond.getOperand(0), Cond.getOperand(1));
21697 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
21698 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
21699 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21700 Chain, Dest, CC, Cmp);
21701 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
21702 Cond = Cmp;
21703 addTest = false;
21707 if (addTest) {
21708 // Look past the truncate if the high bits are known zero.
21709 if (isTruncWithZeroHighBitsInput(Cond, DAG))
21710 Cond = Cond.getOperand(0);
21712 // We know the result of AND is compared against zero. Try to match
21713 // it to BT.
21714 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
21715 SDValue BTCC;
21716 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
21717 CC = BTCC;
21718 Cond = BT;
21719 addTest = false;
21724 if (addTest) {
21725 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
21726 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
21727 Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
21728 X86Cond, dl, DAG);
21730 Cond = ConvertCmpIfNecessary(Cond, DAG);
21731 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21732 Chain, Dest, CC, Cond);
21735 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
21736 // Calls to _alloca are needed to probe the stack when allocating more than 4k
21737 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
21738 // that the guard pages used by the OS virtual memory manager are allocated in
21739 // the correct sequence.
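// (Windows commits stack pages one 4 KiB guard page at a time, so every page
// of a larger allocation has to be touched in address order.)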
21740 SDValue
21741 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
21742 SelectionDAG &DAG) const {
21743 MachineFunction &MF = DAG.getMachineFunction();
21744 bool SplitStack = MF.shouldSplitStack();
21745 bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
21746 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
21747 SplitStack || EmitStackProbe;
21748 SDLoc dl(Op);
21750 // Get the inputs.
21751 SDNode *Node = Op.getNode();
21752 SDValue Chain = Op.getOperand(0);
21753 SDValue Size = Op.getOperand(1);
21754 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
21755 EVT VT = Node->getValueType(0);
21757 // Chain the dynamic stack allocation so that it doesn't modify the stack
21758 // pointer when other instructions are using the stack.
21759 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
21761 bool Is64Bit = Subtarget.is64Bit();
21762 MVT SPTy = getPointerTy(DAG.getDataLayout());
21764 SDValue Result;
21765 if (!Lower) {
21766 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21767 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
21768 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
21769 " not tell us which reg is the stack pointer!");
21771 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
21772 Chain = SP.getValue(1);
21773 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
21774 unsigned StackAlign = TFI.getStackAlignment();
21775 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
21776 if (Align > StackAlign)
21777 Result = DAG.getNode(ISD::AND, dl, VT, Result,
21778 DAG.getConstant(-(uint64_t)Align, dl, VT));
21779 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
21780 } else if (SplitStack) {
21781 MachineRegisterInfo &MRI = MF.getRegInfo();
21783 if (Is64Bit) {
21784 // The 64-bit implementation of segmented stacks needs to clobber both r10
21785 // and r11. This makes it impossible to use it along with nested parameters.
21786 const Function &F = MF.getFunction();
21787 for (const auto &A : F.args()) {
21788 if (A.hasNestAttr())
21789 report_fatal_error("Cannot use segmented stacks with functions that "
21790 "have nested arguments.");
21794 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
21795 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
21796 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
21797 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
21798 DAG.getRegister(Vreg, SPTy));
21799 } else {
21800 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
21801 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
21802 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
21804 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
21805 unsigned SPReg = RegInfo->getStackRegister();
21806 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
21807 Chain = SP.getValue(1);
21809 if (Align) {
21810 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
21811 DAG.getConstant(-(uint64_t)Align, dl, VT));
21812 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
21815 Result = SP;
21818 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
21819 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
21821 SDValue Ops[2] = {Result, Chain};
21822 return DAG.getMergeValues(Ops, dl);
21825 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
21826 MachineFunction &MF = DAG.getMachineFunction();
21827 auto PtrVT = getPointerTy(MF.getDataLayout());
21828 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
21830 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
21831 SDLoc DL(Op);
21833 if (!Subtarget.is64Bit() ||
21834 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
21835 // vastart just stores the address of the VarArgsFrameIndex slot into the
21836 // memory location argument.
21837 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
21838 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
21839 MachinePointerInfo(SV));
21842 // __va_list_tag:
21843 // gp_offset (0 to 6 * 8)
21844 // fp_offset (48 to 48 + 8 * 16)
21845 // overflow_arg_area (points to parameters passed in memory).
21846 // reg_save_area
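// Illustrative LP64 layout of the struct being initialized here (the pointer
// fields are 4 bytes instead under the X32 ABI, matching the 12/16 offsets
// chosen below):
//   struct __va_list_tag {
//     unsigned gp_offset;         // 0 .. 48, stepped by 8
//     unsigned fp_offset;         // 48 .. 48 + 8 * 16, stepped by 16
//     void *overflow_arg_area;    // stack-passed arguments
//     void *reg_save_area;        // spilled register arguments
//   };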
21847 SmallVector<SDValue, 8> MemOps;
21848 SDValue FIN = Op.getOperand(1);
21849 // Store gp_offset
21850 SDValue Store = DAG.getStore(
21851 Op.getOperand(0), DL,
21852 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
21853 MachinePointerInfo(SV));
21854 MemOps.push_back(Store);
21856 // Store fp_offset
21857 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
21858 Store = DAG.getStore(
21859 Op.getOperand(0), DL,
21860 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
21861 MachinePointerInfo(SV, 4));
21862 MemOps.push_back(Store);
21864 // Store ptr to overflow_arg_area
21865 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
21866 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
21867 Store =
21868 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
21869 MemOps.push_back(Store);
21871 // Store ptr to reg_save_area.
21872 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
21873 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
21874 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
21875 Store = DAG.getStore(
21876 Op.getOperand(0), DL, RSFIN, FIN,
21877 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
21878 MemOps.push_back(Store);
21879 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
21882 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
21883 assert(Subtarget.is64Bit() &&
21884 "LowerVAARG only handles 64-bit va_arg!");
21885 assert(Op.getNumOperands() == 4);
21887 MachineFunction &MF = DAG.getMachineFunction();
21888 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
21889 // The Win64 ABI uses char* instead of a structure.
21890 return DAG.expandVAArg(Op.getNode());
21892 SDValue Chain = Op.getOperand(0);
21893 SDValue SrcPtr = Op.getOperand(1);
21894 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
21895 unsigned Align = Op.getConstantOperandVal(3);
21896 SDLoc dl(Op);
21898 EVT ArgVT = Op.getNode()->getValueType(0);
21899 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
21900 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
21901 uint8_t ArgMode;
21903 // Decide which area this value should be read from.
21904 // TODO: Implement the AMD64 ABI in its entirety. This simple
21905 // selection mechanism works only for the basic types.
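// ArgMode feeds the VAARG_64 node built below: 1 reads from the gp_offset
// area, 2 reads from the fp_offset area set up by LowerVASTART.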
21906 if (ArgVT == MVT::f80) {
21907 llvm_unreachable("va_arg for f80 not yet implemented");
21908 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
21909 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
21910 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
21911 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
21912 } else {
21913 llvm_unreachable("Unhandled argument type in LowerVAARG");
21916 if (ArgMode == 2) {
21917 // Sanity Check: Make sure using fp_offset makes sense.
21918 assert(!Subtarget.useSoftFloat() &&
21919 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
21920 Subtarget.hasSSE1());
21923 // Insert VAARG_64 node into the DAG
21924 // VAARG_64 returns two values: Variable Argument Address, Chain
21925 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
21926 DAG.getConstant(ArgMode, dl, MVT::i8),
21927 DAG.getConstant(Align, dl, MVT::i32)};
21928 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
21929 SDValue VAARG = DAG.getMemIntrinsicNode(
21930 X86ISD::VAARG_64, dl,
21931 VTs, InstOps, MVT::i64,
21932 MachinePointerInfo(SV),
21933 /*Align=*/0,
21934 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
21935 Chain = VAARG.getValue(1);
21937 // Load the next argument and return it
21938 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
21941 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
21942 SelectionDAG &DAG) {
21943 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
21944 // where a va_list is still an i8*.
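// That struct is 4 + 4 + 8 + 8 = 24 bytes, which is the memcpy size used below.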
21945 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
21946 if (Subtarget.isCallingConvWin64(
21947 DAG.getMachineFunction().getFunction().getCallingConv()))
21948 // Probably a Win64 va_copy.
21949 return DAG.expandVACopy(Op.getNode());
21951 SDValue Chain = Op.getOperand(0);
21952 SDValue DstPtr = Op.getOperand(1);
21953 SDValue SrcPtr = Op.getOperand(2);
21954 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
21955 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
21956 SDLoc DL(Op);
21958 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
21959 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
21960 false, false,
21961 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
21964 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
21965 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
21966 switch (Opc) {
21967 case ISD::SHL:
21968 case X86ISD::VSHL:
21969 case X86ISD::VSHLI:
21970 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
21971 case ISD::SRL:
21972 case X86ISD::VSRL:
21973 case X86ISD::VSRLI:
21974 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
21975 case ISD::SRA:
21976 case X86ISD::VSRA:
21977 case X86ISD::VSRAI:
21978 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
21980 llvm_unreachable("Unknown target vector shift node");
21983 /// Handle vector element shifts where the shift amount is a constant.
21984 /// Takes immediate version of shift as input.
21985 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
21986 SDValue SrcOp, uint64_t ShiftAmt,
21987 SelectionDAG &DAG) {
21988 MVT ElementType = VT.getVectorElementType();
21990 // Bitcast the source vector to the output type; this is mainly necessary for
21991 // vXi8/vXi64 shifts.
21992 if (VT != SrcOp.getSimpleValueType())
21993 SrcOp = DAG.getBitcast(VT, SrcOp);
21995 // Fold this packed shift into its first operand if ShiftAmt is 0.
21996 if (ShiftAmt == 0)
21997 return SrcOp;
21999 // Check for ShiftAmt >= element width
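// Logical shifts by at least the element width produce zero; arithmetic
// shifts saturate at (width - 1), i.e. they splat the sign bit.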
22000 if (ShiftAmt >= ElementType.getSizeInBits()) {
22001 if (Opc == X86ISD::VSRAI)
22002 ShiftAmt = ElementType.getSizeInBits() - 1;
22003 else
22004 return DAG.getConstant(0, dl, VT);
22007 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
22008 && "Unknown target vector shift-by-constant node");
22010 // Fold this packed vector shift into a build vector if SrcOp is a
22011 // vector of Constants or UNDEFs.
22012 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
22013 SmallVector<SDValue, 8> Elts;
22014 unsigned NumElts = SrcOp->getNumOperands();
22016 switch (Opc) {
22017 default: llvm_unreachable("Unknown opcode!");
22018 case X86ISD::VSHLI:
22019 for (unsigned i = 0; i != NumElts; ++i) {
22020 SDValue CurrentOp = SrcOp->getOperand(i);
22021 if (CurrentOp->isUndef()) {
22022 Elts.push_back(CurrentOp);
22023 continue;
22025 auto *ND = cast<ConstantSDNode>(CurrentOp);
22026 const APInt &C = ND->getAPIntValue();
22027 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
22029 break;
22030 case X86ISD::VSRLI:
22031 for (unsigned i = 0; i != NumElts; ++i) {
22032 SDValue CurrentOp = SrcOp->getOperand(i);
22033 if (CurrentOp->isUndef()) {
22034 Elts.push_back(CurrentOp);
22035 continue;
22037 auto *ND = cast<ConstantSDNode>(CurrentOp);
22038 const APInt &C = ND->getAPIntValue();
22039 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
22041 break;
22042 case X86ISD::VSRAI:
22043 for (unsigned i = 0; i != NumElts; ++i) {
22044 SDValue CurrentOp = SrcOp->getOperand(i);
22045 if (CurrentOp->isUndef()) {
22046 Elts.push_back(CurrentOp);
22047 continue;
22049 auto *ND = cast<ConstantSDNode>(CurrentOp);
22050 const APInt &C = ND->getAPIntValue();
22051 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
22053 break;
22056 return DAG.getBuildVector(VT, dl, Elts);
22059 return DAG.getNode(Opc, dl, VT, SrcOp,
22060 DAG.getConstant(ShiftAmt, dl, MVT::i8));
22063 /// Handle vector element shifts where the shift amount may or may not be a
22064 /// constant. Takes immediate version of shift as input.
22065 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
22066 SDValue SrcOp, SDValue ShAmt,
22067 const X86Subtarget &Subtarget,
22068 SelectionDAG &DAG) {
22069 MVT SVT = ShAmt.getSimpleValueType();
22070 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
22072 // Catch shift-by-constant.
22073 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
22074 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
22075 CShAmt->getZExtValue(), DAG);
22077 // Change opcode to non-immediate version.
22078 Opc = getTargetVShiftUniformOpcode(Opc, true);
22080 // Need to build a vector containing the shift amount.
22081 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
22082 // +====================+============+=======================================+
22083 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
22084 // +====================+============+=======================================+
22085 // | i64 | Yes, No | Use ShAmt as lowest elt |
22086 // | i32 | Yes | zero-extend in-reg |
22087 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
22088 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
22089 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
22090 // +====================+============+=======================================+
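// For example, a v8i16 shift by an i32 amount without SSE4.1 takes the last
// row: the amount becomes build_vector(ShAmt, 0, undef, undef), which is then
// bitcast to the 128-bit shift-amount type below.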
22092 if (SVT == MVT::i64)
22093 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
22094 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
22095 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
22096 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
22097 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
22098 ShAmt = ShAmt.getOperand(0);
22099 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
22100 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
22101 if (Subtarget.hasSSE41())
22102 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22103 MVT::v2i64, ShAmt);
22104 else {
22105 SDValue ByteShift = DAG.getConstant(
22106 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
22107 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
22108 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22109 ByteShift);
22110 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22111 ByteShift);
22113 } else if (Subtarget.hasSSE41() &&
22114 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22115 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
22116 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22117 MVT::v2i64, ShAmt);
22118 } else {
22119 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
22120 DAG.getUNDEF(SVT)};
22121 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
22124 // The return type has to be a 128-bit type with the same element
22125 // type as the input type.
22126 MVT EltVT = VT.getVectorElementType();
22127 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
22129 ShAmt = DAG.getBitcast(ShVT, ShAmt);
22130 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
22133 /// Return Mask with the necessary casting or extending
22134 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
22135 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
22136 const X86Subtarget &Subtarget, SelectionDAG &DAG,
22137 const SDLoc &dl) {
22139 if (isAllOnesConstant(Mask))
22140 return DAG.getConstant(1, dl, MaskVT);
22141 if (X86::isZeroNode(Mask))
22142 return DAG.getConstant(0, dl, MaskVT);
22144 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
22146 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
22147 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
22148 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
22149 // In 32-bit mode a bitcast of i64 is illegal; split it into two i32 halves.
22150 SDValue Lo, Hi;
22151 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22152 DAG.getConstant(0, dl, MVT::i32));
22153 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22154 DAG.getConstant(1, dl, MVT::i32));
22156 Lo = DAG.getBitcast(MVT::v32i1, Lo);
22157 Hi = DAG.getBitcast(MVT::v32i1, Hi);
22159 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
22160 } else {
22161 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
22162 Mask.getSimpleValueType().getSizeInBits());
22163 // In the case where MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
22164 // are extracted by EXTRACT_SUBVECTOR.
22165 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
22166 DAG.getBitcast(BitcastVT, Mask),
22167 DAG.getIntPtrConstant(0, dl));
22171 /// Return (and \p Op, \p Mask) for compare instructions or
22172 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
22173 /// necessary casting or extending for \p Mask when lowering masking intrinsics
22174 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
22175 SDValue PreservedSrc,
22176 const X86Subtarget &Subtarget,
22177 SelectionDAG &DAG) {
22178 MVT VT = Op.getSimpleValueType();
22179 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
22180 unsigned OpcodeSelect = ISD::VSELECT;
22181 SDLoc dl(Op);
22183 if (isAllOnesConstant(Mask))
22184 return Op;
22186 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22188 if (PreservedSrc.isUndef())
22189 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22190 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
22193 /// Creates an SDNode for a predicated scalar operation.
22194 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
22195 /// The mask comes in as MVT::i8 and should be transformed
22196 /// to MVT::v1i1 while lowering masking intrinsics.
22197 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
22198 /// "X86select" instead of "vselect". We just can't create the "vselect" node
22199 /// for a scalar instruction.
22200 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
22201 SDValue PreservedSrc,
22202 const X86Subtarget &Subtarget,
22203 SelectionDAG &DAG) {
22205 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
22206 if (MaskConst->getZExtValue() & 0x1)
22207 return Op;
22209 MVT VT = Op.getSimpleValueType();
22210 SDLoc dl(Op);
22212 assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
22213 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
22214 DAG.getBitcast(MVT::v8i1, Mask),
22215 DAG.getIntPtrConstant(0, dl));
22216 if (Op.getOpcode() == X86ISD::FSETCCM ||
22217 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
22218 Op.getOpcode() == X86ISD::VFPCLASSS)
22219 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
22221 if (PreservedSrc.isUndef())
22222 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22223 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
22226 static int getSEHRegistrationNodeSize(const Function *Fn) {
22227 if (!Fn->hasPersonalityFn())
22228 report_fatal_error(
22229 "querying registration node size for function without personality");
22230 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
22231 // WinEHStatePass for the full struct definition.
22232 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
22233 case EHPersonality::MSVC_X86SEH: return 24;
22234 case EHPersonality::MSVC_CXX: return 16;
22235 default: break;
22237 report_fatal_error(
22238 "can only recover FP for 32-bit MSVC EH personality functions");
22241 /// When the MSVC runtime transfers control to us, either to an outlined
22242 /// function or when returning to a parent frame after catching an exception, we
22243 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
22244 /// Here's the math:
22245 /// RegNodeBase = EntryEBP - RegNodeSize
22246 /// ParentFP = RegNodeBase - ParentFrameOffset
22247 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
22248 /// subtracting the offset (negative on x86) takes us back to the parent FP.
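/// For instance (hypothetical numbers), with the 32-bit SEH personality
/// (RegNodeSize == 24), EntryEBP == 0x1000 and ParentFrameOffset == -64:
///   RegNodeBase = 0x1000 - 24   = 0xfe8
///   ParentFP    = 0xfe8 - (-64) = 0x1028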
22249 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
22250 SDValue EntryEBP) {
22251 MachineFunction &MF = DAG.getMachineFunction();
22252 SDLoc dl;
22254 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22255 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
22257 // It's possible that the parent function no longer has a personality function
22258 // if the exceptional code was optimized away, in which case we just return
22259 // the incoming EBP.
22260 if (!Fn->hasPersonalityFn())
22261 return EntryEBP;
22263 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
22264 // registration, or the .set_setframe offset.
22265 MCSymbol *OffsetSym =
22266 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
22267 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
22268 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
22269 SDValue ParentFrameOffset =
22270 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
22272 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
22273 // prologue to RBP in the parent function.
22274 const X86Subtarget &Subtarget =
22275 static_cast<const X86Subtarget &>(DAG.getSubtarget());
22276 if (Subtarget.is64Bit())
22277 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
22279 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
22280 // RegNodeBase = EntryEBP - RegNodeSize
22281 // ParentFP = RegNodeBase - ParentFrameOffset
22282 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
22283 DAG.getConstant(RegNodeSize, dl, PtrVT));
22284 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
22287 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
22288 SelectionDAG &DAG) const {
22289 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
22290 auto isRoundModeCurDirection = [](SDValue Rnd) {
22291 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
22292 return C->getZExtValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
22294 return false;
22296 auto isRoundModeSAE = [](SDValue Rnd) {
22297 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
22298 return C->getZExtValue() == X86::STATIC_ROUNDING::NO_EXC;
22300 return false;
22302 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
22303 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
22304 RC = C->getZExtValue();
22305 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
22306 // Clear the NO_EXC bit and check remaining bits.
22307 RC ^= X86::STATIC_ROUNDING::NO_EXC;
22308 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
22309 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
22310 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
22311 RC == X86::STATIC_ROUNDING::TO_ZERO;
22315 return false;
22318 SDLoc dl(Op);
22319 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22320 MVT VT = Op.getSimpleValueType();
22321 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
22322 if (IntrData) {
22323 switch(IntrData->Type) {
22324 case INTR_TYPE_1OP: {
22325 // We specify 2 possible opcodes for intrinsics with rounding modes.
22326 // First, we check if the intrinsic may have non-default rounding mode,
22327 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22328 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22329 if (IntrWithRoundingModeOpcode != 0) {
22330 SDValue Rnd = Op.getOperand(2);
22331 unsigned RC = 0;
22332 if (isRoundModeSAEToX(Rnd, RC))
22333 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22334 Op.getOperand(1),
22335 DAG.getTargetConstant(RC, dl, MVT::i32));
22336 if (!isRoundModeCurDirection(Rnd))
22337 return SDValue();
22339 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
22341 case INTR_TYPE_1OP_SAE: {
22342 SDValue Sae = Op.getOperand(2);
22344 unsigned Opc;
22345 if (isRoundModeCurDirection(Sae))
22346 Opc = IntrData->Opc0;
22347 else if (isRoundModeSAE(Sae))
22348 Opc = IntrData->Opc1;
22349 else
22350 return SDValue();
22352 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
22354 case INTR_TYPE_2OP: {
22355 SDValue Src2 = Op.getOperand(2);
22357 // We specify 2 possible opcodes for intrinsics with rounding modes.
22358 // First, we check if the intrinsic may have non-default rounding mode,
22359 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22360 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22361 if (IntrWithRoundingModeOpcode != 0) {
22362 SDValue Rnd = Op.getOperand(3);
22363 unsigned RC = 0;
22364 if (isRoundModeSAEToX(Rnd, RC))
22365 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22366 Op.getOperand(1), Src2,
22367 DAG.getTargetConstant(RC, dl, MVT::i32));
22368 if (!isRoundModeCurDirection(Rnd))
22369 return SDValue();
22372 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22373 Op.getOperand(1), Src2);
22375 case INTR_TYPE_2OP_SAE: {
22376 SDValue Sae = Op.getOperand(3);
22378 unsigned Opc;
22379 if (isRoundModeCurDirection(Sae))
22380 Opc = IntrData->Opc0;
22381 else if (isRoundModeSAE(Sae))
22382 Opc = IntrData->Opc1;
22383 else
22384 return SDValue();
22386 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
22387 Op.getOperand(2));
22389 case INTR_TYPE_3OP:
22390 case INTR_TYPE_3OP_IMM8: {
22391 SDValue Src1 = Op.getOperand(1);
22392 SDValue Src2 = Op.getOperand(2);
22393 SDValue Src3 = Op.getOperand(3);
22395 if (IntrData->Type == INTR_TYPE_3OP_IMM8)
22396 Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
22398 // We specify 2 possible opcodes for intrinsics with rounding modes.
22399 // First, we check if the intrinsic may have non-default rounding mode,
22400 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22401 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22402 if (IntrWithRoundingModeOpcode != 0) {
22403 SDValue Rnd = Op.getOperand(4);
22404 unsigned RC = 0;
22405 if (isRoundModeSAEToX(Rnd, RC))
22406 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22407 Src1, Src2, Src3,
22408 DAG.getTargetConstant(RC, dl, MVT::i32));
22409 if (!isRoundModeCurDirection(Rnd))
22410 return SDValue();
22413 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22414 Src1, Src2, Src3);
22416 case INTR_TYPE_4OP:
22417 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
22418 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
22419 case INTR_TYPE_1OP_MASK: {
22420 SDValue Src = Op.getOperand(1);
22421 SDValue PassThru = Op.getOperand(2);
22422 SDValue Mask = Op.getOperand(3);
22423 // We add rounding mode to the Node when
22424 // - RC Opcode is specified and
22425 // - RC is not "current direction".
22426 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22427 if (IntrWithRoundingModeOpcode != 0) {
22428 SDValue Rnd = Op.getOperand(4);
22429 unsigned RC = 0;
22430 if (isRoundModeSAEToX(Rnd, RC))
22431 return getVectorMaskingNode(
22432 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22433 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
22434 Mask, PassThru, Subtarget, DAG);
22435 if (!isRoundModeCurDirection(Rnd))
22436 return SDValue();
22438 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
22439 Mask, PassThru, Subtarget, DAG);
22441 case INTR_TYPE_1OP_MASK_SAE: {
22442 SDValue Src = Op.getOperand(1);
22443 SDValue PassThru = Op.getOperand(2);
22444 SDValue Mask = Op.getOperand(3);
22445 SDValue Rnd = Op.getOperand(4);
22447 unsigned Opc;
22448 if (isRoundModeCurDirection(Rnd))
22449 Opc = IntrData->Opc0;
22450 else if (isRoundModeSAE(Rnd))
22451 Opc = IntrData->Opc1;
22452 else
22453 return SDValue();
22455 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src),
22456 Mask, PassThru, Subtarget, DAG);
22458 case INTR_TYPE_SCALAR_MASK: {
22459 SDValue Src1 = Op.getOperand(1);
22460 SDValue Src2 = Op.getOperand(2);
22461 SDValue passThru = Op.getOperand(3);
22462 SDValue Mask = Op.getOperand(4);
22463 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22464 // There are 2 kinds of intrinsics in this group:
22465 // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
22466 // (2) With rounding mode and sae - 7 operands.
22467 bool HasRounding = IntrWithRoundingModeOpcode != 0;
22468 if (Op.getNumOperands() == (5U + HasRounding)) {
22469 if (HasRounding) {
22470 SDValue Rnd = Op.getOperand(5);
22471 unsigned RC = 0;
22472 if (isRoundModeSAEToX(Rnd, RC))
22473 return getScalarMaskingNode(
22474 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
22475 DAG.getTargetConstant(RC, dl, MVT::i32)),
22476 Mask, passThru, Subtarget, DAG);
22477 if (!isRoundModeCurDirection(Rnd))
22478 return SDValue();
22480 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
22481 Src2),
22482 Mask, passThru, Subtarget, DAG);
22485 assert(Op.getNumOperands() == (6U + HasRounding) &&
22486 "Unexpected intrinsic form");
22487 SDValue RoundingMode = Op.getOperand(5);
22488 unsigned Opc = IntrData->Opc0;
22489 if (HasRounding) {
22490 SDValue Sae = Op.getOperand(6);
22491 if (isRoundModeSAE(Sae))
22492 Opc = IntrWithRoundingModeOpcode;
22493 else if (!isRoundModeCurDirection(Sae))
22494 return SDValue();
22496 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
22497 Src2, RoundingMode),
22498 Mask, passThru, Subtarget, DAG);
22500 case INTR_TYPE_SCALAR_MASK_RND: {
22501 SDValue Src1 = Op.getOperand(1);
22502 SDValue Src2 = Op.getOperand(2);
22503 SDValue passThru = Op.getOperand(3);
22504 SDValue Mask = Op.getOperand(4);
22505 SDValue Rnd = Op.getOperand(5);
22507 SDValue NewOp;
22508 unsigned RC = 0;
22509 if (isRoundModeCurDirection(Rnd))
22510 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22511 else if (isRoundModeSAEToX(Rnd, RC))
22512 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22513 DAG.getTargetConstant(RC, dl, MVT::i32));
22514 else
22515 return SDValue();
22517 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
22519 case INTR_TYPE_SCALAR_MASK_SAE: {
22520 SDValue Src1 = Op.getOperand(1);
22521 SDValue Src2 = Op.getOperand(2);
22522 SDValue passThru = Op.getOperand(3);
22523 SDValue Mask = Op.getOperand(4);
22524 SDValue Sae = Op.getOperand(5);
22525 unsigned Opc;
22526 if (isRoundModeCurDirection(Sae))
22527 Opc = IntrData->Opc0;
22528 else if (isRoundModeSAE(Sae))
22529 Opc = IntrData->Opc1;
22530 else
22531 return SDValue();
22533 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22534 Mask, passThru, Subtarget, DAG);
22536 case INTR_TYPE_2OP_MASK: {
22537 SDValue Src1 = Op.getOperand(1);
22538 SDValue Src2 = Op.getOperand(2);
22539 SDValue PassThru = Op.getOperand(3);
22540 SDValue Mask = Op.getOperand(4);
22541 SDValue NewOp;
22542 if (IntrData->Opc1 != 0) {
22543 SDValue Rnd = Op.getOperand(5);
22544 unsigned RC = 0;
22545 if (isRoundModeSAEToX(Rnd, RC))
22546 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22547 DAG.getTargetConstant(RC, dl, MVT::i32));
22548 else if (!isRoundModeCurDirection(Rnd))
22549 return SDValue();
22551 if (!NewOp)
22552 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22553 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
22555 case INTR_TYPE_2OP_MASK_SAE: {
22556 SDValue Src1 = Op.getOperand(1);
22557 SDValue Src2 = Op.getOperand(2);
22558 SDValue PassThru = Op.getOperand(3);
22559 SDValue Mask = Op.getOperand(4);
22561 unsigned Opc = IntrData->Opc0;
22562 if (IntrData->Opc1 != 0) {
22563 SDValue Sae = Op.getOperand(5);
22564 if (isRoundModeSAE(Sae))
22565 Opc = IntrData->Opc1;
22566 else if (!isRoundModeCurDirection(Sae))
22567 return SDValue();
22570 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22571 Mask, PassThru, Subtarget, DAG);
22573 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
22574 SDValue Src1 = Op.getOperand(1);
22575 SDValue Src2 = Op.getOperand(2);
22576 SDValue Src3 = Op.getOperand(3);
22577 SDValue PassThru = Op.getOperand(4);
22578 SDValue Mask = Op.getOperand(5);
22579 SDValue Sae = Op.getOperand(6);
22580 unsigned Opc;
22581 if (isRoundModeCurDirection(Sae))
22582 Opc = IntrData->Opc0;
22583 else if (isRoundModeSAE(Sae))
22584 Opc = IntrData->Opc1;
22585 else
22586 return SDValue();
22588 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
22589 Mask, PassThru, Subtarget, DAG);
22591 case INTR_TYPE_3OP_MASK_SAE: {
22592 SDValue Src1 = Op.getOperand(1);
22593 SDValue Src2 = Op.getOperand(2);
22594 SDValue Src3 = Op.getOperand(3);
22595 SDValue PassThru = Op.getOperand(4);
22596 SDValue Mask = Op.getOperand(5);
22598 unsigned Opc = IntrData->Opc0;
22599 if (IntrData->Opc1 != 0) {
22600 SDValue Sae = Op.getOperand(6);
22601 if (isRoundModeSAE(Sae))
22602 Opc = IntrData->Opc1;
22603 else if (!isRoundModeCurDirection(Sae))
22604 return SDValue();
22606 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
22607 Mask, PassThru, Subtarget, DAG);
22609 case BLENDV: {
22610 SDValue Src1 = Op.getOperand(1);
22611 SDValue Src2 = Op.getOperand(2);
22612 SDValue Src3 = Op.getOperand(3);
22614 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
22615 Src3 = DAG.getBitcast(MaskVT, Src3);
22617 // Reverse the operands to match VSELECT order.
22618 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
22620 case VPERM_2OP : {
22621 SDValue Src1 = Op.getOperand(1);
22622 SDValue Src2 = Op.getOperand(2);
22624 // Swap Src1 and Src2 in the node creation
22625 return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
22627 case IFMA_OP:
22628 // NOTE: We need to swizzle the operands to pass the multiply operands
22629 // first.
22630 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22631 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
22632 case FPCLASSS: {
22633 SDValue Src1 = Op.getOperand(1);
22634 SDValue Imm = Op.getOperand(2);
22635 SDValue Mask = Op.getOperand(3);
22636 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
22637 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
22638 Subtarget, DAG);
22639 // Need to fill with zeros to ensure the bitcast will produce zeroes
22640 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
22641 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
22642 DAG.getConstant(0, dl, MVT::v8i1),
22643 FPclassMask, DAG.getIntPtrConstant(0, dl));
22644 return DAG.getBitcast(MVT::i8, Ins);
22647 case CMP_MASK_CC: {
22648 MVT MaskVT = Op.getSimpleValueType();
22649 SDValue CC = Op.getOperand(3);
22650 CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
22651 // We specify 2 possible opcodes for intrinsics with rounding modes.
22652 // First, we check if the intrinsic may have non-default rounding mode,
22653 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22654 if (IntrData->Opc1 != 0) {
22655 SDValue Sae = Op.getOperand(4);
22656 if (isRoundModeSAE(Sae))
22657 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
22658 Op.getOperand(2), CC, Sae);
22659 if (!isRoundModeCurDirection(Sae))
22660 return SDValue();
22662 // default rounding mode
22663 return DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
22664 Op.getOperand(2), CC);
22666 case CMP_MASK_SCALAR_CC: {
22667 SDValue Src1 = Op.getOperand(1);
22668 SDValue Src2 = Op.getOperand(2);
22669 SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
22670 SDValue Mask = Op.getOperand(4);
22672 SDValue Cmp;
22673 if (IntrData->Opc1 != 0) {
22674 SDValue Sae = Op.getOperand(5);
22675 if (isRoundModeSAE(Sae))
22676 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
22677 else if (!isRoundModeCurDirection(Sae))
22678 return SDValue();
22680 // default rounding mode
22681 if (!Cmp.getNode())
22682 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
22684 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
22685 Subtarget, DAG);
22686 // Need to fill with zeros to ensure the bitcast will produce zeroes
22687 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
22688 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
22689 DAG.getConstant(0, dl, MVT::v8i1),
22690 CmpMask, DAG.getIntPtrConstant(0, dl));
22691 return DAG.getBitcast(MVT::i8, Ins);
22693 case COMI: { // Comparison intrinsics
22694 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
22695 SDValue LHS = Op.getOperand(1);
22696 SDValue RHS = Op.getOperand(2);
22697 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
22698 SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
22699 SDValue SetCC;
22700 switch (CC) {
22701 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
22702 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
22703 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
22704 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
22705 break;
22707 case ISD::SETNE: { // (ZF = 1 or PF = 1)
22708 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
22709 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
22710 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
22711 break;
22713 case ISD::SETGT: // (CF = 0 and ZF = 0)
22714 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
22715 break;
22716 case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
22717 SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
22718 break;
22720 case ISD::SETGE: // CF = 0
22721 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
22722 break;
22723 case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
22724 SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
22725 break;
22726 default:
22727 llvm_unreachable("Unexpected illegal condition!");
22729 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
22731 case COMI_RM: { // Comparison intrinsics with Sae
22732 SDValue LHS = Op.getOperand(1);
22733 SDValue RHS = Op.getOperand(2);
22734 unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
22735 SDValue Sae = Op.getOperand(4);
22737 SDValue FCmp;
22738 if (isRoundModeCurDirection(Sae))
22739 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
22740 DAG.getConstant(CondVal, dl, MVT::i8));
22741 else if (isRoundModeSAE(Sae))
22742 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
22743 DAG.getConstant(CondVal, dl, MVT::i8), Sae);
22744 else
22745 return SDValue();
22746 // Need to fill with zeros to ensure the bitcast will produce zeroes
22747 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
22748 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
22749 DAG.getConstant(0, dl, MVT::v16i1),
22750 FCmp, DAG.getIntPtrConstant(0, dl));
22751 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
22752 DAG.getBitcast(MVT::i16, Ins));
22754 case VSHIFT:
22755 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
22756 Op.getOperand(1), Op.getOperand(2), Subtarget,
22757 DAG);
22758 case COMPRESS_EXPAND_IN_REG: {
22759 SDValue Mask = Op.getOperand(3);
22760 SDValue DataToCompress = Op.getOperand(1);
22761 SDValue PassThru = Op.getOperand(2);
22762 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
22763 return Op.getOperand(1);
22765 // Avoid false dependency.
22766 if (PassThru.isUndef())
22767 PassThru = DAG.getConstant(0, dl, VT);
22769 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
22770 Mask);
22772 case FIXUPIMM:
22773 case FIXUPIMM_MASKZ: {
22774 SDValue Src1 = Op.getOperand(1);
22775 SDValue Src2 = Op.getOperand(2);
22776 SDValue Src3 = Op.getOperand(3);
22777 SDValue Imm = Op.getOperand(4);
22778 SDValue Mask = Op.getOperand(5);
22779 SDValue Passthru = (IntrData->Type == FIXUPIMM)
22780 ? Src1
22781 : getZeroVector(VT, Subtarget, DAG, dl);
22783 unsigned Opc = IntrData->Opc0;
22784 if (IntrData->Opc1 != 0) {
22785 SDValue Sae = Op.getOperand(6);
22786 if (isRoundModeSAE(Sae))
22787 Opc = IntrData->Opc1;
22788 else if (!isRoundModeCurDirection(Sae))
22789 return SDValue();
22792 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
22794 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
22795 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
22797 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
22799 case ROUNDP: {
22800 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
22801 // Clear the upper bits of the rounding immediate so that the legacy
22802 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
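// (VRNDSCALE interprets imm[7:4] as a scale exponent M and rounds to multiples
// of 2^-M; the legacy ROUND* intrinsics only define imm[3:0], hence the mask.)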
22803 SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
22804 Op.getOperand(2),
22805 DAG.getConstant(0xf, dl, MVT::i32));
22806 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22807 Op.getOperand(1), RoundingMode);
22809 case ROUNDS: {
22810 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
22811 // Clear the upper bits of the rounding immediate so that the legacy
22812 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
22813 SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
22814 Op.getOperand(3),
22815 DAG.getConstant(0xf, dl, MVT::i32));
22816 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22817 Op.getOperand(1), Op.getOperand(2), RoundingMode);
22819 // ADC/ADCX/SBB
22820 case ADX: {
22821 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
22822 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
22824 SDValue Res;
22825 // If the carry in is zero, then we should just use ADD/SUB instead of
22826 // ADC/SBB.
22827 if (isNullConstant(Op.getOperand(1))) {
22828 Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
22829 Op.getOperand(3));
22830 } else {
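// Materialize CF from the non-zero carry-in byte: adding -1 to any value >= 1
// produces an unsigned carry, which the ADC/SBB below then consumes.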
22831 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
22832 DAG.getConstant(-1, dl, MVT::i8));
22833 Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
22834 Op.getOperand(3), GenCF.getValue(1));
22836 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
22837 SDValue Results[] = { SetCC, Res };
22838 return DAG.getMergeValues(Results, dl);
22840 case CVTPD2PS_MASK:
22841 case CVTPD2DQ_MASK:
22842 case CVTQQ2PS_MASK:
22843 case TRUNCATE_TO_REG: {
22844 SDValue Src = Op.getOperand(1);
22845 SDValue PassThru = Op.getOperand(2);
22846 SDValue Mask = Op.getOperand(3);
22848 if (isAllOnesConstant(Mask))
22849 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
22851 MVT SrcVT = Src.getSimpleValueType();
22852 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
22853 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22854 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
22855 Mask);
22857 case CVTPS2PH_MASK: {
22858 SDValue Src = Op.getOperand(1);
22859 SDValue Rnd = Op.getOperand(2);
22860 SDValue PassThru = Op.getOperand(3);
22861 SDValue Mask = Op.getOperand(4);
22863 if (isAllOnesConstant(Mask))
22864 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
22866 MVT SrcVT = Src.getSimpleValueType();
22867 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
22868 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22869 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
22870 PassThru, Mask);
22873 case CVTNEPS2BF16_MASK: {
22874 SDValue Src = Op.getOperand(1);
22875 SDValue PassThru = Op.getOperand(2);
22876 SDValue Mask = Op.getOperand(3);
22878 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
22879 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
22881 // Break false dependency.
22882 if (PassThru.isUndef())
22883 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
22885 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
22886 Mask);
22888 default:
22889 break;
22893 switch (IntNo) {
22894 default: return SDValue(); // Don't custom lower most intrinsics.
22896 // ptest and testp intrinsics. The intrinsics these come from are designed to
22897 // return an integer value, not just an instruction, so lower them to the ptest
22898 // or testp pattern and a setcc for the result.
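// For example, ptestz (_mm_testz_si128) becomes a PTEST node followed by a
// SETE of EFLAGS and a zero-extension to i32, matching the flag each intrinsic
// is documented to report: ZF for *testz*, CF for *testc*, and
// "ZF == 0 && CF == 0" for *testnzc*.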
22899 case Intrinsic::x86_avx512_ktestc_b:
22900 case Intrinsic::x86_avx512_ktestc_w:
22901 case Intrinsic::x86_avx512_ktestc_d:
22902 case Intrinsic::x86_avx512_ktestc_q:
22903 case Intrinsic::x86_avx512_ktestz_b:
22904 case Intrinsic::x86_avx512_ktestz_w:
22905 case Intrinsic::x86_avx512_ktestz_d:
22906 case Intrinsic::x86_avx512_ktestz_q:
22907 case Intrinsic::x86_sse41_ptestz:
22908 case Intrinsic::x86_sse41_ptestc:
22909 case Intrinsic::x86_sse41_ptestnzc:
22910 case Intrinsic::x86_avx_ptestz_256:
22911 case Intrinsic::x86_avx_ptestc_256:
22912 case Intrinsic::x86_avx_ptestnzc_256:
22913 case Intrinsic::x86_avx_vtestz_ps:
22914 case Intrinsic::x86_avx_vtestc_ps:
22915 case Intrinsic::x86_avx_vtestnzc_ps:
22916 case Intrinsic::x86_avx_vtestz_pd:
22917 case Intrinsic::x86_avx_vtestc_pd:
22918 case Intrinsic::x86_avx_vtestnzc_pd:
22919 case Intrinsic::x86_avx_vtestz_ps_256:
22920 case Intrinsic::x86_avx_vtestc_ps_256:
22921 case Intrinsic::x86_avx_vtestnzc_ps_256:
22922 case Intrinsic::x86_avx_vtestz_pd_256:
22923 case Intrinsic::x86_avx_vtestc_pd_256:
22924 case Intrinsic::x86_avx_vtestnzc_pd_256: {
22925 unsigned TestOpc = X86ISD::PTEST;
22926 X86::CondCode X86CC;
22927 switch (IntNo) {
22928 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
22929 case Intrinsic::x86_avx512_ktestc_b:
22930 case Intrinsic::x86_avx512_ktestc_w:
22931 case Intrinsic::x86_avx512_ktestc_d:
22932 case Intrinsic::x86_avx512_ktestc_q:
22933 // CF = 1
22934 TestOpc = X86ISD::KTEST;
22935 X86CC = X86::COND_B;
22936 break;
22937 case Intrinsic::x86_avx512_ktestz_b:
22938 case Intrinsic::x86_avx512_ktestz_w:
22939 case Intrinsic::x86_avx512_ktestz_d:
22940 case Intrinsic::x86_avx512_ktestz_q:
22941 TestOpc = X86ISD::KTEST;
22942 X86CC = X86::COND_E;
22943 break;
22944 case Intrinsic::x86_avx_vtestz_ps:
22945 case Intrinsic::x86_avx_vtestz_pd:
22946 case Intrinsic::x86_avx_vtestz_ps_256:
22947 case Intrinsic::x86_avx_vtestz_pd_256:
22948 TestOpc = X86ISD::TESTP;
22949 LLVM_FALLTHROUGH;
22950 case Intrinsic::x86_sse41_ptestz:
22951 case Intrinsic::x86_avx_ptestz_256:
22952 // ZF = 1
22953 X86CC = X86::COND_E;
22954 break;
22955 case Intrinsic::x86_avx_vtestc_ps:
22956 case Intrinsic::x86_avx_vtestc_pd:
22957 case Intrinsic::x86_avx_vtestc_ps_256:
22958 case Intrinsic::x86_avx_vtestc_pd_256:
22959 TestOpc = X86ISD::TESTP;
22960 LLVM_FALLTHROUGH;
22961 case Intrinsic::x86_sse41_ptestc:
22962 case Intrinsic::x86_avx_ptestc_256:
22963 // CF = 1
22964 X86CC = X86::COND_B;
22965 break;
22966 case Intrinsic::x86_avx_vtestnzc_ps:
22967 case Intrinsic::x86_avx_vtestnzc_pd:
22968 case Intrinsic::x86_avx_vtestnzc_ps_256:
22969 case Intrinsic::x86_avx_vtestnzc_pd_256:
22970 TestOpc = X86ISD::TESTP;
22971 LLVM_FALLTHROUGH;
22972 case Intrinsic::x86_sse41_ptestnzc:
22973 case Intrinsic::x86_avx_ptestnzc_256:
22974 // ZF and CF = 0
22975 X86CC = X86::COND_A;
22976 break;
22979 SDValue LHS = Op.getOperand(1);
22980 SDValue RHS = Op.getOperand(2);
22981 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
22982 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
22983 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
22986 case Intrinsic::x86_sse42_pcmpistria128:
22987 case Intrinsic::x86_sse42_pcmpestria128:
22988 case Intrinsic::x86_sse42_pcmpistric128:
22989 case Intrinsic::x86_sse42_pcmpestric128:
22990 case Intrinsic::x86_sse42_pcmpistrio128:
22991 case Intrinsic::x86_sse42_pcmpestrio128:
22992 case Intrinsic::x86_sse42_pcmpistris128:
22993 case Intrinsic::x86_sse42_pcmpestris128:
22994 case Intrinsic::x86_sse42_pcmpistriz128:
22995 case Intrinsic::x86_sse42_pcmpestriz128: {
22996 unsigned Opcode;
22997 X86::CondCode X86CC;
22998 switch (IntNo) {
22999 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
23000 case Intrinsic::x86_sse42_pcmpistria128:
23001 Opcode = X86ISD::PCMPISTR;
23002 X86CC = X86::COND_A;
23003 break;
23004 case Intrinsic::x86_sse42_pcmpestria128:
23005 Opcode = X86ISD::PCMPESTR;
23006 X86CC = X86::COND_A;
23007 break;
23008 case Intrinsic::x86_sse42_pcmpistric128:
23009 Opcode = X86ISD::PCMPISTR;
23010 X86CC = X86::COND_B;
23011 break;
23012 case Intrinsic::x86_sse42_pcmpestric128:
23013 Opcode = X86ISD::PCMPESTR;
23014 X86CC = X86::COND_B;
23015 break;
23016 case Intrinsic::x86_sse42_pcmpistrio128:
23017 Opcode = X86ISD::PCMPISTR;
23018 X86CC = X86::COND_O;
23019 break;
23020 case Intrinsic::x86_sse42_pcmpestrio128:
23021 Opcode = X86ISD::PCMPESTR;
23022 X86CC = X86::COND_O;
23023 break;
23024 case Intrinsic::x86_sse42_pcmpistris128:
23025 Opcode = X86ISD::PCMPISTR;
23026 X86CC = X86::COND_S;
23027 break;
23028 case Intrinsic::x86_sse42_pcmpestris128:
23029 Opcode = X86ISD::PCMPESTR;
23030 X86CC = X86::COND_S;
23031 break;
23032 case Intrinsic::x86_sse42_pcmpistriz128:
23033 Opcode = X86ISD::PCMPISTR;
23034 X86CC = X86::COND_E;
23035 break;
23036 case Intrinsic::x86_sse42_pcmpestriz128:
23037 Opcode = X86ISD::PCMPESTR;
23038 X86CC = X86::COND_E;
23039 break;
23041 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23042 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23043 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
23044 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
23045 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
23048 case Intrinsic::x86_sse42_pcmpistri128:
23049 case Intrinsic::x86_sse42_pcmpestri128: {
23050 unsigned Opcode;
23051 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
23052 Opcode = X86ISD::PCMPISTR;
23053 else
23054 Opcode = X86ISD::PCMPESTR;
23056 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23057 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23058 return DAG.getNode(Opcode, dl, VTs, NewOps);
23061 case Intrinsic::x86_sse42_pcmpistrm128:
23062 case Intrinsic::x86_sse42_pcmpestrm128: {
23063 unsigned Opcode;
23064 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
23065 Opcode = X86ISD::PCMPISTR;
23066 else
23067 Opcode = X86ISD::PCMPESTR;
23069 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23070 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23071 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
23074 case Intrinsic::eh_sjlj_lsda: {
23075 MachineFunction &MF = DAG.getMachineFunction();
23076 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23077 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
23078 auto &Context = MF.getMMI().getContext();
23079 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
23080 Twine(MF.getFunctionNumber()));
23081 return DAG.getNode(getGlobalWrapperKind(), dl, VT,
23082 DAG.getMCSymbol(S, PtrVT));
23085 case Intrinsic::x86_seh_lsda: {
23086 // Compute the symbol for the LSDA. We know it'll get emitted later.
23087 MachineFunction &MF = DAG.getMachineFunction();
23088 SDValue Op1 = Op.getOperand(1);
23089 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
23090 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
23091 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
23093 // Generate a simple absolute symbol reference. This intrinsic is only
23094 // supported on 32-bit Windows, which isn't PIC.
23095 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
23096 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
23099 case Intrinsic::eh_recoverfp: {
23100 SDValue FnOp = Op.getOperand(1);
23101 SDValue IncomingFPOp = Op.getOperand(2);
23102 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
23103 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
23104 if (!Fn)
23105 report_fatal_error(
23106 "llvm.eh.recoverfp must take a function as the first argument");
23107 return recoverFramePointer(DAG, Fn, IncomingFPOp);
23110 case Intrinsic::localaddress: {
23111 // Returns one of the stack, base, or frame pointer registers, depending on
23112 // which is used to reference local variables.
23113 MachineFunction &MF = DAG.getMachineFunction();
23114 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23115 unsigned Reg;
23116 if (RegInfo->hasBasePointer(MF))
23117 Reg = RegInfo->getBaseRegister();
23118 else { // Handles the SP or FP case.
23119 bool CantUseFP = RegInfo->needsStackRealignment(MF);
23120 if (CantUseFP)
23121 Reg = RegInfo->getPtrSizedStackRegister(MF);
23122 else
23123 Reg = RegInfo->getPtrSizedFrameRegister(MF);
23125 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
23128 case Intrinsic::x86_avx512_vp2intersect_q_512:
23129 case Intrinsic::x86_avx512_vp2intersect_q_256:
23130 case Intrinsic::x86_avx512_vp2intersect_q_128:
23131 case Intrinsic::x86_avx512_vp2intersect_d_512:
23132 case Intrinsic::x86_avx512_vp2intersect_d_256:
23133 case Intrinsic::x86_avx512_vp2intersect_d_128: {
23134 MVT MaskVT = Op.getSimpleValueType();
23136 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
23137 SDLoc DL(Op);
23139 SDValue Operation =
23140 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
23141 Op->getOperand(1), Op->getOperand(2));
23143 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
23144 MaskVT, Operation);
23145 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
23146 MaskVT, Operation);
23147 return DAG.getMergeValues({Result0, Result1}, DL);
23152 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23153 SDValue Src, SDValue Mask, SDValue Base,
23154 SDValue Index, SDValue ScaleOp, SDValue Chain,
23155 const X86Subtarget &Subtarget) {
23156 SDLoc dl(Op);
23157 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23158 // Scale must be constant.
23159 if (!C)
23160 return SDValue();
23161 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23162 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
23163 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
23164 // If source is undef or we know it won't be used, use a zero vector
23165 // to break register dependency.
23166 // TODO: use undef instead and let BreakFalseDeps deal with it?
23167 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
23168 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
23170 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23172 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
23173 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
23174 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23175 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
23178 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
23179 SDValue Src, SDValue Mask, SDValue Base,
23180 SDValue Index, SDValue ScaleOp, SDValue Chain,
23181 const X86Subtarget &Subtarget) {
23182 MVT VT = Op.getSimpleValueType();
23183 SDLoc dl(Op);
23184 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23185 // Scale must be constant.
23186 if (!C)
23187 return SDValue();
23188 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23189 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
23190 VT.getVectorNumElements());
23191 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
23193 // We support two versions of the gather intrinsics. One with scalar mask and
23194 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
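// (Roughly: bit i of a scalar mask becomes element i of the vXi1 mask, so an
// i8 mask of 0x2C for a v8i32 gather enables elements 2, 3 and 5.)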
23195 if (Mask.getValueType() != MaskVT)
23196 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23198 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
23199 // If source is undef or we know it won't be used, use a zero vector
23200 // to break register dependency.
23201 // TODO: use undef instead and let BreakFalseDeps deal with it?
23202 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
23203 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
23205 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23207 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
23208 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
23209 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23210 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
23213 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23214 SDValue Src, SDValue Mask, SDValue Base,
23215 SDValue Index, SDValue ScaleOp, SDValue Chain,
23216 const X86Subtarget &Subtarget) {
23217 SDLoc dl(Op);
23218 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23219 // Scale must be constant.
23220 if (!C)
23221 return SDValue();
23222 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23223 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
23224 Src.getSimpleValueType().getVectorNumElements());
23225 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
23227 // We support two versions of the scatter intrinsics. One with scalar mask and
23228 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
23229 if (Mask.getValueType() != MaskVT)
23230 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23232 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23234 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
23235 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
23236 SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
23237 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23238 return Res.getValue(1);
23241 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23242 SDValue Mask, SDValue Base, SDValue Index,
23243 SDValue ScaleOp, SDValue Chain,
23244 const X86Subtarget &Subtarget) {
23245 SDLoc dl(Op);
23246 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23247 // Scale must be constant.
23248 if (!C)
23249 return SDValue();
23250 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23251 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
23252 SDValue Segment = DAG.getRegister(0, MVT::i32);
23253 MVT MaskVT =
23254 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
23255 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23256 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
23257 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
23258 return SDValue(Res, 0);
23261 /// Handles the lowering of builtin intrinsics with a chain that return their
23262 /// value in registers EDX:EAX.
23263 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
23264 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
23265 /// TargetOpcode.
23266 /// Returns a Glue value which can be used to add an extra copy-from-reg if the
23267 /// expanded intrinsic implicitly defines extra registers (i.e. not just
23268 /// EDX:EAX).
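/// For example, RDPMC and XGETBV are expanded through this helper with
/// SrcReg == ECX (the counter / XCR index), and the EDX:EAX result is merged
/// into a single i64 (shift-and-or on 64-bit targets, BUILD_PAIR on 32-bit).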
23269 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
23270 SelectionDAG &DAG,
23271 unsigned TargetOpcode,
23272 unsigned SrcReg,
23273 const X86Subtarget &Subtarget,
23274 SmallVectorImpl<SDValue> &Results) {
23275 SDValue Chain = N->getOperand(0);
23276 SDValue Glue;
23278 if (SrcReg) {
23279 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
23280 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
23281 Glue = Chain.getValue(1);
23284 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
23285 SDValue N1Ops[] = {Chain, Glue};
23286 SDNode *N1 = DAG.getMachineNode(
23287 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
23288 Chain = SDValue(N1, 0);
23290 // Read the 64-bit result out of EDX:EAX (RAX/RDX on 64-bit targets).
23291 SDValue LO, HI;
23292 if (Subtarget.is64Bit()) {
23293 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
23294 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
23295 LO.getValue(2));
23296 } else {
23297 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
23298 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
23299 LO.getValue(2));
23301 Chain = HI.getValue(1);
23302 Glue = HI.getValue(2);
23304 if (Subtarget.is64Bit()) {
23305 // Merge the two 32-bit values into a 64-bit one.
23306 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
23307 DAG.getConstant(32, DL, MVT::i8));
23308 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
23309 Results.push_back(Chain);
23310 return Glue;
23313 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
23314 SDValue Ops[] = { LO, HI };
23315 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
23316 Results.push_back(Pair);
23317 Results.push_back(Chain);
23318 return Glue;
23321 /// Handles the lowering of builtin intrinsics that read the time stamp counter
23322 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
23323 /// READCYCLECOUNTER nodes.
23324 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
23325 SelectionDAG &DAG,
23326 const X86Subtarget &Subtarget,
23327 SmallVectorImpl<SDValue> &Results) {
23328 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
23329 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
23330 // and the EAX register is loaded with the low-order 32 bits.
23331 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
23332 /* NoRegister */0, Subtarget,
23333 Results);
23334 if (Opcode != X86::RDTSCP)
23335 return;
23337 SDValue Chain = Results[1];
23338 // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
23339 // the ECX register. Add 'ecx' explicitly to the chain.
23340 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
23341 Results[1] = ecx;
23342 Results.push_back(ecx.getValue(1));
23345 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
23346 SelectionDAG &DAG) {
23347 SmallVector<SDValue, 3> Results;
23348 SDLoc DL(Op);
23349 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
23350 Results);
23351 return DAG.getMergeValues(Results, DL);
23354 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
23355 MachineFunction &MF = DAG.getMachineFunction();
23356 SDValue Chain = Op.getOperand(0);
23357 SDValue RegNode = Op.getOperand(2);
23358 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
23359 if (!EHInfo)
23360 report_fatal_error("EH registrations only live in functions using WinEH");
23362 // Cast the operand to an alloca, and remember the frame index.
23363 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
23364 if (!FINode)
23365 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
23366 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
23368 // Return the chain operand without making any DAG nodes.
23369 return Chain;
23372 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
23373 MachineFunction &MF = DAG.getMachineFunction();
23374 SDValue Chain = Op.getOperand(0);
23375 SDValue EHGuard = Op.getOperand(2);
23376 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
23377 if (!EHInfo)
23378 report_fatal_error("EHGuard only lives in functions using WinEH");
23380 // Cast the operand to an alloca, and remember the frame index.
23381 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
23382 if (!FINode)
23383 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
23384 EHInfo->EHGuardFrameIndex = FINode->getIndex();
23386 // Return the chain operand without making any DAG nodes.
23387 return Chain;
23390 /// Emit Truncating Store with signed or unsigned saturation.
23391 static SDValue
23392 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
23393 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
23394 SelectionDAG &DAG) {
23396 SDVTList VTs = DAG.getVTList(MVT::Other);
23397 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
23398 SDValue Ops[] = { Chain, Val, Ptr, Undef };
23399 return SignedSat ?
23400 DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
23401 DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
23404 /// Emit Masked Truncating Store with signed or unsigned saturation.
23405 static SDValue
23406 EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
23407 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
23408 MachineMemOperand *MMO, SelectionDAG &DAG) {
23410 SDVTList VTs = DAG.getVTList(MVT::Other);
23411 SDValue Ops[] = { Chain, Val, Ptr, Mask };
23412 return SignedSat ?
23413 DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
23414 DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
23417 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
23418 SelectionDAG &DAG) {
23419 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
23421 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
23422 if (!IntrData) {
23423 switch (IntNo) {
23424 case llvm::Intrinsic::x86_seh_ehregnode:
23425 return MarkEHRegistrationNode(Op, DAG);
23426 case llvm::Intrinsic::x86_seh_ehguard:
23427 return MarkEHGuard(Op, DAG);
23428 case llvm::Intrinsic::x86_rdpkru: {
23429 SDLoc dl(Op);
23430 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23431 // Create a RDPKRU node and pass 0 to the ECX parameter.
23432 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
23433 DAG.getConstant(0, dl, MVT::i32));
23435 case llvm::Intrinsic::x86_wrpkru: {
23436 SDLoc dl(Op);
23437 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
23438 // to the EDX and ECX parameters.
23439 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
23440 Op.getOperand(0), Op.getOperand(2),
23441 DAG.getConstant(0, dl, MVT::i32),
23442 DAG.getConstant(0, dl, MVT::i32));
23444 case llvm::Intrinsic::x86_flags_read_u32:
23445 case llvm::Intrinsic::x86_flags_read_u64:
23446 case llvm::Intrinsic::x86_flags_write_u32:
23447 case llvm::Intrinsic::x86_flags_write_u64: {
23448 // We need a frame pointer because this will get lowered to a PUSH/POP
23449 // sequence.
23450 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
23451 MFI.setHasCopyImplyingStackAdjustment(true);
23452 // Don't do anything here, we will expand these intrinsics out later
23453 // during FinalizeISel in EmitInstrWithCustomInserter.
23454 return SDValue();
23456 case Intrinsic::x86_lwpins32:
23457 case Intrinsic::x86_lwpins64:
23458 case Intrinsic::x86_umwait:
23459 case Intrinsic::x86_tpause: {
23460 SDLoc dl(Op);
23461 SDValue Chain = Op->getOperand(0);
23462 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23463 unsigned Opcode;
23465 switch (IntNo) {
23466 default: llvm_unreachable("Impossible intrinsic");
23467 case Intrinsic::x86_umwait:
23468 Opcode = X86ISD::UMWAIT;
23469 break;
23470 case Intrinsic::x86_tpause:
23471 Opcode = X86ISD::TPAUSE;
23472 break;
23473 case Intrinsic::x86_lwpins32:
23474 case Intrinsic::x86_lwpins64:
23475 Opcode = X86ISD::LWPINS;
23476 break;
23479 SDValue Operation =
23480 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
23481 Op->getOperand(3), Op->getOperand(4));
23482 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
23483 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
23484 Operation.getValue(1));
23486 case Intrinsic::x86_enqcmd:
23487 case Intrinsic::x86_enqcmds: {
23488 SDLoc dl(Op);
23489 SDValue Chain = Op.getOperand(0);
23490 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23491 unsigned Opcode;
23492 switch (IntNo) {
23493 default: llvm_unreachable("Impossible intrinsic!");
23494 case Intrinsic::x86_enqcmd:
23495 Opcode = X86ISD::ENQCMD;
23496 break;
23497 case Intrinsic::x86_enqcmds:
23498 Opcode = X86ISD::ENQCMDS;
23499 break;
23501 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
23502 Op.getOperand(3));
23503 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
23504 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
23505 Operation.getValue(1));
23508 return SDValue();
23511 SDLoc dl(Op);
23512 switch(IntrData->Type) {
23513 default: llvm_unreachable("Unknown Intrinsic Type");
23514 case RDSEED:
23515 case RDRAND: {
23516 // Emit the node with the right value type.
23517 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
23518 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
23520 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
23521 // Otherwise return the random value, which is always 0 in that case, cast to i32.
23522 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
23523 DAG.getConstant(1, dl, Op->getValueType(1)),
23524 DAG.getConstant(X86::COND_B, dl, MVT::i8),
23525 SDValue(Result.getNode(), 1) };
23526 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
23528 // Return { result, isValid, chain }.
23529 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
23530 SDValue(Result.getNode(), 2));
23532 case GATHER_AVX2: {
23533 SDValue Chain = Op.getOperand(0);
23534 SDValue Src = Op.getOperand(2);
23535 SDValue Base = Op.getOperand(3);
23536 SDValue Index = Op.getOperand(4);
23537 SDValue Mask = Op.getOperand(5);
23538 SDValue Scale = Op.getOperand(6);
23539 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
23540 Scale, Chain, Subtarget);
23542 case GATHER: {
23543 // gather(v1, mask, index, base, scale);
23544 SDValue Chain = Op.getOperand(0);
23545 SDValue Src = Op.getOperand(2);
23546 SDValue Base = Op.getOperand(3);
23547 SDValue Index = Op.getOperand(4);
23548 SDValue Mask = Op.getOperand(5);
23549 SDValue Scale = Op.getOperand(6);
23550 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
23551 Chain, Subtarget);
23553 case SCATTER: {
23554 // scatter(base, mask, index, v1, scale);
23555 SDValue Chain = Op.getOperand(0);
23556 SDValue Base = Op.getOperand(2);
23557 SDValue Mask = Op.getOperand(3);
23558 SDValue Index = Op.getOperand(4);
23559 SDValue Src = Op.getOperand(5);
23560 SDValue Scale = Op.getOperand(6);
23561 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
23562 Scale, Chain, Subtarget);
23564 case PREFETCH: {
23565 SDValue Hint = Op.getOperand(6);
23566 unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
23567 assert((HintVal == 2 || HintVal == 3) &&
23568 "Wrong prefetch hint in intrinsic: should be 2 or 3");
23569 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
23570 SDValue Chain = Op.getOperand(0);
23571 SDValue Mask = Op.getOperand(2);
23572 SDValue Index = Op.getOperand(3);
23573 SDValue Base = Op.getOperand(4);
23574 SDValue Scale = Op.getOperand(5);
23575 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
23576 Subtarget);
23578 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
23579 case RDTSC: {
23580 SmallVector<SDValue, 2> Results;
23581 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
23582 Results);
23583 return DAG.getMergeValues(Results, dl);
23585 // Read Performance Monitoring Counters.
23586 case RDPMC:
23587 // Get Extended Control Register.
23588 case XGETBV: {
23589 SmallVector<SDValue, 2> Results;
23591 // RDPMC uses ECX to select the index of the performance counter to read.
23592 // XGETBV uses ECX to select the index of the XCR register to return.
23593 // The result is stored into registers EDX:EAX.
23594 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
23595 Subtarget, Results);
23596 return DAG.getMergeValues(Results, dl);
23598 // XTEST intrinsics.
23599 case XTEST: {
23600 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
23601 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
23603 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
23604 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
23605 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
23606 Ret, SDValue(InTrans.getNode(), 1));
23608 case TRUNCATE_TO_MEM_VI8:
23609 case TRUNCATE_TO_MEM_VI16:
23610 case TRUNCATE_TO_MEM_VI32: {
23611 SDValue Mask = Op.getOperand(4);
23612 SDValue DataToTruncate = Op.getOperand(3);
23613 SDValue Addr = Op.getOperand(2);
23614 SDValue Chain = Op.getOperand(0);
23616 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
23617 assert(MemIntr && "Expected MemIntrinsicSDNode!");
23619 EVT MemVT = MemIntr->getMemoryVT();
23621 uint16_t TruncationOp = IntrData->Opc0;
23622 switch (TruncationOp) {
23623 case X86ISD::VTRUNC: {
23624 if (isAllOnesConstant(Mask)) // return just a truncate store
23625 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
23626 MemIntr->getMemOperand());
23628 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
23629 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23631 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
23632 MemIntr->getMemOperand(), true /* truncating */);
23634 case X86ISD::VTRUNCUS:
23635 case X86ISD::VTRUNCS: {
23636 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
23637 if (isAllOnesConstant(Mask))
23638 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
23639 MemIntr->getMemOperand(), DAG);
23641 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
23642 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23644 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
23645 VMask, MemVT, MemIntr->getMemOperand(), DAG);
23647 default:
23648 llvm_unreachable("Unsupported truncstore intrinsic");
23654 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
23655 SelectionDAG &DAG) const {
23656 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
23657 MFI.setReturnAddressIsTaken(true);
23659 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
23660 return SDValue();
23662 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
23663 SDLoc dl(Op);
23664 EVT PtrVT = getPointerTy(DAG.getDataLayout());
23666 if (Depth > 0) {
23667 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
23668 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23669 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
23670 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
23671 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
23672 MachinePointerInfo());
23675 // Just load the return address.
23676 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
23677 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
23678 MachinePointerInfo());
23681 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
23682 SelectionDAG &DAG) const {
23683 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
23684 return getReturnAddressFrameIndex(DAG);
23687 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
23688 MachineFunction &MF = DAG.getMachineFunction();
23689 MachineFrameInfo &MFI = MF.getFrameInfo();
23690 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23691 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23692 EVT VT = Op.getValueType();
23694 MFI.setFrameAddressIsTaken(true);
23696 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
23697 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
23698 // is not possible to crawl up the stack without looking at the unwind codes
23699 // simultaneously.
23700 int FrameAddrIndex = FuncInfo->getFAIndex();
23701 if (!FrameAddrIndex) {
23702 // Set up a frame object for the return address.
23703 unsigned SlotSize = RegInfo->getSlotSize();
23704 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
23705 SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
23706 FuncInfo->setFAIndex(FrameAddrIndex);
23708 return DAG.getFrameIndex(FrameAddrIndex, VT);
23711 unsigned FrameReg =
23712 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
23713 SDLoc dl(Op); // FIXME probably not meaningful
23714 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
23715 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
23716 (FrameReg == X86::EBP && VT == MVT::i32)) &&
23717 "Invalid Frame Register!");
23718 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
23719 while (Depth--)
23720 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
23721 MachinePointerInfo());
23722 return FrameAddr;
23725 // FIXME? Maybe this could be a TableGen attribute on some registers and
23726 // this table could be generated automatically from RegInfo.
23727 unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
23728 SelectionDAG &DAG) const {
23729 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23730 const MachineFunction &MF = DAG.getMachineFunction();
23732 unsigned Reg = StringSwitch<unsigned>(RegName)
23733 .Case("esp", X86::ESP)
23734 .Case("rsp", X86::RSP)
23735 .Case("ebp", X86::EBP)
23736 .Case("rbp", X86::RBP)
23737 .Default(0);
23739 if (Reg == X86::EBP || Reg == X86::RBP) {
23740 if (!TFI.hasFP(MF))
23741 report_fatal_error("register " + StringRef(RegName) +
23742 " is allocatable: function has no frame pointer");
23743 #ifndef NDEBUG
23744 else {
23745 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23746 unsigned FrameReg =
23747 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
23748 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
23749 "Invalid Frame Register!");
23751 #endif
23754 if (Reg)
23755 return Reg;
23757 report_fatal_error("Invalid register name global variable");
23760 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
23761 SelectionDAG &DAG) const {
23762 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23763 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
23766 unsigned X86TargetLowering::getExceptionPointerRegister(
23767 const Constant *PersonalityFn) const {
23768 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
23769 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
23771 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
23774 unsigned X86TargetLowering::getExceptionSelectorRegister(
23775 const Constant *PersonalityFn) const {
23776 // Funclet personalities don't use selectors (the runtime does the selection).
23777 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
23778 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
23781 bool X86TargetLowering::needsFixedCatchObjects() const {
23782 return Subtarget.isTargetWin64();
23785 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
23786 SDValue Chain = Op.getOperand(0);
23787 SDValue Offset = Op.getOperand(1);
23788 SDValue Handler = Op.getOperand(2);
23789 SDLoc dl (Op);
23791 EVT PtrVT = getPointerTy(DAG.getDataLayout());
23792 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23793 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
23794 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
23795 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
23796 "Invalid Frame Register!");
23797 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
23798 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
23800 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
23801 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
23802 dl));
23803 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
23804 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
23805 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
23807 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
23808 DAG.getRegister(StoreAddrReg, PtrVT));
23811 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
23812 SelectionDAG &DAG) const {
23813 SDLoc DL(Op);
23814 // If the subtarget is not 64-bit, we may need the global base reg
23815 // after the pseudo is expanded post-isel, i.e., after the CGBR pass has run.
23816 // Therefore, ask for the GlobalBaseReg now, so that the pass
23817 // inserts the code for us in case we need it.
23818 // Otherwise, we would end up referencing a virtual register
23819 // that is never defined!
23820 if (!Subtarget.is64Bit()) {
23821 const X86InstrInfo *TII = Subtarget.getInstrInfo();
23822 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
23824 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
23825 DAG.getVTList(MVT::i32, MVT::Other),
23826 Op.getOperand(0), Op.getOperand(1));
23829 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
23830 SelectionDAG &DAG) const {
23831 SDLoc DL(Op);
23832 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
23833 Op.getOperand(0), Op.getOperand(1));
23836 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
23837 SelectionDAG &DAG) const {
23838 SDLoc DL(Op);
23839 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
23840 Op.getOperand(0));
23843 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
23844 return Op.getOperand(0);
23847 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
23848 SelectionDAG &DAG) const {
23849 SDValue Root = Op.getOperand(0);
23850 SDValue Trmp = Op.getOperand(1); // trampoline
23851 SDValue FPtr = Op.getOperand(2); // nested function
23852 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
23853 SDLoc dl (Op);
23855 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
23856 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
23858 if (Subtarget.is64Bit()) {
23859 SDValue OutChains[6];
23861 // Large code-model.
23862 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
23863 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
23865 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
23866 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
23868 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
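// Roughly, the 23 bytes stored below encode (assuming the usual encodings):
//   0:  49 BB <FPtr:8>    movabsq $FPtr, %r11
//   10: 49 BA <Nest:8>    movabsq $Nest, %r10
//   20: 49 FF E3          jmpq   *%r11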
23870 // Load the pointer to the nested function into R11.
23871 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
23872 SDValue Addr = Trmp;
23873 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
23874 Addr, MachinePointerInfo(TrmpAddr));
23876 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23877 DAG.getConstant(2, dl, MVT::i64));
23878 OutChains[1] =
23879 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
23880 /* Alignment = */ 2);
23882 // Load the 'nest' parameter value into R10.
23883 // R10 is specified in X86CallingConv.td
23884 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
23885 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23886 DAG.getConstant(10, dl, MVT::i64));
23887 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
23888 Addr, MachinePointerInfo(TrmpAddr, 10));
23890 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23891 DAG.getConstant(12, dl, MVT::i64));
23892 OutChains[3] =
23893 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
23894 /* Alignment = */ 2);
23896 // Jump to the nested function.
23897 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
23898 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23899 DAG.getConstant(20, dl, MVT::i64));
23900 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
23901 Addr, MachinePointerInfo(TrmpAddr, 20));
23903 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
23904 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23905 DAG.getConstant(22, dl, MVT::i64));
23906 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
23907 Addr, MachinePointerInfo(TrmpAddr, 22));
23909 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
23910 } else {
23911 const Function *Func =
23912 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
23913 CallingConv::ID CC = Func->getCallingConv();
23914 unsigned NestReg;
23916 switch (CC) {
23917 default:
23918 llvm_unreachable("Unsupported calling convention");
23919 case CallingConv::C:
23920 case CallingConv::X86_StdCall: {
23921 // Pass 'nest' parameter in ECX.
23922 // Must be kept in sync with X86CallingConv.td
23923 NestReg = X86::ECX;
23925 // Check that ECX wasn't needed by an 'inreg' parameter.
23926 FunctionType *FTy = Func->getFunctionType();
23927 const AttributeList &Attrs = Func->getAttributes();
23929 if (!Attrs.isEmpty() && !Func->isVarArg()) {
23930 unsigned InRegCount = 0;
23931 unsigned Idx = 1;
23933 for (FunctionType::param_iterator I = FTy->param_begin(),
23934 E = FTy->param_end(); I != E; ++I, ++Idx)
23935 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
23936 auto &DL = DAG.getDataLayout();
23937 // FIXME: should only count parameters that are lowered to integers.
23938 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
23941 if (InRegCount > 2) {
23942 report_fatal_error("Nest register in use - reduce number of inreg"
23943 " parameters!");
23946 break;
23948 case CallingConv::X86_FastCall:
23949 case CallingConv::X86_ThisCall:
23950 case CallingConv::Fast:
23951 // Pass 'nest' parameter in EAX.
23952 // Must be kept in sync with X86CallingConv.td
23953 NestReg = X86::EAX;
23954 break;
23957 SDValue OutChains[4];
23958 SDValue Addr, Disp;
23960 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
23961 DAG.getConstant(10, dl, MVT::i32));
23962 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
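// Disp is the rel32 operand of the jmp emitted at offset 5 below; it is
// measured from the end of that 5-byte jmp, i.e. from Trmp + 10.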
23964 // This is storing the opcode for MOV32ri.
23965 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
23966 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
23967 OutChains[0] =
23968 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
23969 Trmp, MachinePointerInfo(TrmpAddr));
23971 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
23972 DAG.getConstant(1, dl, MVT::i32));
23973 OutChains[1] =
23974 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
23975 /* Alignment = */ 1);
23977 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
23978 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
23979 DAG.getConstant(5, dl, MVT::i32));
23980 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
23981 Addr, MachinePointerInfo(TrmpAddr, 5),
23982 /* Alignment = */ 1);
23984 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
23985 DAG.getConstant(6, dl, MVT::i32));
23986 OutChains[3] =
23987 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
23988 /* Alignment = */ 1);
23990 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
23994 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
23995 SelectionDAG &DAG) const {
23997 The rounding mode is in bits 11:10 of the x87 FP control word (FPCW), and
23998 has the following settings:
23999 00 Round to nearest
24000 01 Round to -inf
24001 10 Round to +inf
24002 11 Round to 0
24004 FLT_ROUNDS, on the other hand, expects the following:
24005 -1 Undefined
24006 0 Round to 0
24007 1 Round to nearest
24008 2 Round to +inf
24009 3 Round to -inf
24011 To perform the conversion, we do:
24012 (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
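For example, with the default control word 0x037F the RC bits are 00, so the
formula gives ((0 | 0) + 1) & 3 = 1 (round to nearest); with RC = 01
(FPCW & 0xC00 == 0x400, round to -inf) it gives ((0 | 2) + 1) & 3 = 3.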
24015 MachineFunction &MF = DAG.getMachineFunction();
24016 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24017 unsigned StackAlignment = TFI.getStackAlignment();
24018 MVT VT = Op.getSimpleValueType();
24019 SDLoc DL(Op);
24021 // Save FP Control Word to stack slot
24022 int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
24023 SDValue StackSlot =
24024 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
24026 MachineMemOperand *MMO =
24027 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
24028 MachineMemOperand::MOStore, 2, 2);
24030 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
24031 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
24032 DAG.getVTList(MVT::Other),
24033 Ops, MVT::i16, MMO);
24035 // Load FP Control Word from stack slot
24036 SDValue CWD =
24037 DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
24039 // Transform as necessary
24040 SDValue CWD1 =
24041 DAG.getNode(ISD::SRL, DL, MVT::i16,
24042 DAG.getNode(ISD::AND, DL, MVT::i16,
24043 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
24044 DAG.getConstant(11, DL, MVT::i8));
24045 SDValue CWD2 =
24046 DAG.getNode(ISD::SRL, DL, MVT::i16,
24047 DAG.getNode(ISD::AND, DL, MVT::i16,
24048 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
24049 DAG.getConstant(9, DL, MVT::i8));
24051 SDValue RetVal =
24052 DAG.getNode(ISD::AND, DL, MVT::i16,
24053 DAG.getNode(ISD::ADD, DL, MVT::i16,
24054 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
24055 DAG.getConstant(1, DL, MVT::i16)),
24056 DAG.getConstant(3, DL, MVT::i16));
24058 return DAG.getNode((VT.getSizeInBits() < 16 ?
24059 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
24062 // Split a unary integer op into 2 half-sized ops.
24063 static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
24064 MVT VT = Op.getSimpleValueType();
24065 unsigned NumElems = VT.getVectorNumElements();
24066 unsigned SizeInBits = VT.getSizeInBits();
24067 MVT EltVT = VT.getVectorElementType();
24068 SDValue Src = Op.getOperand(0);
24069 assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
24070 "Src and Op should have the same element type!");
24072 // Extract the Lo/Hi vectors
24073 SDLoc dl(Op);
24074 SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
24075 SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
24077 MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
24078 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24079 DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
24080 DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
24083 // Decompose 256-bit ops into smaller 128-bit ops.
24084 static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
24085 assert(Op.getSimpleValueType().is256BitVector() &&
24086 Op.getSimpleValueType().isInteger() &&
24087 "Only handle AVX 256-bit vector integer operation");
24088 return LowerVectorIntUnary(Op, DAG);
24091 // Decompose 512-bit ops into smaller 256-bit ops.
24092 static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
24093 assert(Op.getSimpleValueType().is512BitVector() &&
24094 Op.getSimpleValueType().isInteger() &&
24095 "Only handle AVX 512-bit vector integer operation");
24096 return LowerVectorIntUnary(Op, DAG);
24099 /// Lower a vector CTLZ using a natively supported vector CTLZ instruction.
24101 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
24102 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
24103 // split the vector, perform the operation on its Lo and Hi parts and
24104 // concatenate the results.
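// E.g. for an i8 element x = 0x10: lzcnt32(zext32(x)) = 27, and subtracting
// the 32 - 8 = 24 zeros introduced by the extension gives ctlz8(x) = 3.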
24105 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
24106 const X86Subtarget &Subtarget) {
24107 assert(Op.getOpcode() == ISD::CTLZ);
24108 SDLoc dl(Op);
24109 MVT VT = Op.getSimpleValueType();
24110 MVT EltVT = VT.getVectorElementType();
24111 unsigned NumElems = VT.getVectorNumElements();
24113 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
24114 "Unsupported element type");
24116 // Split the vector; its Lo and Hi parts will be handled in the next iteration.
24117 if (NumElems > 16 ||
24118 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
24119 return LowerVectorIntUnary(Op, DAG);
24121 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
24122 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
24123 "Unsupported value type for operation");
24125 // Use the natively supported vector instruction vplzcntd.
24126 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
24127 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
24128 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
24129 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
24131 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
24134 // Lower CTLZ using a PSHUFB lookup table implementation.
24135 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
24136 const X86Subtarget &Subtarget,
24137 SelectionDAG &DAG) {
24138 MVT VT = Op.getSimpleValueType();
24139 int NumElts = VT.getVectorNumElements();
24140 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
24141 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
24143 // Per-nibble leading zero PSHUFB lookup table.
24144 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
24145 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
24146 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
24147 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
24149 SmallVector<SDValue, 64> LUTVec;
24150 for (int i = 0; i < NumBytes; ++i)
24151 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
24152 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
24154 // Begin by bitcasting the input to a byte vector, then split those bytes
24155 // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
24156 // If the hi input nibble is zero then we add both results together, otherwise
24157 // we just take the hi result (by masking the lo result to zero before the
24158 // add).
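// E.g. for the byte 0x05 the hi nibble is 0, so LUT[0] + LUT[5] = 4 + 1 = 5
// leading zeros; for 0x1C the hi nibble is 1, so only LUT[1] = 3 is used.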
24159 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
24160 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
24162 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
24163 SDValue Lo = Op0;
24164 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
24165 SDValue HiZ;
24166 if (CurrVT.is512BitVector()) {
24167 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24168 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
24169 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
24170 } else {
24171 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
24174 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
24175 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
24176 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
24177 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
24179 // Merge the result back from vXi8 to VT, working on the lo/hi halves
24180 // of the current vector width in the same way we did for the nibbles.
24181 // If the upper half of the input element is zero then add the halves'
24182 // leading zero counts together, otherwise just use the upper half's.
24183 // Double the width of the result until we are at target width.
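// E.g. for an i16 element 0x0003, the hi byte is zero (count 8) and the lo
// byte contributes 6, giving ctlz16 = 14; for 0x4003 only the hi byte's
// count of 1 is kept.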
24184 while (CurrVT != VT) {
24185 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
24186 int CurrNumElts = CurrVT.getVectorNumElements();
24187 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
24188 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
24189 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
24191 // Check if the upper half of the input element is zero.
24192 if (CurrVT.is512BitVector()) {
24193 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24194 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
24195 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24196 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
24197 } else {
24198 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
24199 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24201 HiZ = DAG.getBitcast(NextVT, HiZ);
24203 // Move the upper/lower halves to the lower bits as we'll be extending to
24204 // NextVT. Mask the lower result to zero if HiZ is true and add the results
24205 // together.
24206 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
24207 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
24208 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
24209 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
24210 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
24211 CurrVT = NextVT;
24214 return Res;
24217 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
24218 const X86Subtarget &Subtarget,
24219 SelectionDAG &DAG) {
24220 MVT VT = Op.getSimpleValueType();
24222 if (Subtarget.hasCDI() &&
24223 // vXi8 vectors need to be promoted to 512-bit vXi32 vectors.
24224 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
24225 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
24227 // Decompose 256-bit ops into smaller 128-bit ops.
24228 if (VT.is256BitVector() && !Subtarget.hasInt256())
24229 return Lower256IntUnary(Op, DAG);
24231 // Decompose 512-bit ops into smaller 256-bit ops.
24232 if (VT.is512BitVector() && !Subtarget.hasBWI())
24233 return Lower512IntUnary(Op, DAG);
24235 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
24236 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
24239 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
24240 SelectionDAG &DAG) {
24241 MVT VT = Op.getSimpleValueType();
24242 MVT OpVT = VT;
24243 unsigned NumBits = VT.getSizeInBits();
24244 SDLoc dl(Op);
24245 unsigned Opc = Op.getOpcode();
24247 if (VT.isVector())
24248 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
24250 Op = Op.getOperand(0);
24251 if (VT == MVT::i8) {
24252 // Zero extend to i32 since there is no i8 bsr.
24253 OpVT = MVT::i32;
24254 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
24257 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
24258 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
24259 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
24261 if (Opc == ISD::CTLZ) {
24262 // If src is zero (i.e. bsr sets ZF), returns NumBits.
24263 SDValue Ops[] = {
24265 DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
24266 DAG.getConstant(X86::COND_E, dl, MVT::i8),
24267 Op.getValue(1)
24269 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
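// The zero-input value is 2*NumBits-1 rather than NumBits so that the XOR
// with NumBits-1 below still maps it to NumBits (e.g. 63 ^ 31 = 32 for i32).
// For a non-zero input, BSR yields the index of the highest set bit, and
// index ^ (NumBits-1) = (NumBits-1) - index is exactly the leading-zero count.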
24272 // Finally xor with NumBits-1.
24273 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
24274 DAG.getConstant(NumBits - 1, dl, OpVT));
24276 if (VT == MVT::i8)
24277 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
24278 return Op;
24281 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
24282 SelectionDAG &DAG) {
24283 MVT VT = Op.getSimpleValueType();
24284 unsigned NumBits = VT.getScalarSizeInBits();
24285 SDValue N0 = Op.getOperand(0);
24286 SDLoc dl(Op);
24288 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
24289 "Only scalar CTTZ requires custom lowering");
24291 // Issue a bsf (scan bits forward) which also sets EFLAGS.
24292 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
24293 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
24295 // If src is zero (i.e. bsf sets ZF), returns NumBits.
24296 SDValue Ops[] = {
24298 DAG.getConstant(NumBits, dl, VT),
24299 DAG.getConstant(X86::COND_E, dl, MVT::i8),
24300 Op.getValue(1)
24302 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
24305 /// Break a 256-bit integer operation into two new 128-bit ones and then
24306 /// concatenate the result back.
24307 static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
24308 MVT VT = Op.getSimpleValueType();
24310 assert(VT.is256BitVector() && VT.isInteger() &&
24311 "Unsupported value type for operation");
24313 unsigned NumElems = VT.getVectorNumElements();
24314 SDLoc dl(Op);
24316 // Extract the LHS vectors
24317 SDValue LHS = Op.getOperand(0);
24318 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
24319 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
24321 // Extract the RHS vectors
24322 SDValue RHS = Op.getOperand(1);
24323 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
24324 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
24326 MVT EltVT = VT.getVectorElementType();
24327 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24329 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24330 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24331 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24334 /// Break a 512-bit integer operation into two new 256-bit ones and then
24335 /// concatenate the result back.
24336 static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
24337 MVT VT = Op.getSimpleValueType();
24339 assert(VT.is512BitVector() && VT.isInteger() &&
24340 "Unsupported value type for operation");
24342 unsigned NumElems = VT.getVectorNumElements();
24343 SDLoc dl(Op);
24345 // Extract the LHS vectors
24346 SDValue LHS = Op.getOperand(0);
24347 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
24348 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
24350 // Extract the RHS vectors
24351 SDValue RHS = Op.getOperand(1);
24352 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
24353 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
24355 MVT EltVT = VT.getVectorElementType();
24356 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24358 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24359 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24360 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24363 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
24364 const X86Subtarget &Subtarget) {
24365 MVT VT = Op.getSimpleValueType();
24366 if (VT == MVT::i16 || VT == MVT::i32)
24367 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
24369 if (VT.getScalarType() == MVT::i1)
24370 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
24371 Op.getOperand(0), Op.getOperand(1));
24373 assert(Op.getSimpleValueType().is256BitVector() &&
24374 Op.getSimpleValueType().isInteger() &&
24375 "Only handle AVX 256-bit vector integer operation");
24376 return split256IntArith(Op, DAG);
24379 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
24380 const X86Subtarget &Subtarget) {
24381 MVT VT = Op.getSimpleValueType();
24382 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
24383 unsigned Opcode = Op.getOpcode();
24384 if (VT.getScalarType() == MVT::i1) {
24385 SDLoc dl(Op);
24386 switch (Opcode) {
24387 default: llvm_unreachable("Expected saturated arithmetic opcode");
24388 case ISD::UADDSAT:
24389 case ISD::SADDSAT:
24390 // *addsat i1 X, Y --> X | Y
24391 return DAG.getNode(ISD::OR, dl, VT, X, Y);
24392 case ISD::USUBSAT:
24393 case ISD::SSUBSAT:
24394 // *subsat i1 X, Y --> X & ~Y
24395 return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
24396 }
24397 }
24399 if (VT.is128BitVector()) {
24400 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
24401 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24402 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
24403 *DAG.getContext(), VT);
24404 SDLoc DL(Op);
24405 if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
24406 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
24407 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
24408 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
24409 return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
24411 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
24412 // usubsat X, Y --> (X >u Y) ? X - Y : 0
24413 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
24414 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
24415 return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
24417 // Use default expansion.
24418 return SDValue();
24421 assert(Op.getSimpleValueType().is256BitVector() &&
24422 Op.getSimpleValueType().isInteger() &&
24423 "Only handle AVX 256-bit vector integer operation");
24424 return split256IntArith(Op, DAG);
24427 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
24428 SelectionDAG &DAG) {
24429 MVT VT = Op.getSimpleValueType();
24430 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
24431 // Since X86 does not have CMOV for 8-bit integers, we don't convert
24432 // 8-bit integer abs to NEG and CMOV.
24433 SDLoc DL(Op);
24434 SDValue N0 = Op.getOperand(0);
24435 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24436 DAG.getConstant(0, DL, VT), N0);
24437 SDValue Ops[] = {N0, Neg, DAG.getConstant(X86::COND_GE, DL, MVT::i8),
24438 SDValue(Neg.getNode(), 1)};
24439 return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
24442 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
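// (Explanatory note, assuming X86ISD::BLENDV takes its selector as the first
// operand like VSELECT: the blend keys off the sign bit of each lane of X, so
// negative lanes take 0-X while non-negative lanes keep X, giving |X|.)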
24443 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
24444 SDLoc DL(Op);
24445 SDValue Src = Op.getOperand(0);
24446 SDValue Sub =
24447 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
24448 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
24451 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
24452 assert(VT.isInteger() &&
24453 "Only handle AVX 256-bit vector integer operation");
24454 return Lower256IntUnary(Op, DAG);
24457 // Default to expand.
24458 return SDValue();
24461 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
24462 MVT VT = Op.getSimpleValueType();
24464 // For AVX1 cases, split to use legal ops (everything but v4i64).
24465 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
24466 return split256IntArith(Op, DAG);
24468 SDLoc DL(Op);
24469 unsigned Opcode = Op.getOpcode();
24470 SDValue N0 = Op.getOperand(0);
24471 SDValue N1 = Op.getOperand(1);
24473 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
24474 // using the SMIN/SMAX instructions and flipping the signbit back.
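// (XOR-ing both operands with 0x8000 is an order-preserving mapping from
// unsigned i16 to signed i16, so SMIN/SMAX on the adjusted values computes
// UMIN/UMAX once the sign bit is flipped back on the result.)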
24475 if (VT == MVT::v8i16) {
24476 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
24477 "Unexpected MIN/MAX opcode");
24478 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
24479 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
24480 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
24481 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
24482 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
24483 return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
24486 // Else, expand to a compare/select.
24487 ISD::CondCode CC;
24488 switch (Opcode) {
24489 case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
24490 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
24491 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
24492 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
24493 default: llvm_unreachable("Unknown MINMAX opcode");
24496 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
24497 return DAG.getSelect(DL, VT, Cond, N0, N1);
24500 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
24501 SelectionDAG &DAG) {
24502 SDLoc dl(Op);
24503 MVT VT = Op.getSimpleValueType();
24505 if (VT.getScalarType() == MVT::i1)
24506 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
24508 // Decompose 256-bit ops into 128-bit ops.
24509 if (VT.is256BitVector() && !Subtarget.hasInt256())
24510 return split256IntArith(Op, DAG);
24512 SDValue A = Op.getOperand(0);
24513 SDValue B = Op.getOperand(1);
24515 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
24516 // vector pairs, multiply and truncate.
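// (Only the low byte of each widened product is kept, and the low 8 bits of a
// product depend only on the low 8 bits of the operands, so any-extending the
// inputs is sufficient here.)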
24517 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
24518 unsigned NumElts = VT.getVectorNumElements();
24520 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
24521 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
24522 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
24523 return DAG.getNode(
24524 ISD::TRUNCATE, dl, VT,
24525 DAG.getNode(ISD::MUL, dl, ExVT,
24526 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
24527 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
24530 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
24532 // Extract the lo/hi parts to any extend to i16.
24533 // We're going to mask off the low byte of each result element of the
24534 // pmullw, so it doesn't matter what's in the high byte of each 16-bit
24535 // element.
24536 SDValue Undef = DAG.getUNDEF(VT);
24537 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
24538 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
24540 SDValue BLo, BHi;
24541 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
24542 // If the RHS is a constant, manually unpackl/unpackh.
24543 SmallVector<SDValue, 16> LoOps, HiOps;
24544 for (unsigned i = 0; i != NumElts; i += 16) {
24545 for (unsigned j = 0; j != 8; ++j) {
24546 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
24547 MVT::i16));
24548 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
24549 MVT::i16));
24550 }
24551 }
24553 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
24554 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
24555 } else {
24556 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
24557 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
24560 // Multiply, mask the lower 8 bits of the lo/hi results and pack.
24561 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
24562 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
24563 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
24564 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
24565 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
24568 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
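// (PMULUDQ multiplies the even 32-bit lanes into 64-bit products; moving the
// odd lanes into even positions first lets a second PMULUDQ produce the odd
// products, and the final shuffle gathers the low 32 bits of each product.)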
24569 if (VT == MVT::v4i32) {
24570 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
24571 "Should not custom lower when pmulld is available!");
24573 // Extract the odd parts.
24574 static const int UnpackMask[] = { 1, -1, 3, -1 };
24575 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
24576 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
24578 // Multiply the even parts.
24579 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
24580 DAG.getBitcast(MVT::v2i64, A),
24581 DAG.getBitcast(MVT::v2i64, B));
24582 // Now multiply odd parts.
24583 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
24584 DAG.getBitcast(MVT::v2i64, Aodds),
24585 DAG.getBitcast(MVT::v2i64, Bodds));
24587 Evens = DAG.getBitcast(VT, Evens);
24588 Odds = DAG.getBitcast(VT, Odds);
24590 // Merge the two vectors back together with a shuffle. This expands into 2
24591 // shuffles.
24592 static const int ShufMask[] = { 0, 4, 2, 6 };
24593 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
24596 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
24597 "Only know how to lower V2I64/V4I64/V8I64 multiply");
24598 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
24600 // Ahi = psrlqi(a, 32);
24601 // Bhi = psrlqi(b, 32);
24603 // AloBlo = pmuludq(a, b);
24604 // AloBhi = pmuludq(a, Bhi);
24605 // AhiBlo = pmuludq(Ahi, b);
24607 // Hi = psllqi(AloBhi + AhiBlo, 32);
24608 // return AloBlo + Hi;
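// (This is the usual 64x64->64 split: with A = Ahi*2^32 + Alo and
// B = Bhi*2^32 + Blo, A*B mod 2^64 = Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32);
// the Ahi*Bhi term is shifted out entirely.)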
24609 KnownBits AKnown = DAG.computeKnownBits(A);
24610 KnownBits BKnown = DAG.computeKnownBits(B);
24612 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
24613 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
24614 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
24616 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
24617 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
24618 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
24620 SDValue Zero = DAG.getConstant(0, dl, VT);
24622 // Only multiply lo/hi halves that aren't known to be zero.
24623 SDValue AloBlo = Zero;
24624 if (!ALoIsZero && !BLoIsZero)
24625 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
24627 SDValue AloBhi = Zero;
24628 if (!ALoIsZero && !BHiIsZero) {
24629 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
24630 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
24633 SDValue AhiBlo = Zero;
24634 if (!AHiIsZero && !BLoIsZero) {
24635 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
24636 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
24639 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
24640 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
24642 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
24645 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
24646 SelectionDAG &DAG) {
24647 SDLoc dl(Op);
24648 MVT VT = Op.getSimpleValueType();
24649 bool IsSigned = Op->getOpcode() == ISD::MULHS;
24650 unsigned NumElts = VT.getVectorNumElements();
24651 SDValue A = Op.getOperand(0);
24652 SDValue B = Op.getOperand(1);
24654 // Decompose 256-bit ops into 128-bit ops.
24655 if (VT.is256BitVector() && !Subtarget.hasInt256())
24656 return split256IntArith(Op, DAG);
24658 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
24659 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
24660 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
24661 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
24663 // PMULxD operations multiply each even value (starting at 0) of LHS with
24664 // the related value of RHS and produce a widened result.
24665 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
24666 // => <2 x i64> <ae|cg>
24667 //
24668 // In other words, to have all the results, we need to perform two PMULxD:
24669 // 1. one with the even values.
24670 // 2. one with the odd values.
24671 // To achieve #2, we need to place the odd values at an even position.
24672 //
24673 // Place the odd value at an even position (basically, shift all values 1
24674 // step to the left):
24675 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
24676 9, -1, 11, -1, 13, -1, 15, -1};
24677 // <a|b|c|d> => <b|undef|d|undef>
24678 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
24679 makeArrayRef(&Mask[0], NumElts));
24680 // <e|f|g|h> => <f|undef|h|undef>
24681 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
24682 makeArrayRef(&Mask[0], NumElts));
24684 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
24685 // ints.
24686 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
24687 unsigned Opcode =
24688 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
24689 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
24690 // => <2 x i64> <ae|cg>
24691 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
24692 DAG.getBitcast(MulVT, A),
24693 DAG.getBitcast(MulVT, B)));
24694 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
24695 // => <2 x i64> <bf|dh>
24696 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
24697 DAG.getBitcast(MulVT, Odd0),
24698 DAG.getBitcast(MulVT, Odd1)));
24700 // Shuffle it back into the right order.
24701 SmallVector<int, 16> ShufMask(NumElts);
24702 for (int i = 0; i != (int)NumElts; ++i)
24703 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
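// (For v4i32 this builds the mask <1, 5, 3, 7>: the odd i32 lanes of each
// bitcast 64-bit product, i.e. the high halves, taken alternately from Mul1
// and Mul2 so the result lines up with the original element order.)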
24705 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
24707 // If we have a signed multiply but no PMULDQ fix up the result of an
24708 // unsigned multiply.
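// (Per lane, mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// modulo 2^32; the SETGT masks below build exactly those correction terms.)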
24709 if (IsSigned && !Subtarget.hasSSE41()) {
24710 SDValue Zero = DAG.getConstant(0, dl, VT);
24711 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
24712 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
24713 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
24714 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
24716 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
24717 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
24720 return Res;
24723 // Only i8 vectors should need custom lowering after this.
24724 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
24725 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
24726 "Unsupported vector type");
24728 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
24729 // logical shift down the upper half and pack back to i8.
24731 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
24732 // and then ashr/lshr the upper bits down to the lower bits before multiply.
24733 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
24735 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
24736 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
24737 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
24738 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
24739 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
24740 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
24741 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
24742 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
24745 // For signed 512-bit vectors, split into 256-bit vectors to allow the
24746 // sign-extension to occur.
24747 if (VT == MVT::v64i8 && IsSigned)
24748 return split512IntArith(Op, DAG);
24750 // Signed AVX2 implementation - extend xmm subvectors to ymm.
24751 if (VT == MVT::v32i8 && IsSigned) {
24752 MVT ExVT = MVT::v16i16;
24753 SDValue ALo = extract128BitVector(A, 0, DAG, dl);
24754 SDValue BLo = extract128BitVector(B, 0, DAG, dl);
24755 SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
24756 SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
24757 ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
24758 BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
24759 AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
24760 BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
24761 SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
24762 SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
24763 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
24764 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
24766 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
24767 // Shuffle lowering should turn this into PACKUS+PERMQ
24768 Lo = DAG.getBitcast(VT, Lo);
24769 Hi = DAG.getBitcast(VT, Hi);
24770 return DAG.getVectorShuffle(VT, dl, Lo, Hi,
24771 { 0, 2, 4, 6, 8, 10, 12, 14,
24772 16, 18, 20, 22, 24, 26, 28, 30,
24773 32, 34, 36, 38, 40, 42, 44, 46,
24774 48, 50, 52, 54, 56, 58, 60, 62});
24777 // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
24778 // half of each 128 bit lane to widen to a vXi16 type. Do the multiplies,
24779 // shift the results and pack the half lane results back together.
24781 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
24783 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
24784 -1, -1, -1, -1, -1, -1, -1, -1};
24786 // Extract the lo parts and zero/sign extend to i16.
24787 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
24788 // shifts to sign extend. Using unpack for unsigned only requires an xor to
24789 // create zeros and a copy due to tied register constraints pre-AVX. But using
24790 // zero_extend_vector_inreg would require an additional pshufd for the high
24791 // part.
24793 SDValue ALo, AHi;
24794 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
24795 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
24797 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
24798 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
24799 } else if (IsSigned) {
24800 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
24801 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
24803 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
24804 AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
24805 } else {
24806 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
24807 DAG.getConstant(0, dl, VT)));
24808 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
24809 DAG.getConstant(0, dl, VT)));
24812 SDValue BLo, BHi;
24813 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
24814 // If the RHS is a constant, manually unpackl/unpackh and extend.
24815 SmallVector<SDValue, 16> LoOps, HiOps;
24816 for (unsigned i = 0; i != NumElts; i += 16) {
24817 for (unsigned j = 0; j != 8; ++j) {
24818 SDValue LoOp = B.getOperand(i + j);
24819 SDValue HiOp = B.getOperand(i + j + 8);
24821 if (IsSigned) {
24822 LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
24823 HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
24824 } else {
24825 LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
24826 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
24827 }
24829 LoOps.push_back(LoOp);
24830 HiOps.push_back(HiOp);
24831 }
24832 }
24834 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
24835 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
24836 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
24837 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
24839 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
24840 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
24841 } else if (IsSigned) {
24842 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
24843 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
24845 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
24846 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
24847 } else {
24848 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
24849 DAG.getConstant(0, dl, VT)));
24850 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
24851 DAG.getConstant(0, dl, VT)));
24854 // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results
24855 // and pack back to vXi8.
24856 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
24857 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
24858 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
24859 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
24861 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
24862 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
24865 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
24866 assert(Subtarget.isTargetWin64() && "Unexpected target");
24867 EVT VT = Op.getValueType();
24868 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
24869 "Unexpected return type for lowering");
24871 RTLIB::Libcall LC;
24872 bool isSigned;
24873 switch (Op->getOpcode()) {
24874 default: llvm_unreachable("Unexpected request for libcall!");
24875 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
24876 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
24877 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
24878 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
24879 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
24880 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
24883 SDLoc dl(Op);
24884 SDValue InChain = DAG.getEntryNode();
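// Win64 passes integer arguments wider than 64 bits indirectly, so each i128
// operand is spilled to an aligned stack slot and the callee receives its
// address. The 128-bit result is modelled as a v2i64 returned in registers
// and bitcast back to VT below. (Descriptive note based on the Win64 calling
// convention.)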
24886 TargetLowering::ArgListTy Args;
24887 TargetLowering::ArgListEntry Entry;
24888 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
24889 EVT ArgVT = Op->getOperand(i).getValueType();
24890 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
24891 "Unexpected argument type for lowering");
24892 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
24893 Entry.Node = StackPtr;
24894 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
24895 MachinePointerInfo(), /* Alignment = */ 16);
24896 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
24897 Entry.Ty = PointerType::get(ArgTy,0);
24898 Entry.IsSExt = false;
24899 Entry.IsZExt = false;
24900 Args.push_back(Entry);
24903 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
24904 getPointerTy(DAG.getDataLayout()));
24906 TargetLowering::CallLoweringInfo CLI(DAG);
24907 CLI.setDebugLoc(dl)
24908 .setChain(InChain)
24909 .setLibCallee(
24910 getLibcallCallingConv(LC),
24911 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
24912 std::move(Args))
24913 .setInRegister()
24914 .setSExtResult(isSigned)
24915 .setZExtResult(!isSigned);
24917 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
24918 return DAG.getBitcast(VT, CallInfo.first);
24921 // Return true if the required (according to Opcode) shift-imm form is natively
24922 // supported by the Subtarget
24923 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
24924 unsigned Opcode) {
24925 if (VT.getScalarSizeInBits() < 16)
24926 return false;
24928 if (VT.is512BitVector() && Subtarget.hasAVX512() &&
24929 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
24930 return true;
24932 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
24933 (VT.is256BitVector() && Subtarget.hasInt256());
24935 bool AShift = LShift && (Subtarget.hasAVX512() ||
24936 (VT != MVT::v2i64 && VT != MVT::v4i64));
24937 return (Opcode == ISD::SRA) ? AShift : LShift;
24940 // The shift amount is a variable, but it is the same for all vector lanes.
24941 // These instructions are defined together with shift-immediate.
24942 static
24943 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
24944 unsigned Opcode) {
24945 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
24948 // Return true if the required (according to Opcode) variable-shift form is
24949 // natively supported by the Subtarget
24950 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
24951 unsigned Opcode) {
24953 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
24954 return false;
24956 // vXi16 supported only on AVX-512, BWI
24957 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
24958 return false;
24960 if (Subtarget.hasAVX512())
24961 return true;
24963 bool LShift = VT.is128BitVector() || VT.is256BitVector();
24964 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
24965 return (Opcode == ISD::SRA) ? AShift : LShift;
24968 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
24969 const X86Subtarget &Subtarget) {
24970 MVT VT = Op.getSimpleValueType();
24971 SDLoc dl(Op);
24972 SDValue R = Op.getOperand(0);
24973 SDValue Amt = Op.getOperand(1);
24974 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
24976 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
24977 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
24978 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
24979 SDValue Ex = DAG.getBitcast(ExVT, R);
24981 // ashr(R, 63) === cmp_slt(R, 0)
24982 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
24983 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
24984 "Unsupported PCMPGT op");
24985 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
24988 if (ShiftAmt >= 32) {
24989 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
24990 SDValue Upper =
24991 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
24992 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
24993 ShiftAmt - 32, DAG);
24994 if (VT == MVT::v2i64)
24995 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
24996 if (VT == MVT::v4i64)
24997 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
24998 {9, 1, 11, 3, 13, 5, 15, 7});
24999 } else {
25000 // SRA upper i32, SRL whole i64 and select lower i32.
25001 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
25002 ShiftAmt, DAG);
25003 SDValue Lower =
25004 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
25005 Lower = DAG.getBitcast(ExVT, Lower);
25006 if (VT == MVT::v2i64)
25007 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
25008 if (VT == MVT::v4i64)
25009 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25010 {8, 1, 10, 3, 12, 5, 14, 7});
25012 return DAG.getBitcast(VT, Ex);
25015 // Optimize shl/srl/sra with constant shift amount.
25016 APInt APIntShiftAmt;
25017 if (!isConstantSplat(Amt, APIntShiftAmt))
25018 return SDValue();
25019 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
25021 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
25022 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
25024 // i64 SRA needs to be performed as partial shifts.
25025 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
25026 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
25027 Op.getOpcode() == ISD::SRA)
25028 return ArithmeticShiftRight64(ShiftAmt);
25030 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
25031 VT == MVT::v64i8) {
25032 unsigned NumElts = VT.getVectorNumElements();
25033 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25035 // Simple i8 add case
25036 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
25037 return DAG.getNode(ISD::ADD, dl, VT, R, R);
25039 // ashr(R, 7) === cmp_slt(R, 0)
25040 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
25041 SDValue Zeros = DAG.getConstant(0, dl, VT);
25042 if (VT.is512BitVector()) {
25043 assert(VT == MVT::v64i8 && "Unexpected element type!");
25044 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
25045 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
25047 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
25050 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
25051 if (VT == MVT::v16i8 && Subtarget.hasXOP())
25052 return SDValue();
25054 if (Op.getOpcode() == ISD::SHL) {
25055 // Make a large shift.
25056 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
25057 ShiftAmt, DAG);
25058 SHL = DAG.getBitcast(VT, SHL);
25059 // Zero out the rightmost bits.
25060 return DAG.getNode(ISD::AND, dl, VT, SHL,
25061 DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
25063 if (Op.getOpcode() == ISD::SRL) {
25064 // Make a large shift.
25065 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
25066 ShiftAmt, DAG);
25067 SRL = DAG.getBitcast(VT, SRL);
25068 // Zero out the leftmost bits.
25069 return DAG.getNode(ISD::AND, dl, VT, SRL,
25070 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
25072 if (Op.getOpcode() == ISD::SRA) {
25073 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
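// (Mask is the sign bit shifted down by Amt; XOR-ing it into the logically
// shifted value and then subtracting it sign-extends from that bit position,
// filling the vacated upper bits with copies of the original sign.)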
25074 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25076 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
25077 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
25078 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
25079 return Res;
25081 llvm_unreachable("Unknown shift opcode.");
25084 return SDValue();
25087 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
25088 const X86Subtarget &Subtarget) {
25089 MVT VT = Op.getSimpleValueType();
25090 SDLoc dl(Op);
25091 SDValue R = Op.getOperand(0);
25092 SDValue Amt = Op.getOperand(1);
25093 unsigned Opcode = Op.getOpcode();
25094 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
25095 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
25097 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
25098 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
25099 MVT EltVT = VT.getVectorElementType();
25100 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
25101 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
25102 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
25103 else if (EltVT.bitsLT(MVT::i32))
25104 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25106 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
25109 // vXi8 shifts - shift as v8i16 + mask result.
25110 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
25111 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
25112 VT == MVT::v64i8) &&
25113 !Subtarget.hasXOP()) {
25114 unsigned NumElts = VT.getVectorNumElements();
25115 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25116 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
25117 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
25118 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
25119 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25121 // Create the mask using vXi16 shifts. For shift-rights we need to move
25122 // the upper byte down before splatting the vXi8 mask.
25123 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
25124 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
25125 BaseShAmt, Subtarget, DAG);
25126 if (Opcode != ISD::SHL)
25127 BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
25128 8, DAG);
25129 BitMask = DAG.getBitcast(VT, BitMask);
25130 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
25131 SmallVector<int, 64>(NumElts, 0));
25133 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
25134 DAG.getBitcast(ExtVT, R), BaseShAmt,
25135 Subtarget, DAG);
25136 Res = DAG.getBitcast(VT, Res);
25137 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
25139 if (Opcode == ISD::SRA) {
25140 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
25141 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
25142 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
25143 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
25144 BaseShAmt, Subtarget, DAG);
25145 SignMask = DAG.getBitcast(VT, SignMask);
25146 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
25147 Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
25149 return Res;
25154 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
25155 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
25156 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
25157 Amt = Amt.getOperand(0);
25158 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
25159 std::vector<SDValue> Vals(Ratio);
25160 for (unsigned i = 0; i != Ratio; ++i)
25161 Vals[i] = Amt.getOperand(i);
25162 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
25163 for (unsigned j = 0; j != Ratio; ++j)
25164 if (Vals[j] != Amt.getOperand(i + j))
25165 return SDValue();
25168 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
25169 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
25171 return SDValue();
25174 // Convert a shift/rotate left amount to a multiplication scale factor.
25175 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
25176 const X86Subtarget &Subtarget,
25177 SelectionDAG &DAG) {
25178 MVT VT = Amt.getSimpleValueType();
25179 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
25180 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
25181 (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
25182 return SDValue();
25184 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
25185 SmallVector<SDValue, 8> Elts;
25186 MVT SVT = VT.getVectorElementType();
25187 unsigned SVTBits = SVT.getSizeInBits();
25188 APInt One(SVTBits, 1);
25189 unsigned NumElems = VT.getVectorNumElements();
25191 for (unsigned i = 0; i != NumElems; ++i) {
25192 SDValue Op = Amt->getOperand(i);
25193 if (Op->isUndef()) {
25194 Elts.push_back(Op);
25195 continue;
25196 }
25198 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
25199 APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
25200 uint64_t ShAmt = C.getZExtValue();
25201 if (ShAmt >= SVTBits) {
25202 Elts.push_back(DAG.getUNDEF(SVT));
25203 continue;
25204 }
25205 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
25206 }
25207 return DAG.getBuildVector(VT, dl, Elts);
25208 }
25210 // If the target doesn't support variable shifts, use either FP conversion
25211 // or integer multiplication to avoid shifting each element individually.
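// (For v4i32 below, shifting the amount into the exponent field (bit 23) and
// adding the bias pattern 0x3f800000 (1.0f) builds the float 2^Amt per lane;
// FP_TO_SINT then recovers the integer power-of-two scale factor.)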
25212 if (VT == MVT::v4i32) {
25213 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
25214 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
25215 DAG.getConstant(0x3f800000U, dl, VT));
25216 Amt = DAG.getBitcast(MVT::v4f32, Amt);
25217 return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
25220 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
25221 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
25222 SDValue Z = DAG.getConstant(0, dl, VT);
25223 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
25224 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
25225 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
25226 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
25227 if (Subtarget.hasSSE41())
25228 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
25230 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
25231 DAG.getBitcast(VT, Hi),
25232 {0, 2, 4, 6, 8, 10, 12, 14});
25235 return SDValue();
25238 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
25239 SelectionDAG &DAG) {
25240 MVT VT = Op.getSimpleValueType();
25241 SDLoc dl(Op);
25242 SDValue R = Op.getOperand(0);
25243 SDValue Amt = Op.getOperand(1);
25244 unsigned EltSizeInBits = VT.getScalarSizeInBits();
25245 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25247 unsigned Opc = Op.getOpcode();
25248 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
25249 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
25251 assert(VT.isVector() && "Custom lowering only for vector shifts!");
25252 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
25254 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
25255 return V;
25257 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
25258 return V;
25260 if (SupportedVectorVarShift(VT, Subtarget, Opc))
25261 return Op;
25263 // XOP has 128-bit variable logical/arithmetic shifts.
25264 // +ve/-ve Amt = shift left/right.
25265 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
25266 VT == MVT::v8i16 || VT == MVT::v16i8)) {
25267 if (Opc == ISD::SRL || Opc == ISD::SRA) {
25268 SDValue Zero = DAG.getConstant(0, dl, VT);
25269 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
25271 if (Opc == ISD::SHL || Opc == ISD::SRL)
25272 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
25273 if (Opc == ISD::SRA)
25274 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
25277 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
25278 // shifts per-lane and then shuffle the partial results back together.
25279 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
25280 // Splat the shift amounts so the scalar shifts above will catch it.
25281 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
25282 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
25283 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
25284 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
25285 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
25288 // i64 vector arithmetic shift can be emulated with the transform:
25289 // M = lshr(SIGN_MASK, Amt)
25290 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
25291 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
25292 Opc == ISD::SRA) {
25293 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
25294 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
25295 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25296 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
25297 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
25298 return R;
25301 // If possible, lower this shift as a sequence of two shifts by
25302 // constant plus a BLENDing shuffle instead of scalarizing it.
25303 // Example:
25304 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
25306 // Could be rewritten as:
25307 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
25309 // The advantage is that the two shifts from the example would be
25310 // lowered as X86ISD::VSRLI nodes in parallel before blending.
25311 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
25312 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25313 SDValue Amt1, Amt2;
25314 unsigned NumElts = VT.getVectorNumElements();
25315 SmallVector<int, 8> ShuffleMask;
25316 for (unsigned i = 0; i != NumElts; ++i) {
25317 SDValue A = Amt->getOperand(i);
25318 if (A.isUndef()) {
25319 ShuffleMask.push_back(SM_SentinelUndef);
25320 continue;
25321 }
25322 if (!Amt1 || Amt1 == A) {
25323 ShuffleMask.push_back(i);
25324 Amt1 = A;
25325 continue;
25326 }
25327 if (!Amt2 || Amt2 == A) {
25328 ShuffleMask.push_back(i + NumElts);
25329 Amt2 = A;
25330 continue;
25331 }
25332 break;
25333 }
25335 // Only perform this blend if we can perform it without loading a mask.
25336 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
25337 (VT != MVT::v16i16 ||
25338 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
25339 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
25340 canWidenShuffleElements(ShuffleMask))) {
25341 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
25342 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
25343 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
25344 Cst2->getAPIntValue().ult(EltSizeInBits)) {
25345 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25346 Cst1->getZExtValue(), DAG);
25347 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25348 Cst2->getZExtValue(), DAG);
25349 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
25354 // If possible, lower this packed shift into a vector multiply instead of
25355 // expanding it into a sequence of scalar shifts.
25356 if (Opc == ISD::SHL)
25357 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
25358 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
25360 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
25361 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
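// (For 1 <= Amt <= 15, srl(X, Amt) == mulhu(X, 1 << (16 - Amt)); lanes with
// Amt == 0 would need a scale of 1 << 16, so they are patched back to R via
// the ZAmt select below.)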
25362 if (Opc == ISD::SRL && ConstantAmt &&
25363 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25364 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25365 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25366 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25367 SDValue Zero = DAG.getConstant(0, dl, VT);
25368 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
25369 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
25370 return DAG.getSelect(dl, VT, ZAmt, R, Res);
25374 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
25375 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
25376 // TODO: Special case handling for shift by 0/1, really we can afford either
25377 // of these cases in pre-SSE41/XOP/AVX512 but not both.
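// (Similarly, sra(X, Amt) == mulhs(X, 1 << (16 - Amt)) for 2 <= Amt <= 15;
// Amt == 0 has no representable scale and Amt == 1 would need 1 << 15, which
// is negative as an i16, so both cases get fixed up with the selects below.)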
25378 if (Opc == ISD::SRA && ConstantAmt &&
25379 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
25380 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
25381 !Subtarget.hasAVX512()) ||
25382 DAG.isKnownNeverZero(Amt))) {
25383 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25384 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25385 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25386 SDValue Amt0 =
25387 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
25388 SDValue Amt1 =
25389 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
25390 SDValue Sra1 =
25391 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
25392 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
25393 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
25394 return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
25398 // v4i32 Non Uniform Shifts.
25399 // If the shift amount is constant we can shift each lane using the SSE2
25400 // immediate shifts, else we need to zero-extend each lane to the lower i64
25401 // and shift using the SSE2 variable shifts.
25402 // The separate results can then be blended together.
25403 if (VT == MVT::v4i32) {
25404 SDValue Amt0, Amt1, Amt2, Amt3;
25405 if (ConstantAmt) {
25406 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
25407 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
25408 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
25409 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
25410 } else {
25411 // The SSE2 shifts use the lower i64 as the same shift amount for
25412 // all lanes and the upper i64 is ignored. On AVX we're better off
25413 // just zero-extending, but for SSE just duplicating the top 16-bits is
25414 // cheaper and has the same effect for out of range values.
25415 if (Subtarget.hasAVX()) {
25416 SDValue Z = DAG.getConstant(0, dl, VT);
25417 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
25418 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
25419 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
25420 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
25421 } else {
25422 SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
25423 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25424 {4, 5, 6, 7, -1, -1, -1, -1});
25425 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25426 {0, 1, 1, 1, -1, -1, -1, -1});
25427 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25428 {2, 3, 3, 3, -1, -1, -1, -1});
25429 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25430 {0, 1, 1, 1, -1, -1, -1, -1});
25431 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25432 {2, 3, 3, 3, -1, -1, -1, -1});
25436 unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
25437 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
25438 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
25439 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
25440 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
25442 // Merge the shifted lane results optimally with/without PBLENDW.
25443 // TODO - ideally shuffle combining would handle this.
25444 if (Subtarget.hasSSE41()) {
25445 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
25446 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
25447 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
25449 SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
25450 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
25451 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
25454 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
25455 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
25456 // make the existing SSE solution better.
25457 // NOTE: We honor preferred vector width before promoting to 512-bits.
25458 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
25459 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
25460 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
25461 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
25462 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
25463 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
25464 "Unexpected vector type");
25465 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
25466 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
25467 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
25468 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
25469 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
25470 return DAG.getNode(ISD::TRUNCATE, dl, VT,
25471 DAG.getNode(Opc, dl, ExtVT, R, Amt));
25474 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
25475 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
25476 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
25477 (VT == MVT::v16i8 || VT == MVT::v64i8 ||
25478 (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
25479 !Subtarget.hasXOP()) {
25480 int NumElts = VT.getVectorNumElements();
25481 SDValue Cst8 = DAG.getConstant(8, dl, MVT::i8);
25483 // Extend constant shift amount to vXi16 (it doesn't matter if the type
25484 // isn't legal).
25485 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
25486 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
25487 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
25488 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
25489 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
25490 "Constant build vector expected");
25492 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
25493 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
25494 : DAG.getZExtOrTrunc(R, dl, ExVT);
25495 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
25496 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
25497 return DAG.getZExtOrTrunc(R, dl, VT);
25500 SmallVector<SDValue, 16> LoAmt, HiAmt;
25501 for (int i = 0; i != NumElts; i += 16) {
25502 for (int j = 0; j != 8; ++j) {
25503 LoAmt.push_back(Amt.getOperand(i + j));
25504 HiAmt.push_back(Amt.getOperand(i + j + 8));
25505 }
25506 }
25508 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
25509 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
25510 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
25512 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
25513 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
25514 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
25515 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
25516 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
25517 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
25518 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
25519 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
25520 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
25523 if (VT == MVT::v16i8 ||
25524 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
25525 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
25526 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
25528 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
25529 if (VT.is512BitVector()) {
25530 // On AVX512BW targets we make use of the fact that VSELECT lowers
25531 // to a masked blend which selects bytes based just on the sign bit
25532 // extracted to a mask.
25533 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
25534 V0 = DAG.getBitcast(VT, V0);
25535 V1 = DAG.getBitcast(VT, V1);
25536 Sel = DAG.getBitcast(VT, Sel);
25537 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
25538 ISD::SETGT);
25539 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
25540 } else if (Subtarget.hasSSE41()) {
25541 // On SSE41 targets we make use of the fact that VSELECT lowers
25542 // to PBLENDVB which selects bytes based just on the sign bit.
25543 V0 = DAG.getBitcast(VT, V0);
25544 V1 = DAG.getBitcast(VT, V1);
25545 Sel = DAG.getBitcast(VT, Sel);
25546 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
25548 // On pre-SSE41 targets we test for the sign bit by comparing to
25549 // zero - a negative value will set all bits of the lanes to true
25550 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
25551 SDValue Z = DAG.getConstant(0, dl, SelVT);
25552 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
25553 return DAG.getSelect(dl, SelVT, C, V0, V1);
25556 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
25557 // We can safely do this using i16 shifts as we're only interested in
25558 // the 3 lower bits of each byte.
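// (After the << 5, bit 2 of each 3-bit amount sits in its byte's sign bit, so
// the first select applies the shift-by-4 step exactly for those lanes; each
// subsequent 'a += a' moves the next lower amount bit into the sign position
// for the shift-by-2 and shift-by-1 steps.)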
25559 Amt = DAG.getBitcast(ExtVT, Amt);
25560 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
25561 Amt = DAG.getBitcast(VT, Amt);
25563 if (Opc == ISD::SHL || Opc == ISD::SRL) {
25564 // r = VSELECT(r, shift(r, 4), a);
25565 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
25566 R = SignBitSelect(VT, Amt, M, R);
25568 // a += a
25569 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25571 // r = VSELECT(r, shift(r, 2), a);
25572 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
25573 R = SignBitSelect(VT, Amt, M, R);
25575 // a += a
25576 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25578 // return VSELECT(r, shift(r, 1), a);
25579 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
25580 R = SignBitSelect(VT, Amt, M, R);
25581 return R;
25584 if (Opc == ISD::SRA) {
25585 // For SRA we need to unpack each byte to the higher byte of a i16 vector
25586 // so we can correctly sign extend. We don't care what happens to the
25587 // lower byte.
25588 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
25589 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
25590 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
25591 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
25592 ALo = DAG.getBitcast(ExtVT, ALo);
25593 AHi = DAG.getBitcast(ExtVT, AHi);
25594 RLo = DAG.getBitcast(ExtVT, RLo);
25595 RHi = DAG.getBitcast(ExtVT, RHi);
25597 // r = VSELECT(r, shift(r, 4), a);
25598 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
25599 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
25600 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
25601 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
25603 // a += a
25604 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
25605 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
25607 // r = VSELECT(r, shift(r, 2), a);
25608 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
25609 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
25610 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
25611 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
25613 // a += a
25614 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
25615 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
25617 // r = VSELECT(r, shift(r, 1), a);
25618 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
25619 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
25620 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
25621 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
25623 // Logical shift the result back to the lower byte, leaving a zero upper
25624 // byte meaning that we can safely pack with PACKUSWB.
25625 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
25626 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
25627 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25631 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
25632 MVT ExtVT = MVT::v8i32;
25633 SDValue Z = DAG.getConstant(0, dl, VT);
25634 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
25635 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
25636 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
25637 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
25638 ALo = DAG.getBitcast(ExtVT, ALo);
25639 AHi = DAG.getBitcast(ExtVT, AHi);
25640 RLo = DAG.getBitcast(ExtVT, RLo);
25641 RHi = DAG.getBitcast(ExtVT, RHi);
25642 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
25643 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
25644 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
25645 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
25646 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
25649 if (VT == MVT::v8i16) {
25650 // If we have a constant shift amount, the non-SSE41 path is best as
25651 // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
25652 bool UseSSE41 = Subtarget.hasSSE41() &&
25653 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25655 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
25656 // On SSE41 targets we make use of the fact that VSELECT lowers
25657 // to PBLENDVB which selects bytes based just on the sign bit.
25658 if (UseSSE41) {
25659 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
25660 V0 = DAG.getBitcast(ExtVT, V0);
25661 V1 = DAG.getBitcast(ExtVT, V1);
25662 Sel = DAG.getBitcast(ExtVT, Sel);
25663 return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
25665 // On pre-SSE41 targets we splat the sign bit - a negative value will
25666 // set all bits of the lanes to true and VSELECT uses that in
25667 // its OR(AND(V0,C),AND(V1,~C)) lowering.
25668 SDValue C =
25669 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
25670 return DAG.getSelect(dl, VT, C, V0, V1);
25673 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
25674 if (UseSSE41) {
25675 // On SSE41 targets we need to replicate the shift mask in both
25676 // bytes for PBLENDVB.
25677 Amt = DAG.getNode(
25678 ISD::OR, dl, VT,
25679 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
25680 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
25681 } else {
25682 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
25685 // r = VSELECT(r, shift(r, 8), a);
25686 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
25687 R = SignBitSelect(Amt, M, R);
25689 // a += a
25690 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25692 // r = VSELECT(r, shift(r, 4), a);
25693 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
25694 R = SignBitSelect(Amt, M, R);
25696 // a += a
25697 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25699 // r = VSELECT(r, shift(r, 2), a);
25700 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
25701 R = SignBitSelect(Amt, M, R);
25703 // a += a
25704 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25706 // return VSELECT(r, shift(r, 1), a);
25707 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
25708 R = SignBitSelect(Amt, M, R);
25709 return R;
25712 // Decompose 256-bit shifts into 128-bit shifts.
25713 if (VT.is256BitVector())
25714 return split256IntArith(Op, DAG);
25716 return SDValue();
25719 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
25720 SelectionDAG &DAG) {
25721 MVT VT = Op.getSimpleValueType();
25722 assert(VT.isVector() && "Custom lowering only for vector rotates!");
25724 SDLoc DL(Op);
25725 SDValue R = Op.getOperand(0);
25726 SDValue Amt = Op.getOperand(1);
25727 unsigned Opcode = Op.getOpcode();
25728 unsigned EltSizeInBits = VT.getScalarSizeInBits();
25729 int NumElts = VT.getVectorNumElements();
25731 // Check for constant splat rotation amount.
25732 APInt UndefElts;
25733 SmallVector<APInt, 32> EltBits;
25734 int CstSplatIndex = -1;
25735 if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
25736 for (int i = 0; i != NumElts; ++i)
25737 if (!UndefElts[i]) {
25738 if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
25739 CstSplatIndex = i;
25740 continue;
25742 CstSplatIndex = -1;
25743 break;
25746 // AVX512 implicitly uses modulo rotation amounts.
25747 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
25748 // Attempt to rotate by immediate.
25749 if (0 <= CstSplatIndex) {
25750 unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
25751 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
25752 return DAG.getNode(Op, DL, VT, R,
25753 DAG.getConstant(RotateAmt, DL, MVT::i8));
25756 // Else, fall-back on VPROLV/VPRORV.
25757 return Op;
25760 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
25762 // XOP has 128-bit vector variable + immediate rotates.
25763 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
25764 // XOP implicitly uses modulo rotation amounts.
25765 if (Subtarget.hasXOP()) {
25766 if (VT.is256BitVector())
25767 return split256IntArith(Op, DAG);
25768 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
25770 // Attempt to rotate by immediate.
25771 if (0 <= CstSplatIndex) {
25772 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
25773 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
25774 DAG.getConstant(RotateAmt, DL, MVT::i8));
25777 // Use general rotate by variable (per-element).
25778 return Op;
25781 // Split 256-bit integers on pre-AVX2 targets.
25782 if (VT.is256BitVector() && !Subtarget.hasAVX2())
25783 return split256IntArith(Op, DAG);
25785 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
25786 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
25787 Subtarget.hasAVX2())) &&
25788 "Only vXi32/vXi16/vXi8 vector rotates supported");
25790 // Rotate by a uniform constant - expand back to shifts.
25791 if (0 <= CstSplatIndex)
25792 return SDValue();
25794 bool IsSplatAmt = DAG.isSplatValue(Amt);
25796 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
25797 // the amount bit.
25798 if (EltSizeInBits == 8 && !IsSplatAmt) {
25799 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
25800 return SDValue();
25802 // We don't need ModuloAmt here as we just peek at individual bits.
25803 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25805 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
25806 if (Subtarget.hasSSE41()) {
25807 // On SSE41 targets we make use of the fact that VSELECT lowers
25808 // to PBLENDVB which selects bytes based just on the sign bit.
25809 V0 = DAG.getBitcast(VT, V0);
25810 V1 = DAG.getBitcast(VT, V1);
25811 Sel = DAG.getBitcast(VT, Sel);
25812 return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
25814 // On pre-SSE41 targets we test for the sign bit by comparing to
25815 // zero - a negative value will set all bits of the lanes to true
25816 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
25817 SDValue Z = DAG.getConstant(0, DL, SelVT);
25818 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
25819 return DAG.getSelect(DL, SelVT, C, V0, V1);
25822 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
25823 // We can safely do this using i16 shifts as we're only interested in
25824 // the 3 lower bits of each byte.
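// After the shift, bit 2 of each amount byte sits in that byte's sign bit, so
// the first select below picks the rotate-by-4 result; each subsequent
// 'a += a' promotes the next lower amount bit (bit 1, then bit 0) there.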
25825 Amt = DAG.getBitcast(ExtVT, Amt);
25826 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
25827 Amt = DAG.getBitcast(VT, Amt);
25829 // r = VSELECT(r, rot(r, 4), a);
25830 SDValue M;
25831 M = DAG.getNode(
25832 ISD::OR, DL, VT,
25833 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
25834 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
25835 R = SignBitSelect(VT, Amt, M, R);
25837 // a += a
25838 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
25840 // r = VSELECT(r, rot(r, 2), a);
25841 M = DAG.getNode(
25842 ISD::OR, DL, VT,
25843 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
25844 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
25845 R = SignBitSelect(VT, Amt, M, R);
25847 // a += a
25848 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
25850 // return VSELECT(r, rot(r, 1), a);
25851 M = DAG.getNode(
25852 ISD::OR, DL, VT,
25853 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
25854 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
25855 return SignBitSelect(VT, Amt, M, R);
25858 // ISD::ROT* uses modulo rotate amounts.
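// e.g. for vXi16 the amount is ANDed with 15, so a rotate by 18 becomes a
// rotate by 2.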
25859 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
25860 DAG.getConstant(EltSizeInBits - 1, DL, VT));
25862 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25863 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
25864 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
25866 // Fallback for splats + all supported variable shifts.
25867 // Also fall back for non-constant amounts with AVX2 vXi16.
25868 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
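// rotl(x, a) == (x << a) | (x >> (w - a)) once a has been reduced mod w; when
// a == 0 the right-shift count equals w, which the x86 vector shifts lower to
// zero, so the OR still yields x.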
25869 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
25870 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
25871 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
25872 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
25873 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
25876 // As with shifts, convert the rotation amount to a multiplication factor.
25877 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
25878 assert(Scale && "Failed to convert ROTL amount to scale");
25880 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
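// With Scale == (1 << r) per lane, MUL yields x << r and MULHU yields
// x >> (16 - r), so OR'ing the two reconstructs the rotate (r == 0
// degenerates cleanly to x | 0).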
25881 if (EltSizeInBits == 16) {
25882 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
25883 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
25884 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
25887 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
25888 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
25889 // that can then be OR'd with the lower 32-bits.
25890 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
25891 static const int OddMask[] = {1, -1, 3, -1};
25892 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
25893 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
25895 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
25896 DAG.getBitcast(MVT::v2i64, R),
25897 DAG.getBitcast(MVT::v2i64, Scale));
25898 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
25899 DAG.getBitcast(MVT::v2i64, R13),
25900 DAG.getBitcast(MVT::v2i64, Scale13));
25901 Res02 = DAG.getBitcast(VT, Res02);
25902 Res13 = DAG.getBitcast(VT, Res13);
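// As v4i32, Res02 = {lo0,hi0,lo2,hi2} and Res13 = {lo1,hi1,lo3,hi3}; the two
// shuffles below gather all the low halves and all the high halves in lane
// order, so the final OR produces the per-lane rotate.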
25904 return DAG.getNode(ISD::OR, DL, VT,
25905 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
25906 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
25909 /// Returns true if the operand type is exactly twice the native width, and
25910 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
25911 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
25912 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
25913 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
25914 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
25916 if (OpWidth == 64)
25917 return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
25918 if (OpWidth == 128)
25919 return Subtarget.hasCmpxchg16b();
25921 return false;
25924 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
25925 // TODO: In 32-bit mode, use FISTP when X87 is available?
25926 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
25927 Type *MemType = SI->getValueOperand()->getType();
25929 bool NoImplicitFloatOps =
25930 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
25931 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
25932 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
25933 return false;
25935 return needsCmpXchgNb(MemType);
25938 // Note: this turns large loads into lock cmpxchg8b/16b.
25939 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
25940 TargetLowering::AtomicExpansionKind
25941 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
25942 Type *MemType = LI->getType();
25944 // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
25945 // can use movq to do the load. If we have X87 we can load into an 80-bit
25946 // X87 register and store it to a stack temporary.
25947 bool NoImplicitFloatOps =
25948 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
25949 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
25950 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
25951 (Subtarget.hasSSE2() || Subtarget.hasX87()))
25952 return AtomicExpansionKind::None;
25954 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
25955 : AtomicExpansionKind::None;
25958 TargetLowering::AtomicExpansionKind
25959 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
25960 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
25961 Type *MemType = AI->getType();
25963 // If the operand is too big, we must see if cmpxchg8/16b is available
25964 // and default to library calls otherwise.
25965 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
25966 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
25967 : AtomicExpansionKind::None;
25970 AtomicRMWInst::BinOp Op = AI->getOperation();
25971 switch (Op) {
25972 default:
25973 llvm_unreachable("Unknown atomic operation");
25974 case AtomicRMWInst::Xchg:
25975 case AtomicRMWInst::Add:
25976 case AtomicRMWInst::Sub:
25977 // It's better to use xadd, xsub or xchg for these in all cases.
25978 return AtomicExpansionKind::None;
25979 case AtomicRMWInst::Or:
25980 case AtomicRMWInst::And:
25981 case AtomicRMWInst::Xor:
25982 // If the atomicrmw's result isn't actually used, we can just add a "lock"
25983 // prefix to a normal instruction for these operations.
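// (A live result still forces a cmpxchg loop, since LOCK OR/AND/XOR do not
// produce the previous value.)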
25984 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
25985 : AtomicExpansionKind::None;
25986 case AtomicRMWInst::Nand:
25987 case AtomicRMWInst::Max:
25988 case AtomicRMWInst::Min:
25989 case AtomicRMWInst::UMax:
25990 case AtomicRMWInst::UMin:
25991 case AtomicRMWInst::FAdd:
25992 case AtomicRMWInst::FSub:
25993 // These always require a non-trivial set of data operations on x86. We must
25994 // use a cmpxchg loop.
25995 return AtomicExpansionKind::CmpXChg;
25999 LoadInst *
26000 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
26001 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26002 Type *MemType = AI->getType();
26003 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
26004 // there is no benefit in turning such RMWs into loads, and it is actually
26005 // harmful as it introduces an mfence.
26006 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
26007 return nullptr;
26009 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
26010 // lowering available in lowerAtomicArith.
26011 // TODO: push more cases through this path.
26012 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
26013 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
26014 AI->use_empty())
26015 return nullptr;
26017 auto Builder = IRBuilder<>(AI);
26018 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
26019 auto SSID = AI->getSyncScopeID();
26020 // We must restrict the ordering to avoid generating loads with Release or
26021 // ReleaseAcquire orderings.
26022 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
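// (e.g. acq_rel maps to acquire and release maps to monotonic; seq_cst is
// left unchanged.)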
26024 // Before the load we need a fence. Here is an example lifted from
26025 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
26026 // is required:
26027 // Thread 0:
26028 // x.store(1, relaxed);
26029 // r1 = y.fetch_add(0, release);
26030 // Thread 1:
26031 // y.fetch_add(42, acquire);
26032 // r2 = x.load(relaxed);
26033 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
26034 // lowered to just a load without a fence. An mfence flushes the store buffer,
26035 // making the optimization clearly correct.
26036 // FIXME: it is required if isReleaseOrStronger(Order), but it is not clear
26037 // otherwise; we might be able to be more aggressive on relaxed idempotent
26038 // rmw. In practice, they do not look useful, so we don't try to be
26039 // especially clever.
26040 if (SSID == SyncScope::SingleThread)
26041 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
26042 // the IR level, so we must wrap it in an intrinsic.
26043 return nullptr;
26045 if (!Subtarget.hasMFence())
26046 // FIXME: it might make sense to use a locked operation here but on a
26047 // different cache-line to prevent cache-line bouncing. In practice it
26048 // is probably a small win, and x86 processors without mfence are rare
26049 // enough that we do not bother.
26050 return nullptr;
26052 Function *MFence =
26053 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
26054 Builder.CreateCall(MFence, {});
26056 // Finally we can emit the atomic load.
26057 LoadInst *Loaded =
26058 Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
26059 AI->getType()->getPrimitiveSizeInBits());
26060 Loaded->setAtomic(Order, SSID);
26061 AI->replaceAllUsesWith(Loaded);
26062 AI->eraseFromParent();
26063 return Loaded;
26066 /// Emit a locked operation on a stack location which does not change any
26067 /// memory location, but does involve a lock prefix. Location is chosen to be
26068 /// a) very likely accessed only by a single thread to minimize cache traffic,
26069 /// and b) definitely dereferenceable. Returns the new Chain result.
26070 static SDValue emitLockedStackOp(SelectionDAG &DAG,
26071 const X86Subtarget &Subtarget,
26072 SDValue Chain, SDLoc DL) {
26073 // Implementation notes:
26074 // 1) LOCK prefix creates a full read/write reordering barrier for memory
26075 // operations issued by the current processor. As such, the location
26076 // referenced is not relevant for the ordering properties of the instruction.
26077 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
26078 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
26079 // 2) Using an immediate operand appears to be the best encoding choice
26080 // here since it doesn't require an extra register.
26081 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
26082 // is small enough it might just be measurement noise.)
26083 // 4) When choosing offsets, there are several contributing factors:
26084 // a) If there's no redzone, we default to TOS. (We could allocate a cache
26085 // line aligned stack object to improve this case.)
26086 // b) To minimize our chances of introducing a false dependence, we prefer
26087 // to offset the stack usage from TOS slightly.
26088 // c) To minimize concerns about cross thread stack usage - in particular,
26089 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
26090 // captures state in the TOS frame and accesses it from many threads -
26091 // we want to use an offset such that the offset is in a distinct cache
26092 // line from the TOS frame.
26094 // For a general discussion of the tradeoffs and benchmark results, see:
26095 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
26097 auto &MF = DAG.getMachineFunction();
26098 auto &TFL = *Subtarget.getFrameLowering();
26099 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
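// With a 128-byte red zone, the -64 displacement keeps the touched slot
// dereferenceable while typically placing it on a different cache line than
// the data at the top of the frame.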
26101 if (Subtarget.is64Bit()) {
26102 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26103 SDValue Ops[] = {
26104 DAG.getRegister(X86::RSP, MVT::i64), // Base
26105 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26106 DAG.getRegister(0, MVT::i64), // Index
26107 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26108 DAG.getRegister(0, MVT::i16), // Segment.
26109 Zero,
26110 Chain};
26111 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26112 MVT::Other, Ops);
26113 return SDValue(Res, 1);
26116 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26117 SDValue Ops[] = {
26118 DAG.getRegister(X86::ESP, MVT::i32), // Base
26119 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26120 DAG.getRegister(0, MVT::i32), // Index
26121 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26122 DAG.getRegister(0, MVT::i16), // Segment.
26123 Zero,
26124 Chain
26126 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26127 MVT::Other, Ops);
26128 return SDValue(Res, 1);
26131 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
26132 SelectionDAG &DAG) {
26133 SDLoc dl(Op);
26134 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
26135 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
26136 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
26137 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
26139 // The only fence that needs an instruction is a sequentially-consistent
26140 // cross-thread fence.
26141 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
26142 FenceSSID == SyncScope::System) {
26143 if (Subtarget.hasMFence())
26144 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
26146 SDValue Chain = Op.getOperand(0);
26147 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
26150 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
26151 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
26154 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
26155 SelectionDAG &DAG) {
26156 MVT T = Op.getSimpleValueType();
26157 SDLoc DL(Op);
26158 unsigned Reg = 0;
26159 unsigned size = 0;
26160 switch(T.SimpleTy) {
26161 default: llvm_unreachable("Invalid value type!");
26162 case MVT::i8: Reg = X86::AL; size = 1; break;
26163 case MVT::i16: Reg = X86::AX; size = 2; break;
26164 case MVT::i32: Reg = X86::EAX; size = 4; break;
26165 case MVT::i64:
26166 assert(Subtarget.is64Bit() && "Node not type legal!");
26167 Reg = X86::RAX; size = 8;
26168 break;
26170 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
26171 Op.getOperand(2), SDValue());
26172 SDValue Ops[] = { cpIn.getValue(0),
26173 Op.getOperand(1),
26174 Op.getOperand(3),
26175 DAG.getTargetConstant(size, DL, MVT::i8),
26176 cpIn.getValue(1) };
26177 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26178 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
26179 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
26180 Ops, T, MMO);
26182 SDValue cpOut =
26183 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
26184 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
26185 MVT::i32, cpOut.getValue(2));
26186 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
26188 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26189 cpOut, Success, EFLAGS.getValue(1));
26192 // Create MOVMSKB, taking into account whether we need to split for AVX1.
26193 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
26194 const X86Subtarget &Subtarget) {
26195 MVT InVT = V.getSimpleValueType();
26197 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
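// Without AVX2 there is no 256-bit PMOVMSKB: take the byte sign-mask of each
// 128-bit half and merge the two 16-bit results into a single 32-bit mask.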
26198 SDValue Lo, Hi;
26199 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
26200 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
26201 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
26202 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
26203 DAG.getConstant(16, DL, MVT::i8));
26204 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
26207 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
26210 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
26211 SelectionDAG &DAG) {
26212 SDValue Src = Op.getOperand(0);
26213 MVT SrcVT = Src.getSimpleValueType();
26214 MVT DstVT = Op.getSimpleValueType();
26216 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
26217 // half to v32i1 and concatenating the result.
26218 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
26219 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
26220 assert(Subtarget.hasBWI() && "Expected BWI target");
26221 SDLoc dl(Op);
26222 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26223 DAG.getIntPtrConstant(0, dl));
26224 Lo = DAG.getBitcast(MVT::v32i1, Lo);
26225 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26226 DAG.getIntPtrConstant(1, dl));
26227 Hi = DAG.getBitcast(MVT::v32i1, Hi);
26228 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
26231 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
26232 if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
26233 DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
26234 SDLoc dl(Op);
26235 SDValue Lo, Hi;
26236 std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
26237 EVT CastVT = MVT::getVectorVT(DstVT.getVectorElementType(),
26238 DstVT.getVectorNumElements() / 2);
26239 Lo = DAG.getBitcast(CastVT, Lo);
26240 Hi = DAG.getBitcast(CastVT, Hi);
26241 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
26244 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
26245 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
26246 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
26247 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
26248 SDLoc DL(Op);
26249 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
26250 V = getPMOVMSKB(DL, V, DAG, Subtarget);
26251 return DAG.getZExtOrTrunc(V, DL, DstVT);
26254 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
26255 SrcVT == MVT::i64) {
26256 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
26257 if (DstVT != MVT::f64 && DstVT != MVT::i64 &&
26258 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
26259 // This conversion needs to be expanded.
26260 return SDValue();
26262 SDLoc dl(Op);
26263 if (SrcVT.isVector()) {
26264 // Widen the input vector in the case of MVT::v2i32.
26265 // Example: from MVT::v2i32 to MVT::v4i32.
26266 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
26267 SrcVT.getVectorNumElements() * 2);
26268 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
26269 DAG.getUNDEF(SrcVT));
26270 } else {
26271 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
26272 "Unexpected source type in LowerBITCAST");
26273 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
26276 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
26277 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
26279 if (DstVT == MVT::x86mmx)
26280 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
26282 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
26283 DAG.getIntPtrConstant(0, dl));
26286 assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
26287 Subtarget.hasMMX() && "Unexpected custom BITCAST");
26288 assert((DstVT == MVT::i64 ||
26289 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
26290 "Unexpected custom BITCAST");
26291 // i64 <=> MMX conversions are Legal.
26292 if (SrcVT==MVT::i64 && DstVT.isVector())
26293 return Op;
26294 if (DstVT==MVT::i64 && SrcVT.isVector())
26295 return Op;
26296 // MMX <=> MMX conversions are Legal.
26297 if (SrcVT.isVector() && DstVT.isVector())
26298 return Op;
26299 // All other conversions need to be expanded.
26300 return SDValue();
26303 /// Compute the horizontal sum of bytes in V for the elements of VT.
26305 /// Requires V to be a byte vector and VT to be an integer vector type with
26306 /// wider elements than V's type. The width of the elements of VT determines
26307 /// how many bytes of V are summed horizontally to produce each element of the
26308 /// result.
26309 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
26310 const X86Subtarget &Subtarget,
26311 SelectionDAG &DAG) {
26312 SDLoc DL(V);
26313 MVT ByteVecVT = V.getSimpleValueType();
26314 MVT EltVT = VT.getVectorElementType();
26315 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
26316 "Expected value to have byte element type.");
26317 assert(EltVT != MVT::i8 &&
26318 "Horizontal byte sum only makes sense for wider elements!");
26319 unsigned VecSize = VT.getSizeInBits();
26320 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
26322 // The PSADBW instruction horizontally adds all bytes and leaves the result in
26323 // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
26324 if (EltVT == MVT::i64) {
26325 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
26326 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26327 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
26328 return DAG.getBitcast(VT, V);
26331 if (EltVT == MVT::i32) {
26332 // We unpack the low half and high half into i32s interleaved with zeros so
26333 // that we can use PSADBW to horizontally sum them. The most useful part of
26334 // this is that it lines up the results of two PSADBW instructions to be
26335 // two v2i64 vectors which, when concatenated, are the 4 population counts. We can
26336 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
26337 SDValue Zeros = DAG.getConstant(0, DL, VT);
26338 SDValue V32 = DAG.getBitcast(VT, V);
26339 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
26340 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
26342 // Do the horizontal sums into two v2i64s.
26343 Zeros = DAG.getConstant(0, DL, ByteVecVT);
26344 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26345 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26346 DAG.getBitcast(ByteVecVT, Low), Zeros);
26347 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26348 DAG.getBitcast(ByteVecVT, High), Zeros);
26350 // Merge them together.
26351 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
26352 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
26353 DAG.getBitcast(ShortVecVT, Low),
26354 DAG.getBitcast(ShortVecVT, High));
26356 return DAG.getBitcast(VT, V);
26359 // The only element type left is i16.
26360 assert(EltVT == MVT::i16 && "Unknown how to handle type");
26362 // To obtain pop count for each i16 element starting from the pop count for
26363 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
26364 // right by 8. It is important to shift as i16s as i8 vector shift isn't
26365 // directly supported.
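// e.g. an i16 lane whose two bytes hold the counts hi and lo ends up holding
// the single value hi + lo after the shl/add/srl sequence below.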
26366 SDValue ShifterV = DAG.getConstant(8, DL, VT);
26367 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26368 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
26369 DAG.getBitcast(ByteVecVT, V));
26370 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26373 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
26374 const X86Subtarget &Subtarget,
26375 SelectionDAG &DAG) {
26376 MVT VT = Op.getSimpleValueType();
26377 MVT EltVT = VT.getVectorElementType();
26378 int NumElts = VT.getVectorNumElements();
26379 (void)EltVT;
26380 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
26382 // Implement a lookup table in register by using an algorithm based on:
26383 // http://wm.ite.pl/articles/sse-popcount.html
26385 // The general idea is that every lower byte nibble in the input vector is an
26386 // index into an in-register pre-computed pop count table. We then split up the
26387 // input vector into two new ones: (1) a vector with only the shifted-right
26388 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
26389 // masked out higher ones) for each byte. PSHUFB is used separately with both
26390 // to index the in-register table. Next, both are added and the result is an
26391 // i8 vector where each element contains the pop count for the input byte.
26392 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
26393 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
26394 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
26395 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
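// e.g. byte 0xB7 splits into nibbles 0xB and 0x7, and LUT[0xB] + LUT[0x7]
// == 3 + 3 == popcount(0xB7).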
26397 SmallVector<SDValue, 64> LUTVec;
26398 for (int i = 0; i < NumElts; ++i)
26399 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
26400 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
26401 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
26403 // High nibbles
26404 SDValue FourV = DAG.getConstant(4, DL, VT);
26405 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
26407 // Low nibbles
26408 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
26410 // The low and high nibble vectors are used as the shuffle masks that index
26411 // elements in the LUT. After both lookups, add the results to obtain the
26412 // final pop count per i8 element.
26413 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
26414 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
26415 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
26418 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
26419 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
26420 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26421 SelectionDAG &DAG) {
26422 MVT VT = Op.getSimpleValueType();
26423 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
26424 "Unknown CTPOP type to handle");
26425 SDLoc DL(Op.getNode());
26426 SDValue Op0 = Op.getOperand(0);
26428 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
26429 if (Subtarget.hasVPOPCNTDQ()) {
26430 unsigned NumElems = VT.getVectorNumElements();
26431 assert((VT.getVectorElementType() == MVT::i8 ||
26432 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
26433 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
26434 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
26435 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
26436 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
26437 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
26441 // Decompose 256-bit ops into smaller 128-bit ops.
26442 if (VT.is256BitVector() && !Subtarget.hasInt256())
26443 return Lower256IntUnary(Op, DAG);
26445 // Decompose 512-bit ops into smaller 256-bit ops.
26446 if (VT.is512BitVector() && !Subtarget.hasBWI())
26447 return Lower512IntUnary(Op, DAG);
26449 // For element types greater than i8, do vXi8 pop counts and a bytesum.
26450 if (VT.getScalarType() != MVT::i8) {
26451 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
26452 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
26453 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
26454 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
26457 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
26458 if (!Subtarget.hasSSSE3())
26459 return SDValue();
26461 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
26464 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26465 SelectionDAG &DAG) {
26466 assert(Op.getSimpleValueType().isVector() &&
26467 "We only do custom lowering for vector population count.");
26468 return LowerVectorCTPOP(Op, Subtarget, DAG);
26471 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
26472 MVT VT = Op.getSimpleValueType();
26473 SDValue In = Op.getOperand(0);
26474 SDLoc DL(Op);
26476 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
26477 // perform the BITREVERSE.
26478 if (!VT.isVector()) {
26479 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
26480 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
26481 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
26482 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
26483 DAG.getIntPtrConstant(0, DL));
26486 int NumElts = VT.getVectorNumElements();
26487 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
26489 // Decompose 256-bit ops into smaller 128-bit ops.
26490 if (VT.is256BitVector())
26491 return Lower256IntUnary(Op, DAG);
26493 assert(VT.is128BitVector() &&
26494 "Only 128-bit vector bitreverse lowering supported.");
26496 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
26497 // perform the BSWAP in the shuffle.
26498 // It's best to shuffle using the second operand as this will implicitly allow
26499 // memory folding for multiple vectors.
26500 SmallVector<SDValue, 16> MaskElts;
26501 for (int i = 0; i != NumElts; ++i) {
26502 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
26503 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
26504 int PermuteByte = SourceByte | (2 << 5);
26505 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
26509 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
26510 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
26511 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
26512 Res, Mask);
26513 return DAG.getBitcast(VT, Res);
26516 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
26517 SelectionDAG &DAG) {
26518 MVT VT = Op.getSimpleValueType();
26520 if (Subtarget.hasXOP() && !VT.is512BitVector())
26521 return LowerBITREVERSE_XOP(Op, DAG);
26523 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
26525 SDValue In = Op.getOperand(0);
26526 SDLoc DL(Op);
26528 unsigned NumElts = VT.getVectorNumElements();
26529 assert(VT.getScalarType() == MVT::i8 &&
26530 "Only byte vector BITREVERSE supported");
26532 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
26533 if (VT.is256BitVector() && !Subtarget.hasInt256())
26534 return Lower256IntUnary(Op, DAG);
26536 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
26537 // two nibbles and a PSHUFB lookup to find the bitreverse of each
26538 // 0-15 value (moved to the other nibble).
26539 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
26540 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
26541 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
26543 const int LoLUT[16] = {
26544 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
26545 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
26546 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
26547 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
26548 const int HiLUT[16] = {
26549 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
26550 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
26551 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
26552 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
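// e.g. for the input byte 0x2D the lookups give LoLUT[0xD] == 0xB0 and
// HiLUT[0x2] == 0x04, and 0xB0 | 0x04 == 0xB4 == bitreverse(0x2D).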
26554 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
26555 for (unsigned i = 0; i < NumElts; ++i) {
26556 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
26557 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
26560 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
26561 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
26562 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
26563 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
26564 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
26567 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
26568 const X86Subtarget &Subtarget) {
26569 unsigned NewOpc = 0;
26570 switch (N->getOpcode()) {
26571 case ISD::ATOMIC_LOAD_ADD:
26572 NewOpc = X86ISD::LADD;
26573 break;
26574 case ISD::ATOMIC_LOAD_SUB:
26575 NewOpc = X86ISD::LSUB;
26576 break;
26577 case ISD::ATOMIC_LOAD_OR:
26578 NewOpc = X86ISD::LOR;
26579 break;
26580 case ISD::ATOMIC_LOAD_XOR:
26581 NewOpc = X86ISD::LXOR;
26582 break;
26583 case ISD::ATOMIC_LOAD_AND:
26584 NewOpc = X86ISD::LAND;
26585 break;
26586 default:
26587 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
26590 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
26592 return DAG.getMemIntrinsicNode(
26593 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
26594 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
26595 /*MemVT=*/N->getSimpleValueType(0), MMO);
26598 /// Lower atomic_load_ops into LOCK-prefixed operations.
26599 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
26600 const X86Subtarget &Subtarget) {
26601 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
26602 SDValue Chain = N->getOperand(0);
26603 SDValue LHS = N->getOperand(1);
26604 SDValue RHS = N->getOperand(2);
26605 unsigned Opc = N->getOpcode();
26606 MVT VT = N->getSimpleValueType(0);
26607 SDLoc DL(N);
26609 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
26610 // can only be lowered when the result is unused. They should have already
26611 // been transformed into a cmpxchg loop in AtomicExpand.
26612 if (N->hasAnyUseOfValue(0)) {
26613 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
26614 // select LXADD if LOCK_SUB can't be selected.
26615 if (Opc == ISD::ATOMIC_LOAD_SUB) {
26616 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
26617 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
26618 RHS, AN->getMemOperand());
26620 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
26621 "Used AtomicRMW ops other than Add should have been expanded!");
26622 return N;
26625 // Specialized lowering for the canonical form of an idempotent atomicrmw.
26626 // The core idea here is that since the memory location isn't actually
26627 // changing, all we need is a lowering for the *ordering* impacts of the
26628 // atomicrmw. As such, we can choose a different operation and memory
26629 // location to minimize impact on other code.
26630 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
26631 // On X86, the only ordering which actually requires an instruction is
26632 // seq_cst which isn't SingleThread; everything else just needs to be preserved
26633 // during codegen and then dropped. Note that we expect (but don't assume)
26634 // that orderings other than seq_cst and acq_rel have been canonicalized to
26635 // a store or load.
26636 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
26637 AN->getSyncScopeID() == SyncScope::System) {
26638 // Prefer a locked operation against a stack location to minimize cache
26639 // traffic. This assumes that stack locations are very likely to be
26640 // accessed only by the owning thread.
26641 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
26642 assert(!N->hasAnyUseOfValue(0));
26643 // NOTE: The getUNDEF is needed to give something for the unused result 0.
26644 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
26645 DAG.getUNDEF(VT), NewChain);
26647 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
26648 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
26649 assert(!N->hasAnyUseOfValue(0));
26650 // NOTE: The getUNDEF is needed to give something for the unused result 0.
26651 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
26652 DAG.getUNDEF(VT), NewChain);
26655 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
26656 // RAUW the chain, but don't worry about the result, as it's unused.
26657 assert(!N->hasAnyUseOfValue(0));
26658 // NOTE: The getUNDEF is needed to give something for the unused result 0.
26659 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
26660 DAG.getUNDEF(VT), LockOp.getValue(1));
26663 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
26664 const X86Subtarget &Subtarget) {
26665 auto *Node = cast<AtomicSDNode>(Op.getNode());
26666 SDLoc dl(Node);
26667 EVT VT = Node->getMemoryVT();
26669 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
26670 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
26672 // If this store is not sequentially consistent and the type is legal
26673 // we can just keep it.
26674 if (!IsSeqCst && IsTypeLegal)
26675 return Op;
26677 if (VT == MVT::i64 && !IsTypeLegal) {
26678 // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
26679 // FIXME: Use movlps with SSE1.
26680 // FIXME: Use fist with X87.
26681 bool NoImplicitFloatOps =
26682 DAG.getMachineFunction().getFunction().hasFnAttribute(
26683 Attribute::NoImplicitFloat);
26684 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
26685 Subtarget.hasSSE2()) {
26686 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
26687 Node->getOperand(2));
26688 SDVTList Tys = DAG.getVTList(MVT::Other);
26689 SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
26690 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
26691 Ops, MVT::i64,
26692 Node->getMemOperand());
26694 // If this is a sequentially consistent store, also emit an appropriate
26695 // barrier.
26696 if (IsSeqCst)
26697 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
26699 return Chain;
26703 // Convert seq_cst store -> xchg
26704 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
26705 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
26706 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
26707 Node->getMemoryVT(),
26708 Node->getOperand(0),
26709 Node->getOperand(1), Node->getOperand(2),
26710 Node->getMemOperand());
26711 return Swap.getValue(1);
26714 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
26715 SDNode *N = Op.getNode();
26716 MVT VT = N->getSimpleValueType(0);
26718 // Let legalize expand this if it isn't a legal type yet.
26719 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
26720 return SDValue();
26722 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
26723 SDLoc DL(N);
26725 // Set the carry flag.
26726 SDValue Carry = Op.getOperand(2);
26727 EVT CarryVT = Carry.getValueType();
26728 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
26729 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
26730 Carry, DAG.getConstant(NegOne, DL, CarryVT));
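// Carry + all-ones overflows exactly when the incoming carry is nonzero, so
// the flags result (Carry.getValue(1)) reproduces the carry bit for the
// ADC/SBB node built below.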
26732 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
26733 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
26734 Op.getOperand(1), Carry.getValue(1));
26736 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
26737 if (N->getValueType(1) == MVT::i1)
26738 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
26740 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
26743 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
26744 SelectionDAG &DAG) {
26745 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
26747 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
26748 // which returns the values as { float, float } (in XMM0) or
26749 // { double, double } (which is returned in XMM0, XMM1).
26750 SDLoc dl(Op);
26751 SDValue Arg = Op.getOperand(0);
26752 EVT ArgVT = Arg.getValueType();
26753 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26755 TargetLowering::ArgListTy Args;
26756 TargetLowering::ArgListEntry Entry;
26758 Entry.Node = Arg;
26759 Entry.Ty = ArgTy;
26760 Entry.IsSExt = false;
26761 Entry.IsZExt = false;
26762 Args.push_back(Entry);
26764 bool isF64 = ArgVT == MVT::f64;
26765 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
26766 // the small struct {f32, f32} is returned in (eax, edx). For f64,
26767 // the results are returned via SRet in memory.
26768 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26769 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
26770 const char *LibcallName = TLI.getLibcallName(LC);
26771 SDValue Callee =
26772 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
26774 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
26775 : (Type *)VectorType::get(ArgTy, 4);
26777 TargetLowering::CallLoweringInfo CLI(DAG);
26778 CLI.setDebugLoc(dl)
26779 .setChain(DAG.getEntryNode())
26780 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
26782 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
26784 if (isF64)
26785 // Returned in xmm0 and xmm1.
26786 return CallResult.first;
26788 // Returned in bits 0:31 and 32:63 of xmm0.
26789 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
26790 CallResult.first, DAG.getIntPtrConstant(0, dl));
26791 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
26792 CallResult.first, DAG.getIntPtrConstant(1, dl));
26793 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
26794 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
26797 /// Widen a vector input to a vector of NVT. The
26798 /// input vector must have the same element type as NVT.
26799 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
26800 bool FillWithZeroes = false) {
26801 // Check if InOp already has the right width.
26802 MVT InVT = InOp.getSimpleValueType();
26803 if (InVT == NVT)
26804 return InOp;
26806 if (InOp.isUndef())
26807 return DAG.getUNDEF(NVT);
26809 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
26810 "input and widen element type must match");
26812 unsigned InNumElts = InVT.getVectorNumElements();
26813 unsigned WidenNumElts = NVT.getVectorNumElements();
26814 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
26815 "Unexpected request for vector widening");
26817 SDLoc dl(InOp);
26818 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
26819 InOp.getNumOperands() == 2) {
26820 SDValue N1 = InOp.getOperand(1);
26821 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
26822 N1.isUndef()) {
26823 InOp = InOp.getOperand(0);
26824 InVT = InOp.getSimpleValueType();
26825 InNumElts = InVT.getVectorNumElements();
26828 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
26829 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
26830 SmallVector<SDValue, 16> Ops;
26831 for (unsigned i = 0; i < InNumElts; ++i)
26832 Ops.push_back(InOp.getOperand(i));
26834 EVT EltVT = InOp.getOperand(0).getValueType();
26836 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
26837 DAG.getUNDEF(EltVT);
26838 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
26839 Ops.push_back(FillVal);
26840 return DAG.getBuildVector(NVT, dl, Ops);
26842 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
26843 DAG.getUNDEF(NVT);
26844 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
26845 InOp, DAG.getIntPtrConstant(0, dl));
26848 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
26849 SelectionDAG &DAG) {
26850 assert(Subtarget.hasAVX512() &&
26851 "MGATHER/MSCATTER are supported on AVX-512 arch only");
26853 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
26854 SDValue Src = N->getValue();
26855 MVT VT = Src.getSimpleValueType();
26856 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
26857 SDLoc dl(Op);
26859 SDValue Scale = N->getScale();
26860 SDValue Index = N->getIndex();
26861 SDValue Mask = N->getMask();
26862 SDValue Chain = N->getChain();
26863 SDValue BasePtr = N->getBasePtr();
26865 if (VT == MVT::v2f32) {
26866 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
26867 // If the index is v2i64 and we have VLX we can use xmm for data and index.
26868 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
26869 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
26870 DAG.getUNDEF(MVT::v2f32));
26871 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
26872 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26873 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
26874 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
26875 return SDValue(NewScatter.getNode(), 1);
26877 return SDValue();
26880 if (VT == MVT::v2i32) {
26881 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
26882 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
26883 DAG.getUNDEF(MVT::v2i32));
26884 // If the index is v2i64 and we have VLX we can use xmm for data and index.
26885 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
26886 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
26887 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26888 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
26889 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
26890 return SDValue(NewScatter.getNode(), 1);
26892 // Custom widen all the operands to avoid promotion.
26893 EVT NewIndexVT = EVT::getVectorVT(
26894 *DAG.getContext(), Index.getValueType().getVectorElementType(), 4);
26895 Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
26896 DAG.getUNDEF(Index.getValueType()));
26897 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
26898 DAG.getConstant(0, dl, MVT::v2i1));
26899 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26900 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(), dl,
26901 Ops, N->getMemOperand());
26904 MVT IndexVT = Index.getSimpleValueType();
26905 MVT MaskVT = Mask.getSimpleValueType();
26907 // If the index is v2i32, we're being called by type legalization and we
26908 // should just let the default handling take care of it.
26909 if (IndexVT == MVT::v2i32)
26910 return SDValue();
26912 // If we don't have VLX and neither the passthru nor the index is 512 bits, we
26913 // need to widen until one is.
26914 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
26915 !Index.getSimpleValueType().is512BitVector()) {
26916 // Determine how much we need to widen by to get a 512-bit type.
26917 unsigned Factor = std::min(512/VT.getSizeInBits(),
26918 512/IndexVT.getSizeInBits());
26919 unsigned NumElts = VT.getVectorNumElements() * Factor;
26921 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
26922 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
26923 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
26925 Src = ExtendToType(Src, VT, DAG);
26926 Index = ExtendToType(Index, IndexVT, DAG);
26927 Mask = ExtendToType(Mask, MaskVT, DAG, true);
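// The mask is widened with zeros so the padding lanes never access memory;
// the data and index padding may be left undef.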
26930 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
26931 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26932 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
26933 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
26934 return SDValue(NewScatter.getNode(), 1);
26937 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
26938 SelectionDAG &DAG) {
26940 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
26941 MVT VT = Op.getSimpleValueType();
26942 MVT ScalarVT = VT.getScalarType();
26943 SDValue Mask = N->getMask();
26944 MVT MaskVT = Mask.getSimpleValueType();
26945 SDValue PassThru = N->getPassThru();
26946 SDLoc dl(Op);
26948 // Handle AVX masked loads which don't support passthru other than 0.
26949 if (MaskVT.getVectorElementType() != MVT::i1) {
26950 // We also allow undef in the isel pattern.
26951 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
26952 return Op;
26954 SDValue NewLoad = DAG.getMaskedLoad(VT, dl, N->getChain(),
26955 N->getBasePtr(), Mask,
26956 getZeroVector(VT, Subtarget, DAG, dl),
26957 N->getMemoryVT(), N->getMemOperand(),
26958 N->getExtensionType(),
26959 N->isExpandingLoad());
26960 // Emit a blend.
26961 SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
26962 PassThru);
26963 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
26966 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
26967 "Expanding masked load is supported on AVX-512 target only!");
26969 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
26970 "Expanding masked load is supported for 32 and 64-bit types only!");
26972 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
26973 "Cannot lower masked load op.");
26975 assert((ScalarVT.getSizeInBits() >= 32 ||
26976 (Subtarget.hasBWI() &&
26977 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
26978 "Unsupported masked load op.");
26980 // This operation is legal for targets with VLX, but without
26981 // VLX the vector should be widened to 512 bits.
26982 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
26983 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
26984 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
26986 // Mask element has to be i1.
26987 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
26988 "Unexpected mask type");
26990 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
26992 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
26993 SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
26994 N->getBasePtr(), Mask, PassThru,
26995 N->getMemoryVT(), N->getMemOperand(),
26996 N->getExtensionType(),
26997 N->isExpandingLoad());
26999 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
27000 NewLoad.getValue(0),
27001 DAG.getIntPtrConstant(0, dl));
27002 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
27003 return DAG.getMergeValues(RetOps, dl);
27006 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
27007 SelectionDAG &DAG) {
27008 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
27009 SDValue DataToStore = N->getValue();
27010 MVT VT = DataToStore.getSimpleValueType();
27011 MVT ScalarVT = VT.getScalarType();
27012 SDValue Mask = N->getMask();
27013 SDLoc dl(Op);
27015 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
27016 "Compressing masked store is supported on AVX-512 target only!");
27018 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
27019 "Compressing masked store is supported for 32 and 64-bit types only!");
27021 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27022 "Cannot lower masked store op.");
27024 assert((ScalarVT.getSizeInBits() >= 32 ||
27025 (Subtarget.hasBWI() &&
27026 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
27027 "Unsupported masked store op.");
27029 // This operation is legal for targets with VLX, but without
27030 // VLX the vector should be widened to 512 bits.
27031 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
27032 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
27034 // Mask element has to be i1.
27035 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
27036 "Unexpected mask type");
27038 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
27040 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
27041 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
27042 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
27043 Mask, N->getMemoryVT(), N->getMemOperand(),
27044 N->isTruncatingStore(), N->isCompressingStore());
27047 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
27048 SelectionDAG &DAG) {
27049 assert(Subtarget.hasAVX2() &&
27050 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
27052 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
27053 SDLoc dl(Op);
27054 MVT VT = Op.getSimpleValueType();
27055 SDValue Index = N->getIndex();
27056 SDValue Mask = N->getMask();
27057 SDValue PassThru = N->getPassThru();
27058 MVT IndexVT = Index.getSimpleValueType();
27059 MVT MaskVT = Mask.getSimpleValueType();
27061 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
27063 // If the index is v2i32, we're being called by type legalization.
27064 if (IndexVT == MVT::v2i32)
27065 return SDValue();
27067 // If we don't have VLX and neither the passthru nor the index is 512 bits, we
27068 // need to widen until one is.
27069 MVT OrigVT = VT;
27070 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27071 !IndexVT.is512BitVector()) {
27072 // Determine how much we need to widen by to get a 512-bit type.
27073 unsigned Factor = std::min(512/VT.getSizeInBits(),
27074 512/IndexVT.getSizeInBits());
27076 unsigned NumElts = VT.getVectorNumElements() * Factor;
27078 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
27079 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
27080 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
27082 PassThru = ExtendToType(PassThru, VT, DAG);
27083 Index = ExtendToType(Index, IndexVT, DAG);
27084 Mask = ExtendToType(Mask, MaskVT, DAG, true);
27087 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
27088 N->getScale() };
27089 SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
27090 DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
27091 N->getMemOperand());
27092 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
27093 NewGather, DAG.getIntPtrConstant(0, dl));
27094 return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
27097 SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
27098 SelectionDAG &DAG) const {
27099 // TODO: Eventually, the lowering of these nodes should be informed by or
27100 // deferred to the GC strategy for the function in which they appear. For
27101 // now, however, they must be lowered to something. Since they are logically
27102 // no-ops in the case of a null GC strategy (or a GC strategy which does not
27103 // require special handling for these nodes), lower them as literal NOOPs for
27104 // the time being.
27105 SmallVector<SDValue, 2> Ops;
27107 Ops.push_back(Op.getOperand(0));
27108 if (Op->getGluedNode())
27109 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
27111 SDLoc OpDL(Op);
27112 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
27113 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
27115 return NOOP;
27118 SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
27119 SelectionDAG &DAG) const {
27120 // TODO: Eventually, the lowering of these nodes should be informed by or
27121 // deferred to the GC strategy for the function in which they appear. For
27122 // now, however, they must be lowered to something. Since they are logically
27123 // no-ops in the case of a null GC strategy (or a GC strategy which does not
27124 // require special handling for these nodes), lower them as literal NOOPs for
27125 // the time being.
27126 SmallVector<SDValue, 2> Ops;
27128 Ops.push_back(Op.getOperand(0));
27129 if (Op->getGluedNode())
27130 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
27132 SDLoc OpDL(Op);
27133 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
27134 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
27136 return NOOP;
27139 /// Provide custom lowering hooks for some operations.
27140 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
27141 switch (Op.getOpcode()) {
27142 default: llvm_unreachable("Should not custom lower this!");
27143 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
27144 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
27145 return LowerCMP_SWAP(Op, Subtarget, DAG);
27146 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
27147 case ISD::ATOMIC_LOAD_ADD:
27148 case ISD::ATOMIC_LOAD_SUB:
27149 case ISD::ATOMIC_LOAD_OR:
27150 case ISD::ATOMIC_LOAD_XOR:
27151 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
27152 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
27153 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
27154 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
27155 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
27156 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
27157 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
27158 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
27159 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
27160 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
27161 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
27162 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
27163 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
27164 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
27165 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
27166 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
27167 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
27168 case ISD::SHL_PARTS:
27169 case ISD::SRA_PARTS:
27170 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
27171 case ISD::FSHL:
27172 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
27173 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
27174 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
27175 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
27176 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
27177 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
27178 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
27179 case ISD::ZERO_EXTEND_VECTOR_INREG:
27180 case ISD::SIGN_EXTEND_VECTOR_INREG:
27181 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
27182 case ISD::FP_TO_SINT:
27183 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
27184 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
27185 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
27186 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
27187 case ISD::FADD:
27188 case ISD::FSUB: return lowerFaddFsub(Op, DAG, Subtarget);
27189 case ISD::FABS:
27190 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
27191 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
27192 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
27193 case ISD::SETCC: return LowerSETCC(Op, DAG);
27194 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
27195 case ISD::SELECT: return LowerSELECT(Op, DAG);
27196 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
27197 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
27198 case ISD::VASTART: return LowerVASTART(Op, DAG);
27199 case ISD::VAARG: return LowerVAARG(Op, DAG);
27200 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
27201 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
27202 case ISD::INTRINSIC_VOID:
27203 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
27204 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
27205 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
27206 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
27207 case ISD::FRAME_TO_ARGS_OFFSET:
27208 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
27209 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
27210 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
27211 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
27212 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
27213 case ISD::EH_SJLJ_SETUP_DISPATCH:
27214 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
27215 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
27216 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
27217 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
27218 case ISD::CTLZ:
27219 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
27220 case ISD::CTTZ:
27221 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
27222 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
27223 case ISD::MULHS:
27224 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
27225 case ISD::ROTL:
27226 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
27227 case ISD::SRA:
27228 case ISD::SRL:
27229 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
27230 case ISD::SADDO:
27231 case ISD::UADDO:
27232 case ISD::SSUBO:
27233 case ISD::USUBO:
27234 case ISD::SMULO:
27235 case ISD::UMULO: return LowerXALUO(Op, DAG);
27236 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
27237 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
27238 case ISD::ADDCARRY:
27239 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
27240 case ISD::ADD:
27241 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
27242 case ISD::UADDSAT:
27243 case ISD::SADDSAT:
27244 case ISD::USUBSAT:
27245 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
27246 case ISD::SMAX:
27247 case ISD::SMIN:
27248 case ISD::UMAX:
27249 case ISD::UMIN: return LowerMINMAX(Op, DAG);
27250 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
27251 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
27252 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
27253 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
27254 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
27255 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
27256 case ISD::GC_TRANSITION_START:
27257 return LowerGC_TRANSITION_START(Op, DAG);
27258 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
27262 /// Places new result values for the node in Results (their number
27263 /// and types must exactly match those of the original return values of
27264 /// the node), or leaves Results empty, which indicates that the node is not
27265 /// to be custom lowered after all.
27266 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
27267 SmallVectorImpl<SDValue> &Results,
27268 SelectionDAG &DAG) const {
27269 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
27271 if (!Res.getNode())
27272 return;
27274 // If the original node has one result, take the return value from
27275 // LowerOperation as is. It might not be result number 0.
27276 if (N->getNumValues() == 1) {
27277 Results.push_back(Res);
27278 return;
27281 // If the original node has multiple results, then the return node should
27282 // have the same number of results.
27283 assert((N->getNumValues() == Res->getNumValues()) &&
27284 "Lowering returned the wrong number of results!");
27286 // Place the new result values based on the result numbers of N.
27287 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
27288 Results.push_back(Res.getValue(I));
27291 /// Replace a node with an illegal result type with a new node built out of
27292 /// custom code.
27293 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
27294 SmallVectorImpl<SDValue>&Results,
27295 SelectionDAG &DAG) const {
27296 SDLoc dl(N);
27297 switch (N->getOpcode()) {
27298 default:
27299 #ifndef NDEBUG
27300 dbgs() << "ReplaceNodeResults: ";
27301 N->dump(&DAG);
27302 #endif
27303 llvm_unreachable("Do not know how to custom type legalize this operation!");
27304 case ISD::CTPOP: {
27305 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
27306 // Use a v2i64 if possible.
27307 bool NoImplicitFloatOps =
27308 DAG.getMachineFunction().getFunction().hasFnAttribute(
27309 Attribute::NoImplicitFloat);
27310 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
27311 SDValue Wide =
27312 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
27313 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
27314 // Bit count should fit in 32-bits, extract it as that and then zero
27315 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
27316 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
27317 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
27318 DAG.getIntPtrConstant(0, dl));
27319 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
27320 Results.push_back(Wide);
27322 return;
27324 case ISD::MUL: {
27325 EVT VT = N->getValueType(0);
27326 assert(VT.isVector() && "Unexpected VT");
27327 if (getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger &&
27328 VT.getVectorNumElements() == 2) {
27329 // Promote to a pattern that will be turned into PMULUDQ.
27330 SDValue N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
27331 N->getOperand(0));
27332 SDValue N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
27333 N->getOperand(1));
27334 SDValue Mul = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, N0, N1);
27335 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, VT, Mul));
27336 } else if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
27337 VT.getVectorElementType() == MVT::i8) {
27338 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
27339 // elements are needed.
27340 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
27341 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
27342 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
27343 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
27344 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27345 unsigned NumConcats = 16 / VT.getVectorNumElements();
27346 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
27347 ConcatOps[0] = Res;
27348 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
27349 Results.push_back(Res);
27351 return;
27353 case ISD::UADDSAT:
27354 case ISD::SADDSAT:
27355 case ISD::USUBSAT:
27356 case ISD::SSUBSAT:
27357 case X86ISD::VPMADDWD:
27358 case X86ISD::AVG: {
27359 // Legalize types for ISD::UADDSAT/SADDSAT/USUBSAT/SSUBSAT and
27360 // X86ISD::AVG/VPMADDWD by widening.
27361 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27363 EVT VT = N->getValueType(0);
27364 EVT InVT = N->getOperand(0).getValueType();
27365 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
27366 "Expected a VT that divides into 128 bits.");
27367 unsigned NumConcat = 128 / InVT.getSizeInBits();
27369 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
27370 InVT.getVectorElementType(),
27371 NumConcat * InVT.getVectorNumElements());
27372 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
27373 VT.getVectorElementType(),
27374 NumConcat * VT.getVectorNumElements());
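// In outline: pad each operand out to 128 bits by concatenating with undef,
// perform the operation at the wide type, and, unless the type legalizer is
// already widening this type, extract the original subvector from the result.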
27376 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
27377 Ops[0] = N->getOperand(0);
27378 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
27379 Ops[0] = N->getOperand(1);
27380 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
27382 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
27383 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27384 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
27385 DAG.getIntPtrConstant(0, dl));
27386 Results.push_back(Res);
27387 return;
27389 case ISD::ABS: {
27390 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27391 assert(N->getValueType(0) == MVT::i64 &&
27392 "Unexpected type (!= i64) on ABS.");
27393 MVT HalfT = MVT::i32;
27394 SDValue Lo, Hi, Tmp;
27395 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
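// Roughly, this expands abs(x) as (x + m) ^ m, where m is the sign mask
// (the high half shifted right arithmetically by 31); the 64-bit add is
// carried across the two i32 halves with UADDO/ADDCARRY.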
27397 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
27398 DAG.getConstant(0, dl, HalfT));
27399 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
27400 DAG.getConstant(1, dl, HalfT));
27401 Tmp = DAG.getNode(
27402 ISD::SRA, dl, HalfT, Hi,
27403 DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
27404 TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
27405 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
27406 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
27407 SDValue(Lo.getNode(), 1));
27408 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
27409 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
27410 Results.push_back(Lo);
27411 Results.push_back(Hi);
27412 return;
27414 case ISD::SETCC: {
27415 // Widen v2i32 (setcc v2f32). This is really needed for AVX512VL when
27416 // setCC result type is v2i1 because type legalization will end up with
27417 // a v4i1 setcc plus an extend.
27418 assert(N->getValueType(0) == MVT::v2i32 && "Unexpected type");
27419 if (N->getOperand(0).getValueType() != MVT::v2f32 ||
27420 getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector)
27421 return;
27422 SDValue UNDEF = DAG.getUNDEF(MVT::v2f32);
27423 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27424 N->getOperand(0), UNDEF);
27425 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27426 N->getOperand(1), UNDEF);
27427 SDValue Res = DAG.getNode(ISD::SETCC, dl, MVT::v4i32, LHS, RHS,
27428 N->getOperand(2));
27429 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
27430 DAG.getIntPtrConstant(0, dl));
27431 Results.push_back(Res);
27432 return;
27434 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
27435 case X86ISD::FMINC:
27436 case X86ISD::FMIN:
27437 case X86ISD::FMAXC:
27438 case X86ISD::FMAX: {
27439 EVT VT = N->getValueType(0);
27440 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
27441 SDValue UNDEF = DAG.getUNDEF(VT);
27442 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27443 N->getOperand(0), UNDEF);
27444 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27445 N->getOperand(1), UNDEF);
27446 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
27447 return;
27449 case ISD::SDIV:
27450 case ISD::UDIV:
27451 case ISD::SREM:
27452 case ISD::UREM: {
27453 EVT VT = N->getValueType(0);
27454 if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector) {
27455 // If the RHS is a constant splat vector we can widen this and let
27456 // division/remainder by constant optimize it.
27457 // TODO: Can we do something for non-splat?
27458 APInt SplatVal;
27459 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
27460 unsigned NumConcats = 128 / VT.getSizeInBits();
27461 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
27462 Ops0[0] = N->getOperand(0);
27463 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
27464 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
27465 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
27466 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
27467 Results.push_back(Res);
27469 return;
27472 if (VT == MVT::v2i32) {
27473 // Legalize v2i32 div/rem by unrolling. Otherwise we promote to the
27474 // v2i64 and unroll later. But then we create i64 scalar ops which
27475 // might be slow in 64-bit mode or require a libcall in 32-bit mode.
27476 Results.push_back(DAG.UnrollVectorOp(N));
27477 return;
27480 if (VT.isVector())
27481 return;
27483 LLVM_FALLTHROUGH;
27485 case ISD::SDIVREM:
27486 case ISD::UDIVREM: {
27487 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
27488 Results.push_back(V);
27489 return;
27491 case ISD::TRUNCATE: {
27492 MVT VT = N->getSimpleValueType(0);
27493 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27494 return;
27496 // The generic legalizer will try to widen the input type to the same
27497 // number of elements as the widened result type. But this isn't always
27498 // the best thing, so do some custom legalization to avoid some cases.
27499 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
27500 SDValue In = N->getOperand(0);
27501 EVT InVT = In.getValueType();
27503 unsigned InBits = InVT.getSizeInBits();
27504 if (128 % InBits == 0) {
27505 // 128-bit and smaller inputs should avoid truncate altogether and
27506 // just use a build_vector that will become a shuffle.
27507 // TODO: Widen and use a shuffle directly?
27508 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
27509 EVT EltVT = VT.getVectorElementType();
27510 unsigned WidenNumElts = WidenVT.getVectorNumElements();
27511 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
27512 // Use the original element count so we don't do more scalar opts than
27513 // necessary.
27514 unsigned MinElts = VT.getVectorNumElements();
27515 for (unsigned i=0; i < MinElts; ++i) {
27516 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
27517 DAG.getIntPtrConstant(i, dl));
27518 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
27520 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
27521 return;
27523 // With AVX512 there are some cases that can use a target specific
27524 // truncate node to go from 256/512 to less than 128 with zeros in the
27525 // upper elements of the 128 bit result.
27526 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
27527 // We can use VTRUNC directly for 256 bits with VLX or for any 512 bits.
27528 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
27529 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
27530 return;
27532 // There's one case we can widen to 512 bits and use VTRUNC.
27533 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
27534 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
27535 DAG.getUNDEF(MVT::v4i64));
27536 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
27537 return;
27540 return;
27542 case ISD::SIGN_EXTEND_VECTOR_INREG: {
27543 if (ExperimentalVectorWideningLegalization)
27544 return;
27546 EVT VT = N->getValueType(0);
27547 SDValue In = N->getOperand(0);
27548 EVT InVT = In.getValueType();
27549 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
27550 (InVT == MVT::v16i16 || InVT == MVT::v32i8)) {
27551 // Custom split this so we can extend i8/i16->i32 invec. This is better
27552 // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
27553 // sra, then extending from i32 to i64 using pcmpgt. By custom splitting
27554 // we allow the sra from the extend to i32 to be shared by the split.
27555 EVT ExtractVT = EVT::getVectorVT(*DAG.getContext(),
27556 InVT.getVectorElementType(),
27557 InVT.getVectorNumElements() / 2);
27558 MVT ExtendVT = MVT::getVectorVT(MVT::i32,
27559 VT.getVectorNumElements());
27560 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExtractVT,
27561 In, DAG.getIntPtrConstant(0, dl));
27562 In = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, MVT::v4i32, In);
27564 // Fill a vector with sign bits for each element.
27565 SDValue Zero = DAG.getConstant(0, dl, ExtendVT);
27566 SDValue SignBits = DAG.getSetCC(dl, ExtendVT, Zero, In, ISD::SETGT);
27568 EVT LoVT, HiVT;
27569 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
27571 // Create an unpackl and unpackh to interleave the sign bits then bitcast
27572 // to vXi64.
27573 SDValue Lo = getUnpackl(DAG, dl, ExtendVT, In, SignBits);
27574 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
27575 SDValue Hi = getUnpackh(DAG, dl, ExtendVT, In, SignBits);
27576 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
27578 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
27579 Results.push_back(Res);
27580 return;
27582 return;
27584 case ISD::SIGN_EXTEND:
27585 case ISD::ZERO_EXTEND: {
27586 EVT VT = N->getValueType(0);
27587 SDValue In = N->getOperand(0);
27588 EVT InVT = In.getValueType();
27589 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
27590 (InVT == MVT::v4i16 || InVT == MVT::v4i8) &&
27591 getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector) {
27592 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
27593 // Custom split this so we can extend i8/i16->i32 invec. This is better
27594 // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
27595 // sra, then extending from i32 to i64 using pcmpgt. By custom splitting
27596 // we allow the sra from the extend to i32 to be shared by the split.
27597 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
27599 // Fill a vector with sign bits for each element.
27600 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
27601 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
27603 // Create an unpackl and unpackh to interleave the sign bits then bitcast
27604 // to v2i64.
27605 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
27606 {0, 4, 1, 5});
27607 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
27608 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
27609 {2, 6, 3, 7});
27610 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
27612 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
27613 Results.push_back(Res);
27614 return;
27617 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
27618 if (!InVT.is128BitVector()) {
27619 // Not a 128 bit vector, but maybe type legalization will promote
27620 // it to 128 bits.
27621 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
27622 return;
27623 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
27624 if (!InVT.is128BitVector())
27625 return;
27627 // Promote the input to 128 bits. Type legalization will turn this into
27628 // zext_inreg/sext_inreg.
27629 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
27632 // Perform custom splitting instead of the two-stage extend we would get
27633 // by default.
27634 EVT LoVT, HiVT;
27635 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
27636 assert(isTypeLegal(LoVT) && "Split VT not legal?");
27638 SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
27640 // We need to shift the input over by half the number of elements.
27641 unsigned NumElts = InVT.getVectorNumElements();
27642 unsigned HalfNumElts = NumElts / 2;
27643 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
27644 for (unsigned i = 0; i != HalfNumElts; ++i)
27645 ShufMask[i] = i + HalfNumElts;
27647 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
27648 Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
27650 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
27651 Results.push_back(Res);
27653 return;
27655 case ISD::FP_TO_SINT:
27656 case ISD::FP_TO_UINT: {
27657 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
27658 EVT VT = N->getValueType(0);
27659 SDValue Src = N->getOperand(0);
27660 EVT SrcVT = Src.getValueType();
27662 // Promote these manually to avoid over promotion to v2i64. Type
27663 // legalization will revisit the v2i32 operation for more cleanup.
27664 if ((VT == MVT::v2i8 || VT == MVT::v2i16) &&
27665 getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger) {
27666 // AVX512DQ provides instructions that produce a v2i64 result.
27667 if (Subtarget.hasDQI())
27668 return;
27670 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v2i32, Src);
27671 Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
27672 : ISD::AssertSext,
27673 dl, MVT::v2i32, Res,
27674 DAG.getValueType(VT.getVectorElementType()));
27675 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27676 Results.push_back(Res);
27677 return;
27680 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
27681 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27682 return;
27684 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
27685 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
27686 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
27687 VT.getVectorNumElements());
27688 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
27690 // Preserve what we know about the size of the original result. Except
27691 // when the result is v2i32 since we can't widen the assert.
27692 if (PromoteVT != MVT::v2i32)
27693 Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
27694 : ISD::AssertSext,
27695 dl, PromoteVT, Res,
27696 DAG.getValueType(VT.getVectorElementType()));
27698 // Truncate back to the original width.
27699 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27701 // Now widen to 128 bits.
27702 unsigned NumConcats = 128 / VT.getSizeInBits();
27703 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
27704 VT.getVectorNumElements() * NumConcats);
27705 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
27706 ConcatOps[0] = Res;
27707 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
27708 Results.push_back(Res);
27709 return;
27713 if (VT == MVT::v2i32) {
27714 assert((IsSigned || Subtarget.hasAVX512()) &&
27715 "Can only handle signed conversion without AVX512");
27716 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27717 bool Widenv2i32 =
27718 getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector;
27719 if (Src.getValueType() == MVT::v2f64) {
27720 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
27721 if (!IsSigned && !Subtarget.hasVLX()) {
27722 // If v2i32 is widened, we can defer to the generic legalizer.
27723 if (Widenv2i32)
27724 return;
27725 // Custom widen by doubling to a legal vector width. Isel will
27726 // further widen to v8f64.
27727 Opc = ISD::FP_TO_UINT;
27728 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64,
27729 Src, DAG.getUNDEF(MVT::v2f64));
27731 SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
27732 if (!Widenv2i32)
27733 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
27734 DAG.getIntPtrConstant(0, dl));
27735 Results.push_back(Res);
27736 return;
27738 if (SrcVT == MVT::v2f32 &&
27739 getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
27740 SDValue Idx = DAG.getIntPtrConstant(0, dl);
27741 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
27742 DAG.getUNDEF(MVT::v2f32));
27743 Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT
27744 : ISD::FP_TO_UINT, dl, MVT::v4i32, Res);
27745 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
27746 Results.push_back(Res);
27747 return;
27750 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
27751 // so early out here.
27752 return;
27755 if (Subtarget.hasDQI() && VT == MVT::i64 &&
27756 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
27757 assert(!Subtarget.is64Bit() && "i64 should be legal");
27758 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
27759 // Using a 256-bit input here to guarantee 128-bit input for f32 case.
27760 // TODO: Use 128-bit vectors for f64 case?
27761 // TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
27762 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
27763 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);
27765 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
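// Insert the scalar source into lane 0 of a zero vector, convert the whole
// vector, and read lane 0 back out as the scalar i64 result.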
27766 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
27767 DAG.getConstantFP(0.0, dl, VecInVT), Src,
27768 ZeroIdx);
27769 Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
27770 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
27771 Results.push_back(Res);
27772 return;
27775 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
27776 Results.push_back(V);
27777 return;
27779 case ISD::SINT_TO_FP: {
27780 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
27781 SDValue Src = N->getOperand(0);
27782 if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
27783 return;
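// With DQI and VLX the v2i64 source converts directly; the X86 CVTSI2P node
// yields a v4f32 that stands in for the widened v2f32 result.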
27784 Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
27785 return;
27787 case ISD::UINT_TO_FP: {
27788 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27789 EVT VT = N->getValueType(0);
27790 if (VT != MVT::v2f32)
27791 return;
27792 SDValue Src = N->getOperand(0);
27793 EVT SrcVT = Src.getValueType();
27794 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
27795 Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
27796 return;
27798 if (SrcVT != MVT::v2i32)
27799 return;
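// Roughly: zero-extend each i32 into the low bits of 0x4330000000000000
// (2^52), so the bit pattern read as an f64 equals 2^52 + x; subtracting
// 2^52 recovers x exactly, and the v2f64 result is then rounded to v2f32.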
27800 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
27801 SDValue VBias =
27802 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
27803 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
27804 DAG.getBitcast(MVT::v2i64, VBias));
27805 Or = DAG.getBitcast(MVT::v2f64, Or);
27806 // TODO: Are there any fast-math-flags to propagate here?
27807 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
27808 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
27809 return;
27811 case ISD::FP_ROUND: {
27812 if (!isTypeLegal(N->getOperand(0).getValueType()))
27813 return;
27814 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
27815 Results.push_back(V);
27816 return;
27818 case ISD::FP_EXTEND: {
27819 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
27820 // No other ValueType for FP_EXTEND should reach this point.
27821 assert(N->getValueType(0) == MVT::v2f32 &&
27822 "Do not know how to legalize this Node");
27823 return;
27825 case ISD::INTRINSIC_W_CHAIN: {
27826 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
27827 switch (IntNo) {
27828 default : llvm_unreachable("Do not know how to custom type "
27829 "legalize this intrinsic operation!");
27830 case Intrinsic::x86_rdtsc:
27831 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
27832 Results);
27833 case Intrinsic::x86_rdtscp:
27834 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
27835 Results);
27836 case Intrinsic::x86_rdpmc:
27837 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
27838 Results);
27839 return;
27840 case Intrinsic::x86_xgetbv:
27841 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
27842 Results);
27843 return;
27846 case ISD::READCYCLECOUNTER: {
27847 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
27849 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
27850 EVT T = N->getValueType(0);
27851 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
27852 bool Regs64bit = T == MVT::i128;
27853 assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
27854 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
27855 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
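// In outline: split the expected value into EAX/EDX (RAX/RDX for i128) and
// the replacement into EBX/ECX (RBX/RCX), emit CMPXCHG8B/CMPXCHG16B (or an
// RBX-saving pseudo when RBX doubles as the base pointer), read the old
// value back from EAX/EDX, and derive the success flag from EFLAGS ZF.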
27856 SDValue cpInL, cpInH;
27857 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
27858 DAG.getConstant(0, dl, HalfT));
27859 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
27860 DAG.getConstant(1, dl, HalfT));
27861 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
27862 Regs64bit ? X86::RAX : X86::EAX,
27863 cpInL, SDValue());
27864 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
27865 Regs64bit ? X86::RDX : X86::EDX,
27866 cpInH, cpInL.getValue(1));
27867 SDValue swapInL, swapInH;
27868 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
27869 DAG.getConstant(0, dl, HalfT));
27870 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
27871 DAG.getConstant(1, dl, HalfT));
27872 swapInH =
27873 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
27874 swapInH, cpInH.getValue(1));
27875 // If the current function needs the base pointer, RBX,
27876 // we shouldn't use cmpxchg directly.
27877 // The lowering of that instruction will clobber that register,
27878 // and since RBX will then be a reserved register, the register
27879 // allocator will not make sure its value is properly saved and
27880 // restored around this live range.
27881 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
27882 SDValue Result;
27883 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
27884 unsigned BasePtr = TRI->getBaseRegister();
27885 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
27886 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
27887 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
27888 // ISel prefers the LCMPXCHG64 variant.
27889 // If that assert breaks, that means it is not the case anymore,
27890 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
27891 // not just EBX. This is a matter of accepting i64 input for that
27892 // pseudo, and restoring into the register of the right width
27893 // in the expand pseudo. Everything else should just work.
27894 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
27895 "Saving only half of the RBX");
27896 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
27897 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
27898 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
27899 Regs64bit ? X86::RBX : X86::EBX,
27900 HalfT, swapInH.getValue(1));
27901 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
27902 RBXSave,
27903 /*Glue*/ RBXSave.getValue(2)};
27904 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
27905 } else {
27906 unsigned Opcode =
27907 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
27908 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
27909 Regs64bit ? X86::RBX : X86::EBX, swapInL,
27910 swapInH.getValue(1));
27911 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
27912 swapInL.getValue(1)};
27913 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
27915 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
27916 Regs64bit ? X86::RAX : X86::EAX,
27917 HalfT, Result.getValue(1));
27918 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
27919 Regs64bit ? X86::RDX : X86::EDX,
27920 HalfT, cpOutL.getValue(2));
27921 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
27923 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
27924 MVT::i32, cpOutH.getValue(2));
27925 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
27926 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
27928 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
27929 Results.push_back(Success);
27930 Results.push_back(EFLAGS.getValue(1));
27931 return;
27933 case ISD::ATOMIC_LOAD: {
27934 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
27935 bool NoImplicitFloatOps =
27936 DAG.getMachineFunction().getFunction().hasFnAttribute(
27937 Attribute::NoImplicitFloat);
27938 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
27939 auto *Node = cast<AtomicSDNode>(N);
27940 if (Subtarget.hasSSE2()) {
27941 // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
27942 // lower 64-bits.
27943 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
27944 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
27945 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
27946 MVT::i64, Node->getMemOperand());
27947 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
27948 DAG.getIntPtrConstant(0, dl));
27949 Results.push_back(Res);
27950 Results.push_back(Ld.getValue(1));
27951 return;
27953 if (Subtarget.hasX87()) {
27954 // First load this into an 80-bit X87 register. This will put the whole
27955 // integer into the significand.
27956 // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
27957 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
27958 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
27959 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
27960 dl, Tys, Ops, MVT::i64,
27961 Node->getMemOperand());
27962 SDValue Chain = Result.getValue(1);
27963 SDValue InFlag = Result.getValue(2);
27965 // Now store the X87 register to a stack temporary and convert to i64.
27966 // This store is not atomic and doesn't need to be.
27967 // FIXME: We don't need a stack temporary if the result of the load
27968 // is already being stored. We could just directly store there.
27969 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
27970 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
27971 MachinePointerInfo MPI =
27972 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
27973 SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
27974 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
27975 DAG.getVTList(MVT::Other), StoreOps,
27976 MVT::i64, MPI, 0 /*Align*/,
27977 MachineMemOperand::MOStore);
27979 // Finally load the value back from the stack temporary and return it.
27980 // This load is not atomic and doesn't need to be.
27981 // This load will be further type legalized.
27982 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
27983 Results.push_back(Result);
27984 Results.push_back(Result.getValue(1));
27985 return;
27988 // TODO: Use MOVLPS when SSE1 is available?
27989 // Delegate to generic TypeLegalization. Situations we can really handle
27990 // should have already been dealt with by AtomicExpandPass.cpp.
27991 break;
27993 case ISD::ATOMIC_SWAP:
27994 case ISD::ATOMIC_LOAD_ADD:
27995 case ISD::ATOMIC_LOAD_SUB:
27996 case ISD::ATOMIC_LOAD_AND:
27997 case ISD::ATOMIC_LOAD_OR:
27998 case ISD::ATOMIC_LOAD_XOR:
27999 case ISD::ATOMIC_LOAD_NAND:
28000 case ISD::ATOMIC_LOAD_MIN:
28001 case ISD::ATOMIC_LOAD_MAX:
28002 case ISD::ATOMIC_LOAD_UMIN:
28003 case ISD::ATOMIC_LOAD_UMAX:
28004 // Delegate to generic TypeLegalization. Situations we can really handle
28005 // should have already been dealt with by AtomicExpandPass.cpp.
28006 break;
28008 case ISD::BITCAST: {
28009 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28010 EVT DstVT = N->getValueType(0);
28011 EVT SrcVT = N->getOperand(0).getValueType();
28013 // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target
28014 // we can split using the k-register rather than memory.
28015 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
28016 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
28017 SDValue Lo, Hi;
28018 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
28019 Lo = DAG.getBitcast(MVT::i32, Lo);
28020 Hi = DAG.getBitcast(MVT::i32, Hi);
28021 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
28022 Results.push_back(Res);
28023 return;
28026 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
28027 if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
28028 SrcVT.isVector() && isTypeLegal(SrcVT)) {
28029 SDValue Lo, Hi;
28030 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
28031 MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
28032 Lo = DAG.getBitcast(CastVT, Lo);
28033 Hi = DAG.getBitcast(CastVT, Hi);
28034 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
28035 Results.push_back(Res);
28036 return;
28039 if (SrcVT != MVT::f64 ||
28040 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8) ||
28041 getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector)
28042 return;
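// Turn the f64 bitcast into a v2f64 via SCALAR_TO_VECTOR, bitcast that to
// the twice-as-wide integer vector, and extract the low half as the
// requested type.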
28044 unsigned NumElts = DstVT.getVectorNumElements();
28045 EVT SVT = DstVT.getVectorElementType();
28046 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
28047 SDValue Res;
28048 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, N->getOperand(0));
28049 Res = DAG.getBitcast(WiderVT, Res);
28050 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res,
28051 DAG.getIntPtrConstant(0, dl));
28052 Results.push_back(Res);
28053 return;
28055 case ISD::MGATHER: {
28056 EVT VT = N->getValueType(0);
28057 if (VT == MVT::v2f32 && (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
28058 auto *Gather = cast<MaskedGatherSDNode>(N);
28059 SDValue Index = Gather->getIndex();
28060 if (Index.getValueType() != MVT::v2i64)
28061 return;
28062 SDValue Mask = Gather->getMask();
28063 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28064 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28065 Gather->getPassThru(),
28066 DAG.getUNDEF(MVT::v2f32));
28067 if (!Subtarget.hasVLX()) {
28068 // We need to widen the mask, but the instruction will only use 2
28069 // of its elements. So we can use undef.
28070 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28071 DAG.getUNDEF(MVT::v2i1));
28072 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
28074 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28075 Gather->getBasePtr(), Index, Gather->getScale() };
28076 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28077 DAG.getVTList(MVT::v4f32, Mask.getValueType(), MVT::Other), Ops, dl,
28078 Gather->getMemoryVT(), Gather->getMemOperand());
28079 Results.push_back(Res);
28080 Results.push_back(Res.getValue(2));
28081 return;
28083 if (VT == MVT::v2i32) {
28084 auto *Gather = cast<MaskedGatherSDNode>(N);
28085 SDValue Index = Gather->getIndex();
28086 SDValue Mask = Gather->getMask();
28087 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28088 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32,
28089 Gather->getPassThru(),
28090 DAG.getUNDEF(MVT::v2i32));
28091 // If the index is v2i64 we can use it directly.
28092 if (Index.getValueType() == MVT::v2i64 &&
28093 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
28094 if (!Subtarget.hasVLX()) {
28095 // We need to widen the mask, but the instruction will only use 2
28096 // of its elements. So we can use undef.
28097 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28098 DAG.getUNDEF(MVT::v2i1));
28099 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
28101 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28102 Gather->getBasePtr(), Index, Gather->getScale() };
28103 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28104 DAG.getVTList(MVT::v4i32, Mask.getValueType(), MVT::Other), Ops, dl,
28105 Gather->getMemoryVT(), Gather->getMemOperand());
28106 SDValue Chain = Res.getValue(2);
28107 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28108 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
28109 DAG.getIntPtrConstant(0, dl));
28110 Results.push_back(Res);
28111 Results.push_back(Chain);
28112 return;
28114 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
28115 EVT IndexVT = Index.getValueType();
28116 EVT NewIndexVT = EVT::getVectorVT(*DAG.getContext(),
28117 IndexVT.getScalarType(), 4);
28118 // Otherwise we need to custom widen everything to avoid promotion.
28119 Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
28120 DAG.getUNDEF(IndexVT));
28121 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28122 DAG.getConstant(0, dl, MVT::v2i1));
28123 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28124 Gather->getBasePtr(), Index, Gather->getScale() };
28125 SDValue Res = DAG.getMaskedGather(DAG.getVTList(MVT::v4i32, MVT::Other),
28126 Gather->getMemoryVT(), dl, Ops,
28127 Gather->getMemOperand());
28128 SDValue Chain = Res.getValue(1);
28129 if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector)
28130 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
28131 DAG.getIntPtrConstant(0, dl));
28132 Results.push_back(Res);
28133 Results.push_back(Chain);
28134 return;
28137 return;
28139 case ISD::LOAD: {
28140 // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
28141 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
28142 // cast since type legalization will try to use an i64 load.
28143 MVT VT = N->getSimpleValueType(0);
28144 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
28145 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28146 return;
28147 if (!ISD::isNON_EXTLoad(N))
28148 return;
28149 auto *Ld = cast<LoadSDNode>(N);
28150 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
28151 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
28152 Ld->getPointerInfo(),
28153 Ld->getAlignment(),
28154 Ld->getMemOperand()->getFlags());
28155 SDValue Chain = Res.getValue(1);
28156 MVT WideVT = MVT::getVectorVT(LdVT, 2);
28157 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, WideVT, Res);
28158 MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(),
28159 VT.getVectorNumElements() * 2);
28160 Res = DAG.getBitcast(CastVT, Res);
28161 Results.push_back(Res);
28162 Results.push_back(Chain);
28163 return;
28168 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
28169 switch ((X86ISD::NodeType)Opcode) {
28170 case X86ISD::FIRST_NUMBER: break;
28171 case X86ISD::BSF: return "X86ISD::BSF";
28172 case X86ISD::BSR: return "X86ISD::BSR";
28173 case X86ISD::SHLD: return "X86ISD::SHLD";
28174 case X86ISD::SHRD: return "X86ISD::SHRD";
28175 case X86ISD::FAND: return "X86ISD::FAND";
28176 case X86ISD::FANDN: return "X86ISD::FANDN";
28177 case X86ISD::FOR: return "X86ISD::FOR";
28178 case X86ISD::FXOR: return "X86ISD::FXOR";
28179 case X86ISD::FILD: return "X86ISD::FILD";
28180 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
28181 case X86ISD::FIST: return "X86ISD::FIST";
28182 case X86ISD::FP_TO_INT_IN_MEM: return "X86ISD::FP_TO_INT_IN_MEM";
28183 case X86ISD::FLD: return "X86ISD::FLD";
28184 case X86ISD::FST: return "X86ISD::FST";
28185 case X86ISD::CALL: return "X86ISD::CALL";
28186 case X86ISD::BT: return "X86ISD::BT";
28187 case X86ISD::CMP: return "X86ISD::CMP";
28188 case X86ISD::COMI: return "X86ISD::COMI";
28189 case X86ISD::UCOMI: return "X86ISD::UCOMI";
28190 case X86ISD::CMPM: return "X86ISD::CMPM";
28191 case X86ISD::CMPM_SAE: return "X86ISD::CMPM_SAE";
28192 case X86ISD::SETCC: return "X86ISD::SETCC";
28193 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
28194 case X86ISD::FSETCC: return "X86ISD::FSETCC";
28195 case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
28196 case X86ISD::FSETCCM_SAE: return "X86ISD::FSETCCM_SAE";
28197 case X86ISD::CMOV: return "X86ISD::CMOV";
28198 case X86ISD::BRCOND: return "X86ISD::BRCOND";
28199 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
28200 case X86ISD::IRET: return "X86ISD::IRET";
28201 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
28202 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
28203 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
28204 case X86ISD::Wrapper: return "X86ISD::Wrapper";
28205 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
28206 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
28207 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
28208 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
28209 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
28210 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
28211 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
28212 case X86ISD::PINSRB: return "X86ISD::PINSRB";
28213 case X86ISD::PINSRW: return "X86ISD::PINSRW";
28214 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
28215 case X86ISD::ANDNP: return "X86ISD::ANDNP";
28216 case X86ISD::BLENDI: return "X86ISD::BLENDI";
28217 case X86ISD::BLENDV: return "X86ISD::BLENDV";
28218 case X86ISD::HADD: return "X86ISD::HADD";
28219 case X86ISD::HSUB: return "X86ISD::HSUB";
28220 case X86ISD::FHADD: return "X86ISD::FHADD";
28221 case X86ISD::FHSUB: return "X86ISD::FHSUB";
28222 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
28223 case X86ISD::FMAX: return "X86ISD::FMAX";
28224 case X86ISD::FMAXS: return "X86ISD::FMAXS";
28225 case X86ISD::FMAX_SAE: return "X86ISD::FMAX_SAE";
28226 case X86ISD::FMAXS_SAE: return "X86ISD::FMAXS_SAE";
28227 case X86ISD::FMIN: return "X86ISD::FMIN";
28228 case X86ISD::FMINS: return "X86ISD::FMINS";
28229 case X86ISD::FMIN_SAE: return "X86ISD::FMIN_SAE";
28230 case X86ISD::FMINS_SAE: return "X86ISD::FMINS_SAE";
28231 case X86ISD::FMAXC: return "X86ISD::FMAXC";
28232 case X86ISD::FMINC: return "X86ISD::FMINC";
28233 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
28234 case X86ISD::FRCP: return "X86ISD::FRCP";
28235 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
28236 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
28237 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
28238 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
28239 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
28240 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
28241 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
28242 case X86ISD::EH_SJLJ_SETUP_DISPATCH:
28243 return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
28244 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
28245 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
28246 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
28247 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
28248 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
28249 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
28250 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
28251 case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
28252 return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
28253 case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
28254 return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
28255 case X86ISD::LADD: return "X86ISD::LADD";
28256 case X86ISD::LSUB: return "X86ISD::LSUB";
28257 case X86ISD::LOR: return "X86ISD::LOR";
28258 case X86ISD::LXOR: return "X86ISD::LXOR";
28259 case X86ISD::LAND: return "X86ISD::LAND";
28260 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
28261 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
28262 case X86ISD::VEXTRACT_STORE: return "X86ISD::VEXTRACT_STORE";
28263 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
28264 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
28265 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
28266 case X86ISD::VMTRUNC: return "X86ISD::VMTRUNC";
28267 case X86ISD::VMTRUNCS: return "X86ISD::VMTRUNCS";
28268 case X86ISD::VMTRUNCUS: return "X86ISD::VMTRUNCUS";
28269 case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
28270 case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
28271 case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
28272 case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
28273 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
28274 case X86ISD::VFPEXT_SAE: return "X86ISD::VFPEXT_SAE";
28275 case X86ISD::VFPEXTS: return "X86ISD::VFPEXTS";
28276 case X86ISD::VFPEXTS_SAE: return "X86ISD::VFPEXTS_SAE";
28277 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
28278 case X86ISD::VMFPROUND: return "X86ISD::VMFPROUND";
28279 case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
28280 case X86ISD::VFPROUNDS: return "X86ISD::VFPROUNDS";
28281 case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
28282 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
28283 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
28284 case X86ISD::VSHL: return "X86ISD::VSHL";
28285 case X86ISD::VSRL: return "X86ISD::VSRL";
28286 case X86ISD::VSRA: return "X86ISD::VSRA";
28287 case X86ISD::VSHLI: return "X86ISD::VSHLI";
28288 case X86ISD::VSRLI: return "X86ISD::VSRLI";
28289 case X86ISD::VSRAI: return "X86ISD::VSRAI";
28290 case X86ISD::VSHLV: return "X86ISD::VSHLV";
28291 case X86ISD::VSRLV: return "X86ISD::VSRLV";
28292 case X86ISD::VSRAV: return "X86ISD::VSRAV";
28293 case X86ISD::VROTLI: return "X86ISD::VROTLI";
28294 case X86ISD::VROTRI: return "X86ISD::VROTRI";
28295 case X86ISD::VPPERM: return "X86ISD::VPPERM";
28296 case X86ISD::CMPP: return "X86ISD::CMPP";
28297 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
28298 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
28299 case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
28300 case X86ISD::ADD: return "X86ISD::ADD";
28301 case X86ISD::SUB: return "X86ISD::SUB";
28302 case X86ISD::ADC: return "X86ISD::ADC";
28303 case X86ISD::SBB: return "X86ISD::SBB";
28304 case X86ISD::SMUL: return "X86ISD::SMUL";
28305 case X86ISD::UMUL: return "X86ISD::UMUL";
28306 case X86ISD::OR: return "X86ISD::OR";
28307 case X86ISD::XOR: return "X86ISD::XOR";
28308 case X86ISD::AND: return "X86ISD::AND";
28309 case X86ISD::BEXTR: return "X86ISD::BEXTR";
28310 case X86ISD::BZHI: return "X86ISD::BZHI";
28311 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
28312 case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
28313 case X86ISD::PTEST: return "X86ISD::PTEST";
28314 case X86ISD::TESTP: return "X86ISD::TESTP";
28315 case X86ISD::KORTEST: return "X86ISD::KORTEST";
28316 case X86ISD::KTEST: return "X86ISD::KTEST";
28317 case X86ISD::KADD: return "X86ISD::KADD";
28318 case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
28319 case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
28320 case X86ISD::PACKSS: return "X86ISD::PACKSS";
28321 case X86ISD::PACKUS: return "X86ISD::PACKUS";
28322 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
28323 case X86ISD::VALIGN: return "X86ISD::VALIGN";
28324 case X86ISD::VSHLD: return "X86ISD::VSHLD";
28325 case X86ISD::VSHRD: return "X86ISD::VSHRD";
28326 case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
28327 case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
28328 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
28329 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
28330 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
28331 case X86ISD::SHUFP: return "X86ISD::SHUFP";
28332 case X86ISD::SHUF128: return "X86ISD::SHUF128";
28333 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
28334 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
28335 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
28336 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
28337 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
28338 case X86ISD::MOVSD: return "X86ISD::MOVSD";
28339 case X86ISD::MOVSS: return "X86ISD::MOVSS";
28340 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
28341 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
28342 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
28343 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
28344 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
28345 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
28346 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
28347 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
28348 case X86ISD::VPERMV: return "X86ISD::VPERMV";
28349 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
28350 case X86ISD::VPERMI: return "X86ISD::VPERMI";
28351 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
28352 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
28353 case X86ISD::VFIXUPIMM_SAE: return "X86ISD::VFIXUPIMM_SAE";
28354 case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
28355 case X86ISD::VFIXUPIMMS_SAE: return "X86ISD::VFIXUPIMMS_SAE";
28356 case X86ISD::VRANGE: return "X86ISD::VRANGE";
28357 case X86ISD::VRANGE_SAE: return "X86ISD::VRANGE_SAE";
28358 case X86ISD::VRANGES: return "X86ISD::VRANGES";
28359 case X86ISD::VRANGES_SAE: return "X86ISD::VRANGES_SAE";
28360 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
28361 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
28362 case X86ISD::PSADBW: return "X86ISD::PSADBW";
28363 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
28364 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
28365 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
28366 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
28367 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
28368 case X86ISD::MFENCE: return "X86ISD::MFENCE";
28369 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
28370 case X86ISD::SAHF: return "X86ISD::SAHF";
28371 case X86ISD::RDRAND: return "X86ISD::RDRAND";
28372 case X86ISD::RDSEED: return "X86ISD::RDSEED";
28373 case X86ISD::RDPKRU: return "X86ISD::RDPKRU";
28374 case X86ISD::WRPKRU: return "X86ISD::WRPKRU";
28375 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
28376 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
28377 case X86ISD::VPSHA: return "X86ISD::VPSHA";
28378 case X86ISD::VPSHL: return "X86ISD::VPSHL";
28379 case X86ISD::VPCOM: return "X86ISD::VPCOM";
28380 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
28381 case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
28382 case X86ISD::FMSUB: return "X86ISD::FMSUB";
28383 case X86ISD::FNMADD: return "X86ISD::FNMADD";
28384 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
28385 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
28386 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
28387 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
28388 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
28389 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
28390 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
28391 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
28392 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
28393 case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
28394 case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
28395 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
28396 case X86ISD::VRNDSCALE_SAE: return "X86ISD::VRNDSCALE_SAE";
28397 case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
28398 case X86ISD::VRNDSCALES_SAE: return "X86ISD::VRNDSCALES_SAE";
28399 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
28400 case X86ISD::VREDUCE_SAE: return "X86ISD::VREDUCE_SAE";
28401 case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
28402 case X86ISD::VREDUCES_SAE: return "X86ISD::VREDUCES_SAE";
28403 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
28404 case X86ISD::VGETMANT_SAE: return "X86ISD::VGETMANT_SAE";
28405 case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
28406 case X86ISD::VGETMANTS_SAE: return "X86ISD::VGETMANTS_SAE";
28407 case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
28408 case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
28409 case X86ISD::XTEST: return "X86ISD::XTEST";
28410 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
28411 case X86ISD::EXPAND: return "X86ISD::EXPAND";
28412 case X86ISD::SELECTS: return "X86ISD::SELECTS";
28413 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
28414 case X86ISD::RCP14: return "X86ISD::RCP14";
28415 case X86ISD::RCP14S: return "X86ISD::RCP14S";
28416 case X86ISD::RCP28: return "X86ISD::RCP28";
28417 case X86ISD::RCP28_SAE: return "X86ISD::RCP28_SAE";
28418 case X86ISD::RCP28S: return "X86ISD::RCP28S";
28419 case X86ISD::RCP28S_SAE: return "X86ISD::RCP28S_SAE";
28420 case X86ISD::EXP2: return "X86ISD::EXP2";
28421 case X86ISD::EXP2_SAE: return "X86ISD::EXP2_SAE";
28422 case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
28423 case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
28424 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
28425 case X86ISD::RSQRT28_SAE: return "X86ISD::RSQRT28_SAE";
28426 case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
28427 case X86ISD::RSQRT28S_SAE: return "X86ISD::RSQRT28S_SAE";
28428 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
28429 case X86ISD::FADDS: return "X86ISD::FADDS";
28430 case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
28431 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
28432 case X86ISD::FSUBS: return "X86ISD::FSUBS";
28433 case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
28434 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
28435 case X86ISD::FMULS: return "X86ISD::FMULS";
28436 case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
28437 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
28438 case X86ISD::FDIVS: return "X86ISD::FDIVS";
28439 case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
28440 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
28441 case X86ISD::FSQRTS: return "X86ISD::FSQRTS";
28442 case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
28443 case X86ISD::FGETEXP: return "X86ISD::FGETEXP";
28444 case X86ISD::FGETEXP_SAE: return "X86ISD::FGETEXP_SAE";
28445 case X86ISD::FGETEXPS: return "X86ISD::FGETEXPS";
28446 case X86ISD::FGETEXPS_SAE: return "X86ISD::FGETEXPS_SAE";
28447 case X86ISD::SCALEF: return "X86ISD::SCALEF";
28448 case X86ISD::SCALEF_RND: return "X86ISD::SCALEF_RND";
28449 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
28450 case X86ISD::SCALEFS_RND: return "X86ISD::SCALEFS_RND";
28451 case X86ISD::AVG: return "X86ISD::AVG";
28452 case X86ISD::MULHRS: return "X86ISD::MULHRS";
28453 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
28454 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
28455 case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
28456 case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
28457 case X86ISD::MCVTTP2SI: return "X86ISD::MCVTTP2SI";
28458 case X86ISD::MCVTTP2UI: return "X86ISD::MCVTTP2UI";
28459 case X86ISD::CVTTP2SI_SAE: return "X86ISD::CVTTP2SI_SAE";
28460 case X86ISD::CVTTP2UI_SAE: return "X86ISD::CVTTP2UI_SAE";
28461 case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI";
28462 case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI";
28463 case X86ISD::CVTTS2SI_SAE: return "X86ISD::CVTTS2SI_SAE";
28464 case X86ISD::CVTTS2UI_SAE: return "X86ISD::CVTTS2UI_SAE";
28465 case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
28466 case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
28467 case X86ISD::MCVTSI2P: return "X86ISD::MCVTSI2P";
28468 case X86ISD::MCVTUI2P: return "X86ISD::MCVTUI2P";
28469 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
28470 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
28471 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
28472 case X86ISD::SCALAR_SINT_TO_FP: return "X86ISD::SCALAR_SINT_TO_FP";
28473 case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
28474 case X86ISD::SCALAR_UINT_TO_FP: return "X86ISD::SCALAR_UINT_TO_FP";
28475 case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
28476 case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
28477 case X86ISD::MCVTPS2PH: return "X86ISD::MCVTPS2PH";
28478 case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
28479 case X86ISD::CVTPH2PS_SAE: return "X86ISD::CVTPH2PS_SAE";
28480 case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
28481 case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
28482 case X86ISD::MCVTP2SI: return "X86ISD::MCVTP2SI";
28483 case X86ISD::MCVTP2UI: return "X86ISD::MCVTP2UI";
28484 case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
28485 case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
28486 case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI";
28487 case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI";
28488 case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
28489 case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
28490 case X86ISD::CVTNE2PS2BF16: return "X86ISD::CVTNE2PS2BF16";
28491 case X86ISD::CVTNEPS2BF16: return "X86ISD::CVTNEPS2BF16";
28492 case X86ISD::MCVTNEPS2BF16: return "X86ISD::MCVTNEPS2BF16";
28493 case X86ISD::DPBF16PS: return "X86ISD::DPBF16PS";
28494 case X86ISD::LWPINS: return "X86ISD::LWPINS";
28495 case X86ISD::MGATHER: return "X86ISD::MGATHER";
28496 case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
28497 case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
28498 case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
28499 case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
28500 case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
28501 case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
28502 case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
28503 case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
28504 case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
28505 case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
28506 case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
28507 case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
28508 case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
28509 case X86ISD::ENQCMD: return "X86ISD::ENQCMD";
28510 case X86ISD::ENQCMDS: return "X86ISD::ENQCMDS";
28511 case X86ISD::VP2INTERSECT: return "X86ISD::VP2INTERSECT";
28513 return nullptr;
28516 /// Return true if the addressing mode represented by AM is legal for this
28517 /// target, for a load/store of the specified type.
28518 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
28519 const AddrMode &AM, Type *Ty,
28520 unsigned AS,
28521 Instruction *I) const {
28522 // X86 supports extremely general addressing modes.
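// Roughly, an x86 address is [BaseGV + BaseReg + Scale*IndexReg + Disp],
// e.g. "movl sym+16(%rax,%rcx,4), %edx"; the checks below reject the few
// combinations that the encoding or the current code model cannot express.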
28523 CodeModel::Model M = getTargetMachine().getCodeModel();
28525 // X86 allows a sign-extended 32-bit immediate field as a displacement.
28526 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
28527 return false;
28529 if (AM.BaseGV) {
28530 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
28532 // If a reference to this global requires an extra load, we can't fold it.
28533 if (isGlobalStubReference(GVFlags))
28534 return false;
28536 // If BaseGV requires a register for the PIC base, we cannot also have a
28537 // BaseReg specified.
28538 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
28539 return false;
28541 // If lower 4G is not available, then we must use rip-relative addressing.
28542 if ((M != CodeModel::Small || isPositionIndependent()) &&
28543 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
28544 return false;
28547 switch (AM.Scale) {
28548 case 0:
28549 case 1:
28550 case 2:
28551 case 4:
28552 case 8:
28553 // These scales always work.
28554 break;
28555 case 3:
28556 case 5:
28557 case 9:
28558 // These scales are formed with basereg+scalereg. Only accept if there is
28559 // no basereg yet.
28560 if (AM.HasBaseReg)
28561 return false;
28562 break;
28563 default: // Other stuff never works.
28564 return false;
28567 return true;
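// Return true when shifting a vector by a single, uniform scalar amount is
// noticeably cheaper for this type than shifting by a fully general
// per-element vector of amounts.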
28570 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
28571 unsigned Bits = Ty->getScalarSizeInBits();
28573 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
28574 // particularly cheaper than those without.
28575 if (Bits == 8)
28576 return false;
28578 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
28579 if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
28580 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
28581 return false;
28583 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
28584 // shifts just as cheap as scalar ones.
28585 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
28586 return false;
28588 // AVX512BW has shifts such as vpsllvw.
28589 if (Subtarget.hasBWI() && Bits == 16)
28590 return false;
28592 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
28593 // fully general vector.
28594 return true;
28597 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
28598 switch (Opcode) {
28599 // These are non-commutative binops.
28600 // TODO: Add more X86ISD opcodes once we have test coverage.
28601 case X86ISD::ANDNP:
28602 case X86ISD::PCMPGT:
28603 case X86ISD::FMAX:
28604 case X86ISD::FMIN:
28605 case X86ISD::FANDN:
28606 return true;
28609 return TargetLoweringBase::isBinOp(Opcode);
28612 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
28613 switch (Opcode) {
28614 // TODO: Add more X86ISD opcodes once we have test coverage.
28615 case X86ISD::PCMPEQ:
28616 case X86ISD::PMULDQ:
28617 case X86ISD::PMULUDQ:
28618 case X86ISD::FMAXC:
28619 case X86ISD::FMINC:
28620 case X86ISD::FAND:
28621 case X86ISD::FOR:
28622 case X86ISD::FXOR:
28623 return true;
28626 return TargetLoweringBase::isCommutativeBinOp(Opcode);
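// Truncating a wider integer type to a narrower one is free on x86: the
// narrow value is just the low sub-register (e.g. %eax is the low 32 bits
// of %rax), so no instruction is needed.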
28629 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
28630 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
28631 return false;
28632 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
28633 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
28634 return NumBits1 > NumBits2;
28637 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
28638 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
28639 return false;
28641 if (!isTypeLegal(EVT::getEVT(Ty1)))
28642 return false;
28644 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
28646 // Assuming the caller doesn't have a zeroext or signext return parameter,
28647 // truncation all the way down to i1 is valid.
28648 return true;
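// cmp/test instructions encode their immediate as a sign-extended 32-bit
// field, so any value representable as an int32 is a legal compare immediate.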
28651 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
28652 return isInt<32>(Imm);
28655 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
28656 // Can also use sub to handle negated immediates.
28657 return isInt<32>(Imm);
28660 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
28661 return isInt<32>(Imm);
28664 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
28665 if (!VT1.isInteger() || !VT2.isInteger())
28666 return false;
28667 unsigned NumBits1 = VT1.getSizeInBits();
28668 unsigned NumBits2 = VT2.getSizeInBits();
28669 return NumBits1 > NumBits2;
28672 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
28673 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
28674 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
28677 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
28678 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
28679 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
28682 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
28683 EVT VT1 = Val.getValueType();
28684 if (isZExtFree(VT1, VT2))
28685 return true;
28687 if (Val.getOpcode() != ISD::LOAD)
28688 return false;
28690 if (!VT1.isSimple() || !VT1.isInteger() ||
28691 !VT2.isSimple() || !VT2.isInteger())
28692 return false;
28694 switch (VT1.getSimpleVT().SimpleTy) {
28695 default: break;
28696 case MVT::i8:
28697 case MVT::i16:
28698 case MVT::i32:
28699 // X86 has 8, 16, and 32-bit zero-extending loads.
28700 return true;
28703 return false;
28706 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
28707 EVT SrcVT = ExtVal.getOperand(0).getValueType();
28709 // There is no extending load for vXi1.
28710 if (SrcVT.getScalarType() == MVT::i1)
28711 return false;
28713 return true;
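// With any FMA support (FMA3 or FMA4) on the subtarget, a fused multiply-add
// is preferable to a separate FMUL + FADD for f32/f64 scalars and vectors
// thereof, so report it as such.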
28716 bool
28717 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
28718 if (!Subtarget.hasAnyFMA())
28719 return false;
28721 VT = VT.getScalarType();
28723 if (!VT.isSimple())
28724 return false;
28726 switch (VT.getSimpleVT().SimpleTy) {
28727 case MVT::f32:
28728 case MVT::f64:
28729 return true;
28730 default:
28731 break;
28734 return false;
28737 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
28738 // i16 instructions are longer (0x66 prefix) and potentially slower.
28739 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
28742 /// Targets can use this to indicate that they only support *some*
28743 /// VECTOR_SHUFFLE operations, those with specific masks.
28744 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
28745 /// are assumed to be legal.
28746 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
28747 if (!VT.isSimple())
28748 return false;
28750 // Not for i1 vectors
28751 if (VT.getSimpleVT().getScalarType() == MVT::i1)
28752 return false;
28754 // Very little shuffling can be done for 64-bit vectors right now.
28755 if (VT.getSimpleVT().getSizeInBits() == 64)
28756 return false;
28758 // We only care that the types being shuffled are legal. The lowering can
28759 // handle any possible shuffle mask that results.
28760 return isTypeLegal(VT.getSimpleVT());
28763 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
28764 EVT VT) const {
28765 // Don't convert an 'and' into a shuffle that we don't directly support.
28766 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
28767 if (!Subtarget.hasAVX2())
28768 if (VT == MVT::v32i8 || VT == MVT::v16i16)
28769 return false;
28771 // Just delegate to the generic legality, clear masks aren't special.
28772 return isShuffleMaskLegal(Mask, VT);
28775 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
28776 // If the subtarget is using retpolines, we must not generate jump tables: they lower to indirect branches, which retpolines are meant to avoid.
28777 if (Subtarget.useRetpolineIndirectBranches())
28778 return false;
28780 // Otherwise, fallback on the generic logic.
28781 return TargetLowering::areJTsAllowed(Fn);
28784 //===----------------------------------------------------------------------===//
28785 // X86 Scheduler Hooks
28786 //===----------------------------------------------------------------------===//
28788 /// Utility function to emit xbegin specifying the start of an RTM region.
28789 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
28790 const TargetInstrInfo *TII) {
28791 DebugLoc DL = MI.getDebugLoc();
28793 const BasicBlock *BB = MBB->getBasicBlock();
28794 MachineFunction::iterator I = ++MBB->getIterator();
28796 // For the v = xbegin(), we generate
28798 // thisMBB:
28799 // xbegin sinkMBB
28801 // mainMBB:
28802 // s0 = -1
28804 // fallBB:
28805 // eax = # XABORT_DEF
28806 // s1 = eax
28808 // sinkMBB:
28809 // v = phi(s0/mainBB, s1/fallBB)
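// On a successful XBEGIN the transaction starts and execution falls through
// to mainMBB; if the transaction aborts, hardware transfers control to the
// fallback path given to XBEGIN (fallMBB here) with the abort status in EAX.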
28811 MachineBasicBlock *thisMBB = MBB;
28812 MachineFunction *MF = MBB->getParent();
28813 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
28814 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
28815 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
28816 MF->insert(I, mainMBB);
28817 MF->insert(I, fallMBB);
28818 MF->insert(I, sinkMBB);
28820 // Transfer the remainder of BB and its successor edges to sinkMBB.
28821 sinkMBB->splice(sinkMBB->begin(), MBB,
28822 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
28823 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
28825 MachineRegisterInfo &MRI = MF->getRegInfo();
28826 unsigned DstReg = MI.getOperand(0).getReg();
28827 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
28828 unsigned mainDstReg = MRI.createVirtualRegister(RC);
28829 unsigned fallDstReg = MRI.createVirtualRegister(RC);
28831 // thisMBB:
28832 // xbegin fallMBB
28833 // # fallthrough to mainMBB
28834 // # on abort, control transfers to fallMBB
28835 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
28836 thisMBB->addSuccessor(mainMBB);
28837 thisMBB->addSuccessor(fallMBB);
28839 // mainMBB:
28840 // mainDstReg := -1
28841 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
28842 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
28843 mainMBB->addSuccessor(sinkMBB);
28845 // fallMBB:
28846 // ; pseudo instruction to model hardware's definition from XABORT
28847 // EAX := XABORT_DEF
28848 // fallDstReg := EAX
28849 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
28850 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
28851 .addReg(X86::EAX);
28852 fallMBB->addSuccessor(sinkMBB);
28854 // sinkMBB:
28855 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
28856 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
28857 .addReg(mainDstReg).addMBB(mainMBB)
28858 .addReg(fallDstReg).addMBB(fallMBB);
28860 MI.eraseFromParent();
28861 return sinkMBB;
28866 MachineBasicBlock *
28867 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
28868 MachineBasicBlock *MBB) const {
28869 // Emit va_arg instruction on X86-64.
28871 // Operands to this pseudo-instruction:
28872 // 0 ) Output : destination address (reg)
28873 // 1-5) Input : va_list address (addr, i64mem)
28874 // 6 ) ArgSize : Size (in bytes) of vararg type
28875 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
28876 // 8 ) Align : Alignment of type
28877 // 9 ) EFLAGS (implicit-def)
28879 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
28880 static_assert(X86::AddrNumOperands == 5,
28881 "VAARG_64 assumes 5 address operands");
28883 unsigned DestReg = MI.getOperand(0).getReg();
28884 MachineOperand &Base = MI.getOperand(1);
28885 MachineOperand &Scale = MI.getOperand(2);
28886 MachineOperand &Index = MI.getOperand(3);
28887 MachineOperand &Disp = MI.getOperand(4);
28888 MachineOperand &Segment = MI.getOperand(5);
28889 unsigned ArgSize = MI.getOperand(6).getImm();
28890 unsigned ArgMode = MI.getOperand(7).getImm();
28891 unsigned Align = MI.getOperand(8).getImm();
28893 MachineFunction *MF = MBB->getParent();
28895 // Memory Reference
28896 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
28898 MachineMemOperand *OldMMO = MI.memoperands().front();
28900 // Clone the MMO into two separate MMOs for loading and storing
28901 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
28902 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
28903 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
28904 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
28906 // Machine Information
28907 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
28908 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
28909 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
28910 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
28911 DebugLoc DL = MI.getDebugLoc();
28913 // struct va_list {
28914 // i32 gp_offset
28915 // i32 fp_offset
28916 // i64 overflow_area (address)
28917 // i64 reg_save_area (address)
28918 // }
28919 // sizeof(va_list) = 24
28920 // alignment(va_list) = 8
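// In the SysV x86-64 ABI the register save area holds the 6 integer argument
// registers (6 * 8 = 48 bytes) followed by the 8 vector argument registers
// (XMM0-XMM7, 8 * 16 = 128 bytes); gp_offset/fp_offset index into that area.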
28922 unsigned TotalNumIntRegs = 6;
28923 unsigned TotalNumXMMRegs = 8;
28924 bool UseGPOffset = (ArgMode == 1);
28925 bool UseFPOffset = (ArgMode == 2);
28926 unsigned MaxOffset = TotalNumIntRegs * 8 +
28927 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
28929 /* Align ArgSize to a multiple of 8 */
28930 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
28931 bool NeedsAlign = (Align > 8);
28933 MachineBasicBlock *thisMBB = MBB;
28934 MachineBasicBlock *overflowMBB;
28935 MachineBasicBlock *offsetMBB;
28936 MachineBasicBlock *endMBB;
28938 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
28939 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
28940 unsigned OffsetReg = 0;
28942 if (!UseGPOffset && !UseFPOffset) {
28943 // If we only pull from the overflow region, we don't create a branch.
28944 // We don't need to alter control flow.
28945 OffsetDestReg = 0; // unused
28946 OverflowDestReg = DestReg;
28948 offsetMBB = nullptr;
28949 overflowMBB = thisMBB;
28950 endMBB = thisMBB;
28951 } else {
28952 // First emit code to check if gp_offset (or fp_offset) is below the bound.
28953 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
28954 // If not, pull from overflow_area. (branch to overflowMBB)
28956 // thisMBB
28957 // | .
28958 // | .
28959 // offsetMBB overflowMBB
28960 // | .
28961 // | .
28962 // endMBB
28964 // Registers for the PHI in endMBB
28965 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
28966 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
28968 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
28969 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
28970 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
28971 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
28973 MachineFunction::iterator MBBIter = ++MBB->getIterator();
28975 // Insert the new basic blocks
28976 MF->insert(MBBIter, offsetMBB);
28977 MF->insert(MBBIter, overflowMBB);
28978 MF->insert(MBBIter, endMBB);
28980 // Transfer the remainder of MBB and its successor edges to endMBB.
28981 endMBB->splice(endMBB->begin(), thisMBB,
28982 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
28983 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
28985 // Make offsetMBB and overflowMBB successors of thisMBB
28986 thisMBB->addSuccessor(offsetMBB);
28987 thisMBB->addSuccessor(overflowMBB);
28989 // endMBB is a successor of both offsetMBB and overflowMBB
28990 offsetMBB->addSuccessor(endMBB);
28991 overflowMBB->addSuccessor(endMBB);
28993 // Load the offset value into a register
28994 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
28995 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
28996 .add(Base)
28997 .add(Scale)
28998 .add(Index)
28999 .addDisp(Disp, UseFPOffset ? 4 : 0)
29000 .add(Segment)
29001 .setMemRefs(LoadOnlyMMO);
29003 // Check if there is enough room left to pull this argument.
29004 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
29005 .addReg(OffsetReg)
29006 .addImm(MaxOffset + 8 - ArgSizeA8);
29008 // Branch to "overflowMBB" if offset >= max
29009 // Fall through to "offsetMBB" otherwise
29010 BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
29011 .addMBB(overflowMBB).addImm(X86::COND_AE);
29014 // In offsetMBB, emit code to use the reg_save_area.
29015 if (offsetMBB) {
29016 assert(OffsetReg != 0);
29018 // Read the reg_save_area address.
29019 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
29020 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
29021 .add(Base)
29022 .add(Scale)
29023 .add(Index)
29024 .addDisp(Disp, 16)
29025 .add(Segment)
29026 .setMemRefs(LoadOnlyMMO);
29028 // Zero-extend the offset
29029 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
29030 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
29031 .addImm(0)
29032 .addReg(OffsetReg)
29033 .addImm(X86::sub_32bit);
29035 // Add the offset to the reg_save_area to get the final address.
29036 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
29037 .addReg(OffsetReg64)
29038 .addReg(RegSaveReg);
29040 // Compute the offset for the next argument
29041 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
29042 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
29043 .addReg(OffsetReg)
29044 .addImm(UseFPOffset ? 16 : 8);
29046 // Store it back into the va_list.
29047 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
29048 .add(Base)
29049 .add(Scale)
29050 .add(Index)
29051 .addDisp(Disp, UseFPOffset ? 4 : 0)
29052 .add(Segment)
29053 .addReg(NextOffsetReg)
29054 .setMemRefs(StoreOnlyMMO);
29056 // Jump to endMBB
29057 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
29058 .addMBB(endMBB);
29062 // Emit code to use overflow area
29065 // Load the overflow_area address into a register.
29066 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
29067 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
29068 .add(Base)
29069 .add(Scale)
29070 .add(Index)
29071 .addDisp(Disp, 8)
29072 .add(Segment)
29073 .setMemRefs(LoadOnlyMMO);
29075 // If we need to align it, do so. Otherwise, just copy the address
29076 // to OverflowDestReg.
29077 if (NeedsAlign) {
29078 // Align the overflow address
29079 assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
29080 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
29082 // aligned_addr = (addr + (align-1)) & ~(align-1)
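// e.g. with Align == 16: addr 0x1008 -> (0x1008 + 15) & ~15 == 0x1010.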
29083 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
29084 .addReg(OverflowAddrReg)
29085 .addImm(Align-1);
29087 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
29088 .addReg(TmpReg)
29089 .addImm(~(uint64_t)(Align-1));
29090 } else {
29091 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
29092 .addReg(OverflowAddrReg);
29095 // Compute the next overflow address after this argument.
29096 // (the overflow address should be kept 8-byte aligned)
29097 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
29098 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
29099 .addReg(OverflowDestReg)
29100 .addImm(ArgSizeA8);
29102 // Store the new overflow address.
29103 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
29104 .add(Base)
29105 .add(Scale)
29106 .add(Index)
29107 .addDisp(Disp, 8)
29108 .add(Segment)
29109 .addReg(NextAddrReg)
29110 .setMemRefs(StoreOnlyMMO);
29112 // If we branched, emit the PHI to the front of endMBB.
29113 if (offsetMBB) {
29114 BuildMI(*endMBB, endMBB->begin(), DL,
29115 TII->get(X86::PHI), DestReg)
29116 .addReg(OffsetDestReg).addMBB(offsetMBB)
29117 .addReg(OverflowDestReg).addMBB(overflowMBB);
29120 // Erase the pseudo instruction
29121 MI.eraseFromParent();
29123 return endMBB;
29126 MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
29127 MachineInstr &MI, MachineBasicBlock *MBB) const {
29128 // Emit code to save XMM registers to the stack. The ABI says that the
29129 // number of registers to save is given in %al, so it's theoretically
29130 // possible to do an indirect jump trick to avoid saving all of them;
29131 // however, this code takes a simpler approach and just executes all
29132 // of the stores if %al is non-zero. It's less code, it's probably
29133 // easier on the hardware branch predictor, and stores aren't all that
29134 // expensive anyway.
29136 // Create the new basic blocks. One block contains all the XMM stores,
29137 // and one block is the final destination regardless of whether any
29138 // stores were performed.
29139 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
29140 MachineFunction *F = MBB->getParent();
29141 MachineFunction::iterator MBBIter = ++MBB->getIterator();
29142 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
29143 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
29144 F->insert(MBBIter, XMMSaveMBB);
29145 F->insert(MBBIter, EndMBB);
29147 // Transfer the remainder of MBB and its successor edges to EndMBB.
29148 EndMBB->splice(EndMBB->begin(), MBB,
29149 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
29150 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
29152 // The original block will now fall through to the XMM save block.
29153 MBB->addSuccessor(XMMSaveMBB);
29154 // The XMMSaveMBB will fall through to the end block.
29155 XMMSaveMBB->addSuccessor(EndMBB);
29157 // Now add the instructions.
29158 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29159 DebugLoc DL = MI.getDebugLoc();
29161 unsigned CountReg = MI.getOperand(0).getReg();
29162 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
29163 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
29165 if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
29166 // If %al is 0, branch around the XMM save block.
29167 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
29168 BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
29169 MBB->addSuccessor(EndMBB);
29172 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
29173 // that was just emitted, but clearly shouldn't be "saved".
29174 assert((MI.getNumOperands() <= 3 ||
29175 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
29176 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
29177 "Expected last argument to be EFLAGS");
29178 unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
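// (V)MOVAPS performs an aligned 16-byte store; the save slots written below
// are 16-byte aligned (see the Align on the MachineMemOperand).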
29179 // In the XMM save block, save all the XMM argument registers.
29180 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
29181 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
29182 MachineMemOperand *MMO = F->getMachineMemOperand(
29183 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
29184 MachineMemOperand::MOStore,
29185 /*Size=*/16, /*Align=*/16);
29186 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
29187 .addFrameIndex(RegSaveFrameIndex)
29188 .addImm(/*Scale=*/1)
29189 .addReg(/*IndexReg=*/0)
29190 .addImm(/*Disp=*/Offset)
29191 .addReg(/*Segment=*/0)
29192 .addReg(MI.getOperand(i).getReg())
29193 .addMemOperand(MMO);
29196 MI.eraseFromParent(); // The pseudo instruction is gone now.
29198 return EndMBB;
29201 // The EFLAGS operand of SelectItr might be missing a kill marker
29202 // because there were multiple uses of EFLAGS, and ISel didn't know
29203 // which to mark. Figure out whether SelectItr should have had a
29204 // kill marker, and set it if it should. Returns the correct kill
29205 // marker value.
29206 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
29207 MachineBasicBlock* BB,
29208 const TargetRegisterInfo* TRI) {
29209 // Scan forward through BB for a use/def of EFLAGS.
29210 MachineBasicBlock::iterator miI(std::next(SelectItr));
29211 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
29212 const MachineInstr& mi = *miI;
29213 if (mi.readsRegister(X86::EFLAGS))
29214 return false;
29215 if (mi.definesRegister(X86::EFLAGS))
29216 break; // Should have kill-flag - update below.
29219 // If we hit the end of the block, check whether EFLAGS is live into a
29220 // successor.
29221 if (miI == BB->end()) {
29222 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
29223 sEnd = BB->succ_end();
29224 sItr != sEnd; ++sItr) {
29225 MachineBasicBlock* succ = *sItr;
29226 if (succ->isLiveIn(X86::EFLAGS))
29227 return false;
29231 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
29232 // out. SelectMI should have a kill flag on EFLAGS.
29233 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
29234 return true;
29237 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
29238 // together with other CMOV pseudo-opcodes into a single basic block with a
29239 // conditional jump around it.
29240 static bool isCMOVPseudo(MachineInstr &MI) {
29241 switch (MI.getOpcode()) {
29242 case X86::CMOV_FR32:
29243 case X86::CMOV_FR64:
29244 case X86::CMOV_GR8:
29245 case X86::CMOV_GR16:
29246 case X86::CMOV_GR32:
29247 case X86::CMOV_RFP32:
29248 case X86::CMOV_RFP64:
29249 case X86::CMOV_RFP80:
29250 case X86::CMOV_VR128:
29251 case X86::CMOV_VR128X:
29252 case X86::CMOV_VR256:
29253 case X86::CMOV_VR256X:
29254 case X86::CMOV_VR512:
29255 case X86::CMOV_VK2:
29256 case X86::CMOV_VK4:
29257 case X86::CMOV_VK8:
29258 case X86::CMOV_VK16:
29259 case X86::CMOV_VK32:
29260 case X86::CMOV_VK64:
29261 return true;
29263 default:
29264 return false;
29268 // Helper function that inserts PHI nodes into SinkMBB:
29269 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
29270 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
29271 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
29272 // the last PHI node inserted.
29273 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
29274 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
29275 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
29276 MachineBasicBlock *SinkMBB) {
29277 MachineFunction *MF = TrueMBB->getParent();
29278 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
29279 DebugLoc DL = MIItBegin->getDebugLoc();
29281 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
29282 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
29284 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
29286 // As we are creating the PHIs, we have to be careful if there is more than
29287 // one. Later CMOVs may reference the results of earlier CMOVs, but later
29288 // PHIs have to reference the individual true/false inputs from earlier PHIs.
29289 // That also means that PHI construction must work forward from earlier to
29290 // later, and that the code must maintain a mapping from each earlier PHI's
29291 // destination register to the registers that went into that PHI.
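// For example, for
//   t2 = CMOV cond t1, f1
//   t3 = CMOV cond t2, f2
// the PHI for t3 must use t1 rather than t2 as its incoming value on the path
// where t2 is really just a copy of t1; RegRewriteTable records that renaming.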
29292 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
29293 MachineInstrBuilder MIB;
29295 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
29296 unsigned DestReg = MIIt->getOperand(0).getReg();
29297 unsigned Op1Reg = MIIt->getOperand(1).getReg();
29298 unsigned Op2Reg = MIIt->getOperand(2).getReg();
29300 // If the CMOV we are generating has the opposite condition from
29301 // the jump we generated, then we have to swap the operands for the
29302 // PHI that is going to be generated.
29303 if (MIIt->getOperand(3).getImm() == OppCC)
29304 std::swap(Op1Reg, Op2Reg);
29306 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
29307 Op1Reg = RegRewriteTable[Op1Reg].first;
29309 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
29310 Op2Reg = RegRewriteTable[Op2Reg].second;
29312 MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
29313 .addReg(Op1Reg)
29314 .addMBB(FalseMBB)
29315 .addReg(Op2Reg)
29316 .addMBB(TrueMBB);
29318 // Add this PHI to the rewrite table.
29319 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
29322 return MIB;
29325 // Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
29326 MachineBasicBlock *
29327 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
29328 MachineInstr &SecondCascadedCMOV,
29329 MachineBasicBlock *ThisMBB) const {
29330 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29331 DebugLoc DL = FirstCMOV.getDebugLoc();
29333 // We lower cascaded CMOVs such as
29335 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
29337 // to two successive branches.
29339 // Without this, we would add a PHI between the two jumps, which ends up
29340 // creating a few copies all around. For instance, for
29342 // (sitofp (zext (fcmp une)))
29344 // we would generate:
29346 // ucomiss %xmm1, %xmm0
29347 // movss <1.0f>, %xmm0
29348 // movaps %xmm0, %xmm1
29349 // jne .LBB5_2
29350 // xorps %xmm1, %xmm1
29351 // .LBB5_2:
29352 // jp .LBB5_4
29353 // movaps %xmm1, %xmm0
29354 // .LBB5_4:
29355 // retq
29357 // because this custom-inserter would have generated:
29359 // A
29360 // | \
29361 // | B
29362 // | /
29363 // C
29364 // | \
29365 // | D
29366 // | /
29367 // E
29369 // A: X = ...; Y = ...
29370 // B: empty
29371 // C: Z = PHI [X, A], [Y, B]
29372 // D: empty
29373 // E: PHI [X, C], [Z, D]
29375 // If we lower both CMOVs in a single step, we can instead generate:
29377 // A
29378 // | \
29379 // | C
29380 // | /|
29381 // |/ |
29382 // | |
29383 // | D
29384 // | /
29385 // E
29387 // A: X = ...; Y = ...
29388 // D: empty
29389 // E: PHI [X, A], [X, C], [Y, D]
29391 // Which, in our sitofp/fcmp example, gives us something like:
29393 // ucomiss %xmm1, %xmm0
29394 // movss <1.0f>, %xmm0
29395 // jne .LBB5_4
29396 // jp .LBB5_4
29397 // xorps %xmm0, %xmm0
29398 // .LBB5_4:
29399 // retq
29402 // We lower cascaded CMOV into two successive branches to the same block.
29403 // EFLAGS is used by both, so mark it as live in the second.
29404 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
29405 MachineFunction *F = ThisMBB->getParent();
29406 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
29407 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
29408 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
29410 MachineFunction::iterator It = ++ThisMBB->getIterator();
29411 F->insert(It, FirstInsertedMBB);
29412 F->insert(It, SecondInsertedMBB);
29413 F->insert(It, SinkMBB);
29415 // For a cascaded CMOV, we lower it to two successive branches to
29416 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
29417 // the FirstInsertedMBB.
29418 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
29420 // If the EFLAGS register isn't dead in the terminator, then claim that it's
29421 // live into the sink and copy blocks.
29422 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29423 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
29424 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
29425 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
29426 SinkMBB->addLiveIn(X86::EFLAGS);
29429 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
29430 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
29431 std::next(MachineBasicBlock::iterator(FirstCMOV)),
29432 ThisMBB->end());
29433 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
29435 // Fallthrough block for ThisMBB.
29436 ThisMBB->addSuccessor(FirstInsertedMBB);
29437 // The true block target of the first branch is always SinkMBB.
29438 ThisMBB->addSuccessor(SinkMBB);
29439 // Fallthrough block for FirstInsertedMBB.
29440 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
29441 // The true block for the branch of FirstInsertedMBB.
29442 FirstInsertedMBB->addSuccessor(SinkMBB);
29443 // This is fallthrough.
29444 SecondInsertedMBB->addSuccessor(SinkMBB);
29446 // Create the conditional branch instructions.
29447 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
29448 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
29450 X86::CondCode SecondCC =
29451 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
29452 BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
29454 // SinkMBB:
29455 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
29456 unsigned DestReg = FirstCMOV.getOperand(0).getReg();
29457 unsigned Op1Reg = FirstCMOV.getOperand(1).getReg();
29458 unsigned Op2Reg = FirstCMOV.getOperand(2).getReg();
29459 MachineInstrBuilder MIB =
29460 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
29461 .addReg(Op1Reg)
29462 .addMBB(SecondInsertedMBB)
29463 .addReg(Op2Reg)
29464 .addMBB(ThisMBB);
29466 // SecondInsertedMBB provides the same incoming value as
29467 // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
29468 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
29469 // Copy the PHI result to the register defined by the second CMOV.
29470 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
29471 TII->get(TargetOpcode::COPY),
29472 SecondCascadedCMOV.getOperand(0).getReg())
29473 .addReg(FirstCMOV.getOperand(0).getReg());
29475 // Now remove the CMOVs.
29476 FirstCMOV.eraseFromParent();
29477 SecondCascadedCMOV.eraseFromParent();
29479 return SinkMBB;
29482 MachineBasicBlock *
29483 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
29484 MachineBasicBlock *ThisMBB) const {
29485 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29486 DebugLoc DL = MI.getDebugLoc();
29488 // To "insert" a SELECT_CC instruction, we actually have to insert the
29489 // diamond control-flow pattern. The incoming instruction knows the
29490 // destination vreg to set, the condition code register to branch on, the
29491 // true/false values to select between and a branch opcode to use.
29493 // ThisMBB:
29494 // ...
29495 // TrueVal = ...
29496 // cmpTY ccX, r1, r2
29497 // bCC copy1MBB
29498 // fallthrough --> FalseMBB
29500 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
29501 // as described above, by inserting a BB, and then making a PHI at the join
29502 // point to select the true and false operands of the CMOV in the PHI.
29504 // The code also handles two different cases of multiple CMOV opcodes
29505 // in a row.
29507 // Case 1:
29508 // In this case, there are multiple CMOVs in a row, all of which are based on
29509 // the same condition setting (or the exact opposite condition setting).
29510 // In this case we can lower all the CMOVs using a single inserted BB, and
29511 // then make a number of PHIs at the join point to model the CMOVs. The only
29512 // trickiness here is that in a case like:
29514 // t2 = CMOV cond1 t1, f1
29515 // t3 = CMOV cond1 t2, f2
29517 // when rewriting this into PHIs, we have to perform some renaming on the
29518 // temps since you cannot have a PHI operand refer to a PHI result earlier
29519 // in the same block. The "simple" but wrong lowering would be:
29521 // t2 = PHI t1(BB1), f1(BB2)
29522 // t3 = PHI t2(BB1), f2(BB2)
29524 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
29525 // renaming is to note that on the path through BB1, t2 is really just a
29526 // copy of t1, and do that renaming, properly generating:
29528 // t2 = PHI t1(BB1), f1(BB2)
29529 // t3 = PHI t1(BB1), f2(BB2)
29531 // Case 2:
29532 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
29533 // function - EmitLoweredCascadedSelect.
29535 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
29536 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
29537 MachineInstr *LastCMOV = &MI;
29538 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
29540 // First, check for case 1, where there are multiple CMOVs with the same
29541 // condition. Of the two cases of multiple CMOV lowerings, case 1 reduces the
29542 // number of jumps the most.
29544 if (isCMOVPseudo(MI)) {
29545 // See if we have a string of CMOVs with the same condition. Skip over
29546 // intervening debug insts.
29547 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
29548 (NextMIIt->getOperand(3).getImm() == CC ||
29549 NextMIIt->getOperand(3).getImm() == OppCC)) {
29550 LastCMOV = &*NextMIIt;
29551 ++NextMIIt;
29552 NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
29556 // Check for case 2, but only if we didn't already find
29557 // case 1, as indicated by LastCMOV still pointing at MI.
29558 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
29559 NextMIIt->getOpcode() == MI.getOpcode() &&
29560 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
29561 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
29562 NextMIIt->getOperand(1).isKill()) {
29563 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
29566 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
29567 MachineFunction *F = ThisMBB->getParent();
29568 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
29569 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
29571 MachineFunction::iterator It = ++ThisMBB->getIterator();
29572 F->insert(It, FalseMBB);
29573 F->insert(It, SinkMBB);
29575 // If the EFLAGS register isn't dead in the terminator, then claim that it's
29576 // live into the sink and copy blocks.
29577 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29578 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
29579 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
29580 FalseMBB->addLiveIn(X86::EFLAGS);
29581 SinkMBB->addLiveIn(X86::EFLAGS);
29584 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
29585 auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
29586 auto DbgIt = MachineBasicBlock::iterator(MI);
29587 while (DbgIt != DbgEnd) {
29588 auto Next = std::next(DbgIt);
29589 if (DbgIt->isDebugInstr())
29590 SinkMBB->push_back(DbgIt->removeFromParent());
29591 DbgIt = Next;
29594 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
29595 SinkMBB->splice(SinkMBB->end(), ThisMBB,
29596 std::next(MachineBasicBlock::iterator(LastCMOV)),
29597 ThisMBB->end());
29598 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
29600 // Fallthrough block for ThisMBB.
29601 ThisMBB->addSuccessor(FalseMBB);
29602 // The true block target of the first (or only) branch is always SinkMBB.
29603 ThisMBB->addSuccessor(SinkMBB);
29604 // Fallthrough block for FalseMBB.
29605 FalseMBB->addSuccessor(SinkMBB);
29607 // Create the conditional branch instruction.
29608 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
29610 // SinkMBB:
29611 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
29612 // ...
29613 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
29614 MachineBasicBlock::iterator MIItEnd =
29615 std::next(MachineBasicBlock::iterator(LastCMOV));
29616 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
29618 // Now remove the CMOV(s).
29619 ThisMBB->erase(MIItBegin, MIItEnd);
29621 return SinkMBB;
29624 MachineBasicBlock *
29625 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
29626 MachineBasicBlock *BB) const {
29627 MachineFunction *MF = BB->getParent();
29628 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29629 DebugLoc DL = MI.getDebugLoc();
29630 const BasicBlock *LLVM_BB = BB->getBasicBlock();
29632 assert(MF->shouldSplitStack());
29634 const bool Is64Bit = Subtarget.is64Bit();
29635 const bool IsLP64 = Subtarget.isTarget64BitLP64();
29637 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
29638 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
29640 // BB:
29641 // ... [Till the alloca]
29642 // If stacklet is not large enough, jump to mallocMBB
29644 // bumpMBB:
29645 // Allocate by subtracting from RSP
29646 // Jump to continueMBB
29648 // mallocMBB:
29649 // Allocate by call to runtime
29651 // continueMBB:
29652 // ...
29653 // [rest of original BB]
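// The current stacklet's limit is read from thread-local storage: %fs-relative
// on 64-bit (offset 0x70 for LP64, 0x40 for x32) and %gs-relative at offset
// 0x30 on 32-bit, matching TlsReg/TlsOffset above.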
29656 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29657 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29658 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29660 MachineRegisterInfo &MRI = MF->getRegInfo();
29661 const TargetRegisterClass *AddrRegClass =
29662 getRegClassFor(getPointerTy(MF->getDataLayout()));
29664 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
29665 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
29666 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
29667 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
29668 sizeVReg = MI.getOperand(1).getReg(),
29669 physSPReg =
29670 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
29672 MachineFunction::iterator MBBIter = ++BB->getIterator();
29674 MF->insert(MBBIter, bumpMBB);
29675 MF->insert(MBBIter, mallocMBB);
29676 MF->insert(MBBIter, continueMBB);
29678 continueMBB->splice(continueMBB->begin(), BB,
29679 std::next(MachineBasicBlock::iterator(MI)), BB->end());
29680 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
29682 // Add code to the main basic block to check if the stack limit has been hit,
29683 // and if so, jump to mallocMBB, otherwise to bumpMBB.
29684 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
29685 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
29686 .addReg(tmpSPVReg).addReg(sizeVReg);
29687 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
29688 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
29689 .addReg(SPLimitVReg);
29690 BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
29692 // bumpMBB simply decreases the stack pointer, since we know the current
29693 // stacklet has enough space.
29694 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
29695 .addReg(SPLimitVReg);
29696 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
29697 .addReg(SPLimitVReg);
29698 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
29700 // Calls into a routine in libgcc to allocate more space from the heap.
29701 const uint32_t *RegMask =
29702 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
29703 if (IsLP64) {
29704 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
29705 .addReg(sizeVReg);
29706 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
29707 .addExternalSymbol("__morestack_allocate_stack_space")
29708 .addRegMask(RegMask)
29709 .addReg(X86::RDI, RegState::Implicit)
29710 .addReg(X86::RAX, RegState::ImplicitDefine);
29711 } else if (Is64Bit) {
29712 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
29713 .addReg(sizeVReg);
29714 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
29715 .addExternalSymbol("__morestack_allocate_stack_space")
29716 .addRegMask(RegMask)
29717 .addReg(X86::EDI, RegState::Implicit)
29718 .addReg(X86::EAX, RegState::ImplicitDefine);
29719 } else {
29720 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
29721 .addImm(12);
29722 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
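// The 12-byte adjustment above plus this 4-byte push form a single 16-byte
// change to %esp, undone by the ADD32ri of 16 after the call (and likely kept
// at 16 to preserve stack alignment across the call).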
29723 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
29724 .addExternalSymbol("__morestack_allocate_stack_space")
29725 .addRegMask(RegMask)
29726 .addReg(X86::EAX, RegState::ImplicitDefine);
29729 if (!Is64Bit)
29730 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
29731 .addImm(16);
29733 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
29734 .addReg(IsLP64 ? X86::RAX : X86::EAX);
29735 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
29737 // Set up the CFG correctly.
29738 BB->addSuccessor(bumpMBB);
29739 BB->addSuccessor(mallocMBB);
29740 mallocMBB->addSuccessor(continueMBB);
29741 bumpMBB->addSuccessor(continueMBB);
29743 // Take care of the PHI nodes.
29744 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
29745 MI.getOperand(0).getReg())
29746 .addReg(mallocPtrVReg)
29747 .addMBB(mallocMBB)
29748 .addReg(bumpSPPtrVReg)
29749 .addMBB(bumpMBB);
29751 // Delete the original pseudo instruction.
29752 MI.eraseFromParent();
29754 // And we're done.
29755 return continueMBB;
29758 MachineBasicBlock *
29759 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
29760 MachineBasicBlock *BB) const {
29761 MachineFunction *MF = BB->getParent();
29762 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
29763 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
29764 DebugLoc DL = MI.getDebugLoc();
29766 assert(!isAsynchronousEHPersonality(
29767 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
29768 "SEH does not use catchret!");
29770 // Only 32-bit EH needs to worry about manually restoring stack pointers.
29771 if (!Subtarget.is32Bit())
29772 return BB;
29774 // C++ EH creates a new target block to hold the restore code, and wires up
29775 // the new block to the return destination with a normal JMP_4.
29776 MachineBasicBlock *RestoreMBB =
29777 MF->CreateMachineBasicBlock(BB->getBasicBlock());
29778 assert(BB->succ_size() == 1);
29779 MF->insert(std::next(BB->getIterator()), RestoreMBB);
29780 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
29781 BB->addSuccessor(RestoreMBB);
29782 MI.getOperand(0).setMBB(RestoreMBB);
29784 auto RestoreMBBI = RestoreMBB->begin();
29785 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
29786 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
29787 return BB;
29790 MachineBasicBlock *
29791 X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
29792 MachineBasicBlock *BB) const {
29793 MachineFunction *MF = BB->getParent();
29794 const Constant *PerFn = MF->getFunction().getPersonalityFn();
29795 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
29796 // Only 32-bit SEH requires special handling for catchpad.
29797 if (IsSEH && Subtarget.is32Bit()) {
29798 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
29799 DebugLoc DL = MI.getDebugLoc();
29800 BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
29802 MI.eraseFromParent();
29803 return BB;
29806 MachineBasicBlock *
29807 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
29808 MachineBasicBlock *BB) const {
29809 // So, here we replace TLSADDR with the sequence:
29810 // adjust_stackdown -> TLSADDR -> adjust_stackup.
29811 // We need this because TLSADDR is lowered into a call
29812 // inside MC, so without the two markers shrink-wrapping
29813 // may move the prologue/epilogue past them.
29814 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
29815 DebugLoc DL = MI.getDebugLoc();
29816 MachineFunction &MF = *BB->getParent();
29818 // Emit CALLSEQ_START right before the instruction.
29819 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
29820 MachineInstrBuilder CallseqStart =
29821 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
29822 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
29824 // Emit CALLSEQ_END right after the instruction.
29825 // We don't call erase from parent because we want to keep the
29826 // original instruction around.
29827 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
29828 MachineInstrBuilder CallseqEnd =
29829 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
29830 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
29832 return BB;
29835 MachineBasicBlock *
29836 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
29837 MachineBasicBlock *BB) const {
29838 // This is pretty easy. We're taking the value that we received from
29839 // our load from the relocation, sticking it in either RDI (x86-64)
29840 // or EAX and doing an indirect call. The return value will then
29841 // be in the normal return register.
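// As a rough sketch (not emitted verbatim here), the x86-64 Darwin sequence
// is conceptually:
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)            ## result comes back in %rax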
29842 MachineFunction *F = BB->getParent();
29843 const X86InstrInfo *TII = Subtarget.getInstrInfo();
29844 DebugLoc DL = MI.getDebugLoc();
29846 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
29847 assert(MI.getOperand(3).isGlobal() && "This should be a global");
29849 // Get a register mask for the lowered call.
29850 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
29851 // proper register mask.
29852 const uint32_t *RegMask =
29853 Subtarget.is64Bit() ?
29854 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
29855 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
29856 if (Subtarget.is64Bit()) {
29857 MachineInstrBuilder MIB =
29858 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
29859 .addReg(X86::RIP)
29860 .addImm(0)
29861 .addReg(0)
29862 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
29863 MI.getOperand(3).getTargetFlags())
29864 .addReg(0);
29865 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
29866 addDirectMem(MIB, X86::RDI);
29867 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
29868 } else if (!isPositionIndependent()) {
29869 MachineInstrBuilder MIB =
29870 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
29871 .addReg(0)
29872 .addImm(0)
29873 .addReg(0)
29874 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
29875 MI.getOperand(3).getTargetFlags())
29876 .addReg(0);
29877 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
29878 addDirectMem(MIB, X86::EAX);
29879 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
29880 } else {
29881 MachineInstrBuilder MIB =
29882 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
29883 .addReg(TII->getGlobalBaseReg(F))
29884 .addImm(0)
29885 .addReg(0)
29886 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
29887 MI.getOperand(3).getTargetFlags())
29888 .addReg(0);
29889 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
29890 addDirectMem(MIB, X86::EAX);
29891 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
29894 MI.eraseFromParent(); // The pseudo instruction is gone now.
29895 return BB;
29898 static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
29899 switch (RPOpc) {
29900 case X86::RETPOLINE_CALL32:
29901 return X86::CALLpcrel32;
29902 case X86::RETPOLINE_CALL64:
29903 return X86::CALL64pcrel32;
29904 case X86::RETPOLINE_TCRETURN32:
29905 return X86::TCRETURNdi;
29906 case X86::RETPOLINE_TCRETURN64:
29907 return X86::TCRETURNdi64;
29909 llvm_unreachable("not retpoline opcode");
29912 static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
29913 unsigned Reg) {
29914 if (Subtarget.useRetpolineExternalThunk()) {
29915 // When using an external thunk for retpolines, we pick names that match the
29916 // names GCC happens to use as well. This helps simplify the implementation
29917 // of the thunks for kernels where they have no easy ability to create
29918 // aliases and are doing non-trivial configuration of the thunk's body. For
29919 // example, the Linux kernel will do boot-time hot patching of the thunk
29920 // bodies and cannot easily export aliases of these to loaded modules.
29922 // Note that at any point in the future, we may need to change the semantics
29923 // of how we implement retpolines and at that time will likely change the
29924 // name of the called thunk. Essentially, there is no hard guarantee that
29925 // LLVM will generate calls to specific thunks; we merely make a best-effort
29926 // attempt to help out kernels and other systems where duplicating the
29927 // thunks is costly.
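// For reference only (the thunk bodies live outside LLVM), a typical
// __x86_indirect_thunk_r11 looks something like:
//   call 1f
// 2: pause; lfence; jmp 2b
// 1: movq %r11, (%rsp); ret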
29928 switch (Reg) {
29929 case X86::EAX:
29930 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29931 return "__x86_indirect_thunk_eax";
29932 case X86::ECX:
29933 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29934 return "__x86_indirect_thunk_ecx";
29935 case X86::EDX:
29936 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29937 return "__x86_indirect_thunk_edx";
29938 case X86::EDI:
29939 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29940 return "__x86_indirect_thunk_edi";
29941 case X86::R11:
29942 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
29943 return "__x86_indirect_thunk_r11";
29945 llvm_unreachable("unexpected reg for retpoline");
29948 // When targeting an internal COMDAT thunk, use an LLVM-specific name.
29949 switch (Reg) {
29950 case X86::EAX:
29951 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29952 return "__llvm_retpoline_eax";
29953 case X86::ECX:
29954 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29955 return "__llvm_retpoline_ecx";
29956 case X86::EDX:
29957 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29958 return "__llvm_retpoline_edx";
29959 case X86::EDI:
29960 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29961 return "__llvm_retpoline_edi";
29962 case X86::R11:
29963 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
29964 return "__llvm_retpoline_r11";
29966 llvm_unreachable("unexpected reg for retpoline");
29969 MachineBasicBlock *
29970 X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
29971 MachineBasicBlock *BB) const {
29972 // Copy the virtual register into the R11 physical register and
29973 // call the retpoline thunk.
29974 DebugLoc DL = MI.getDebugLoc();
29975 const X86InstrInfo *TII = Subtarget.getInstrInfo();
29976 unsigned CalleeVReg = MI.getOperand(0).getReg();
29977 unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
29979 // Find an available scratch register to hold the callee. On 64-bit, we can
29980 // just use R11, but we scan for uses anyway to ensure we don't generate
29981 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
29982 // already a register use operand to the call to hold the callee. If none
29983 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
29984 // register and ESI is the base pointer to realigned stack frames with VLAs.
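// For instance (illustrative only), a 32-bit call that already passes
// arguments in ECX and EDX would get its callee copied into EAX and end up
// targeting __llvm_retpoline_eax (or the GCC-named external thunk).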
29985 SmallVector<unsigned, 3> AvailableRegs;
29986 if (Subtarget.is64Bit())
29987 AvailableRegs.push_back(X86::R11);
29988 else
29989 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
29991 // Zero out any registers that are already used.
29992 for (const auto &MO : MI.operands()) {
29993 if (MO.isReg() && MO.isUse())
29994 for (unsigned &Reg : AvailableRegs)
29995 if (Reg == MO.getReg())
29996 Reg = 0;
29999 // Choose the first remaining non-zero available register.
30000 unsigned AvailableReg = 0;
30001 for (unsigned MaybeReg : AvailableRegs) {
30002 if (MaybeReg) {
30003 AvailableReg = MaybeReg;
30004 break;
30007 if (!AvailableReg)
30008 report_fatal_error("calling convention incompatible with retpoline, no "
30009 "available registers");
30011 const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
30013 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
30014 .addReg(CalleeVReg);
30015 MI.getOperand(0).ChangeToES(Symbol);
30016 MI.setDesc(TII->get(Opc));
30017 MachineInstrBuilder(*BB->getParent(), &MI)
30018 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
30019 return BB;
30022 /// SetJmp implies future control flow change upon calling the corresponding
30023 /// LongJmp.
30024 /// Instead of using the 'return' instruction, the long jump fixes the stack and
30025 /// performs an indirect branch. To do so it uses the registers that were stored
30026 /// in the jump buffer (when calling SetJmp).
30027 /// If the shadow stack is enabled, we need to fix it as well, because some
30028 /// return addresses will be skipped.
30029 /// The function will save the SSP for future fixing in the function
30030 /// emitLongJmpShadowStackFix.
30031 /// \sa emitLongJmpShadowStackFix
30032 /// \param [in] MI The temporary Machine Instruction for the builtin.
30033 /// \param [in] MBB The Machine Basic Block that will be modified.
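/// As used by this lowering, the jump buffer is laid out in pointer-sized
/// slots roughly as buf[0] = FP, buf[1] = IP (label), buf[2] = SP and
/// buf[3] = SSP; the SSPOffset constant below and the LabelOffset/SPOffset
/// constants in the setjmp/longjmp emitters reflect that layout.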
30034 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
30035 MachineBasicBlock *MBB) const {
30036 DebugLoc DL = MI.getDebugLoc();
30037 MachineFunction *MF = MBB->getParent();
30038 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30039 MachineRegisterInfo &MRI = MF->getRegInfo();
30040 MachineInstrBuilder MIB;
30042 // Memory Reference.
30043 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30044 MI.memoperands_end());
30046 // Initialize a register with zero.
30047 MVT PVT = getPointerTy(MF->getDataLayout());
30048 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30049 unsigned ZReg = MRI.createVirtualRegister(PtrRC);
30050 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
30051 BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
30052 .addDef(ZReg)
30053 .addReg(ZReg, RegState::Undef)
30054 .addReg(ZReg, RegState::Undef);
30056 // Read the current SSP Register value to the zeroed register.
30057 unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30058 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30059 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
30061 // Write the SSP register value to pointer-slot 3 of the input memory buffer.
30062 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30063 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
30064 const int64_t SSPOffset = 3 * PVT.getStoreSize();
30065 const unsigned MemOpndSlot = 1;
30066 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30067 if (i == X86::AddrDisp)
30068 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
30069 else
30070 MIB.add(MI.getOperand(MemOpndSlot + i));
30072 MIB.addReg(SSPCopyReg);
30073 MIB.setMemRefs(MMOs);
30076 MachineBasicBlock *
30077 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
30078 MachineBasicBlock *MBB) const {
30079 DebugLoc DL = MI.getDebugLoc();
30080 MachineFunction *MF = MBB->getParent();
30081 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30082 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30083 MachineRegisterInfo &MRI = MF->getRegInfo();
30085 const BasicBlock *BB = MBB->getBasicBlock();
30086 MachineFunction::iterator I = ++MBB->getIterator();
30088 // Memory Reference
30089 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30090 MI.memoperands_end());
30092 unsigned DstReg;
30093 unsigned MemOpndSlot = 0;
30095 unsigned CurOp = 0;
30097 DstReg = MI.getOperand(CurOp++).getReg();
30098 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30099 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
30100 (void)TRI;
30101 unsigned mainDstReg = MRI.createVirtualRegister(RC);
30102 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
30104 MemOpndSlot = CurOp;
30106 MVT PVT = getPointerTy(MF->getDataLayout());
30107 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30108 "Invalid Pointer Size!");
30110 // For v = setjmp(buf), we generate
30112 // thisMBB:
30113 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
30114 // SjLjSetup restoreMBB
30116 // mainMBB:
30117 // v_main = 0
30119 // sinkMBB:
30120 // v = phi(main, restore)
30122 // restoreMBB:
30123 // if base pointer being used, load it from frame
30124 // v_restore = 1
30126 MachineBasicBlock *thisMBB = MBB;
30127 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30128 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30129 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
30130 MF->insert(I, mainMBB);
30131 MF->insert(I, sinkMBB);
30132 MF->push_back(restoreMBB);
30133 restoreMBB->setHasAddressTaken();
30135 MachineInstrBuilder MIB;
30137 // Transfer the remainder of BB and its successor edges to sinkMBB.
30138 sinkMBB->splice(sinkMBB->begin(), MBB,
30139 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30140 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30142 // thisMBB:
30143 unsigned PtrStoreOpc = 0;
30144 unsigned LabelReg = 0;
30145 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30146 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30147 !isPositionIndependent();
30149 // Prepare IP either in reg or imm.
30150 if (!UseImmLabel) {
30151 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30152 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30153 LabelReg = MRI.createVirtualRegister(PtrRC);
30154 if (Subtarget.is64Bit()) {
30155 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
30156 .addReg(X86::RIP)
30157 .addImm(0)
30158 .addReg(0)
30159 .addMBB(restoreMBB)
30160 .addReg(0);
30161 } else {
30162 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
30163 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
30164 .addReg(XII->getGlobalBaseReg(MF))
30165 .addImm(0)
30166 .addReg(0)
30167 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
30168 .addReg(0);
30170 } else
30171 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30172 // Store IP
30173 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
30174 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30175 if (i == X86::AddrDisp)
30176 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
30177 else
30178 MIB.add(MI.getOperand(MemOpndSlot + i));
30180 if (!UseImmLabel)
30181 MIB.addReg(LabelReg);
30182 else
30183 MIB.addMBB(restoreMBB);
30184 MIB.setMemRefs(MMOs);
30186 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
30187 emitSetJmpShadowStackFix(MI, thisMBB);
30190 // Setup
30191 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
30192 .addMBB(restoreMBB);
30194 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30195 MIB.addRegMask(RegInfo->getNoPreservedMask());
30196 thisMBB->addSuccessor(mainMBB);
30197 thisMBB->addSuccessor(restoreMBB);
30199 // mainMBB:
30200 // EAX = 0
30201 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
30202 mainMBB->addSuccessor(sinkMBB);
30204 // sinkMBB:
30205 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
30206 TII->get(X86::PHI), DstReg)
30207 .addReg(mainDstReg).addMBB(mainMBB)
30208 .addReg(restoreDstReg).addMBB(restoreMBB);
30210 // restoreMBB:
30211 if (RegInfo->hasBasePointer(*MF)) {
30212 const bool Uses64BitFramePtr =
30213 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30214 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
30215 X86FI->setRestoreBasePointer(MF);
30216 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
30217 unsigned BasePtr = RegInfo->getBaseRegister();
30218 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
30219 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
30220 FramePtr, true, X86FI->getRestoreBasePointerOffset())
30221 .setMIFlag(MachineInstr::FrameSetup);
30223 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
30224 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30225 restoreMBB->addSuccessor(sinkMBB);
30227 MI.eraseFromParent();
30228 return sinkMBB;
30231 /// Fix the shadow stack using the previously saved SSP pointer.
30232 /// \sa emitSetJmpShadowStackFix
30233 /// \param [in] MI The temporary Machine Instruction for the builtin.
30234 /// \param [in] MBB The Machine Basic Block that will be modified.
30235 /// \return The sink MBB that will perform the future indirect branch.
30236 MachineBasicBlock *
30237 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
30238 MachineBasicBlock *MBB) const {
30239 DebugLoc DL = MI.getDebugLoc();
30240 MachineFunction *MF = MBB->getParent();
30241 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30242 MachineRegisterInfo &MRI = MF->getRegInfo();
30244 // Memory Reference
30245 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30246 MI.memoperands_end());
30248 MVT PVT = getPointerTy(MF->getDataLayout());
30249 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30251 // checkSspMBB:
30252 // xor vreg1, vreg1
30253 // rdssp vreg1
30254 // test vreg1, vreg1
30255 // je sinkMBB # Jump if Shadow Stack is not supported
30256 // fallMBB:
30257 // mov buf+24/12(%rip), vreg2
30258 // sub vreg1, vreg2
30259 // jbe sinkMBB # No need to fix the Shadow Stack
30260 // fixShadowMBB:
30261 // shr 3/2, vreg2
30262 // incssp vreg2 # fix the SSP according to the lower 8 bits
30263 // shr 8, vreg2
30264 // je sinkMBB
30265 // fixShadowLoopPrepareMBB:
30266 // shl vreg2
30267 // mov 128, vreg3
30268 // fixShadowLoopMBB:
30269 // incssp vreg3
30270 // dec vreg2
30271 // jne fixShadowLoopMBB # Iterate until you finish fixing
30272 // # the Shadow Stack
30273 // sinkMBB:
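// Worked example (64-bit, purely illustrative): if the saved SSP is 0x1330
// bytes above the current SSP, that is 0x1330 >> 3 = 0x266 slots. The first
// incssp consumes the low 8 bits (0x66 slots); the remaining 0x266 >> 8 = 2
// is shifted left once to give 4 loop iterations of incssp 128, i.e.
// another 0x200 slots, for 0x266 slots in total.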
30275 MachineFunction::iterator I = ++MBB->getIterator();
30276 const BasicBlock *BB = MBB->getBasicBlock();
30278 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
30279 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30280 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
30281 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
30282 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
30283 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30284 MF->insert(I, checkSspMBB);
30285 MF->insert(I, fallMBB);
30286 MF->insert(I, fixShadowMBB);
30287 MF->insert(I, fixShadowLoopPrepareMBB);
30288 MF->insert(I, fixShadowLoopMBB);
30289 MF->insert(I, sinkMBB);
30291 // Transfer the remainder of BB and its successor edges to sinkMBB.
30292 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
30293 MBB->end());
30294 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30296 MBB->addSuccessor(checkSspMBB);
30298 // Initialize a register with zero.
30299 unsigned ZReg = MRI.createVirtualRegister(PtrRC);
30300 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
30301 BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
30302 .addDef(ZReg)
30303 .addReg(ZReg, RegState::Undef)
30304 .addReg(ZReg, RegState::Undef);
30306 // Read the current SSP Register value to the zeroed register.
30307 unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30308 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30309 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
30311 // Check whether the value read from the SSP register is zero and jump
30312 // directly to the sink.
30313 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
30314 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
30315 .addReg(SSPCopyReg)
30316 .addReg(SSPCopyReg);
30317 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30318 checkSspMBB->addSuccessor(sinkMBB);
30319 checkSspMBB->addSuccessor(fallMBB);
30321 // Reload the previously saved SSP register value.
30322 unsigned PrevSSPReg = MRI.createVirtualRegister(PtrRC);
30323 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30324 const int64_t SSPOffset = 3 * PVT.getStoreSize();
30325 MachineInstrBuilder MIB =
30326 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
30327 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30328 const MachineOperand &MO = MI.getOperand(i);
30329 if (i == X86::AddrDisp)
30330 MIB.addDisp(MO, SSPOffset);
30331 else if (MO.isReg()) // Don't add the whole operand, we don't want to
30332 // preserve kill flags.
30333 MIB.addReg(MO.getReg());
30334 else
30335 MIB.add(MO);
30337 MIB.setMemRefs(MMOs);
30339 // Subtract the current SSP from the previous SSP.
30340 unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
30341 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
30342 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
30343 .addReg(PrevSSPReg)
30344 .addReg(SSPCopyReg);
30346 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
30347 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
30348 fallMBB->addSuccessor(sinkMBB);
30349 fallMBB->addSuccessor(fixShadowMBB);
30351 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
30352 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
30353 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
30354 unsigned SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
30355 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
30356 .addReg(SspSubReg)
30357 .addImm(Offset);
30359 // Increase the SSP using only the lower 8 bits of the delta.
30360 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
30361 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
30363 // Reset the lower 8 bits.
30364 unsigned SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
30365 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
30366 .addReg(SspFirstShrReg)
30367 .addImm(8);
30369 // Jump if the result of the shift is zero.
30370 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30371 fixShadowMBB->addSuccessor(sinkMBB);
30372 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
30374 // Do a single shift left.
30375 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
30376 unsigned SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
30377 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
30378 .addReg(SspSecondShrReg);
30380 // Save the value 128 to a register (will be used next with incssp).
30381 unsigned Value128InReg = MRI.createVirtualRegister(PtrRC);
30382 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
30383 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
30384 .addImm(128);
30385 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
30387 // Since incssp only looks at the lower 8 bits, we might need to do several
30388 // iterations of incssp until we finish fixing the shadow stack.
30389 unsigned DecReg = MRI.createVirtualRegister(PtrRC);
30390 unsigned CounterReg = MRI.createVirtualRegister(PtrRC);
30391 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
30392 .addReg(SspAfterShlReg)
30393 .addMBB(fixShadowLoopPrepareMBB)
30394 .addReg(DecReg)
30395 .addMBB(fixShadowLoopMBB);
30397 // Every iteration we increase the SSP by 128.
30398 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
30400 // Every iteration we decrement the counter by 1.
30401 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
30402 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
30404 // Jump if the counter is not zero yet.
30405 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
30406 fixShadowLoopMBB->addSuccessor(sinkMBB);
30407 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
30409 return sinkMBB;
30412 MachineBasicBlock *
30413 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
30414 MachineBasicBlock *MBB) const {
30415 DebugLoc DL = MI.getDebugLoc();
30416 MachineFunction *MF = MBB->getParent();
30417 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30418 MachineRegisterInfo &MRI = MF->getRegInfo();
30420 // Memory Reference
30421 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30422 MI.memoperands_end());
30424 MVT PVT = getPointerTy(MF->getDataLayout());
30425 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30426 "Invalid Pointer Size!");
30428 const TargetRegisterClass *RC =
30429 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30430 unsigned Tmp = MRI.createVirtualRegister(RC);
30431 // Since FP is only updated here but NOT referenced, it's treated as GPR.
30432 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30433 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
30434 unsigned SP = RegInfo->getStackRegister();
30436 MachineInstrBuilder MIB;
30438 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30439 const int64_t SPOffset = 2 * PVT.getStoreSize();
30441 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30442 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
30444 MachineBasicBlock *thisMBB = MBB;
30446 // When CET and the shadow stack are enabled, we need to fix the Shadow Stack.
30447 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
30448 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
30451 // Reload FP
30452 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
30453 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30454 const MachineOperand &MO = MI.getOperand(i);
30455 if (MO.isReg()) // Don't add the whole operand, we don't want to
30456 // preserve kill flags.
30457 MIB.addReg(MO.getReg());
30458 else
30459 MIB.add(MO);
30461 MIB.setMemRefs(MMOs);
30463 // Reload IP
30464 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
30465 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30466 const MachineOperand &MO = MI.getOperand(i);
30467 if (i == X86::AddrDisp)
30468 MIB.addDisp(MO, LabelOffset);
30469 else if (MO.isReg()) // Don't add the whole operand, we don't want to
30470 // preserve kill flags.
30471 MIB.addReg(MO.getReg());
30472 else
30473 MIB.add(MO);
30475 MIB.setMemRefs(MMOs);
30477 // Reload SP
30478 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
30479 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30480 if (i == X86::AddrDisp)
30481 MIB.addDisp(MI.getOperand(i), SPOffset);
30482 else
30483 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
30484 // the last instruction of the expansion.
30486 MIB.setMemRefs(MMOs);
30488 // Jump
30489 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
30491 MI.eraseFromParent();
30492 return thisMBB;
30495 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
30496 MachineBasicBlock *MBB,
30497 MachineBasicBlock *DispatchBB,
30498 int FI) const {
30499 DebugLoc DL = MI.getDebugLoc();
30500 MachineFunction *MF = MBB->getParent();
30501 MachineRegisterInfo *MRI = &MF->getRegInfo();
30502 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30504 MVT PVT = getPointerTy(MF->getDataLayout());
30505 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
30507 unsigned Op = 0;
30508 unsigned VR = 0;
30510 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30511 !isPositionIndependent();
30513 if (UseImmLabel) {
30514 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30515 } else {
30516 const TargetRegisterClass *TRC =
30517 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30518 VR = MRI->createVirtualRegister(TRC);
30519 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30521 if (Subtarget.is64Bit())
30522 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
30523 .addReg(X86::RIP)
30524 .addImm(1)
30525 .addReg(0)
30526 .addMBB(DispatchBB)
30527 .addReg(0);
30528 else
30529 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
30530 .addReg(0) /* TII->getGlobalBaseReg(MF) */
30531 .addImm(1)
30532 .addReg(0)
30533 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
30534 .addReg(0);
30537 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
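// The 56/36 below is assumed to be the offset of the second jump-buffer
// slot (the "return address" slot that the dispatch reuses) within the
// SjLj function context on 64-/32-bit targets, respectively.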
30538 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
30539 if (UseImmLabel)
30540 MIB.addMBB(DispatchBB);
30541 else
30542 MIB.addReg(VR);
30545 MachineBasicBlock *
30546 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
30547 MachineBasicBlock *BB) const {
30548 DebugLoc DL = MI.getDebugLoc();
30549 MachineFunction *MF = BB->getParent();
30550 MachineRegisterInfo *MRI = &MF->getRegInfo();
30551 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30552 int FI = MF->getFrameInfo().getFunctionContextIndex();
30554 // Get a mapping of the call site numbers to all of the landing pads they're
30555 // associated with.
30556 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
30557 unsigned MaxCSNum = 0;
30558 for (auto &MBB : *MF) {
30559 if (!MBB.isEHPad())
30560 continue;
30562 MCSymbol *Sym = nullptr;
30563 for (const auto &MI : MBB) {
30564 if (MI.isDebugInstr())
30565 continue;
30567 assert(MI.isEHLabel() && "expected EH_LABEL");
30568 Sym = MI.getOperand(0).getMCSymbol();
30569 break;
30572 if (!MF->hasCallSiteLandingPad(Sym))
30573 continue;
30575 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
30576 CallSiteNumToLPad[CSI].push_back(&MBB);
30577 MaxCSNum = std::max(MaxCSNum, CSI);
30581 // Get an ordered list of the machine basic blocks for the jump table.
30582 std::vector<MachineBasicBlock *> LPadList;
30583 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
30584 LPadList.reserve(CallSiteNumToLPad.size());
30586 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
30587 for (auto &LP : CallSiteNumToLPad[CSI]) {
30588 LPadList.push_back(LP);
30589 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
30593 assert(!LPadList.empty() &&
30594 "No landing pad destinations for the dispatch jump table!");
30596 // Create the MBBs for the dispatch code.
30598 // Shove the dispatch's address into the return slot in the function context.
30599 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
30600 DispatchBB->setIsEHPad(true);
30602 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
30603 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
30604 DispatchBB->addSuccessor(TrapBB);
30606 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
30607 DispatchBB->addSuccessor(DispContBB);
30609 // Insert MBBs.
30610 MF->push_back(DispatchBB);
30611 MF->push_back(DispContBB);
30612 MF->push_back(TrapBB);
30614 // Insert code into the entry block that creates and registers the function
30615 // context.
30616 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
30618 // Create the jump table and associated information
30619 unsigned JTE = getJumpTableEncoding();
30620 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
30621 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
30623 const X86RegisterInfo &RI = TII->getRegisterInfo();
30624 // Add a register mask with no preserved registers. This results in all
30625 // registers being marked as clobbered.
30626 if (RI.hasBasePointer(*MF)) {
30627 const bool FPIs64Bit =
30628 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30629 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
30630 MFI->setRestoreBasePointer(MF);
30632 unsigned FP = RI.getFrameRegister(*MF);
30633 unsigned BP = RI.getBaseRegister();
30634 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
30635 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
30636 MFI->getRestoreBasePointerOffset())
30637 .addRegMask(RI.getNoPreservedMask());
30638 } else {
30639 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
30640 .addRegMask(RI.getNoPreservedMask());
30643 // IReg is used as an index in a memory operand and therefore can't be SP
30644 unsigned IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
30645 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
30646 Subtarget.is64Bit() ? 8 : 4);
30647 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
30648 .addReg(IReg)
30649 .addImm(LPadList.size());
30650 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
30652 if (Subtarget.is64Bit()) {
30653 unsigned BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
30654 unsigned IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
30656 // leaq .LJTI0_0(%rip), BReg
30657 BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
30658 .addReg(X86::RIP)
30659 .addImm(1)
30660 .addReg(0)
30661 .addJumpTableIndex(MJTI)
30662 .addReg(0);
30663 // movzx IReg64, IReg
30664 BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
30665 .addImm(0)
30666 .addReg(IReg)
30667 .addImm(X86::sub_32bit);
30669 switch (JTE) {
30670 case MachineJumpTableInfo::EK_BlockAddress:
30671 // jmpq *(BReg,IReg64,8)
30672 BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
30673 .addReg(BReg)
30674 .addImm(8)
30675 .addReg(IReg64)
30676 .addImm(0)
30677 .addReg(0);
30678 break;
30679 case MachineJumpTableInfo::EK_LabelDifference32: {
30680 unsigned OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
30681 unsigned OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
30682 unsigned TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
30684 // movl (BReg,IReg64,4), OReg
30685 BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
30686 .addReg(BReg)
30687 .addImm(4)
30688 .addReg(IReg64)
30689 .addImm(0)
30690 .addReg(0);
30691 // movsx OReg64, OReg
30692 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
30693 // addq BReg, OReg64, TReg
30694 BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
30695 .addReg(OReg64)
30696 .addReg(BReg);
30697 // jmpq *TReg
30698 BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
30699 break;
30701 default:
30702 llvm_unreachable("Unexpected jump table encoding");
30704 } else {
30705 // jmpl *.LJTI0_0(,IReg,4)
30706 BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
30707 .addReg(0)
30708 .addImm(4)
30709 .addReg(IReg)
30710 .addJumpTableIndex(MJTI)
30711 .addReg(0);
30714 // Add the jump table entries as successors to the MBB.
30715 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
30716 for (auto &LP : LPadList)
30717 if (SeenMBBs.insert(LP).second)
30718 DispContBB->addSuccessor(LP);
30720 // N.B. the order the invoke BBs are processed in doesn't matter here.
30721 SmallVector<MachineBasicBlock *, 64> MBBLPads;
30722 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
30723 for (MachineBasicBlock *MBB : InvokeBBs) {
30724 // Remove the landing pad successor from the invoke block and replace it
30725 // with the new dispatch block.
30726 // Keep a copy of Successors since it's modified inside the loop.
30727 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
30728 MBB->succ_rend());
30729 // FIXME: Avoid quadratic complexity.
30730 for (auto MBBS : Successors) {
30731 if (MBBS->isEHPad()) {
30732 MBB->removeSuccessor(MBBS);
30733 MBBLPads.push_back(MBBS);
30737 MBB->addSuccessor(DispatchBB);
30739 // Find the invoke call and mark all of the callee-saved registers as
30740 // 'implicit defined' so that they're spilled. This prevents instructions
30741 // from being moved to before the EH block, where they would never be
30742 // executed.
30743 for (auto &II : reverse(*MBB)) {
30744 if (!II.isCall())
30745 continue;
30747 DenseMap<unsigned, bool> DefRegs;
30748 for (auto &MOp : II.operands())
30749 if (MOp.isReg())
30750 DefRegs[MOp.getReg()] = true;
30752 MachineInstrBuilder MIB(*MF, &II);
30753 for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
30754 unsigned Reg = SavedRegs[RI];
30755 if (!DefRegs[Reg])
30756 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
30759 break;
30763 // Mark all former landing pads as non-landing pads. The dispatch is the only
30764 // landing pad now.
30765 for (auto &LP : MBBLPads)
30766 LP->setIsEHPad(false);
30768 // The instruction is gone now.
30769 MI.eraseFromParent();
30770 return BB;
30773 MachineBasicBlock *
30774 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
30775 MachineBasicBlock *BB) const {
30776 MachineFunction *MF = BB->getParent();
30777 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30778 DebugLoc DL = MI.getDebugLoc();
30780 switch (MI.getOpcode()) {
30781 default: llvm_unreachable("Unexpected instr type to insert");
30782 case X86::TLS_addr32:
30783 case X86::TLS_addr64:
30784 case X86::TLS_base_addr32:
30785 case X86::TLS_base_addr64:
30786 return EmitLoweredTLSAddr(MI, BB);
30787 case X86::RETPOLINE_CALL32:
30788 case X86::RETPOLINE_CALL64:
30789 case X86::RETPOLINE_TCRETURN32:
30790 case X86::RETPOLINE_TCRETURN64:
30791 return EmitLoweredRetpoline(MI, BB);
30792 case X86::CATCHRET:
30793 return EmitLoweredCatchRet(MI, BB);
30794 case X86::CATCHPAD:
30795 return EmitLoweredCatchPad(MI, BB);
30796 case X86::SEG_ALLOCA_32:
30797 case X86::SEG_ALLOCA_64:
30798 return EmitLoweredSegAlloca(MI, BB);
30799 case X86::TLSCall_32:
30800 case X86::TLSCall_64:
30801 return EmitLoweredTLSCall(MI, BB);
30802 case X86::CMOV_FR32:
30803 case X86::CMOV_FR32X:
30804 case X86::CMOV_FR64:
30805 case X86::CMOV_FR64X:
30806 case X86::CMOV_GR8:
30807 case X86::CMOV_GR16:
30808 case X86::CMOV_GR32:
30809 case X86::CMOV_RFP32:
30810 case X86::CMOV_RFP64:
30811 case X86::CMOV_RFP80:
30812 case X86::CMOV_VR128:
30813 case X86::CMOV_VR128X:
30814 case X86::CMOV_VR256:
30815 case X86::CMOV_VR256X:
30816 case X86::CMOV_VR512:
30817 case X86::CMOV_VK2:
30818 case X86::CMOV_VK4:
30819 case X86::CMOV_VK8:
30820 case X86::CMOV_VK16:
30821 case X86::CMOV_VK32:
30822 case X86::CMOV_VK64:
30823 return EmitLoweredSelect(MI, BB);
30825 case X86::RDFLAGS32:
30826 case X86::RDFLAGS64: {
30827 unsigned PushF =
30828 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
30829 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
30830 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
30831 // Permit reads of the EFLAGS and DF registers without them being defined.
30832 // This intrinsic exists to read external processor state in flags, such as
30833 // the trap flag, interrupt flag, and direction flag, none of which are
30834 // modeled by the backend.
30835 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
30836 "Unexpected register in operand!");
30837 Push->getOperand(2).setIsUndef();
30838 assert(Push->getOperand(3).getReg() == X86::DF &&
30839 "Unexpected register in operand!");
30840 Push->getOperand(3).setIsUndef();
30841 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
30843 MI.eraseFromParent(); // The pseudo is gone now.
30844 return BB;
30847 case X86::WRFLAGS32:
30848 case X86::WRFLAGS64: {
30849 unsigned Push =
30850 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
30851 unsigned PopF =
30852 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
30853 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
30854 BuildMI(*BB, MI, DL, TII->get(PopF));
30856 MI.eraseFromParent(); // The pseudo is gone now.
30857 return BB;
30860 case X86::FP32_TO_INT16_IN_MEM:
30861 case X86::FP32_TO_INT32_IN_MEM:
30862 case X86::FP32_TO_INT64_IN_MEM:
30863 case X86::FP64_TO_INT16_IN_MEM:
30864 case X86::FP64_TO_INT32_IN_MEM:
30865 case X86::FP64_TO_INT64_IN_MEM:
30866 case X86::FP80_TO_INT16_IN_MEM:
30867 case X86::FP80_TO_INT32_IN_MEM:
30868 case X86::FP80_TO_INT64_IN_MEM: {
30869 // Change the floating point control register to use "round towards zero"
30870 // mode when truncating to an integer value.
30871 int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
30872 addFrameReference(BuildMI(*BB, MI, DL,
30873 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
30875 // Load the old value of the control word...
30876 unsigned OldCW =
30877 MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
30878 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
30879 OrigCWFrameIdx);
30881 // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
30882 unsigned NewCW =
30883 MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
30884 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
30885 .addReg(OldCW, RegState::Kill).addImm(0xC00);
30887 // Extract to 16 bits.
30888 unsigned NewCW16 =
30889 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
30890 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
30891 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
30893 // Prepare memory for FLDCW.
30894 int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
30895 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
30896 NewCWFrameIdx)
30897 .addReg(NewCW16, RegState::Kill);
30899 // Reload the modified control word now...
30900 addFrameReference(BuildMI(*BB, MI, DL,
30901 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
30903 // Get the X86 opcode to use.
30904 unsigned Opc;
30905 switch (MI.getOpcode()) {
30906 default: llvm_unreachable("illegal opcode!");
30907 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
30908 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
30909 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
30910 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
30911 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
30912 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
30913 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
30914 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
30915 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
30918 X86AddressMode AM = getAddressFromInstr(&MI, 0);
30919 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
30920 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
30922 // Reload the original control word now.
30923 addFrameReference(BuildMI(*BB, MI, DL,
30924 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
30926 MI.eraseFromParent(); // The pseudo instruction is gone now.
30927 return BB;
30930 // xbegin
30931 case X86::XBEGIN:
30932 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
30934 case X86::VASTART_SAVE_XMM_REGS:
30935 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
30937 case X86::VAARG_64:
30938 return EmitVAARG64WithCustomInserter(MI, BB);
30940 case X86::EH_SjLj_SetJmp32:
30941 case X86::EH_SjLj_SetJmp64:
30942 return emitEHSjLjSetJmp(MI, BB);
30944 case X86::EH_SjLj_LongJmp32:
30945 case X86::EH_SjLj_LongJmp64:
30946 return emitEHSjLjLongJmp(MI, BB);
30948 case X86::Int_eh_sjlj_setup_dispatch:
30949 return EmitSjLjDispatchBlock(MI, BB);
30951 case TargetOpcode::STATEPOINT:
30952 // As an implementation detail, STATEPOINT shares the STACKMAP format at
30953 // this point in the process. We diverge later.
30954 return emitPatchPoint(MI, BB);
30956 case TargetOpcode::STACKMAP:
30957 case TargetOpcode::PATCHPOINT:
30958 return emitPatchPoint(MI, BB);
30960 case TargetOpcode::PATCHABLE_EVENT_CALL:
30961 return emitXRayCustomEvent(MI, BB);
30963 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
30964 return emitXRayTypedEvent(MI, BB);
30966 case X86::LCMPXCHG8B: {
30967 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
30968 // In addition to the 4 E[ABCD] registers implied by the encoding, CMPXCHG8B
30969 // requires a memory operand. If the current architecture happens to be
30970 // i686 and the current function needs a base pointer
30971 // - which is ESI for i686 - the register allocator would not be able to
30972 // allocate registers for an address of the form X(%reg, %reg, Y)
30973 // - there would never be enough unreserved registers during regalloc
30974 // (without the need for a base ptr the only option would be X(%edi, %esi, Y)).
30975 // We give the register allocator a hand by precomputing the address in
30976 // a new vreg using LEA.
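// Illustrative example (register choice assumed): something like
//   cmpxchg8b 16(%ebp,%esi,4)
// is, in effect, rewritten as
//   leal 16(%ebp,%esi,4), %vreg
//   cmpxchg8b (%vreg)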
30978 // If it is not i686 or there is no base pointer, there is nothing to do here.
30979 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
30980 return BB;
30982 // Even though this code does not necessarily need the base pointer to
30983 // be ESI, we check for that. The reason: if this assert fails, something
30984 // has changed in the compiler's base pointer handling, which most
30985 // probably has to be addressed here as well.
30986 assert(TRI->getBaseRegister() == X86::ESI &&
30987 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
30988 "base pointer in mind");
30990 MachineRegisterInfo &MRI = MF->getRegInfo();
30991 MVT SPTy = getPointerTy(MF->getDataLayout());
30992 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
30993 unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
30995 X86AddressMode AM = getAddressFromInstr(&MI, 0);
30996 // Regalloc does not need any help when the memory operand of CMPXCHG8B
30997 // does not use an index register.
30998 if (AM.IndexReg == X86::NoRegister)
30999 return BB;
31001 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
31002 // four operand definitions that are E[ABCD] registers. We skip them and
31003 // then insert the LEA.
31004 MachineBasicBlock::iterator MBBI(MI);
31005 while (MBBI->definesRegister(X86::EAX) || MBBI->definesRegister(X86::EBX) ||
31006 MBBI->definesRegister(X86::ECX) || MBBI->definesRegister(X86::EDX))
31007 --MBBI;
31008 addFullAddress(
31009 BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
31011 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
31013 return BB;
31015 case X86::LCMPXCHG16B:
31016 return BB;
31017 case X86::LCMPXCHG8B_SAVE_EBX:
31018 case X86::LCMPXCHG16B_SAVE_RBX: {
31019 unsigned BasePtr =
31020 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
31021 if (!BB->isLiveIn(BasePtr))
31022 BB->addLiveIn(BasePtr);
31023 return BB;
31028 //===----------------------------------------------------------------------===//
31029 // X86 Optimization Hooks
31030 //===----------------------------------------------------------------------===//
31032 bool
31033 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
31034 const APInt &Demanded,
31035 TargetLoweringOpt &TLO) const {
31036 // Only optimize Ands to prevent shrinking a constant that could be
31037 // matched by movzx.
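// For instance (illustrative numbers only): for a 32-bit 'and x, 0xF0'
// where only bits 4-7 are demanded, the mask can be widened to 0xFF, which
// movzbl can match, because the extra bits 0-3 are not demanded anyway.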
31038 if (Op.getOpcode() != ISD::AND)
31039 return false;
31041 EVT VT = Op.getValueType();
31043 // Ignore vectors.
31044 if (VT.isVector())
31045 return false;
31047 unsigned Size = VT.getSizeInBits();
31049 // Make sure the RHS really is a constant.
31050 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
31051 if (!C)
31052 return false;
31054 const APInt &Mask = C->getAPIntValue();
31056 // Clear all non-demanded bits initially.
31057 APInt ShrunkMask = Mask & Demanded;
31059 // Find the width of the shrunk mask.
31060 unsigned Width = ShrunkMask.getActiveBits();
31062 // If the mask is all 0s there's nothing to do here.
31063 if (Width == 0)
31064 return false;
31066 // Find the next power of 2 width, rounding up to a byte.
31067 Width = PowerOf2Ceil(std::max(Width, 8U));
31068 // Truncate the width to size to handle illegal types.
31069 Width = std::min(Width, Size);
31071 // Calculate a possible zero extend mask for this constant.
31072 APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
31074 // If we aren't changing the mask, just return true to keep it and prevent
31075 // the caller from optimizing.
31076 if (ZeroExtendMask == Mask)
31077 return true;
31079 // Make sure the new mask can be represented by a combination of mask bits
31080 // and non-demanded bits.
31081 if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
31082 return false;
31084 // Replace the constant with the zero extend mask.
31085 SDLoc DL(Op);
31086 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
31087 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
31088 return TLO.CombineTo(Op, NewOp);
31091 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
31092 KnownBits &Known,
31093 const APInt &DemandedElts,
31094 const SelectionDAG &DAG,
31095 unsigned Depth) const {
31096 unsigned BitWidth = Known.getBitWidth();
31097 unsigned Opc = Op.getOpcode();
31098 EVT VT = Op.getValueType();
31099 assert((Opc >= ISD::BUILTIN_OP_END ||
31100 Opc == ISD::INTRINSIC_WO_CHAIN ||
31101 Opc == ISD::INTRINSIC_W_CHAIN ||
31102 Opc == ISD::INTRINSIC_VOID) &&
31103 "Should use MaskedValueIsZero if you don't know whether Op"
31104 " is a target node!");
31106 Known.resetAll();
31107 switch (Opc) {
31108 default: break;
31109 case X86ISD::SETCC:
31110 Known.Zero.setBitsFrom(1);
31111 break;
31112 case X86ISD::MOVMSK: {
31113 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
31114 Known.Zero.setBitsFrom(NumLoBits);
31115 break;
31117 case X86ISD::PEXTRB:
31118 case X86ISD::PEXTRW: {
31119 SDValue Src = Op.getOperand(0);
31120 EVT SrcVT = Src.getValueType();
31121 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
31122 Op.getConstantOperandVal(1));
31123 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
31124 Known = Known.zextOrTrunc(BitWidth, false);
31125 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
31126 break;
31128 case X86ISD::VSRAI:
31129 case X86ISD::VSHLI:
31130 case X86ISD::VSRLI: {
31131 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
31132 if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
31133 Known.setAllZero();
31134 break;
31137 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31138 unsigned ShAmt = ShiftImm->getZExtValue();
31139 if (Opc == X86ISD::VSHLI) {
31140 Known.Zero <<= ShAmt;
31141 Known.One <<= ShAmt;
31142 // Low bits are known zero.
31143 Known.Zero.setLowBits(ShAmt);
31144 } else if (Opc == X86ISD::VSRLI) {
31145 Known.Zero.lshrInPlace(ShAmt);
31146 Known.One.lshrInPlace(ShAmt);
31147 // High bits are known zero.
31148 Known.Zero.setHighBits(ShAmt);
31149 } else {
31150 Known.Zero.ashrInPlace(ShAmt);
31151 Known.One.ashrInPlace(ShAmt);
31154 break;
31156 case X86ISD::PACKUS: {
31157 // PACKUS is just a truncation if the upper half is zero.
31158 APInt DemandedLHS, DemandedRHS;
31159 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
31161 Known.One = APInt::getAllOnesValue(BitWidth * 2);
31162 Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
31164 KnownBits Known2;
31165 if (!!DemandedLHS) {
31166 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31167 Known.One &= Known2.One;
31168 Known.Zero &= Known2.Zero;
31170 if (!!DemandedRHS) {
31171 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31172 Known.One &= Known2.One;
31173 Known.Zero &= Known2.Zero;
31176 if (Known.countMinLeadingZeros() < BitWidth)
31177 Known.resetAll();
31178 Known = Known.trunc(BitWidth);
31179 break;
31181 case X86ISD::ANDNP: {
31182 KnownBits Known2;
31183 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31184 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31186 // ANDNP = (~X & Y);
31187 Known.One &= Known2.Zero;
31188 Known.Zero |= Known2.One;
31189 break;
31191 case X86ISD::FOR: {
31192 KnownBits Known2;
31193 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31194 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31196 // Output known-0 bits are only known if clear in both the LHS & RHS.
31197 Known.Zero &= Known2.Zero;
31198 // Output known-1 are known to be set if set in either the LHS | RHS.
31199 Known.One |= Known2.One;
31200 break;
31202 case X86ISD::CMOV: {
31203 Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
31204 // If we don't know any bits, early out.
31205 if (Known.isUnknown())
31206 break;
31207 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
31209 // Only known if known in both the LHS and RHS.
31210 Known.One &= Known2.One;
31211 Known.Zero &= Known2.Zero;
31212 break;
31216 // Handle target shuffles.
31217 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
31218 if (isTargetShuffle(Opc)) {
31219 bool IsUnary;
31220 SmallVector<int, 64> Mask;
31221 SmallVector<SDValue, 2> Ops;
31222 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
31223 IsUnary)) {
31224 unsigned NumOps = Ops.size();
31225 unsigned NumElts = VT.getVectorNumElements();
31226 if (Mask.size() == NumElts) {
31227 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31228 Known.Zero.setAllBits(); Known.One.setAllBits();
31229 for (unsigned i = 0; i != NumElts; ++i) {
31230 if (!DemandedElts[i])
31231 continue;
31232 int M = Mask[i];
31233 if (M == SM_SentinelUndef) {
31234 // For UNDEF elements, we don't know anything about the common state
31235 // of the shuffle result.
31236 Known.resetAll();
31237 break;
31238 } else if (M == SM_SentinelZero) {
31239 Known.One.clearAllBits();
31240 continue;
31242 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31243 "Shuffle index out of range");
31245 unsigned OpIdx = (unsigned)M / NumElts;
31246 unsigned EltIdx = (unsigned)M % NumElts;
31247 if (Ops[OpIdx].getValueType() != VT) {
31248 // TODO - handle target shuffle ops with different value types.
31249 Known.resetAll();
31250 break;
31252 DemandedOps[OpIdx].setBit(EltIdx);
31254 // Known bits are the values that are shared by every demanded element.
31255 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
31256 if (!DemandedOps[i])
31257 continue;
31258 KnownBits Known2 =
31259 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
31260 Known.One &= Known2.One;
31261 Known.Zero &= Known2.Zero;
31268 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
31269 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
31270 unsigned Depth) const {
31271 unsigned VTBits = Op.getScalarValueSizeInBits();
31272 unsigned Opcode = Op.getOpcode();
31273 switch (Opcode) {
31274 case X86ISD::SETCC_CARRY:
31275 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
31276 return VTBits;
31278 case X86ISD::VTRUNC: {
31279 // TODO: Add DemandedElts support.
31280 SDValue Src = Op.getOperand(0);
31281 unsigned NumSrcBits = Src.getScalarValueSizeInBits();
31282 assert(VTBits < NumSrcBits && "Illegal truncation input type");
31283 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
31284 if (Tmp > (NumSrcBits - VTBits))
31285 return Tmp - (NumSrcBits - VTBits);
31286 return 1;
31289 case X86ISD::PACKSS: {
31290 // PACKSS is just a truncation if the sign bits extend to the packed size.
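// For example (illustrative): when packing v8i32 into v16i16, if every
// demanded 32-bit source lane has at least 17 sign bits, the result keeps
// min(Tmp0, Tmp1) - 16 of them; otherwise we can only claim 1.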
31291 APInt DemandedLHS, DemandedRHS;
31292 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
31293 DemandedRHS);
31295 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
31296 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
31297 if (!!DemandedLHS)
31298 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31299 if (!!DemandedRHS)
31300 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31301 unsigned Tmp = std::min(Tmp0, Tmp1);
31302 if (Tmp > (SrcBits - VTBits))
31303 return Tmp - (SrcBits - VTBits);
31304 return 1;
31307 case X86ISD::VSHLI: {
31308 SDValue Src = Op.getOperand(0);
31309 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
31310 if (ShiftVal.uge(VTBits))
31311 return VTBits; // Shifted all bits out --> zero.
31312 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
31313 if (ShiftVal.uge(Tmp))
31314 return 1; // Shifted all sign bits out --> unknown.
31315 return Tmp - ShiftVal.getZExtValue();
31318 case X86ISD::VSRAI: {
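// An arithmetic shift right replicates the sign bit, so (illustratively)
// an input with 3 known sign bits shifted right by 5 has 3 + 5 = 8,
// capped at VTBits.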
31319 SDValue Src = Op.getOperand(0);
31320 APInt ShiftVal = Op.getConstantOperandAPInt(1);
31321 if (ShiftVal.uge(VTBits - 1))
31322 return VTBits; // Sign splat.
31323 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
31324 ShiftVal += Tmp;
31325 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
31328 case X86ISD::PCMPGT:
31329 case X86ISD::PCMPEQ:
31330 case X86ISD::CMPP:
31331 case X86ISD::VPCOM:
31332 case X86ISD::VPCOMU:
31333 // Vector compares return zero/all-bits result values.
31334 return VTBits;
31336 case X86ISD::ANDNP: {
31337 unsigned Tmp0 =
31338 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
31339 if (Tmp0 == 1) return 1; // Early out.
31340 unsigned Tmp1 =
31341 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
31342 return std::min(Tmp0, Tmp1);
31345 case X86ISD::CMOV: {
31346 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
31347 if (Tmp0 == 1) return 1; // Early out.
31348 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
31349 return std::min(Tmp0, Tmp1);
31353 // Fallback case.
31354 return 1;
31357 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
31358 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
31359 return N->getOperand(0);
31360 return N;
31363 // Attempt to match a combined shuffle mask against supported unary shuffle
31364 // instructions.
31365 // TODO: Investigate sharing more of this with shuffle lowering.
31366 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
31367 bool AllowFloatDomain, bool AllowIntDomain,
31368 SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
31369 const X86Subtarget &Subtarget, unsigned &Shuffle,
31370 MVT &SrcVT, MVT &DstVT) {
31371 unsigned NumMaskElts = Mask.size();
31372 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
31374 // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
31375 if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
31376 isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
31377 Shuffle = X86ISD::VZEXT_MOVL;
31378 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
31379 return true;
31382 // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
31383 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
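// For example, with Scale == 2 a v8i16 mask {0, Z, 1, Z, 2, Z, 3, Z}
// (Z = zeroable) matches ZERO_EXTEND_VECTOR_INREG from v8i16 to v4i32.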
31384 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
31385 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
31386 unsigned MaxScale = 64 / MaskEltSize;
31387 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
31388 bool MatchAny = true;
31389 bool MatchZero = true;
31390 unsigned NumDstElts = NumMaskElts / Scale;
31391 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
31392 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
31393 MatchAny = MatchZero = false;
31394 break;
31396 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
31397 MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
31399 if (MatchAny || MatchZero) {
31400 assert(MatchZero && "Failed to match zext but matched aext?");
31401 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
31402 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
31403 MVT::getIntegerVT(MaskEltSize);
31404 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
31406 if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
31407 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
31409 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
31410 if (SrcVT.getVectorNumElements() != NumDstElts)
31411 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
31413 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
31414 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
31415 return true;
31420 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
31421 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
31422 isUndefOrEqual(Mask[0], 0) &&
31423 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
31424 Shuffle = X86ISD::VZEXT_MOVL;
31425 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
31426 return true;
31429 // Check if we have SSE3, which will let us use MOVDDUP etc. The
31430 // instructions are no slower than UNPCKLPD but have the option to
31431 // fold the input operand into even an unaligned memory load.
31432 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
31433 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
31434 Shuffle = X86ISD::MOVDDUP;
31435 SrcVT = DstVT = MVT::v2f64;
31436 return true;
31438 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31439 Shuffle = X86ISD::MOVSLDUP;
31440 SrcVT = DstVT = MVT::v4f32;
31441 return true;
31443 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
31444 Shuffle = X86ISD::MOVSHDUP;
31445 SrcVT = DstVT = MVT::v4f32;
31446 return true;
31450 if (MaskVT.is256BitVector() && AllowFloatDomain) {
31451 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
31452 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31453 Shuffle = X86ISD::MOVDDUP;
31454 SrcVT = DstVT = MVT::v4f64;
31455 return true;
31457 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31458 Shuffle = X86ISD::MOVSLDUP;
31459 SrcVT = DstVT = MVT::v8f32;
31460 return true;
31462 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
31463 Shuffle = X86ISD::MOVSHDUP;
31464 SrcVT = DstVT = MVT::v8f32;
31465 return true;
31469 if (MaskVT.is512BitVector() && AllowFloatDomain) {
31470 assert(Subtarget.hasAVX512() &&
31471 "AVX512 required for 512-bit vector shuffles");
31472 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31473 Shuffle = X86ISD::MOVDDUP;
31474 SrcVT = DstVT = MVT::v8f64;
31475 return true;
31477 if (isTargetShuffleEquivalent(
31478 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
31479 Shuffle = X86ISD::MOVSLDUP;
31480 SrcVT = DstVT = MVT::v16f32;
31481 return true;
31483 if (isTargetShuffleEquivalent(
31484 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
31485 Shuffle = X86ISD::MOVSHDUP;
31486 SrcVT = DstVT = MVT::v16f32;
31487 return true;
31491 return false;
31494 // Attempt to match a combined shuffle mask against supported unary immediate
31495 // permute instructions.
31496 // TODO: Investigate sharing more of this with shuffle lowering.
31497 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
31498 const APInt &Zeroable,
31499 bool AllowFloatDomain, bool AllowIntDomain,
31500 const X86Subtarget &Subtarget,
31501 unsigned &Shuffle, MVT &ShuffleVT,
31502 unsigned &PermuteImm) {
31503 unsigned NumMaskElts = Mask.size();
31504 unsigned InputSizeInBits = MaskVT.getSizeInBits();
31505 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
31506 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
31508 bool ContainsZeros =
31509 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
31511 // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
31512 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
31513 // Check for lane crossing permutes.
31514 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
31515 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
31516 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
31517 Shuffle = X86ISD::VPERMI;
31518 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
31519 PermuteImm = getV4X86ShuffleImm(Mask);
31520 return true;
31522 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
31523 SmallVector<int, 4> RepeatedMask;
31524 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
31525 Shuffle = X86ISD::VPERMI;
31526 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
31527 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
31528 return true;
31531 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
31532 // VPERMILPD can permute with a non-repeating shuffle.
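// For example, a v4f64 mask {1, 0, 3, 2} encodes as PermuteImm 0b0101 - one
// bit per element selecting the low/high half of its 128-bit lane.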
31533 Shuffle = X86ISD::VPERMILPI;
31534 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
31535 PermuteImm = 0;
31536 for (int i = 0, e = Mask.size(); i != e; ++i) {
31537 int M = Mask[i];
31538 if (M == SM_SentinelUndef)
31539 continue;
31540 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
31541 PermuteImm |= (M & 1) << i;
31543 return true;
31547 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
31548 // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
31549 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
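// For example, a repeated v4i32 mask {2, 3, 0, 1} becomes PSHUFD with
// immediate 0x4E (each index packed into two bits, element 0 in bits 1:0).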
31550 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
31551 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
31552 SmallVector<int, 4> RepeatedMask;
31553 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
31554 // Narrow the repeated mask to create 32-bit element permutes.
31555 SmallVector<int, 4> WordMask = RepeatedMask;
31556 if (MaskScalarSizeInBits == 64)
31557 scaleShuffleMask<int>(2, RepeatedMask, WordMask);
31559 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
31560 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
31561 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
31562 PermuteImm = getV4X86ShuffleImm(WordMask);
31563 return true;
31567 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
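// For example, a v8i16 mask {1, 0, 3, 2, 4, 5, 6, 7} leaves the upper half
// in place and becomes PSHUFLW with immediate 0xB1.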
31568 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
31569 SmallVector<int, 4> RepeatedMask;
31570 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
31571 ArrayRef<int> LoMask(Mask.data() + 0, 4);
31572 ArrayRef<int> HiMask(Mask.data() + 4, 4);
31574 // PSHUFLW: permute lower 4 elements only.
31575 if (isUndefOrInRange(LoMask, 0, 4) &&
31576 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
31577 Shuffle = X86ISD::PSHUFLW;
31578 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
31579 PermuteImm = getV4X86ShuffleImm(LoMask);
31580 return true;
31583 // PSHUFHW: permute upper 4 elements only.
31584 if (isUndefOrInRange(HiMask, 4, 8) &&
31585 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
31586 // Offset the HiMask so that we can create the shuffle immediate.
31587 int OffsetHiMask[4];
31588 for (int i = 0; i != 4; ++i)
31589 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
31591 Shuffle = X86ISD::PSHUFHW;
31592 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
31593 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
31594 return true;
31599 // Attempt to match against byte/bit shifts.
31600 // FIXME: Add 512-bit support.
31601 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
31602 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
31603 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
31604 Mask, 0, Zeroable, Subtarget);
31605 if (0 < ShiftAmt) {
31606 PermuteImm = (unsigned)ShiftAmt;
31607 return true;
31611 return false;
31614 // Attempt to match a combined unary shuffle mask against supported binary
31615 // shuffle instructions.
31616 // TODO: Investigate sharing more of this with shuffle lowering.
31617 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
31618 bool AllowFloatDomain, bool AllowIntDomain,
31619 SDValue &V1, SDValue &V2, const SDLoc &DL,
31620 SelectionDAG &DAG, const X86Subtarget &Subtarget,
31621 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
31622 bool IsUnary) {
31623 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
31625 if (MaskVT.is128BitVector()) {
31626 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
31627 V2 = V1;
31628 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
31629 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
31630 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
31631 return true;
31633 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
31634 V2 = V1;
31635 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
31636 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
31637 return true;
31639 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
31640 (AllowFloatDomain || !Subtarget.hasSSE41())) {
31641 std::swap(V1, V2);
31642 Shuffle = X86ISD::MOVSD;
31643 SrcVT = DstVT = MVT::v2f64;
31644 return true;
31646 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
31647 (AllowFloatDomain || !Subtarget.hasSSE41())) {
31648 Shuffle = X86ISD::MOVSS;
31649 SrcVT = DstVT = MVT::v4f32;
31650 return true;
31654 // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
31655 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
31656 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
31657 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
31658 if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
31659 Subtarget)) {
31660 DstVT = MaskVT;
31661 return true;
31665 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
31666 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
31667 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
31668 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
31669 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
31670 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
31671 if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
31672 DAG, Subtarget)) {
31673 SrcVT = DstVT = MaskVT;
31674 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
31675 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
31676 return true;
31680 return false;
31683 static bool matchBinaryPermuteShuffle(
31684 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
31685 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
31686 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
31687 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
31688 unsigned NumMaskElts = Mask.size();
31689 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
31691 // Attempt to match against PALIGNR byte rotate.
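// For example, a v16i8 mask {4, 5, ..., 18, 19} selects a contiguous 16-byte
// window of the concatenated inputs and is matched as a byte rotation by 4.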
31692 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
31693 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
31694 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
31695 if (0 < ByteRotation) {
31696 Shuffle = X86ISD::PALIGNR;
31697 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
31698 PermuteImm = ByteRotation;
31699 return true;
31703 // Attempt to combine to X86ISD::BLENDI.
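// For example, a v8i16 mask {0, 9, 2, 11, 4, 13, 6, 15} takes the odd
// elements from V2 and encodes as the PBLENDW immediate 0xAA.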
31704 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
31705 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
31706 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
31707 uint64_t BlendMask = 0;
31708 bool ForceV1Zero = false, ForceV2Zero = false;
31709 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
31710 if (matchVectorShuffleAsBlend(V1, V2, TargetMask, ForceV1Zero, ForceV2Zero,
31711 BlendMask)) {
31712 if (MaskVT == MVT::v16i16) {
31713 // We can only use v16i16 PBLENDW if the lanes are repeated.
31714 SmallVector<int, 8> RepeatedMask;
31715 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
31716 RepeatedMask)) {
31717 assert(RepeatedMask.size() == 8 &&
31718 "Repeated mask size doesn't match!");
31719 PermuteImm = 0;
31720 for (int i = 0; i < 8; ++i)
31721 if (RepeatedMask[i] >= 8)
31722 PermuteImm |= 1 << i;
31723 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
31724 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
31725 Shuffle = X86ISD::BLENDI;
31726 ShuffleVT = MaskVT;
31727 return true;
31729 } else {
31730 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
31731 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
31732 PermuteImm = (unsigned)BlendMask;
31733 Shuffle = X86ISD::BLENDI;
31734 ShuffleVT = MaskVT;
31735 return true;
31740 // Attempt to combine to INSERTPS.
31741 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
31742 MaskVT.is128BitVector()) {
31743 if (Zeroable.getBoolValue() &&
31744 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
31745 Shuffle = X86ISD::INSERTPS;
31746 ShuffleVT = MVT::v4f32;
31747 return true;
31751 // Attempt to combine to SHUFPD.
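// For example, a v2f64 mask {0, 3} takes element 0 from V1 and element 1
// from V2 and encodes as the SHUFPD immediate 0b10.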
31752 if (AllowFloatDomain && EltSizeInBits == 64 &&
31753 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
31754 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
31755 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
31756 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, PermuteImm, Mask)) {
31757 Shuffle = X86ISD::SHUFP;
31758 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
31759 return true;
31763 // Attempt to combine to SHUFPS.
31764 if (AllowFloatDomain && EltSizeInBits == 32 &&
31765 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
31766 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
31767 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
31768 SmallVector<int, 4> RepeatedMask;
31769 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
31770 // Match each half of the repeated mask to determine if it's just
31771 // referencing one of the vectors, is zeroable, or is entirely undef.
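// For example, a repeated v4f32 mask {0, 1, 4, 5} resolves to
// SHUFPS(V1, V2) with immediate 0x44.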
31772 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
31773 int M0 = RepeatedMask[Offset];
31774 int M1 = RepeatedMask[Offset + 1];
31776 if (isUndefInRange(RepeatedMask, Offset, 2)) {
31777 return DAG.getUNDEF(MaskVT);
31778 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
31779 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
31780 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
31781 return getZeroVector(MaskVT, Subtarget, DAG, DL);
31782 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
31783 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
31784 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
31785 return V1;
31786 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
31787 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
31788 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
31789 return V2;
31792 return SDValue();
31795 int ShufMask[4] = {-1, -1, -1, -1};
31796 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
31797 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
31799 if (Lo && Hi) {
31800 V1 = Lo;
31801 V2 = Hi;
31802 Shuffle = X86ISD::SHUFP;
31803 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
31804 PermuteImm = getV4X86ShuffleImm(ShufMask);
31805 return true;
31810 return false;
31813 static SDValue combineX86ShuffleChainWithExtract(
31814 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
31815 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
31816 const X86Subtarget &Subtarget);
31818 /// Combine an arbitrary chain of shuffles into a single instruction if
31819 /// possible.
31821 /// This is the leaf of the recursive combine below. When we have found some
31822 /// chain of single-use x86 shuffle instructions and accumulated the combined
31823 /// shuffle mask represented by them, this will try to pattern match that mask
31824 /// into either a single instruction if there is a special purpose instruction
31825 /// for this operation, or into a PSHUFB instruction which is a fully general
31826 /// instruction but should only be used to replace chains over a certain depth.
31827 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
31828 ArrayRef<int> BaseMask, int Depth,
31829 bool HasVariableMask,
31830 bool AllowVariableMask, SelectionDAG &DAG,
31831 const X86Subtarget &Subtarget) {
31832 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
31833 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
31834 "Unexpected number of shuffle inputs!");
31836 // Find the inputs that enter the chain. Note that multiple uses are OK
31837 // here; we're not going to remove the operands we find.
31838 bool UnaryShuffle = (Inputs.size() == 1);
31839 SDValue V1 = peekThroughBitcasts(Inputs[0]);
31840 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
31841 : peekThroughBitcasts(Inputs[1]));
31843 MVT VT1 = V1.getSimpleValueType();
31844 MVT VT2 = V2.getSimpleValueType();
31845 MVT RootVT = Root.getSimpleValueType();
31846 assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
31847 VT2.getSizeInBits() == RootVT.getSizeInBits() &&
31848 "Vector size mismatch");
31850 SDLoc DL(Root);
31851 SDValue Res;
31853 unsigned NumBaseMaskElts = BaseMask.size();
31854 if (NumBaseMaskElts == 1) {
31855 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
31856 return DAG.getBitcast(RootVT, V1);
31859 unsigned RootSizeInBits = RootVT.getSizeInBits();
31860 unsigned NumRootElts = RootVT.getVectorNumElements();
31861 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
31862 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
31863 (RootVT.isFloatingPoint() && Depth >= 2) ||
31864 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
31866 // Don't combine if we are an AVX512/EVEX target and the mask element size
31867 // is different from the root element size - this would prevent writemasks
31868 // from being reused.
31869 // TODO - this currently prevents all lane shuffles from occurring.
31870 // TODO - check for writemasks usage instead of always preventing combining.
31871 // TODO - attempt to narrow Mask back to writemask size.
31872 bool IsEVEXShuffle =
31873 RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
31875 // Attempt to match a subvector broadcast.
31876 // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
31877 if (UnaryShuffle &&
31878 (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
31879 SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
31880 if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
31881 SDValue Src = Inputs[0];
31882 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
31883 Src.getOperand(0).isUndef() &&
31884 Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
31885 MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
31886 return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
31887 Src.getValueType(),
31888 Src.getOperand(1)));
31893 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
31895 // Handle 128-bit lane shuffles of 256-bit vectors.
31896 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
31897 // we need to use the zeroing feature.
31898 // TODO - this should support binary shuffles.
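// For example, a widened mask {1, Z} (Z = zeroable) gives PermMask 0x81:
// the low half takes 128-bit lane 1 of the source and the high half is zeroed.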
31899 if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
31900 !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
31901 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
31902 if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
31903 return SDValue(); // Nothing to do!
31904 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
31905 unsigned PermMask = 0;
31906 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
31907 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
31909 Res = DAG.getBitcast(ShuffleVT, V1);
31910 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
31911 DAG.getUNDEF(ShuffleVT),
31912 DAG.getConstant(PermMask, DL, MVT::i8));
31913 return DAG.getBitcast(RootVT, Res);
31916 // For masks that have been widened to 128-bit elements or more,
31917 // narrow back down to 64-bit elements.
31918 SmallVector<int, 64> Mask;
31919 if (BaseMaskEltSizeInBits > 64) {
31920 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
31921 int MaskScale = BaseMaskEltSizeInBits / 64;
31922 scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
31923 } else {
31924 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
31927 unsigned NumMaskElts = Mask.size();
31928 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
31930 // Determine the effective mask value type.
31931 FloatDomain &= (32 <= MaskEltSizeInBits);
31932 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
31933 : MVT::getIntegerVT(MaskEltSizeInBits);
31934 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
31936 // Only allow legal mask types.
31937 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
31938 return SDValue();
31940 // Attempt to match the mask against known shuffle patterns.
31941 MVT ShuffleSrcVT, ShuffleVT;
31942 unsigned Shuffle, PermuteImm;
31944 // Which shuffle domains are permitted?
31945 // Permit domain crossing at higher combine depths.
31946 bool AllowFloatDomain = FloatDomain || (Depth > 3);
31947 bool AllowIntDomain = (!FloatDomain || (Depth > 3)) && Subtarget.hasSSE2() &&
31948 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
31950 // Determine zeroable mask elements.
31951 APInt Zeroable(NumMaskElts, 0);
31952 for (unsigned i = 0; i != NumMaskElts; ++i)
31953 if (isUndefOrZero(Mask[i]))
31954 Zeroable.setBit(i);
31956 if (UnaryShuffle) {
31957 // If we are shuffling a X86ISD::VZEXT_LOAD then we can use the load
31958 // directly if we don't shuffle the lower element and we shuffle the upper
31959 // (zero) elements within themselves.
31960 if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
31961 (V1.getScalarValueSizeInBits() % MaskEltSizeInBits) == 0) {
31962 unsigned Scale = V1.getScalarValueSizeInBits() / MaskEltSizeInBits;
31963 ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
31964 if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
31965 isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
31966 return DAG.getBitcast(RootVT, V1);
31970 // Attempt to match against broadcast-from-vector.
31971 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
31972 if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
31973 && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
31974 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
31975 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
31976 if (V1.getValueType() == MaskVT &&
31977 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
31978 MayFoldLoad(V1.getOperand(0))) {
31979 if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
31980 return SDValue(); // Nothing to do!
31981 Res = V1.getOperand(0);
31982 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
31983 return DAG.getBitcast(RootVT, Res);
31985 if (Subtarget.hasAVX2()) {
31986 if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
31987 return SDValue(); // Nothing to do!
31988 Res = DAG.getBitcast(MaskVT, V1);
31989 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
31990 return DAG.getBitcast(RootVT, Res);
31995 SDValue NewV1 = V1; // Save operand in case early exit happens.
31996 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
31997 DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
31998 ShuffleVT) &&
31999 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32000 if (Depth == 1 && Root.getOpcode() == Shuffle)
32001 return SDValue(); // Nothing to do!
32002 Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
32003 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
32004 return DAG.getBitcast(RootVT, Res);
32007 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
32008 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
32009 PermuteImm) &&
32010 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32011 if (Depth == 1 && Root.getOpcode() == Shuffle)
32012 return SDValue(); // Nothing to do!
32013 Res = DAG.getBitcast(ShuffleVT, V1);
32014 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
32015 DAG.getConstant(PermuteImm, DL, MVT::i8));
32016 return DAG.getBitcast(RootVT, Res);
32020 SDValue NewV1 = V1; // Save operands in case early exit happens.
32021 SDValue NewV2 = V2;
32022 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32023 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32024 ShuffleVT, UnaryShuffle) &&
32025 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32026 if (Depth == 1 && Root.getOpcode() == Shuffle)
32027 return SDValue(); // Nothing to do!
32028 NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
32029 NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
32030 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
32031 return DAG.getBitcast(RootVT, Res);
32034 NewV1 = V1; // Save operands in case early exit happens.
32035 NewV2 = V2;
32036 if (matchBinaryPermuteShuffle(
32037 MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
32038 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
32039 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32040 if (Depth == 1 && Root.getOpcode() == Shuffle)
32041 return SDValue(); // Nothing to do!
32042 NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
32043 NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
32044 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
32045 DAG.getConstant(PermuteImm, DL, MVT::i8));
32046 return DAG.getBitcast(RootVT, Res);
32049 // Typically from here on, we need an integer version of MaskVT.
32050 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
32051 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
32053 // Annoyingly, SSE4A instructions don't map into the above match helpers.
32054 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
32055 uint64_t BitLen, BitIdx;
32056 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
32057 Zeroable)) {
32058 if (Depth == 1 && Root.getOpcode() == X86ISD::EXTRQI)
32059 return SDValue(); // Nothing to do!
32060 V1 = DAG.getBitcast(IntMaskVT, V1);
32061 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
32062 DAG.getConstant(BitLen, DL, MVT::i8),
32063 DAG.getConstant(BitIdx, DL, MVT::i8));
32064 return DAG.getBitcast(RootVT, Res);
32067 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
32068 if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTQI)
32069 return SDValue(); // Nothing to do!
32070 V1 = DAG.getBitcast(IntMaskVT, V1);
32071 V2 = DAG.getBitcast(IntMaskVT, V2);
32072 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
32073 DAG.getConstant(BitLen, DL, MVT::i8),
32074 DAG.getConstant(BitIdx, DL, MVT::i8));
32075 return DAG.getBitcast(RootVT, Res);
32079 // Don't try to re-form single instruction chains under any circumstances now
32080 // that we've done encoding canonicalization for them.
32081 if (Depth < 2)
32082 return SDValue();
32084 // Depth threshold above which we can efficiently use variable mask shuffles.
32085 int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 2 : 3;
32086 AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
32088 bool MaskContainsZeros =
32089 any_of(Mask, [](int M) { return M == SM_SentinelZero; });
32091 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
32092 // If we have a single input lane-crossing shuffle then lower to VPERMV.
32093 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32094 ((Subtarget.hasAVX2() &&
32095 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32096 (Subtarget.hasAVX512() &&
32097 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32098 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32099 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32100 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32101 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32102 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32103 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32104 Res = DAG.getBitcast(MaskVT, V1);
32105 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
32106 return DAG.getBitcast(RootVT, Res);
32109 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
32110 // vector as the second source.
32111 if (UnaryShuffle && AllowVariableMask &&
32112 ((Subtarget.hasAVX512() &&
32113 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32114 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32115 (Subtarget.hasVLX() &&
32116 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32117 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32118 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32119 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32120 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32121 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32122 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
32123 for (unsigned i = 0; i != NumMaskElts; ++i)
32124 if (Mask[i] == SM_SentinelZero)
32125 Mask[i] = NumMaskElts + i;
32127 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32128 Res = DAG.getBitcast(MaskVT, V1);
32129 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
32130 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
32131 return DAG.getBitcast(RootVT, Res);
32134 // If that failed and either input is extracted then try to combine as a
32135 // shuffle with the larger type.
32136 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32137 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32138 DAG, Subtarget))
32139 return WideShuffle;
32141 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
32142 if (AllowVariableMask && !MaskContainsZeros &&
32143 ((Subtarget.hasAVX512() &&
32144 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32145 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32146 (Subtarget.hasVLX() &&
32147 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32148 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32149 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32150 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32151 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32152 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32153 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32154 V1 = DAG.getBitcast(MaskVT, V1);
32155 V2 = DAG.getBitcast(MaskVT, V2);
32156 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32157 return DAG.getBitcast(RootVT, Res);
32159 return SDValue();
32162 // See if we can combine a single input shuffle with zeros to a bit-mask,
32163 // which is much simpler than any shuffle.
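// For example, a v4i32 mask {0, Z, 2, Z} becomes an AND with the constant
// vector {-1, 0, -1, 0}.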
32164 if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
32165 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
32166 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
32167 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
32168 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
32169 APInt UndefElts(NumMaskElts, 0);
32170 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
32171 for (unsigned i = 0; i != NumMaskElts; ++i) {
32172 int M = Mask[i];
32173 if (M == SM_SentinelUndef) {
32174 UndefElts.setBit(i);
32175 continue;
32177 if (M == SM_SentinelZero)
32178 continue;
32179 EltBits[i] = AllOnes;
32181 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
32182 Res = DAG.getBitcast(MaskVT, V1);
32183 unsigned AndOpcode =
32184 FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
32185 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
32186 return DAG.getBitcast(RootVT, Res);
32189 // If we have a single input shuffle with different shuffle patterns in the
32190 // 128-bit lanes, use a variable-mask VPERMILPS (X86ISD::VPERMILPV).
32191 // TODO: Combine other mask types at higher depths.
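// For example, a v8f32 mask {1, 0, 3, 2, 7, 6, 5, 4} needs per-lane patterns
// {1, 0, 3, 2} and {3, 2, 1, 0}, which a single immediate cannot express.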
32192 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32193 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
32194 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
32195 SmallVector<SDValue, 16> VPermIdx;
32196 for (int M : Mask) {
32197 SDValue Idx =
32198 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
32199 VPermIdx.push_back(Idx);
32201 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
32202 Res = DAG.getBitcast(MaskVT, V1);
32203 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
32204 return DAG.getBitcast(RootVT, Res);
32207 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
32208 // to VPERMIL2PD/VPERMIL2PS.
32209 if (AllowVariableMask && Subtarget.hasXOP() &&
32210 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
32211 MaskVT == MVT::v8f32)) {
32212 // VPERMIL2 Operation.
32213 // Bits[3] - Match Bit.
32214 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
32215 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
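// For example, for v8f32 a mask element of 10 (element 2 of V2) becomes the
// selector 6: bit 2 picks the second source, bits 1:0 pick element 2.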
32216 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
32217 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
32218 SmallVector<int, 8> VPerm2Idx;
32219 unsigned M2ZImm = 0;
32220 for (int M : Mask) {
32221 if (M == SM_SentinelUndef) {
32222 VPerm2Idx.push_back(-1);
32223 continue;
32225 if (M == SM_SentinelZero) {
32226 M2ZImm = 2;
32227 VPerm2Idx.push_back(8);
32228 continue;
32230 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
32231 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
32232 VPerm2Idx.push_back(Index);
32234 V1 = DAG.getBitcast(MaskVT, V1);
32235 V2 = DAG.getBitcast(MaskVT, V2);
32236 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
32237 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
32238 DAG.getConstant(M2ZImm, DL, MVT::i8));
32239 return DAG.getBitcast(RootVT, Res);
32242 // If we have 3 or more shuffle instructions or a chain involving a variable
32243 // mask, we can replace them with a single PSHUFB instruction profitably.
32244 // Intel's manuals suggest only using PSHUFB if doing so replacing 5
32245 // instructions, but in practice PSHUFB tends to be *very* fast so we're
32246 // more aggressive.
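// Each PSHUFB control byte indexes within its 16-byte lane; any control byte
// with the top bit set (e.g. the 255 used below for zeroable lanes) writes zero.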
32247 if (UnaryShuffle && AllowVariableMask &&
32248 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
32249 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
32250 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
32251 SmallVector<SDValue, 16> PSHUFBMask;
32252 int NumBytes = RootVT.getSizeInBits() / 8;
32253 int Ratio = NumBytes / NumMaskElts;
32254 for (int i = 0; i < NumBytes; ++i) {
32255 int M = Mask[i / Ratio];
32256 if (M == SM_SentinelUndef) {
32257 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
32258 continue;
32260 if (M == SM_SentinelZero) {
32261 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
32262 continue;
32264 M = Ratio * M + i % Ratio;
32265 assert((M / 16) == (i / 16) && "Lane crossing detected");
32266 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32268 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
32269 Res = DAG.getBitcast(ByteVT, V1);
32270 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
32271 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
32272 return DAG.getBitcast(RootVT, Res);
32275 // With XOP, if we have a 128-bit binary input shuffle we can always combine
32276 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
32277 // slower than PSHUFB on targets that support both.
32278 if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
32279 // VPPERM Mask Operation
32280 // Bits[4:0] - Byte Index (0 - 31)
32281 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
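// For example, the constant 128 (0x80) used below sets the permute-op field
// to 4 (ZERO), so the result byte is zero regardless of the byte index.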
32282 SmallVector<SDValue, 16> VPPERMMask;
32283 int NumBytes = 16;
32284 int Ratio = NumBytes / NumMaskElts;
32285 for (int i = 0; i < NumBytes; ++i) {
32286 int M = Mask[i / Ratio];
32287 if (M == SM_SentinelUndef) {
32288 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
32289 continue;
32291 if (M == SM_SentinelZero) {
32292 VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
32293 continue;
32295 M = Ratio * M + i % Ratio;
32296 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32298 MVT ByteVT = MVT::v16i8;
32299 V1 = DAG.getBitcast(ByteVT, V1);
32300 V2 = DAG.getBitcast(ByteVT, V2);
32301 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
32302 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
32303 return DAG.getBitcast(RootVT, Res);
32306 // If that failed and either input is extracted then try to combine as a
32307 // shuffle with the larger type.
32308 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32309 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32310 DAG, Subtarget))
32311 return WideShuffle;
32313 // If we have a dual input shuffle then lower to VPERMV3.
32314 if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32315 ((Subtarget.hasAVX512() &&
32316 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32317 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32318 (Subtarget.hasVLX() &&
32319 (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
32320 MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
32321 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32322 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32323 (Subtarget.hasBWI() && Subtarget.hasVLX() &&
32324 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
32325 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32326 (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
32327 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
32328 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32329 V1 = DAG.getBitcast(MaskVT, V1);
32330 V2 = DAG.getBitcast(MaskVT, V2);
32331 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32332 return DAG.getBitcast(RootVT, Res);
32335 // Failed to find any combines.
32336 return SDValue();
32339 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
32340 // instruction if possible.
32342 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
32343 // type size to attempt to combine:
32344 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
32345 // -->
32346 // extract_subvector(shuffle(x,y,m2),0)
32347 static SDValue combineX86ShuffleChainWithExtract(
32348 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
32349 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32350 const X86Subtarget &Subtarget) {
32351 unsigned NumMaskElts = BaseMask.size();
32352 unsigned NumInputs = Inputs.size();
32353 if (NumInputs == 0)
32354 return SDValue();
32356 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
32357 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
32359 // Peek through subvectors.
32360 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
32361 unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
32362 for (unsigned i = 0; i != NumInputs; ++i) {
32363 SDValue &Src = WideInputs[i];
32364 unsigned &Offset = Offsets[i];
32365 Src = peekThroughBitcasts(Src);
32366 EVT BaseVT = Src.getValueType();
32367 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
32368 isa<ConstantSDNode>(Src.getOperand(1))) {
32369 Offset += Src.getConstantOperandVal(1);
32370 Src = Src.getOperand(0);
32372 WideSizeInBits = std::max(WideSizeInBits, Src.getValueSizeInBits());
32373 assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
32374 "Unexpected subvector extraction");
32375 Offset /= BaseVT.getVectorNumElements();
32376 Offset *= NumMaskElts;
32379 // Bail if we're always extracting from the lowest subvectors;
32380 // combineX86ShuffleChain should match this for the current width.
32381 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
32382 return SDValue();
32384 EVT RootVT = Root.getValueType();
32385 unsigned RootSizeInBits = RootVT.getSizeInBits();
32386 unsigned Scale = WideSizeInBits / RootSizeInBits;
32387 assert((WideSizeInBits % RootSizeInBits) == 0 &&
32388 "Unexpected subvector extraction");
32390 // If the src vector types aren't the same, see if we can extend
32391 // them to match each other.
32392 // TODO: Support different scalar types?
32393 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
32394 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
32395 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
32396 Op.getValueType().getScalarType() != WideSVT;
32398 return SDValue();
32400 for (SDValue &NewInput : WideInputs) {
32401 assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
32402 "Shuffle vector size mismatch");
32403 if (WideSizeInBits > NewInput.getValueSizeInBits())
32404 NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
32405 SDLoc(NewInput), WideSizeInBits);
32406 assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
32407 "Unexpected subvector extraction");
32410 // Create new mask for larger type.
32411 for (unsigned i = 1; i != NumInputs; ++i)
32412 Offsets[i] += i * Scale * NumMaskElts;
32414 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
32415 for (int &M : WideMask) {
32416 if (M < 0)
32417 continue;
32418 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
32420 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
32422 // Remove unused/repeated shuffle source ops.
32423 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
32424 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
32426 if (WideInputs.size() > 2)
32427 return SDValue();
32429 // Attempt to combine wider chain.
32430 // TODO: Can we use a better Root?
32431 SDValue WideRoot = WideInputs[0];
32432 if (SDValue WideShuffle = combineX86ShuffleChain(
32433 WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
32434 AllowVariableMask, DAG, Subtarget)) {
32435 WideShuffle =
32436 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
32437 return DAG.getBitcast(RootVT, WideShuffle);
32439 return SDValue();
32442 // Attempt to constant fold all of the constant source ops.
32443 // Returns the folded constant vector if the entire shuffle folds to a constant.
32444 // TODO: Extend this to merge multiple constant Ops and update the mask.
32445 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
32446 ArrayRef<int> Mask, SDValue Root,
32447 bool HasVariableMask,
32448 SelectionDAG &DAG,
32449 const X86Subtarget &Subtarget) {
32450 MVT VT = Root.getSimpleValueType();
32452 unsigned SizeInBits = VT.getSizeInBits();
32453 unsigned NumMaskElts = Mask.size();
32454 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
32455 unsigned NumOps = Ops.size();
32457 // Extract constant bits from each source op.
32458 bool OneUseConstantOp = false;
32459 SmallVector<APInt, 16> UndefEltsOps(NumOps);
32460 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
32461 for (unsigned i = 0; i != NumOps; ++i) {
32462 SDValue SrcOp = Ops[i];
32463 OneUseConstantOp |= SrcOp.hasOneUse();
32464 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
32465 RawBitsOps[i]))
32466 return SDValue();
32469 // Only fold if at least one of the constants is only used once or
32470 // the combined shuffle has included a variable mask shuffle; this
32471 // is to avoid constant pool bloat.
32472 if (!OneUseConstantOp && !HasVariableMask)
32473 return SDValue();
32475 // Shuffle the constant bits according to the mask.
32476 APInt UndefElts(NumMaskElts, 0);
32477 APInt ZeroElts(NumMaskElts, 0);
32478 APInt ConstantElts(NumMaskElts, 0);
32479 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
32480 APInt::getNullValue(MaskSizeInBits));
32481 for (unsigned i = 0; i != NumMaskElts; ++i) {
32482 int M = Mask[i];
32483 if (M == SM_SentinelUndef) {
32484 UndefElts.setBit(i);
32485 continue;
32486 } else if (M == SM_SentinelZero) {
32487 ZeroElts.setBit(i);
32488 continue;
32490 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
32492 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
32493 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
32495 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
32496 if (SrcUndefElts[SrcMaskIdx]) {
32497 UndefElts.setBit(i);
32498 continue;
32501 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
32502 APInt &Bits = SrcEltBits[SrcMaskIdx];
32503 if (!Bits) {
32504 ZeroElts.setBit(i);
32505 continue;
32508 ConstantElts.setBit(i);
32509 ConstantBitData[i] = Bits;
32511 assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
32513 // Create the constant data.
32514 MVT MaskSVT;
32515 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
32516 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
32517 else
32518 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
32520 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
32522 SDLoc DL(Root);
32523 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
32524 return DAG.getBitcast(VT, CstOp);
32527 /// Fully generic combining of x86 shuffle instructions.
32529 /// This should be the last combine run over the x86 shuffle instructions. Once
32530 /// they have been fully optimized, this will recursively consider all chains
32531 /// of single-use shuffle instructions, build a generic model of the cumulative
32532 /// shuffle operation, and check for simpler instructions which implement this
32533 /// operation. We use this primarily for two purposes:
32535 /// 1) Collapse generic shuffles to specialized single instructions when
32536 /// equivalent. In most cases, this is just an encoding size win, but
32537 /// sometimes we will collapse multiple generic shuffles into a single
32538 /// special-purpose shuffle.
32539 /// 2) Look for sequences of shuffle instructions with 3 or more total
32540 /// instructions, and replace them with the slightly more expensive SSSE3
32541 /// PSHUFB instruction if available. We do this as the last combining step
32542 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
32543 /// a suitable short sequence of other instructions. The PSHUFB will either
32544 /// use a register or have to read from memory and so is slightly (but only
32545 /// slightly) more expensive than the other shuffle instructions.
32547 /// Because this is inherently a quadratic operation (for each shuffle in
32548 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
32549 /// This should never be an issue in practice as the shuffle lowering doesn't
32550 /// produce sequences of more than 8 instructions.
32552 /// FIXME: We will currently miss some cases where the redundant shuffling
32553 /// would simplify under the threshold for PSHUFB formation because of
32554 /// combine-ordering. To fix this, we should do the redundant instruction
32555 /// combining in this recursive walk.
32556 static SDValue combineX86ShufflesRecursively(
32557 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
32558 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
32559 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32560 const X86Subtarget &Subtarget) {
32561 // Bound the depth of our recursive combine because this is ultimately
32562 // quadratic in nature.
32563 const unsigned MaxRecursionDepth = 8;
32564 if (Depth > MaxRecursionDepth)
32565 return SDValue();
32567 // Directly rip through bitcasts to find the underlying operand.
32568 SDValue Op = SrcOps[SrcOpIndex];
32569 Op = peekThroughOneUseBitcasts(Op);
32571 MVT VT = Op.getSimpleValueType();
32572 if (!VT.isVector())
32573 return SDValue(); // Bail if we hit a non-vector.
32575 assert(Root.getSimpleValueType().isVector() &&
32576 "Shuffles operate on vector types!");
32577 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
32578 "Can only combine shuffles of the same vector register size.");
32580 // Extract target shuffle mask and resolve sentinels and inputs.
32581 SmallVector<int, 64> OpMask;
32582 SmallVector<SDValue, 2> OpInputs;
32583 if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, DAG))
32584 return SDValue();
32586 // Add the inputs to the Ops list, avoiding duplicates.
32587 SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
32589 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
32590 // Attempt to find an existing match.
32591 SDValue InputBC = peekThroughBitcasts(Input);
32592 for (int i = 0, e = Ops.size(); i < e; ++i)
32593 if (InputBC == peekThroughBitcasts(Ops[i]))
32594 return i;
32595 // Match failed - should we replace an existing Op?
32596 if (InsertionPoint >= 0) {
32597 Ops[InsertionPoint] = Input;
32598 return InsertionPoint;
32600 // Add to the end of the Ops list.
32601 Ops.push_back(Input);
32602 return Ops.size() - 1;
32605 SmallVector<int, 2> OpInputIdx;
32606 for (SDValue OpInput : OpInputs)
32607 OpInputIdx.push_back(AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
32609 assert(((RootMask.size() > OpMask.size() &&
32610 RootMask.size() % OpMask.size() == 0) ||
32611 (OpMask.size() > RootMask.size() &&
32612 OpMask.size() % RootMask.size() == 0) ||
32613 OpMask.size() == RootMask.size()) &&
32614 "The smaller number of elements must divide the larger.");
32616 // This function can be performance-critical, so we rely on the power-of-2
32617 // knowledge that we have about the mask sizes to replace div/rem ops with
32618 // bit-masks and shifts.
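// For example, i / RootRatio becomes i >> RootRatioLog2 and
// x % OpRatio becomes x & (OpRatio - 1) below.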
32619 assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
32620 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
32621 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
32622 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
32624 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
32625 unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
32626 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
32627 assert((RootRatio == 1 || OpRatio == 1) &&
32628 "Must not have a ratio for both incoming and op masks!");
32630 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
32631 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
32632 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
32633 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
32634 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
32636 SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);
32638 // Merge this shuffle operation's mask into our accumulated mask. Note that
32639 // this shuffle's mask will be the first applied to the input, followed by the
32640 // root mask to get us all the way to the root value arrangement. The reason
32641 // for this order is that we are recursing up the operation chain.
32642 for (unsigned i = 0; i < MaskWidth; ++i) {
32643 unsigned RootIdx = i >> RootRatioLog2;
32644 if (RootMask[RootIdx] < 0) {
32645 // This is a zero or undef lane; we're done.
32646 Mask[i] = RootMask[RootIdx];
32647 continue;
32650 unsigned RootMaskedIdx =
32651 RootRatio == 1
32652 ? RootMask[RootIdx]
32653 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
32655 // Just insert the scaled root mask value if it references an input other
32656 // than the SrcOp we're currently inserting.
32657 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
32658 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
32659 Mask[i] = RootMaskedIdx;
32660 continue;
32663 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
32664 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
32665 if (OpMask[OpIdx] < 0) {
32666 // The incoming lanes are zero or undef; it doesn't matter which ones we
32667 // are using.
32668 Mask[i] = OpMask[OpIdx];
32669 continue;
32672 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
32673 unsigned OpMaskedIdx =
32674 OpRatio == 1
32675 ? OpMask[OpIdx]
32676 : (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));
32678 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
32679 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
32680 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
32681 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
32683 Mask[i] = OpMaskedIdx;
32686 // Handle the all undef/zero cases early.
32687 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
32688 return DAG.getUNDEF(Root.getValueType());
32690 // TODO - should we handle the mixed zero/undef case as well? Just returning
32691 // a zero mask will lose information on undef elements possibly reducing
32692 // future combine possibilities.
32693 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
32694 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
32695 SDLoc(Root));
32697 // Remove unused/repeated shuffle source ops.
32698 resolveTargetShuffleInputsAndMask(Ops, Mask);
32699 assert(!Ops.empty() && "Shuffle with no inputs detected");
32701 HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
32703 // Update the list of shuffle nodes that have been combined so far.
32704 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
32705 SrcNodes.end());
32706 CombinedNodes.push_back(Op.getNode());
32708 // See if we can recurse into each shuffle source op (if it's a target
32709 // shuffle). The source op should generally only be combined if it either has
32710 // a single use (i.e. the current Op) or all its users have already been combined;
32711 // if not, we can still combine but should prevent generation of variable
32712 // shuffles to avoid constant pool bloat.
32713 // Don't recurse if we already have more source ops than we can combine in
32714 // the remaining recursion depth.
32715 if (Ops.size() < (MaxRecursionDepth - Depth)) {
32716 for (int i = 0, e = Ops.size(); i < e; ++i) {
32717 bool AllowVar = false;
32718 if (Ops[i].getNode()->hasOneUse() ||
32719 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
32720 AllowVar = AllowVariableMask;
32721 if (SDValue Res = combineX86ShufflesRecursively(
32722 Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
32723 AllowVar, DAG, Subtarget))
32724 return Res;
32728 // Attempt to constant fold all of the constant source ops.
32729 if (SDValue Cst = combineX86ShufflesConstants(
32730 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
32731 return Cst;
32733 // We can only combine unary and binary shuffle mask cases.
32734 if (Ops.size() > 2)
32735 return SDValue();
32737 // Minor canonicalization of the accumulated shuffle mask to make it easier
32738 // to match below. All this does is detect masks with sequential pairs of
32739 // elements, and shrink them to the half-width mask. It does this in a loop
32740 // so it will reduce the size of the mask to the minimal width mask which
32741 // performs an equivalent shuffle.
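// For example, a v4i32 mask {0, 1, 4, 5} widens to the v2i64 mask {0, 2}.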
32742 SmallVector<int, 64> WidenedMask;
32743 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
32744 Mask = std::move(WidenedMask);
32747 // Canonicalization of binary shuffle masks to improve pattern matching by
32748 // commuting the inputs.
32749 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
32750 ShuffleVectorSDNode::commuteMask(Mask);
32751 std::swap(Ops[0], Ops[1]);
32754 // Finally, try to combine into a single shuffle instruction.
32755 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
32756 AllowVariableMask, DAG, Subtarget);
32759 /// Helper entry wrapper to combineX86ShufflesRecursively.
32760 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
32761 const X86Subtarget &Subtarget) {
32762 return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
32763 /*HasVarMask*/ false,
32764 /*AllowVarMask*/ true, DAG, Subtarget);
32767 /// Get the PSHUF-style mask from PSHUF node.
32769 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
32770 /// PSHUF-style masks that can be reused with such instructions.
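/// For example (a sketch): for a v8i16 PSHUFHW whose full mask is
/// {0,1,2,3, 4,5,7,6}, the returned 4-element mask is {0,1,3,2} - the high
/// word indices rebased to 0..3.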
32771 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
32772 MVT VT = N.getSimpleValueType();
32773 SmallVector<int, 4> Mask;
32774 SmallVector<SDValue, 2> Ops;
32775 bool IsUnary;
32776 bool HaveMask =
32777 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
32778 (void)HaveMask;
32779 assert(HaveMask);
32781 // If we have more than 128-bits, only the low 128-bits of shuffle mask
32782 // matter. Check that the upper masks are repeats and remove them.
32783 if (VT.getSizeInBits() > 128) {
32784 int LaneElts = 128 / VT.getScalarSizeInBits();
32785 #ifndef NDEBUG
32786 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
32787 for (int j = 0; j < LaneElts; ++j)
32788 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
32789 "Mask doesn't repeat in high 128-bit lanes!");
32790 #endif
32791 Mask.resize(LaneElts);
32794 switch (N.getOpcode()) {
32795 case X86ISD::PSHUFD:
32796 return Mask;
32797 case X86ISD::PSHUFLW:
32798 Mask.resize(4);
32799 return Mask;
32800 case X86ISD::PSHUFHW:
32801 Mask.erase(Mask.begin(), Mask.begin() + 4);
32802 for (int &M : Mask)
32803 M -= 4;
32804 return Mask;
32805 default:
32806 llvm_unreachable("No valid shuffle instruction found!");
32810 /// Search for a combinable shuffle across a chain ending in pshufd.
32812 /// We walk up the chain and look for a combinable shuffle, skipping over
32813 /// shuffles that we could hoist this shuffle's transformation past without
32814 /// altering anything.
32815 static SDValue
32816 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
32817 SelectionDAG &DAG) {
32818 assert(N.getOpcode() == X86ISD::PSHUFD &&
32819 "Called with something other than an x86 128-bit half shuffle!");
32820 SDLoc DL(N);
32822 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
32823 // of the shuffles in the chain so that we can form a fresh chain to replace
32824 // this one.
32825 SmallVector<SDValue, 8> Chain;
32826 SDValue V = N.getOperand(0);
32827 for (; V.hasOneUse(); V = V.getOperand(0)) {
32828 switch (V.getOpcode()) {
32829 default:
32830 return SDValue(); // Nothing combined!
32832 case ISD::BITCAST:
32833 // Skip bitcasts as we always know the type for the target specific
32834 // instructions.
32835 continue;
32837 case X86ISD::PSHUFD:
32838 // Found another dword shuffle.
32839 break;
32841 case X86ISD::PSHUFLW:
32842 // Check that the low words (being shuffled) are the identity in the
32843 // dword shuffle, and the high words are self-contained.
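// e.g. (an illustrative sketch): a PSHUFD mask of {0,1,3,2} leaves dwords 0
// and 1 (which hold the low words) alone and only permutes dwords 2 and 3,
// so the PSHUFD can be hoisted past the PSHUFLW.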
32844 if (Mask[0] != 0 || Mask[1] != 1 ||
32845 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
32846 return SDValue();
32848 Chain.push_back(V);
32849 continue;
32851 case X86ISD::PSHUFHW:
32852 // Check that the high words (being shuffled) are the identity in the
32853 // dword shuffle, and the low words are self-contained.
32854 if (Mask[2] != 2 || Mask[3] != 3 ||
32855 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
32856 return SDValue();
32858 Chain.push_back(V);
32859 continue;
32861 case X86ISD::UNPCKL:
32862 case X86ISD::UNPCKH:
32863 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
32864 // shuffle into a preceding word shuffle.
32865 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
32866 V.getSimpleValueType().getVectorElementType() != MVT::i16)
32867 return SDValue();
32869 // Search for a half-shuffle which we can combine with.
32870 unsigned CombineOp =
32871 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
32872 if (V.getOperand(0) != V.getOperand(1) ||
32873 !V->isOnlyUserOf(V.getOperand(0).getNode()))
32874 return SDValue();
32875 Chain.push_back(V);
32876 V = V.getOperand(0);
32877 do {
32878 switch (V.getOpcode()) {
32879 default:
32880 return SDValue(); // Nothing to combine.
32882 case X86ISD::PSHUFLW:
32883 case X86ISD::PSHUFHW:
32884 if (V.getOpcode() == CombineOp)
32885 break;
32887 Chain.push_back(V);
32889 LLVM_FALLTHROUGH;
32890 case ISD::BITCAST:
32891 V = V.getOperand(0);
32892 continue;
32894 break;
32895 } while (V.hasOneUse());
32896 break;
32898 // Break out of the loop if we break out of the switch.
32899 break;
32902 if (!V.hasOneUse())
32903 // We fell out of the loop without finding a viable combining instruction.
32904 return SDValue();
32906 // Merge this node's mask and our incoming mask.
32907 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
32908 for (int &M : Mask)
32909 M = VMask[M];
32910 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
32911 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
32913 // Rebuild the chain around this new shuffle.
32914 while (!Chain.empty()) {
32915 SDValue W = Chain.pop_back_val();
32917 if (V.getValueType() != W.getOperand(0).getValueType())
32918 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
32920 switch (W.getOpcode()) {
32921 default:
32922 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
32924 case X86ISD::UNPCKL:
32925 case X86ISD::UNPCKH:
32926 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
32927 break;
32929 case X86ISD::PSHUFD:
32930 case X86ISD::PSHUFLW:
32931 case X86ISD::PSHUFHW:
32932 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
32933 break;
32936 if (V.getValueType() != N.getValueType())
32937 V = DAG.getBitcast(N.getValueType(), V);
32939 // Return the new chain to replace N.
32940 return V;
32943 /// Try to combine x86 target specific shuffles.
32944 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
32945 TargetLowering::DAGCombinerInfo &DCI,
32946 const X86Subtarget &Subtarget) {
32947 SDLoc DL(N);
32948 MVT VT = N.getSimpleValueType();
32949 SmallVector<int, 4> Mask;
32950 unsigned Opcode = N.getOpcode();
32952 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
32953 // single instruction.
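// For example (a sketch with v2f64 operands): unpcklpd(haddpd(a,b),
// haddpd(c,d)) computes the same result as haddpd(a,c), and the unpckhpd
// form corresponds to haddpd(b,d).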
32954 if (VT.getScalarSizeInBits() == 64 &&
32955 (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
32956 Opcode == X86ISD::UNPCKL)) {
32957 auto BC0 = peekThroughBitcasts(N.getOperand(0));
32958 auto BC1 = peekThroughBitcasts(N.getOperand(1));
32959 EVT VT0 = BC0.getValueType();
32960 EVT VT1 = BC1.getValueType();
32961 unsigned Opcode0 = BC0.getOpcode();
32962 unsigned Opcode1 = BC1.getOpcode();
32963 if (Opcode0 == Opcode1 && VT0 == VT1 &&
32964 (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
32965 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
32966 Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
32967 SDValue Lo, Hi;
32968 if (Opcode == X86ISD::MOVSD) {
32969 Lo = BC1.getOperand(0);
32970 Hi = BC0.getOperand(1);
32971 } else {
32972 Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
32973 Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
32975 SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
32976 return DAG.getBitcast(VT, Horiz);
32980 switch (Opcode) {
32981 case X86ISD::VBROADCAST: {
32982 SDValue Src = N.getOperand(0);
32983 SDValue BC = peekThroughBitcasts(Src);
32984 EVT SrcVT = Src.getValueType();
32985 EVT BCVT = BC.getValueType();
32987 // If broadcasting from another shuffle, attempt to simplify it.
32988 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
32989 if (isTargetShuffle(BC.getOpcode()) &&
32990 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
32991 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
32992 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
32993 SM_SentinelUndef);
32994 for (unsigned i = 0; i != Scale; ++i)
32995 DemandedMask[i] = i;
32996 if (SDValue Res = combineX86ShufflesRecursively(
32997 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 1,
32998 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
32999 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33000 DAG.getBitcast(SrcVT, Res));
33003 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
33004 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
33005 if (Src.getOpcode() == ISD::BITCAST &&
33006 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
33007 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
33008 VT.getVectorNumElements());
33009 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
33012 // Reduce broadcast source vector to lowest 128-bits.
33013 if (SrcVT.getSizeInBits() > 128)
33014 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33015 extract128BitVector(Src, 0, DAG, DL));
33017 // broadcast(scalar_to_vector(x)) -> broadcast(x).
33018 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
33019 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
33021 // Share broadcast with the longest vector and extract low subvector (free).
33022 for (SDNode *User : Src->uses())
33023 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
33024 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
33025 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
33026 VT.getSizeInBits());
33029 return SDValue();
33031 case X86ISD::BLENDI: {
33032 SDValue N0 = N.getOperand(0);
33033 SDValue N1 = N.getOperand(1);
33035 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
33036 // TODO: Handle MVT::v16i16 repeated blend mask.
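// e.g. (an illustrative sketch): a v4i64 blend of bitcast v8i32 inputs with
// immediate 0b0101 can instead be a v8i32 blend with immediate 0b00110011,
// each mask bit repeated once per 32-bit half.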
33037 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
33038 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
33039 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
33040 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
33041 SrcVT.getScalarSizeInBits() >= 32) {
33042 unsigned Mask = N.getConstantOperandVal(2);
33043 unsigned Size = VT.getVectorNumElements();
33044 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
33045 unsigned ScaleMask = scaleVectorShuffleBlendMask(Mask, Size, Scale);
33046 return DAG.getBitcast(
33047 VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
33048 N1.getOperand(0),
33049 DAG.getConstant(ScaleMask, DL, MVT::i8)));
33052 return SDValue();
33054 case X86ISD::PSHUFD:
33055 case X86ISD::PSHUFLW:
33056 case X86ISD::PSHUFHW:
33057 Mask = getPSHUFShuffleMask(N);
33058 assert(Mask.size() == 4);
33059 break;
33060 case X86ISD::MOVSD:
33061 case X86ISD::MOVSS: {
33062 SDValue N0 = N.getOperand(0);
33063 SDValue N1 = N.getOperand(1);
33065 // Canonicalize scalar FPOps:
33066 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
33067 // If commutable, allow OP(N1[0], N0[0]).
33068 unsigned Opcode1 = N1.getOpcode();
33069 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
33070 Opcode1 == ISD::FDIV) {
33071 SDValue N10 = N1.getOperand(0);
33072 SDValue N11 = N1.getOperand(1);
33073 if (N10 == N0 ||
33074 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
33075 if (N10 != N0)
33076 std::swap(N10, N11);
33077 MVT SVT = VT.getVectorElementType();
33078 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
33079 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
33080 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
33081 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
33082 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
33083 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
33087 return SDValue();
33089 case X86ISD::INSERTPS: {
33090 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
33091 SDValue Op0 = N.getOperand(0);
33092 SDValue Op1 = N.getOperand(1);
33093 SDValue Op2 = N.getOperand(2);
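// The INSERTPS immediate encodes (as decoded below): bits [7:6] = source
// element index, bits [5:4] = destination element index, bits [3:0] = zero
// mask.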
33094 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
33095 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
33096 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
33097 unsigned ZeroMask = InsertPSMask & 0xF;
33099 // If we zero out all elements from Op0 then we don't need to reference it.
33100 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
33101 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
33102 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33104 // If we zero out the element from Op1 then we don't need to reference it.
33105 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
33106 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
33107 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33109 // Attempt to merge insertps Op1 with an inner target shuffle node.
33110 SmallVector<int, 8> TargetMask1;
33111 SmallVector<SDValue, 2> Ops1;
33112 if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
33113 int M = TargetMask1[SrcIdx];
33114 if (isUndefOrZero(M)) {
33115 // Zero/UNDEF insertion - zero out element and remove dependency.
33116 InsertPSMask |= (1u << DstIdx);
33117 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
33118 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33120 // Update insertps mask srcidx and reference the source input directly.
33121 assert(0 <= M && M < 8 && "Shuffle index out of range");
33122 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
33123 Op1 = Ops1[M < 4 ? 0 : 1];
33124 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
33125 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33128 // Attempt to merge insertps Op0 with an inner target shuffle node.
33129 SmallVector<int, 8> TargetMask0;
33130 SmallVector<SDValue, 2> Ops0;
33131 if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
33132 return SDValue();
33134 bool Updated = false;
33135 bool UseInput00 = false;
33136 bool UseInput01 = false;
33137 for (int i = 0; i != 4; ++i) {
33138 int M = TargetMask0[i];
33139 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
33140 // No change if element is already zero or the inserted element.
33141 continue;
33142 } else if (isUndefOrZero(M)) {
33143 // If the target mask is undef/zero then we must zero the element.
33144 InsertPSMask |= (1u << i);
33145 Updated = true;
33146 continue;
33149 // The input vector element must be inline.
33150 if (M != i && M != (i + 4))
33151 return SDValue();
33153 // Determine which inputs of the target shuffle we're using.
33154 UseInput00 |= (0 <= M && M < 4);
33155 UseInput01 |= (4 <= M);
33158 // If we're not using both inputs of the target shuffle then use the
33159 // referenced input directly.
33160 if (UseInput00 && !UseInput01) {
33161 Updated = true;
33162 Op0 = Ops0[0];
33163 } else if (!UseInput00 && UseInput01) {
33164 Updated = true;
33165 Op0 = Ops0[1];
33168 if (Updated)
33169 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
33170 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33172 return SDValue();
33174 default:
33175 return SDValue();
33178 // Nuke no-op shuffles that show up after combining.
33179 if (isNoopShuffleMask(Mask))
33180 return N.getOperand(0);
33182 // Look for simplifications involving one or two shuffle instructions.
33183 SDValue V = N.getOperand(0);
33184 switch (N.getOpcode()) {
33185 default:
33186 break;
33187 case X86ISD::PSHUFLW:
33188 case X86ISD::PSHUFHW:
33189 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
33191 // See if this reduces to a PSHUFD which is no more expensive and can
33192 // combine with more operations. Note that it has to at least flip the
33193 // dwords as otherwise it would have been removed as a no-op.
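// e.g. (an illustrative sketch): pshuflw with mask {2,3,0,1} on v8i16 swaps
// the two low dwords, which is the same as pshufd {1,0,2,3} on the v4i32
// bitcast.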
33194 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
33195 int DMask[] = {0, 1, 2, 3};
33196 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
33197 DMask[DOffset + 0] = DOffset + 1;
33198 DMask[DOffset + 1] = DOffset + 0;
33199 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
33200 V = DAG.getBitcast(DVT, V);
33201 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
33202 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
33203 return DAG.getBitcast(VT, V);
33206 // Look for shuffle patterns which can be implemented as a single unpack.
33207 // FIXME: This doesn't handle the location of the PSHUFD generically, and
33208 // only works when we have a PSHUFD followed by two half-shuffles.
33209 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
33210 (V.getOpcode() == X86ISD::PSHUFLW ||
33211 V.getOpcode() == X86ISD::PSHUFHW) &&
33212 V.getOpcode() != N.getOpcode() &&
33213 V.hasOneUse()) {
33214 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
33215 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
33216 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
33217 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
33218 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
33219 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
33220 int WordMask[8];
33221 for (int i = 0; i < 4; ++i) {
33222 WordMask[i + NOffset] = Mask[i] + NOffset;
33223 WordMask[i + VOffset] = VMask[i] + VOffset;
33225 // Map the word mask through the DWord mask.
33226 int MappedMask[8];
33227 for (int i = 0; i < 8; ++i)
33228 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
33229 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
33230 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
33231 // We can replace all three shuffles with an unpack.
33232 V = DAG.getBitcast(VT, D.getOperand(0));
33233 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
33234 : X86ISD::UNPCKH,
33235 DL, VT, V, V);
33240 break;
33242 case X86ISD::PSHUFD:
33243 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
33244 return NewN;
33246 break;
33249 return SDValue();
33252 /// Checks if the shuffle mask takes subsequent elements
33253 /// alternately from two vectors.
33254 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
33255 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
33257 int ParitySrc[2] = {-1, -1};
33258 unsigned Size = Mask.size();
33259 for (unsigned i = 0; i != Size; ++i) {
33260 int M = Mask[i];
33261 if (M < 0)
33262 continue;
33264 // Make sure we are using the matching element from the input.
33265 if ((M % Size) != i)
33266 return false;
33268 // Make sure we use the same input for all elements of the same parity.
33269 int Src = M / Size;
33270 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
33271 return false;
33272 ParitySrc[i % 2] = Src;
33275 // Make sure each input is used.
33276 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
33277 return false;
33279 Op0Even = ParitySrc[0] == 0;
33280 return true;
33283 /// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
33284 /// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
33285 /// are written to the parameters \p Opnd0 and \p Opnd1.
33287 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector
33288 /// shuffle nodes so it is easier to match generically. We also insert dummy
33289 /// vector shuffle nodes for the operands which explicitly discard the lanes
33290 /// unused by this operation, so that the fact that they're unused can flow
33291 /// through the rest of the combiner.
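/// For example (a sketch): shuffle(fsub(a,b), fadd(a,b), <0,5,2,7>) takes the
/// FSUB lanes in the even positions and the FADD lanes in the odd positions,
/// which is exactly ADDSUB(a,b); the reversed parity would be SUBADD.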
33292 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
33293 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
33294 bool &IsSubAdd) {
33296 EVT VT = N->getValueType(0);
33297 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33298 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
33299 !VT.getSimpleVT().isFloatingPoint())
33300 return false;
33302 // We only handle target-independent shuffles.
33303 // FIXME: It would be easy and harmless to use the target shuffle mask
33304 // extraction tool to support more.
33305 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
33306 return false;
33308 SDValue V1 = N->getOperand(0);
33309 SDValue V2 = N->getOperand(1);
33311 // Make sure we have an FADD and an FSUB.
33312 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
33313 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
33314 V1.getOpcode() == V2.getOpcode())
33315 return false;
33317 // If there are other uses of these operations we can't fold them.
33318 if (!V1->hasOneUse() || !V2->hasOneUse())
33319 return false;
33321 // Ensure that both operations have the same operands. Note that we can
33322 // commute the FADD operands.
33323 SDValue LHS, RHS;
33324 if (V1.getOpcode() == ISD::FSUB) {
33325 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
33326 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
33327 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
33328 return false;
33329 } else {
33330 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
33331 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
33332 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
33333 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
33334 return false;
33337 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33338 bool Op0Even;
33339 if (!isAddSubOrSubAddMask(Mask, Op0Even))
33340 return false;
33342 // It's a subadd if the vector in the even parity is an FADD.
33343 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
33344 : V2->getOpcode() == ISD::FADD;
33346 Opnd0 = LHS;
33347 Opnd1 = RHS;
33348 return true;
33351 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
33352 static SDValue combineShuffleToFMAddSub(SDNode *N,
33353 const X86Subtarget &Subtarget,
33354 SelectionDAG &DAG) {
33355 // We only handle target-independent shuffles.
33356 // FIXME: It would be easy and harmless to use the target shuffle mask
33357 // extraction tool to support more.
33358 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
33359 return SDValue();
33361 MVT VT = N->getSimpleValueType(0);
33362 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33363 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
33364 return SDValue();
33366 // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
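// e.g. (an illustrative sketch): shuffle(fmsub(a,b,c), fma(a,b,c), <0,5,2,7>)
// takes the FMSUB lanes in even positions and the FMA lanes in odd positions,
// i.e. FMADDSUB(a,b,c).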
33367 SDValue Op0 = N->getOperand(0);
33368 SDValue Op1 = N->getOperand(1);
33369 SDValue FMAdd = Op0, FMSub = Op1;
33370 if (FMSub.getOpcode() != X86ISD::FMSUB)
33371 std::swap(FMAdd, FMSub);
33373 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
33374 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
33375 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
33376 FMAdd.getOperand(2) != FMSub.getOperand(2))
33377 return SDValue();
33379 // Check for correct shuffle mask.
33380 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33381 bool Op0Even;
33382 if (!isAddSubOrSubAddMask(Mask, Op0Even))
33383 return SDValue();
33385 // FMAddSub takes zeroth operand from FMSub node.
33386 SDLoc DL(N);
33387 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
33388 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
33389 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
33390 FMAdd.getOperand(2));
33393 /// Try to combine a shuffle into a target-specific add-sub or
33394 /// mul-add-sub node.
33395 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
33396 const X86Subtarget &Subtarget,
33397 SelectionDAG &DAG) {
33398 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
33399 return V;
33401 SDValue Opnd0, Opnd1;
33402 bool IsSubAdd;
33403 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
33404 return SDValue();
33406 MVT VT = N->getSimpleValueType(0);
33407 SDLoc DL(N);
33409 // Try to generate X86ISD::FMADDSUB node here.
33410 SDValue Opnd2;
33411 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
33412 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
33413 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
33416 if (IsSubAdd)
33417 return SDValue();
33419 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
33420 // the ADDSUB idiom has been successfully recognized. There are no known
33421 // X86 targets with 512-bit ADDSUB instructions!
33422 if (VT.is512BitVector())
33423 return SDValue();
33425 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
33428 // We are looking for a shuffle where both sources are concatenated with undef
33429 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
33430 // if we can express this as a single-source shuffle, that's preferable.
33431 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
33432 const X86Subtarget &Subtarget) {
33433 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
33434 return SDValue();
33436 EVT VT = N->getValueType(0);
33438 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
33439 if (!VT.is128BitVector() && !VT.is256BitVector())
33440 return SDValue();
33442 if (VT.getVectorElementType() != MVT::i32 &&
33443 VT.getVectorElementType() != MVT::i64 &&
33444 VT.getVectorElementType() != MVT::f32 &&
33445 VT.getVectorElementType() != MVT::f64)
33446 return SDValue();
33448 SDValue N0 = N->getOperand(0);
33449 SDValue N1 = N->getOperand(1);
33451 // Check that both sources are concats with undef.
33452 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
33453 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
33454 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
33455 !N1.getOperand(1).isUndef())
33456 return SDValue();
33458 // Construct the new shuffle mask. Elements from the first source retain their
33459 // index, but elements from the second source no longer need to skip an undef.
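// e.g. (a sketch with v8i32 and 4-element t1/t2): an original index of 9
// (element 1 of t2, seen through concat(t2, undef)) becomes 5, i.e. element
// 1 of t2 within the new concat(t1, t2).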
33460 SmallVector<int, 8> Mask;
33461 int NumElts = VT.getVectorNumElements();
33463 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33464 for (int Elt : SVOp->getMask())
33465 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
33467 SDLoc DL(N);
33468 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
33469 N1.getOperand(0));
33470 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
33473 /// Eliminate a redundant shuffle of a horizontal math op.
33474 static SDValue foldShuffleOfHorizOp(SDNode *N) {
33475 unsigned Opcode = N->getOpcode();
33476 if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
33477 if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
33478 return SDValue();
33480 // For a broadcast, peek through an extract element of index 0 to find the
33481 // horizontal op: broadcast (ext_vec_elt HOp, 0)
33482 EVT VT = N->getValueType(0);
33483 if (Opcode == X86ISD::VBROADCAST) {
33484 SDValue SrcOp = N->getOperand(0);
33485 if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
33486 SrcOp.getValueType() == MVT::f64 &&
33487 SrcOp.getOperand(0).getValueType() == VT &&
33488 isNullConstant(SrcOp.getOperand(1)))
33489 N = SrcOp.getNode();
33492 SDValue HOp = N->getOperand(0);
33493 if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
33494 HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
33495 return SDValue();
33497 // 128-bit horizontal math instructions are defined to operate on adjacent
33498 // lanes of each operand as:
33499 // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
33500 // ...similarly for v2f64 and v8i16.
33501 if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
33502 HOp.getOperand(0) != HOp.getOperand(1))
33503 return SDValue();
33505 // When the operands of a horizontal math op are identical, the low half of
33506 // the result is the same as the high half. If a target shuffle is also
33507 // replicating low and high halves, we don't need the shuffle.
33508 if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
33509 if (HOp.getScalarValueSizeInBits() == 64) {
33510 // movddup (hadd X, X) --> hadd X, X
33511 // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
33512 assert((HOp.getValueType() == MVT::v2f64 ||
33513 HOp.getValueType() == MVT::v4f64) && HOp.getValueType() == VT &&
33514 "Unexpected type for h-op");
33515 return HOp;
33517 return SDValue();
33520 // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
33521 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33522 // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
33523 // but this should be tied to whatever horizontal op matching and shuffle
33524 // canonicalization are producing.
33525 if (HOp.getValueSizeInBits() == 128 &&
33526 (isTargetShuffleEquivalent(Mask, {0, 0}) ||
33527 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
33528 isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
33529 return HOp;
33531 if (HOp.getValueSizeInBits() == 256 &&
33532 (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
33533 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
33534 isTargetShuffleEquivalent(
33535 Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
33536 return HOp;
33538 return SDValue();
33541 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
33542 /// low half of each source vector and does not set any high half elements in
33543 /// the destination vector, narrow the shuffle to half its original size.
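/// For example (a sketch): a v8f32 shuffle with mask <0,1,8,9,u,u,u,u> only
/// reads the low xmm halves and leaves the high half undef, so it can become
/// a v4f32 shuffle of those halves with mask <0,1,4,5>.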
33544 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
33545 if (!Shuf->getValueType(0).isSimple())
33546 return SDValue();
33547 MVT VT = Shuf->getSimpleValueType(0);
33548 if (!VT.is256BitVector() && !VT.is512BitVector())
33549 return SDValue();
33551 // See if we can ignore all of the high elements of the shuffle.
33552 ArrayRef<int> Mask = Shuf->getMask();
33553 if (!isUndefUpperHalf(Mask))
33554 return SDValue();
33556 // Check if the shuffle mask accesses only the low half of each input vector
33557 // (half-index output is 0 or 2).
33558 int HalfIdx1, HalfIdx2;
33559 SmallVector<int, 8> HalfMask(Mask.size() / 2);
33560 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
33561 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
33562 return SDValue();
33564 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
33565 // The trick is knowing that all of the insert/extract are actually free
33566 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
33567 // of narrow inputs into a narrow output, and that is always cheaper than
33568 // the wide shuffle that we started with.
33569 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
33570 Shuf->getOperand(1), HalfMask, HalfIdx1,
33571 HalfIdx2, false, DAG);
33574 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
33575 TargetLowering::DAGCombinerInfo &DCI,
33576 const X86Subtarget &Subtarget) {
33577 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
33578 if (SDValue V = narrowShuffle(Shuf, DAG))
33579 return V;
33581 // If we have legalized the vector types, look for blends of FADD and FSUB
33582 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
33583 SDLoc dl(N);
33584 EVT VT = N->getValueType(0);
33585 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33586 if (TLI.isTypeLegal(VT)) {
33587 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
33588 return AddSub;
33590 if (SDValue HAddSub = foldShuffleOfHorizOp(N))
33591 return HAddSub;
33594 // During Type Legalization, when promoting illegal vector types,
33595 // the backend might introduce new shuffle dag nodes and bitcasts.
33597 // This code performs the following transformation:
33598 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
33599 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
33601 // We do this only if both the bitcast and the BINOP dag nodes have
33602 // one use. Also, perform this transformation only if the new binary
33603 // operation is legal. This is to avoid introducing dag nodes that
33604 // potentially need to be further expanded (or custom lowered) into a
33605 // less optimal sequence of dag nodes.
33606 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
33607 N->getOpcode() == ISD::VECTOR_SHUFFLE &&
33608 N->getOperand(0).getOpcode() == ISD::BITCAST &&
33609 N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
33610 SDValue N0 = N->getOperand(0);
33611 SDValue N1 = N->getOperand(1);
33613 SDValue BC0 = N0.getOperand(0);
33614 EVT SVT = BC0.getValueType();
33615 unsigned Opcode = BC0.getOpcode();
33616 unsigned NumElts = VT.getVectorNumElements();
33618 if (BC0.hasOneUse() && SVT.isVector() &&
33619 SVT.getVectorNumElements() * 2 == NumElts &&
33620 TLI.isOperationLegal(Opcode, VT)) {
33621 bool CanFold = false;
33622 switch (Opcode) {
33623 default : break;
33624 case ISD::ADD:
33625 case ISD::SUB:
33626 case ISD::MUL:
33627 // isOperationLegal lies for integer ops on floating point types.
33628 CanFold = VT.isInteger();
33629 break;
33630 case ISD::FADD:
33631 case ISD::FSUB:
33632 case ISD::FMUL:
33633 // isOperationLegal lies for floating point ops on integer types.
33634 CanFold = VT.isFloatingPoint();
33635 break;
33638 unsigned SVTNumElts = SVT.getVectorNumElements();
33639 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33640 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
33641 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
33642 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
33643 CanFold = SVOp->getMaskElt(i) < 0;
33645 if (CanFold) {
33646 SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
33647 SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
33648 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
33649 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
33654 // Attempt to combine into a vector load/broadcast.
33655 if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
33656 return LD;
33658 // For AVX2, we sometimes want to combine
33659 // (vector_shuffle <mask> (concat_vectors t1, undef)
33660 // (concat_vectors t2, undef))
33661 // Into:
33662 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
33663 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
33664 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
33665 return ShufConcat;
33667 if (isTargetShuffle(N->getOpcode())) {
33668 SDValue Op(N, 0);
33669 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
33670 return Shuffle;
33672 // Try recursively combining arbitrary sequences of x86 shuffle
33673 // instructions into higher-order shuffles. We do this after combining
33674 // specific PSHUF instruction sequences into their minimal form so that we
33675 // can evaluate how many specialized shuffle instructions are involved in
33676 // a particular chain.
33677 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
33678 return Res;
33680 // Simplify source operands based on shuffle mask.
33681 // TODO - merge this into combineX86ShufflesRecursively.
33682 APInt KnownUndef, KnownZero;
33683 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
33684 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
33685 return SDValue(N, 0);
33688 // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
33689 // in the upper 64 bits.
33690 // TODO: Can we generalize this using computeKnownBits?
33691 if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
33692 (VT == MVT::v2f64 || VT == MVT::v2i64) &&
33693 N->getOperand(0).getOpcode() == ISD::BITCAST &&
33694 (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
33695 N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
33696 SDValue In = N->getOperand(0).getOperand(0);
33697 switch (In.getOpcode()) {
33698 default:
33699 break;
33700 case X86ISD::CVTP2SI: case X86ISD::CVTP2UI:
33701 case X86ISD::MCVTP2SI: case X86ISD::MCVTP2UI:
33702 case X86ISD::CVTTP2SI: case X86ISD::CVTTP2UI:
33703 case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
33704 case X86ISD::CVTSI2P: case X86ISD::CVTUI2P:
33705 case X86ISD::MCVTSI2P: case X86ISD::MCVTUI2P:
33706 case X86ISD::VFPROUND: case X86ISD::VMFPROUND:
33707 if (In.getOperand(0).getValueType() == MVT::v2f64 ||
33708 In.getOperand(0).getValueType() == MVT::v2i64)
33709 return N->getOperand(0); // return the bitcast
33710 break;
33714 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
33715 // insert into a zero vector. This helps get VZEXT_MOVL closer to
33716 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
33717 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
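// i.e. (sketch of the fold below):
// vzext_movl (insert_subvector undef, X, 0)
// --> insert_subvector (zero vector), (vzext_movl X), 0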
33718 if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
33719 N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
33720 N->getOperand(0).hasOneUse() &&
33721 N->getOperand(0).getOperand(0).isUndef() &&
33722 isNullConstant(N->getOperand(0).getOperand(2))) {
33723 SDValue In = N->getOperand(0).getOperand(1);
33724 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
33725 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
33726 getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
33727 Movl, N->getOperand(0).getOperand(2));
33730 // If this is a vzmovl of a full vector load, replace it with a vzload, unless
33731 // the load is volatile.
33732 if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
33733 ISD::isNormalLoad(N->getOperand(0).getNode())) {
33734 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
33735 if (!LN->isVolatile()) {
33736 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
33737 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
33738 SDValue VZLoad =
33739 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33740 VT.getVectorElementType(),
33741 LN->getPointerInfo(),
33742 LN->getAlignment(),
33743 MachineMemOperand::MOLoad);
33744 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
33745 return VZLoad;
33750 // Look for a truncating shuffle to v2i32 of a PMULUDQ where one of the
33751 // operands is an extend from v2i32 to v2i64. Turn it into a pmulld.
33752 // FIXME: This can probably go away once we default to widening legalization.
33753 if (Subtarget.hasSSE41() && VT == MVT::v4i32 &&
33754 N->getOpcode() == ISD::VECTOR_SHUFFLE &&
33755 N->getOperand(0).getOpcode() == ISD::BITCAST &&
33756 N->getOperand(0).getOperand(0).getOpcode() == X86ISD::PMULUDQ) {
33757 SDValue BC = N->getOperand(0);
33758 SDValue MULUDQ = BC.getOperand(0);
33759 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33760 ArrayRef<int> Mask = SVOp->getMask();
33761 if (BC.hasOneUse() && MULUDQ.hasOneUse() &&
33762 Mask[0] == 0 && Mask[1] == 2 && Mask[2] == -1 && Mask[3] == -1) {
33763 SDValue Op0 = MULUDQ.getOperand(0);
33764 SDValue Op1 = MULUDQ.getOperand(1);
33765 if (Op0.getOpcode() == ISD::BITCAST &&
33766 Op0.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
33767 Op0.getOperand(0).getValueType() == MVT::v4i32) {
33768 ShuffleVectorSDNode *SVOp0 =
33769 cast<ShuffleVectorSDNode>(Op0.getOperand(0));
33770 ArrayRef<int> Mask2 = SVOp0->getMask();
33771 if (Mask2[0] == 0 && Mask2[1] == -1 &&
33772 Mask2[2] == 1 && Mask2[3] == -1) {
33773 Op0 = SVOp0->getOperand(0);
33774 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
33775 Op1 = DAG.getVectorShuffle(MVT::v4i32, dl, Op1, Op1, Mask);
33776 return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
33779 if (Op1.getOpcode() == ISD::BITCAST &&
33780 Op1.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
33781 Op1.getOperand(0).getValueType() == MVT::v4i32) {
33782 ShuffleVectorSDNode *SVOp1 =
33783 cast<ShuffleVectorSDNode>(Op1.getOperand(0));
33784 ArrayRef<int> Mask2 = SVOp1->getMask();
33785 if (Mask2[0] == 0 && Mask2[1] == -1 &&
33786 Mask2[2] == 1 && Mask2[3] == -1) {
33787 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
33788 Op0 = DAG.getVectorShuffle(MVT::v4i32, dl, Op0, Op0, Mask);
33789 Op1 = SVOp1->getOperand(0);
33790 return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
33796 return SDValue();
33799 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
33800 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
33801 TargetLoweringOpt &TLO, unsigned Depth) const {
33802 int NumElts = DemandedElts.getBitWidth();
33803 unsigned Opc = Op.getOpcode();
33804 EVT VT = Op.getValueType();
33806 // Handle special case opcodes.
33807 switch (Opc) {
33808 case X86ISD::PMULDQ:
33809 case X86ISD::PMULUDQ: {
33810 APInt LHSUndef, LHSZero;
33811 APInt RHSUndef, RHSZero;
33812 SDValue LHS = Op.getOperand(0);
33813 SDValue RHS = Op.getOperand(1);
33814 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
33815 Depth + 1))
33816 return true;
33817 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
33818 Depth + 1))
33819 return true;
33820 // Multiply by zero.
33821 KnownZero = LHSZero | RHSZero;
33822 break;
33824 case X86ISD::VSHL:
33825 case X86ISD::VSRL:
33826 case X86ISD::VSRA: {
33827 // We only need the bottom 64-bits of the (128-bit) shift amount.
33828 SDValue Amt = Op.getOperand(1);
33829 MVT AmtVT = Amt.getSimpleValueType();
33830 assert(AmtVT.is128BitVector() && "Unexpected value type");
33832 // If the shift amount is only ever reused as the amount of other sse shifts
33833 // then we know that only the bottom 64-bits are ever used.
33834 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
33835 unsigned Opc = Use->getOpcode();
33836 return (Opc == X86ISD::VSHL || Opc == X86ISD::VSRL ||
33837 Opc == X86ISD::VSRA) &&
33838 Use->getOperand(0) != Amt;
33841 APInt AmtUndef, AmtZero;
33842 unsigned NumAmtElts = AmtVT.getVectorNumElements();
33843 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
33844 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
33845 Depth + 1, AssumeSingleUse))
33846 return true;
33847 LLVM_FALLTHROUGH;
33849 case X86ISD::VSHLI:
33850 case X86ISD::VSRLI:
33851 case X86ISD::VSRAI: {
33852 SDValue Src = Op.getOperand(0);
33853 APInt SrcUndef;
33854 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
33855 Depth + 1))
33856 return true;
33857 // TODO convert SrcUndef to KnownUndef.
33858 break;
33860 case X86ISD::CVTSI2P:
33861 case X86ISD::CVTUI2P: {
33862 SDValue Src = Op.getOperand(0);
33863 MVT SrcVT = Src.getSimpleValueType();
33864 APInt SrcUndef, SrcZero;
33865 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
33866 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
33867 Depth + 1))
33868 return true;
33869 break;
33871 case X86ISD::PACKSS:
33872 case X86ISD::PACKUS: {
33873 APInt DemandedLHS, DemandedRHS;
33874 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
33876 APInt SrcUndef, SrcZero;
33877 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, SrcUndef,
33878 SrcZero, TLO, Depth + 1))
33879 return true;
33880 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, SrcUndef,
33881 SrcZero, TLO, Depth + 1))
33882 return true;
33883 break;
33885 case X86ISD::HADD:
33886 case X86ISD::HSUB:
33887 case X86ISD::FHADD:
33888 case X86ISD::FHSUB: {
33889 APInt DemandedLHS, DemandedRHS;
33890 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
33892 APInt LHSUndef, LHSZero;
33893 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
33894 LHSZero, TLO, Depth + 1))
33895 return true;
33896 APInt RHSUndef, RHSZero;
33897 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
33898 RHSZero, TLO, Depth + 1))
33899 return true;
33900 break;
33902 case X86ISD::VTRUNC:
33903 case X86ISD::VTRUNCS:
33904 case X86ISD::VTRUNCUS: {
33905 SDValue Src = Op.getOperand(0);
33906 MVT SrcVT = Src.getSimpleValueType();
33907 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
33908 APInt SrcUndef, SrcZero;
33909 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
33910 Depth + 1))
33911 return true;
33912 KnownZero = SrcZero.zextOrTrunc(NumElts);
33913 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
33914 break;
33916 case X86ISD::BLENDV: {
33917 APInt SelUndef, SelZero;
33918 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
33919 SelZero, TLO, Depth + 1))
33920 return true;
33922 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
33923 APInt LHSUndef, LHSZero;
33924 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
33925 LHSZero, TLO, Depth + 1))
33926 return true;
33928 APInt RHSUndef, RHSZero;
33929 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
33930 RHSZero, TLO, Depth + 1))
33931 return true;
33933 KnownZero = LHSZero & RHSZero;
33934 KnownUndef = LHSUndef & RHSUndef;
33935 break;
33937 case X86ISD::VBROADCAST: {
33938 SDValue Src = Op.getOperand(0);
33939 MVT SrcVT = Src.getSimpleValueType();
33940 if (!SrcVT.isVector())
33941 return false;
33942 // Don't bother broadcasting if we just need the 0'th element.
33943 if (DemandedElts == 1) {
33944 if (Src.getValueType() != VT)
33945 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
33946 SDLoc(Op));
33947 return TLO.CombineTo(Op, Src);
33949 APInt SrcUndef, SrcZero;
33950 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
33951 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
33952 Depth + 1))
33953 return true;
33954 break;
33956 case X86ISD::SUBV_BROADCAST: {
33957 // Reduce size of broadcast if we don't need the upper half.
33958 unsigned HalfElts = NumElts / 2;
33959 if (DemandedElts.extractBits(HalfElts, HalfElts).isNullValue()) {
33960 SDValue Src = Op.getOperand(0);
33961 MVT SrcVT = Src.getSimpleValueType();
33963 SDValue Half = Src;
33964 if (SrcVT.getVectorNumElements() != HalfElts) {
33965 MVT HalfVT = MVT::getVectorVT(SrcVT.getScalarType(), HalfElts);
33966 Half = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, SDLoc(Op), HalfVT, Src);
33969 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Half, 0,
33970 TLO.DAG, SDLoc(Op),
33971 Half.getValueSizeInBits()));
33973 break;
33975 case X86ISD::VPERMV: {
33976 SDValue Mask = Op.getOperand(0);
33977 APInt MaskUndef, MaskZero;
33978 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
33979 Depth + 1))
33980 return true;
33981 break;
33983 case X86ISD::PSHUFB:
33984 case X86ISD::VPERMV3:
33985 case X86ISD::VPERMILPV: {
33986 SDValue Mask = Op.getOperand(1);
33987 APInt MaskUndef, MaskZero;
33988 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
33989 Depth + 1))
33990 return true;
33991 break;
33993 case X86ISD::VPPERM:
33994 case X86ISD::VPERMIL2: {
33995 SDValue Mask = Op.getOperand(2);
33996 APInt MaskUndef, MaskZero;
33997 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
33998 Depth + 1))
33999 return true;
34000 break;
34004 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
34005 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
34006 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
34007 if ((VT.is256BitVector() || VT.is512BitVector()) &&
34008 DemandedElts.lshr(NumElts / 2) == 0) {
34009 unsigned SizeInBits = VT.getSizeInBits();
34010 unsigned ExtSizeInBits = SizeInBits / 2;
34012 // See if 512-bit ops only use the bottom 128-bits.
34013 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
34014 ExtSizeInBits = SizeInBits / 4;
34016 switch (Opc) {
34017 // Zero upper elements.
34018 case X86ISD::VZEXT_MOVL: {
34019 SDLoc DL(Op);
34020 SDValue Ext0 =
34021 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34022 SDValue ExtOp =
34023 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
34024 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34025 SDValue Insert =
34026 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34027 return TLO.CombineTo(Op, Insert);
34029 // Byte shifts by immediate.
34030 case X86ISD::VSHLDQ:
34031 case X86ISD::VSRLDQ:
34032 // Shift by uniform.
34033 case X86ISD::VSHL:
34034 case X86ISD::VSRL:
34035 case X86ISD::VSRA:
34036 // Shift by immediate.
34037 case X86ISD::VSHLI:
34038 case X86ISD::VSRLI:
34039 case X86ISD::VSRAI: {
34040 SDLoc DL(Op);
34041 SDValue Ext0 =
34042 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34043 SDValue ExtOp =
34044 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
34045 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34046 SDValue Insert =
34047 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34048 return TLO.CombineTo(Op, Insert);
34050 case X86ISD::VPERMI: {
34051 // Simplify PERMPD/PERMQ to extract_subvector.
34052 // TODO: This should be done in shuffle combining.
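// e.g. (an illustrative sketch): a v4i64 VPERMQ whose low two mask elements
// are <2,3> only needs the source's upper 128 bits, so it can become an
// extract of that half reinserted at index 0 (upper half left undef).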
34053 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
34054 SmallVector<int, 4> Mask;
34055 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
34056 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
34057 SDLoc DL(Op);
34058 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
34059 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34060 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
34061 return TLO.CombineTo(Op, Insert);
34064 break;
34066 // Target Shuffles.
34067 case X86ISD::PSHUFB:
34068 case X86ISD::UNPCKL:
34069 case X86ISD::UNPCKH:
34070 // Saturated Packs.
34071 case X86ISD::PACKSS:
34072 case X86ISD::PACKUS:
34073 // Horizontal Ops.
34074 case X86ISD::HADD:
34075 case X86ISD::HSUB:
34076 case X86ISD::FHADD:
34077 case X86ISD::FHSUB: {
34078 SDLoc DL(Op);
34079 MVT ExtVT = VT.getSimpleVT();
34080 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
34081 ExtSizeInBits / ExtVT.getScalarSizeInBits());
34082 SDValue Ext0 =
34083 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34084 SDValue Ext1 =
34085 extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
34086 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
34087 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34088 SDValue Insert =
34089 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34090 return TLO.CombineTo(Op, Insert);
34095 // Simplify target shuffles.
34096 if (!isTargetShuffle(Opc) || !VT.isSimple())
34097 return false;
34099 // Get target shuffle mask.
34100 bool IsUnary;
34101 SmallVector<int, 64> OpMask;
34102 SmallVector<SDValue, 2> OpInputs;
34103 if (!getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, OpInputs,
34104 OpMask, IsUnary))
34105 return false;
34107 // Shuffle inputs must be the same type as the result.
34108 if (llvm::any_of(OpInputs,
34109 [VT](SDValue V) { return VT != V.getValueType(); }))
34110 return false;
34112 // Clear known elts that might have been set above.
34113 KnownZero.clearAllBits();
34114 KnownUndef.clearAllBits();
34116 // Check if shuffle mask can be simplified to undef/zero/identity.
34117 int NumSrcs = OpInputs.size();
34118 for (int i = 0; i != NumElts; ++i) {
34119 int &M = OpMask[i];
34120 if (!DemandedElts[i])
34121 M = SM_SentinelUndef;
34122 else if (0 <= M && OpInputs[M / NumElts].isUndef())
34123 M = SM_SentinelUndef;
34126 if (isUndefInRange(OpMask, 0, NumElts)) {
34127 KnownUndef.setAllBits();
34128 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
34130 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
34131 KnownZero.setAllBits();
34132 return TLO.CombineTo(
34133 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
34135 for (int Src = 0; Src != NumSrcs; ++Src)
34136 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
34137 return TLO.CombineTo(Op, OpInputs[Src]);
34139 // Attempt to simplify inputs.
34140 for (int Src = 0; Src != NumSrcs; ++Src) {
34141 int Lo = Src * NumElts;
34142 APInt SrcElts = APInt::getNullValue(NumElts);
34143 for (int i = 0; i != NumElts; ++i)
34144 if (DemandedElts[i]) {
34145 int M = OpMask[i] - Lo;
34146 if (0 <= M && M < NumElts)
34147 SrcElts.setBit(M);
34150 APInt SrcUndef, SrcZero;
34151 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
34152 TLO, Depth + 1))
34153 return true;
34156 // Extract known zero/undef elements.
34157 // TODO - Propagate input undef/zero elts.
34158 for (int i = 0; i != NumElts; ++i) {
34159 if (OpMask[i] == SM_SentinelUndef)
34160 KnownUndef.setBit(i);
34161 if (OpMask[i] == SM_SentinelZero)
34162 KnownZero.setBit(i);
34165 return false;
34168 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
34169 SDValue Op, const APInt &OriginalDemandedBits,
34170 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
34171 unsigned Depth) const {
34172 EVT VT = Op.getValueType();
34173 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
34174 unsigned Opc = Op.getOpcode();
34175 switch(Opc) {
34176 case X86ISD::PMULDQ:
34177 case X86ISD::PMULUDQ: {
34178 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
34179 KnownBits KnownOp;
34180 SDValue LHS = Op.getOperand(0);
34181 SDValue RHS = Op.getOperand(1);
34182 // FIXME: Can we bound this better?
34183 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
34184 if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
34185 TLO, Depth + 1))
34186 return true;
34187 if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
34188 TLO, Depth + 1))
34189 return true;
34190 break;
34192 case X86ISD::VSHLI: {
34193 SDValue Op0 = Op.getOperand(0);
34194 SDValue Op1 = Op.getOperand(1);
34196 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
34197 if (ShiftImm->getAPIntValue().uge(BitWidth))
34198 break;
34200 unsigned ShAmt = ShiftImm->getZExtValue();
34201 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
34203 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
34204 // single shift. We can do this if the bottom bits (which are shifted
34205 // out) are never demanded.
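// e.g. (an illustrative sketch): when the low 5 bits are never demanded,
// (X >>u 3) << 5 can be rewritten as X << 2, and (X >>u 7) << 5 as X >>u 2.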
34206 if (Op0.getOpcode() == X86ISD::VSRLI &&
34207 OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
34208 if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
34209 if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
34210 int Diff = ShAmt - Shift2Imm->getZExtValue();
34211 if (Diff == 0)
34212 return TLO.CombineTo(Op, Op0.getOperand(0));
34214 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
34215 SDValue NewShift = TLO.DAG.getNode(
34216 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
34217 TLO.DAG.getConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
34218 return TLO.CombineTo(Op, NewShift);
34223 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
34224 TLO, Depth + 1))
34225 return true;
34227 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34228 Known.Zero <<= ShAmt;
34229 Known.One <<= ShAmt;
34231 // Low bits known zero.
34232 Known.Zero.setLowBits(ShAmt);
34234 break;
34236 case X86ISD::VSRLI: {
34237 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
34238 if (ShiftImm->getAPIntValue().uge(BitWidth))
34239 break;
34241 unsigned ShAmt = ShiftImm->getZExtValue();
34242 APInt DemandedMask = OriginalDemandedBits << ShAmt;
34244 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
34245 OriginalDemandedElts, Known, TLO, Depth + 1))
34246 return true;
34248 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34249 Known.Zero.lshrInPlace(ShAmt);
34250 Known.One.lshrInPlace(ShAmt);
34252 // High bits known zero.
34253 Known.Zero.setHighBits(ShAmt);
34255 break;
34257 case X86ISD::VSRAI: {
34258 SDValue Op0 = Op.getOperand(0);
34259 SDValue Op1 = Op.getOperand(1);
34261 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
34262 if (ShiftImm->getAPIntValue().uge(BitWidth))
34263 break;
34265 unsigned ShAmt = ShiftImm->getZExtValue();
34266 APInt DemandedMask = OriginalDemandedBits << ShAmt;
34268 // If we just want the sign bit then we don't need to shift it.
34269 if (OriginalDemandedBits.isSignMask())
34270 return TLO.CombineTo(Op, Op0);
34272 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
34273 if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
34274 SDValue Op00 = Op0.getOperand(0);
34275 unsigned NumSignBits =
34276 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
34277 if (ShAmt < NumSignBits)
34278 return TLO.CombineTo(Op, Op00);
34281 // If any of the demanded bits are produced by the sign extension, we also
34282 // demand the input sign bit.
34283 if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
34284 DemandedMask.setSignBit();
34286 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
34287 TLO, Depth + 1))
34288 return true;
34290 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34291 Known.Zero.lshrInPlace(ShAmt);
34292 Known.One.lshrInPlace(ShAmt);
34294 // If the input sign bit is known to be zero, or if none of the top bits
34295 // are demanded, turn this into an unsigned shift right.
34296 if (Known.Zero[BitWidth - ShAmt - 1] ||
34297 OriginalDemandedBits.countLeadingZeros() >= ShAmt)
34298 return TLO.CombineTo(
34299 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
34301 // High bits are known one.
34302 if (Known.One[BitWidth - ShAmt - 1])
34303 Known.One.setHighBits(ShAmt);
34305 break;
34307 case X86ISD::PEXTRB:
34308 case X86ISD::PEXTRW: {
34309 SDValue Vec = Op.getOperand(0);
34310 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
34311 MVT VecVT = Vec.getSimpleValueType();
34312 unsigned NumVecElts = VecVT.getVectorNumElements();
34314 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
34315 unsigned Idx = CIdx->getZExtValue();
34316 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
34318 // If we demand no bits from the vector then we must have demanded
34319 // bits from the implicit zext - simplify to zero.
34320 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
34321 if (DemandedVecBits == 0)
34322 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
34324 APInt KnownUndef, KnownZero;
34325 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
34326 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
34327 KnownZero, TLO, Depth + 1))
34328 return true;
34330 KnownBits KnownVec;
34331 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
34332 KnownVec, TLO, Depth + 1))
34333 return true;
34335 Known = KnownVec.zext(BitWidth, true);
34336 return false;
34338 break;
34340 case X86ISD::PINSRB:
34341 case X86ISD::PINSRW: {
34342 SDValue Vec = Op.getOperand(0);
34343 SDValue Scl = Op.getOperand(1);
34344 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
34345 MVT VecVT = Vec.getSimpleValueType();
34347 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
34348 unsigned Idx = CIdx->getZExtValue();
34349 if (!OriginalDemandedElts[Idx])
34350 return TLO.CombineTo(Op, Vec);
34352 KnownBits KnownVec;
34353 APInt DemandedVecElts(OriginalDemandedElts);
34354 DemandedVecElts.clearBit(Idx);
34355 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
34356 KnownVec, TLO, Depth + 1))
34357 return true;
34359 KnownBits KnownScl;
34360 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
34361 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
34362 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
34363 return true;
34365 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
34366 Known.One = KnownVec.One & KnownScl.One;
34367 Known.Zero = KnownVec.Zero & KnownScl.Zero;
34368 return false;
34370 break;
34372 case X86ISD::PACKSS:
34373 // PACKSS saturates to MIN/MAX integer values. So if we just want the
34374 // sign bit then we can just ask for the source operand's sign bit.
34375 // TODO - add known bits handling.
34376 if (OriginalDemandedBits.isSignMask()) {
34377 APInt DemandedLHS, DemandedRHS;
34378 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
34380 KnownBits KnownLHS, KnownRHS;
34381 APInt SignMask = APInt::getSignMask(BitWidth * 2);
34382 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
34383 KnownLHS, TLO, Depth + 1))
34384 return true;
34385 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
34386 KnownRHS, TLO, Depth + 1))
34387 return true;
34389 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
34390 break;
34391 case X86ISD::PCMPGT:
34392 // icmp sgt(0, R) == ashr(R, BitWidth-1).
34393 // iff we only need the sign bit then we can use R directly.
34394 if (OriginalDemandedBits.isSignMask() &&
34395 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
34396 return TLO.CombineTo(Op, Op.getOperand(1));
34397 break;
34398 case X86ISD::MOVMSK: {
34399 SDValue Src = Op.getOperand(0);
34400 MVT SrcVT = Src.getSimpleValueType();
34401 unsigned SrcBits = SrcVT.getScalarSizeInBits();
34402 unsigned NumElts = SrcVT.getVectorNumElements();
34404 // If we don't need the sign bits at all just return zero.
34405 if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
34406 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
34408 // Only demand the vector elements of the sign bits we need.
34409 APInt KnownUndef, KnownZero;
34410 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
34411 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
34412 TLO, Depth + 1))
34413 return true;
34415 Known.Zero = KnownZero.zextOrSelf(BitWidth);
34416 Known.Zero.setHighBits(BitWidth - NumElts);
34418 // MOVMSK only uses the MSB from each vector element.
34419 KnownBits KnownSrc;
34420 if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
34421 KnownSrc, TLO, Depth + 1))
34422 return true;
34424 if (KnownSrc.One[SrcBits - 1])
34425 Known.One.setLowBits(NumElts);
34426 else if (KnownSrc.Zero[SrcBits - 1])
34427 Known.Zero.setLowBits(NumElts);
34428 return false;
34432 return TargetLowering::SimplifyDemandedBitsForTargetNode(
34433 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
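// A minimal standalone sketch (plain C++ over <cstdint> types, not
// SelectionDAG code; the helper names are illustrative only) of two identities
// the switch above relies on. First, PMULDQ/PMULUDQ read only the low 32 bits
// of each 64-bit lane, so the upper halves of the operands are never demanded:
static inline uint64_t pmuludqLaneModel(uint64_t A, uint64_t B) {
  // Unsigned 32x32->64 multiply of the low halves; bits 32..63 of A and B are
  // ignored entirely.
  return uint64_t(uint32_t(A)) * uint64_t(uint32_t(B));
}
static inline uint64_t pmuldqLaneModel(uint64_t A, uint64_t B) {
  // Signed variant: the low 32 bits are sign-extended before multiplying.
  return uint64_t(int64_t(int32_t(A)) * int64_t(int32_t(B)));
}
// Second, the VSHLI case merges ((X >>u C1) << C2) into a single shift when
// the low C2 bits are not demanded; on every bit position >= C2 the value
// below agrees with (X << (C2 - C1)) for C2 >= C1 and with (X >> (C1 - C2))
// otherwise, so only the undemanded low bits can differ.
static inline uint32_t srlThenShlModel(uint32_t X, unsigned C1, unsigned C2) {
  return (X >> C1) << C2;
}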
34436 /// Check if a vector extract from a target-specific shuffle of a load can be
34437 /// folded into a single element load.
34438 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
34439 /// shuffles have been custom lowered so we need to handle those here.
34440 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
34441 TargetLowering::DAGCombinerInfo &DCI) {
34442 if (DCI.isBeforeLegalizeOps())
34443 return SDValue();
34445 SDValue InVec = N->getOperand(0);
34446 SDValue EltNo = N->getOperand(1);
34447 EVT EltVT = N->getValueType(0);
34449 if (!isa<ConstantSDNode>(EltNo))
34450 return SDValue();
34452 EVT OriginalVT = InVec.getValueType();
34454 // Peek through bitcasts, don't duplicate a load with other uses.
34455 InVec = peekThroughOneUseBitcasts(InVec);
34457 EVT CurrentVT = InVec.getValueType();
34458 if (!CurrentVT.isVector() ||
34459 CurrentVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
34460 return SDValue();
34462 if (!isTargetShuffle(InVec.getOpcode()))
34463 return SDValue();
34465 // Don't duplicate a load with other uses.
34466 if (!InVec.hasOneUse())
34467 return SDValue();
34469 SmallVector<int, 16> ShuffleMask;
34470 SmallVector<SDValue, 2> ShuffleOps;
34471 bool UnaryShuffle;
34472 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
34473 ShuffleOps, ShuffleMask, UnaryShuffle))
34474 return SDValue();
34476 // Select the input vector, guarding against an out-of-range extract index.
34477 unsigned NumElems = CurrentVT.getVectorNumElements();
34478 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
34479 int Idx = (Elt > (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt];
34481 if (Idx == SM_SentinelZero)
34482 return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
34483 : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
34484 if (Idx == SM_SentinelUndef)
34485 return DAG.getUNDEF(EltVT);
34487 // Bail if any mask element is SM_SentinelZero - getVectorShuffle below
34488 // won't handle it.
34489 if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
34490 return SDValue();
34492 assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
34493 SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0] : ShuffleOps[1];
34495 // If inputs to shuffle are the same for both ops, then allow 2 uses
34496 unsigned AllowedUses =
34497 (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
34499 if (LdNode.getOpcode() == ISD::BITCAST) {
34500 // Don't duplicate a load with other uses.
34501 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
34502 return SDValue();
34504 AllowedUses = 1; // only allow 1 load use if we have a bitcast
34505 LdNode = LdNode.getOperand(0);
34508 if (!ISD::isNormalLoad(LdNode.getNode()))
34509 return SDValue();
34511 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
34513 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
34514 return SDValue();
34516 // If there's a bitcast before the shuffle, check if the load type and
34517 // alignment is valid.
34518 unsigned Align = LN0->getAlignment();
34519 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34520 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
34521 EltVT.getTypeForEVT(*DAG.getContext()));
34523 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
34524 return SDValue();
34526 // All checks match so transform back to vector_shuffle so that DAG combiner
34527 // can finish the job
34528 SDLoc dl(N);
34530 // Create a shuffle node, taking into account the case that it's a unary shuffle.
34531 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
34532 Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
34533 ShuffleMask);
34534 Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
34535 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
34536 EltNo);
34539 // Helper to peek through bitops/setcc to determine size of source vector.
34540 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
34541 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
34542 switch (Src.getOpcode()) {
34543 case ISD::SETCC:
34544 return Src.getOperand(0).getValueSizeInBits() == Size;
34545 case ISD::AND:
34546 case ISD::XOR:
34547 case ISD::OR:
34548 return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
34549 checkBitcastSrcVectorSize(Src.getOperand(1), Size);
34551 return false;
34554 // Try to match patterns such as
34555 // (i16 bitcast (v16i1 x))
34556 // ->
34557 // (i16 movmsk (v16i8 sext (v16i1 x)))
34558 // before the illegal vector is scalarized on subtargets that don't have legal
34559 // vxi1 types.
34560 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
34561 const SDLoc &DL,
34562 const X86Subtarget &Subtarget) {
34563 EVT SrcVT = Src.getValueType();
34564 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
34565 return SDValue();
34567 // If the input is a truncate from v16i8, v32i8 or v64i8 go ahead and use a
34568 // movmskb even with avx512. This will be better than truncating to vXi1 and
34569 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
34570 // vpcmpeqb/vpcmpgtb.
34571 bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
34572 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
34573 Src.getOperand(0).getValueType() == MVT::v32i8 ||
34574 Src.getOperand(0).getValueType() == MVT::v64i8);
34576 // With AVX512 vxi1 types are legal and we prefer using k-regs.
34577 // MOVMSK is supported in SSE2 or later.
34578 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
34579 return SDValue();
34581 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
34582 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
34583 // v8i16 and v16i16.
34584 // For these two cases, we can shuffle the upper element bytes to a
34585 // consecutive sequence at the start of the vector and treat the results as
34586 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
34587 // for v16i16 this is not the case, because the shuffle is expensive, so we
34588 // avoid sign-extending to this type entirely.
34589 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
34590 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
34591 MVT SExtVT;
34592 switch (SrcVT.getSimpleVT().SimpleTy) {
34593 default:
34594 return SDValue();
34595 case MVT::v2i1:
34596 SExtVT = MVT::v2i64;
34597 break;
34598 case MVT::v4i1:
34599 SExtVT = MVT::v4i32;
34600 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
34601 // sign-extend to a 256-bit operation to avoid truncation.
34602 if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256))
34603 SExtVT = MVT::v4i64;
34604 break;
34605 case MVT::v8i1:
34606 SExtVT = MVT::v8i16;
34607 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
34608 // sign-extend to a 256-bit operation to match the compare.
34609 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
34610 // 256-bit because the shuffle is cheaper than sign extending the result of
34611 // the compare.
34612 // TODO: use checkBitcastSrcVectorSize
34613 if (Src.getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
34614 (Src.getOperand(0).getValueType().is256BitVector() ||
34615 Src.getOperand(0).getValueType().is512BitVector())) {
34616 SExtVT = MVT::v8i32;
34618 break;
34619 case MVT::v16i1:
34620 SExtVT = MVT::v16i8;
34621 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
34622 // it is not profitable to sign-extend to 256-bit because this will
34623 // require an extra cross-lane shuffle which is more expensive than
34624 // truncating the result of the compare to 128-bits.
34625 break;
34626 case MVT::v32i1:
34627 SExtVT = MVT::v32i8;
34628 break;
34629 case MVT::v64i1:
34630 // If we have AVX512F but not AVX512BW, and the input is truncated from
34631 // v64i8 (checked earlier), then split the input and make two PMOVMSKBs.
34632 if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
34633 SExtVT = MVT::v64i8;
34634 break;
34636 return SDValue();
34639 SDValue V = DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
34641 if (SExtVT == MVT::v64i8) {
34642 SDValue Lo, Hi;
34643 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
34644 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
34645 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
34646 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
34647 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
34648 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
34649 DAG.getConstant(32, DL, MVT::i8));
34650 V = DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
34651 } else if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
34652 V = getPMOVMSKB(DL, V, DAG, Subtarget);
34653 } else {
34654 if (SExtVT == MVT::v8i16)
34655 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
34656 DAG.getUNDEF(MVT::v8i16));
34657 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
34660 EVT IntVT =
34661 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
34662 V = DAG.getZExtOrTrunc(V, DL, IntVT);
34663 return DAG.getBitcast(VT, V);
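// A standalone model (illustrative name, not the SSE intrinsics) of what the
// MOVMSK lowering above produces: after sign-extending each i1 to a full
// element, every element is 0x00 or 0xFF, and MOVMSK gathers the MSB of each
// element into bit i of a scalar, recovering the original i1 bits.
static inline uint32_t movmskModel(const uint8_t *Elts, unsigned NumElts) {
  uint32_t Mask = 0;
  for (unsigned I = 0; I != NumElts; ++I)
    Mask |= uint32_t((Elts[I] >> 7) & 1) << I; // take the MSB of each byte
  return Mask;
}
// e.g. a v16i1 value <b0,...,b15> sign-extended to v16i8 gives bytes 0x00/0xFF,
// and movmskModel of those 16 bytes is exactly the i16 bit pattern b15..b0.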
34666 // Convert a vXi1 constant build vector to the same width scalar integer.
34667 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
34668 EVT SrcVT = Op.getValueType();
34669 assert(SrcVT.getVectorElementType() == MVT::i1 &&
34670 "Expected a vXi1 vector");
34671 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
34672 "Expected a constant build vector");
34674 APInt Imm(SrcVT.getVectorNumElements(), 0);
34675 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
34676 SDValue In = Op.getOperand(Idx);
34677 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
34678 Imm.setBit(Idx);
34680 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
34681 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
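// The loop above, restated as a standalone sketch (hypothetical helper; a bool
// array stands in for the constant BUILD_VECTOR, NumElts <= 64 assumed):
// element i of the mask vector simply becomes bit i of the scalar immediate.
static inline uint64_t packBoolsModel(const bool *Bits, unsigned NumElts) {
  uint64_t Imm = 0;
  for (unsigned I = 0; I != NumElts; ++I)
    if (Bits[I])
      Imm |= uint64_t(1) << I;
  return Imm;
}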
34684 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
34685 TargetLowering::DAGCombinerInfo &DCI,
34686 const X86Subtarget &Subtarget) {
34687 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
34689 if (!DCI.isBeforeLegalizeOps())
34690 return SDValue();
34692 // Only do this if we have k-registers.
34693 if (!Subtarget.hasAVX512())
34694 return SDValue();
34696 EVT DstVT = N->getValueType(0);
34697 SDValue Op = N->getOperand(0);
34698 EVT SrcVT = Op.getValueType();
34700 if (!Op.hasOneUse())
34701 return SDValue();
34703 // Look for logic ops.
34704 if (Op.getOpcode() != ISD::AND &&
34705 Op.getOpcode() != ISD::OR &&
34706 Op.getOpcode() != ISD::XOR)
34707 return SDValue();
34709 // Make sure we have a bitcast between mask registers and a scalar type.
34710 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
34711 DstVT.isScalarInteger()) &&
34712 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
34713 SrcVT.isScalarInteger()))
34714 return SDValue();
34716 SDValue LHS = Op.getOperand(0);
34717 SDValue RHS = Op.getOperand(1);
34719 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
34720 LHS.getOperand(0).getValueType() == DstVT)
34721 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
34722 DAG.getBitcast(DstVT, RHS));
34724 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
34725 RHS.getOperand(0).getValueType() == DstVT)
34726 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
34727 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
34729 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
34730 // Most of these have to move a constant from the scalar domain anyway.
34731 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
34732 RHS = combinevXi1ConstantToInteger(RHS, DAG);
34733 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
34734 DAG.getBitcast(DstVT, LHS), RHS);
34737 return SDValue();
34740 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
34741 const X86Subtarget &Subtarget) {
34742 SDLoc DL(BV);
34743 unsigned NumElts = BV->getNumOperands();
34744 SDValue Splat = BV->getSplatValue();
34746 // Build MMX element from integer GPR or SSE float values.
34747 auto CreateMMXElement = [&](SDValue V) {
34748 if (V.isUndef())
34749 return DAG.getUNDEF(MVT::x86mmx);
34750 if (V.getValueType().isFloatingPoint()) {
34751 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
34752 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
34753 V = DAG.getBitcast(MVT::v2i64, V);
34754 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
34756 V = DAG.getBitcast(MVT::i32, V);
34757 } else {
34758 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
34760 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
34763 // Convert build vector ops to MMX data in the bottom elements.
34764 SmallVector<SDValue, 8> Ops;
34766 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
34767 if (Splat) {
34768 if (Splat.isUndef())
34769 return DAG.getUNDEF(MVT::x86mmx);
34771 Splat = CreateMMXElement(Splat);
34773 if (Subtarget.hasSSE1()) {
34774 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
34775 if (NumElts == 8)
34776 Splat = DAG.getNode(
34777 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
34778 DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
34779 Splat);
34781 // Use PSHUFW to repeat 16-bit elements.
34782 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
34783 return DAG.getNode(
34784 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
34785 DAG.getConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32), Splat,
34786 DAG.getConstant(ShufMask, DL, MVT::i8));
34788 Ops.append(NumElts, Splat);
34789 } else {
34790 for (unsigned i = 0; i != NumElts; ++i)
34791 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
34794 // Use tree of PUNPCKLs to build up general MMX vector.
34795 while (Ops.size() > 1) {
34796 unsigned NumOps = Ops.size();
34797 unsigned IntrinOp =
34798 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
34799 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
34800 : Intrinsic::x86_mmx_punpcklbw));
34801 SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
34802 for (unsigned i = 0; i != NumOps; i += 2)
34803 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
34804 Ops[i], Ops[i + 1]);
34805 Ops.resize(NumOps / 2);
34808 return Ops[0];
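// A sketch of the unpack tree above in plain C++ (hypothetical names; assumes
// <vector> and that Ops.size() is a power of two, as it is for the 2/4/8
// element MMX build vectors handled here): each round combines adjacent pairs,
// halving the operand count until a single value remains.
template <typename T, typename CombineFn>
static T punpckTreeModel(std::vector<T> Ops, CombineFn Combine) {
  while (Ops.size() > 1) {
    size_t NumOps = Ops.size();
    for (size_t I = 0; I != NumOps; I += 2)
      Ops[I / 2] = Combine(Ops[I], Ops[I + 1]);
    Ops.resize(NumOps / 2);
  }
  return Ops[0];
}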
34811 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
34812 TargetLowering::DAGCombinerInfo &DCI,
34813 const X86Subtarget &Subtarget) {
34814 SDValue N0 = N->getOperand(0);
34815 EVT VT = N->getValueType(0);
34816 EVT SrcVT = N0.getValueType();
34818 // Try to match patterns such as
34819 // (i16 bitcast (v16i1 x))
34820 // ->
34821 // (i16 movmsk (v16i8 sext (v16i1 x)))
34822 // before the setcc result is scalarized on subtargets that don't have legal
34823 // vxi1 types.
34824 if (DCI.isBeforeLegalize()) {
34825 SDLoc dl(N);
34826 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
34827 return V;
34829 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
34830 // type, widen both sides to avoid a trip through memory.
34831 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
34832 Subtarget.hasAVX512()) {
34833 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
34834 N0 = DAG.getBitcast(MVT::v8i1, N0);
34835 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
34836 DAG.getIntPtrConstant(0, dl));
34839 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
34840 // type, widen both sides to avoid a trip through memory.
34841 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
34842 Subtarget.hasAVX512()) {
34843 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
34844 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
34845 Ops[0] = N0;
34846 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
34847 N0 = DAG.getBitcast(MVT::i8, N0);
34848 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
34852 // Since MMX types are special and don't usually play with other vector types,
34853 // it's better to handle them early to be sure we emit efficient code by
34854 // avoiding store-load conversions.
34855 if (VT == MVT::x86mmx) {
34856 // Detect MMX constant vectors.
34857 APInt UndefElts;
34858 SmallVector<APInt, 1> EltBits;
34859 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
34860 SDLoc DL(N0);
34861 // Handle zero-extension of i32 with MOVD.
34862 if (EltBits[0].countLeadingZeros() >= 32)
34863 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
34864 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
34865 // Else, bitcast to a double.
34866 // TODO - investigate supporting sext 32-bit immediates on x86_64.
34867 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
34868 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
34871 // Detect bitcasts to x86mmx low word.
34872 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
34873 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
34874 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
34875 bool LowUndef = true, AllUndefOrZero = true;
34876 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
34877 SDValue Op = N0.getOperand(i);
34878 LowUndef &= Op.isUndef() || (i >= e/2);
34879 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
34881 if (AllUndefOrZero) {
34882 SDValue N00 = N0.getOperand(0);
34883 SDLoc dl(N00);
34884 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
34885 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
34886 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
34890 // Detect bitcasts of 64-bit build vectors and convert to a
34891 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
34892 // lowest element.
34893 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
34894 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
34895 SrcVT == MVT::v8i8))
34896 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
34898 // Detect bitcasts from element or subvector extractions to x86mmx.
34899 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
34900 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
34901 isNullConstant(N0.getOperand(1))) {
34902 SDValue N00 = N0.getOperand(0);
34903 if (N00.getValueType().is128BitVector())
34904 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
34905 DAG.getBitcast(MVT::v2i64, N00));
34908 // Detect bitcasts from FP_TO_SINT to x86mmx.
34909 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
34910 SDLoc DL(N0);
34911 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
34912 DAG.getUNDEF(MVT::v2i32));
34913 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
34914 DAG.getBitcast(MVT::v2i64, Res));
34918 // Try to remove a bitcast of a constant vXi1 vector. We have to legalize
34919 // most of these to scalar anyway.
34920 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
34921 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
34922 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
34923 return combinevXi1ConstantToInteger(N0, DAG);
34926 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
34927 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
34928 isa<ConstantSDNode>(N0)) {
34929 auto *C = cast<ConstantSDNode>(N0);
34930 if (C->isAllOnesValue())
34931 return DAG.getConstant(1, SDLoc(N0), VT);
34932 if (C->isNullValue())
34933 return DAG.getConstant(0, SDLoc(N0), VT);
34936 // Try to remove bitcasts from input and output of mask arithmetic to
34937 // remove GPR<->K-register crossings.
34938 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
34939 return V;
34941 // Convert a bitcasted integer logic operation that has one bitcasted
34942 // floating-point operand into a floating-point logic operation. This may
34943 // create a load of a constant, but that is cheaper than materializing the
34944 // constant in an integer register and transferring it to an SSE register or
34945 // transferring the SSE operand to integer register and back.
34946 unsigned FPOpcode;
34947 switch (N0.getOpcode()) {
34948 case ISD::AND: FPOpcode = X86ISD::FAND; break;
34949 case ISD::OR: FPOpcode = X86ISD::FOR; break;
34950 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
34951 default: return SDValue();
34954 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
34955 (Subtarget.hasSSE2() && VT == MVT::f64)))
34956 return SDValue();
34958 SDValue LogicOp0 = N0.getOperand(0);
34959 SDValue LogicOp1 = N0.getOperand(1);
34960 SDLoc DL0(N0);
34962 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
34963 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
34964 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
34965 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
34966 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
34967 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
34969 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
34970 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
34971 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
34972 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
34973 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
34974 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
34977 return SDValue();
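// A standalone model of the f32 case of the combine above (illustrative name;
// memcpy, from <cstring>, stands in for the bitcasts): the bitwise op is
// performed on the float's bit pattern so the value never leaves the FP/SSE
// domain.
static inline float fxorModel(float A, float B) {
  uint32_t IA, IB;
  std::memcpy(&IA, &A, sizeof(float));
  std::memcpy(&IB, &B, sizeof(float));
  uint32_t IR = IA ^ IB;
  float R;
  std::memcpy(&R, &IR, sizeof(float));
  return R; // same bits as bitcast(xor(bitcast(A), bitcast(B)))
}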
34980 // Given an ABS node, detect the following pattern:
34981 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
34982 // This is useful as it is the input into a SAD pattern.
34983 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
34984 SDValue AbsOp1 = Abs->getOperand(0);
34985 if (AbsOp1.getOpcode() != ISD::SUB)
34986 return false;
34988 Op0 = AbsOp1.getOperand(0);
34989 Op1 = AbsOp1.getOperand(1);
34991 // Check if the operands of the sub are zero-extended from vectors of i8.
34992 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
34993 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
34994 Op1.getOpcode() != ISD::ZERO_EXTEND ||
34995 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
34996 return false;
34998 return true;
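// Per element, the pattern matched above is just the absolute difference of
// two unsigned bytes; a standalone scalar reference (illustrative name):
static inline uint8_t absDiffU8Model(uint8_t A, uint8_t B) {
  return A > B ? A - B : B - A; // == abs(zext(A) - zext(B))
}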
35001 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
35002 // to these zexts.
35003 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
35004 const SDValue &Zext1, const SDLoc &DL,
35005 const X86Subtarget &Subtarget) {
35006 // Find the appropriate width for the PSADBW.
35007 EVT InVT = Zext0.getOperand(0).getValueType();
35008 unsigned RegSize = std::max(128u, InVT.getSizeInBits());
35010 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
35011 // fill in the missing vector elements with 0.
35012 unsigned NumConcat = RegSize / InVT.getSizeInBits();
35013 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
35014 Ops[0] = Zext0.getOperand(0);
35015 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
35016 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35017 Ops[0] = Zext1.getOperand(0);
35018 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35020 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
35021 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
35022 ArrayRef<SDValue> Ops) {
35023 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
35024 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
35026 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
35027 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
35028 PSADBWBuilder);
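// A scalar reference (illustrative name) for one 64-bit PSADBW lane: the sum
// of absolute differences of eight byte pairs, zero-extended into the lane.
static inline uint64_t psadbwLaneModel(const uint8_t *A, const uint8_t *B) {
  uint64_t Sum = 0;
  for (unsigned I = 0; I != 8; ++I)
    Sum += A[I] > B[I] ? A[I] - B[I] : B[I] - A[I];
  return Sum;
}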
35031 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
35032 // PHMINPOSUW.
35033 static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
35034 const X86Subtarget &Subtarget) {
35035 // Bail without SSE41.
35036 if (!Subtarget.hasSSE41())
35037 return SDValue();
35039 EVT ExtractVT = Extract->getValueType(0);
35040 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
35041 return SDValue();
35043 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
35044 ISD::NodeType BinOp;
35045 SDValue Src = DAG.matchBinOpReduction(
35046 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN});
35047 if (!Src)
35048 return SDValue();
35050 EVT SrcVT = Src.getValueType();
35051 EVT SrcSVT = SrcVT.getScalarType();
35052 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
35053 return SDValue();
35055 SDLoc DL(Extract);
35056 SDValue MinPos = Src;
35058 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
35059 while (SrcVT.getSizeInBits() > 128) {
35060 unsigned NumElts = SrcVT.getVectorNumElements();
35061 unsigned NumSubElts = NumElts / 2;
35062 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
35063 unsigned SubSizeInBits = SrcVT.getSizeInBits();
35064 SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
35065 SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
35066 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
35068 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
35069 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
35070 "Unexpected value type");
35072 // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
35073 // to flip the value accordingly.
35074 SDValue Mask;
35075 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
35076 if (BinOp == ISD::SMAX)
35077 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
35078 else if (BinOp == ISD::SMIN)
35079 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
35080 else if (BinOp == ISD::UMAX)
35081 Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
35083 if (Mask)
35084 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35086 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
35087 // shuffling each upper element down and inserting zeros. This means that the
35088 // v16i8 UMIN will leave the upper byte of each pair as zero, performing the zero-extension
35089 // ready for the PHMINPOS.
35090 if (ExtractVT == MVT::i8) {
35091 SDValue Upper = DAG.getVectorShuffle(
35092 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
35093 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
35094 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
35097 // Perform the PHMINPOS on a v8i16 vector.
35098 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
35099 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
35100 MinPos = DAG.getBitcast(SrcVT, MinPos);
35102 if (Mask)
35103 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35105 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
35106 DAG.getIntPtrConstant(0, DL));
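// A standalone check of the mask trick used above (illustrative name; uses
// std::min from <algorithm>): PHMINPOSUW only provides an unsigned minimum, so
// the other reductions are mapped onto it by XORing with a constant before and
// after.
static inline uint16_t uminViaMaskModel(uint16_t A, uint16_t B, uint16_t Mask) {
  uint16_t R = std::min<uint16_t>(A ^ Mask, B ^ Mask);
  return R ^ Mask;
}
// Bit-pattern identities (reinterpret the uint16_t results as int16_t for the
// signed cases):
//   uminViaMaskModel(A, B, 0x7FFF) == smax(A, B)
//   uminViaMaskModel(A, B, 0x8000) == smin(A, B)
//   uminViaMaskModel(A, B, 0xFFFF) == umax(A, B)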
35109 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
35110 static SDValue combineHorizontalPredicateResult(SDNode *Extract,
35111 SelectionDAG &DAG,
35112 const X86Subtarget &Subtarget) {
35113 // Bail without SSE2.
35114 if (!Subtarget.hasSSE2())
35115 return SDValue();
35117 EVT ExtractVT = Extract->getValueType(0);
35118 unsigned BitWidth = ExtractVT.getSizeInBits();
35119 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
35120 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
35121 return SDValue();
35123 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
35124 ISD::NodeType BinOp;
35125 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
35126 if (!Match && ExtractVT == MVT::i1)
35127 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
35128 if (!Match)
35129 return SDValue();
35131 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
35132 // which we can't support here for now.
35133 if (Match.getScalarValueSizeInBits() != BitWidth)
35134 return SDValue();
35136 SDValue Movmsk;
35137 SDLoc DL(Extract);
35138 EVT MatchVT = Match.getValueType();
35139 unsigned NumElts = MatchVT.getVectorNumElements();
35141 if (ExtractVT == MVT::i1) {
35142 // Special case for (pre-legalization) vXi1 reductions.
35143 if (NumElts > 32)
35144 return SDValue();
35145 if (DAG.getTargetLoweringInfo().isTypeLegal(MatchVT)) {
35146 // If this is a legal AVX512 predicate type then we can just bitcast.
35147 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35148 Movmsk = DAG.getBitcast(MovmskVT, Match);
35149 } else {
35150 // Use combineBitcastvxi1 to create the MOVMSK.
35151 if (NumElts == 32 && !Subtarget.hasInt256()) {
35152 SDValue Lo, Hi;
35153 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35154 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35155 NumElts = 16;
35157 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35158 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
35160 if (!Movmsk)
35161 return SDValue();
35162 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, MVT::i32);
35163 } else {
35164 // Bail with AVX512VL (which uses predicate registers).
35165 if (Subtarget.hasVLX())
35166 return SDValue();
35168 unsigned MatchSizeInBits = Match.getValueSizeInBits();
35169 if (!(MatchSizeInBits == 128 ||
35170 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
35171 return SDValue();
35173 // Make sure this isn't a vector of 1 element. The perf win from using
35174 // MOVMSK diminishes with fewer elements in the reduction, but it is
35175 // generally better to get the comparison over to the GPRs as soon as
35176 // possible to reduce the number of vector ops.
35177 if (Match.getValueType().getVectorNumElements() < 2)
35178 return SDValue();
35180 // Check that we are extracting a reduction of all sign bits.
35181 if (DAG.ComputeNumSignBits(Match) != BitWidth)
35182 return SDValue();
35184 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
35185 SDValue Lo, Hi;
35186 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35187 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35188 MatchSizeInBits = Match.getValueSizeInBits();
35191 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
35192 MVT MaskSrcVT;
35193 if (64 == BitWidth || 32 == BitWidth)
35194 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
35195 MatchSizeInBits / BitWidth);
35196 else
35197 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
35199 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
35200 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
35201 NumElts = MaskSrcVT.getVectorNumElements();
35203 assert(NumElts <= 32 && "Not expecting more than 32 elements");
35205 if (BinOp == ISD::XOR) {
35206 // parity -> (AND (CTPOP(MOVMSK X)), 1)
35207 SDValue Mask = DAG.getConstant(1, DL, MVT::i32);
35208 SDValue Result = DAG.getNode(ISD::CTPOP, DL, MVT::i32, Movmsk);
35209 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, Mask);
35210 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
35213 SDValue CmpC;
35214 ISD::CondCode CondCode;
35215 if (BinOp == ISD::OR) {
35216 // any_of -> MOVMSK != 0
35217 CmpC = DAG.getConstant(0, DL, MVT::i32);
35218 CondCode = ISD::CondCode::SETNE;
35219 } else {
35220 // all_of -> MOVMSK == ((1 << NumElts) - 1)
35221 CmpC = DAG.getConstant((1ULL << NumElts) - 1, DL, MVT::i32);
35222 CondCode = ISD::CondCode::SETEQ;
35225 // The setcc produces an i8 of 0/1, so extend that to the result width and
35226 // negate to get the final 0/-1 mask value.
35227 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35228 EVT SetccVT =
35229 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
35230 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
35231 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
35232 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
35233 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
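// Standalone models (illustrative names; the parity model uses std::bitset,
// assuming <bitset>) of the three reductions once the per-element sign bits
// have been gathered into a scalar mask with NumElts valid bits:
static inline bool anyOfModel(uint32_t Movmsk) { return Movmsk != 0; }
static inline bool allOfModel(uint32_t Movmsk, unsigned NumElts) {
  return Movmsk == uint32_t((1ULL << NumElts) - 1); // NumElts <= 32
}
static inline bool parityModel(uint32_t Movmsk) {
  return std::bitset<32>(Movmsk).count() & 1; // CTPOP & 1, as emitted above
}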
35236 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
35237 const X86Subtarget &Subtarget) {
35238 // PSADBW is only supported on SSE2 and up.
35239 if (!Subtarget.hasSSE2())
35240 return SDValue();
35242 // Verify the element type of the vector we're extracting from is an integer wider than i16.
35243 EVT VT = Extract->getOperand(0).getValueType();
35244 if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
35245 return SDValue();
35247 unsigned RegSize = 128;
35248 if (Subtarget.useBWIRegs())
35249 RegSize = 512;
35250 else if (Subtarget.hasAVX())
35251 RegSize = 256;
35253 // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
35254 // TODO: We should be able to handle larger vectors by splitting them before
35255 // feeding them into several SADs, and then reducing over those.
35256 if (RegSize / VT.getVectorNumElements() < 8)
35257 return SDValue();
35259 // Match shuffle + add pyramid.
35260 ISD::NodeType BinOp;
35261 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
35263 // The operand is expected to be zero extended from i8
35264 // (verified in detectZextAbsDiff).
35265 // In order to convert to i64 and above, additional any/zero/sign
35266 // extend is expected.
35267 // The zero extend from 32 bit has no mathematical effect on the result.
35268 // Also, the sign extend is basically a zero extend
35269 // (it extends the sign bit, which is zero).
35270 // So it is correct to skip the sign/zero extend instruction.
35271 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
35272 Root.getOpcode() == ISD::ZERO_EXTEND ||
35273 Root.getOpcode() == ISD::ANY_EXTEND))
35274 Root = Root.getOperand(0);
35276 // If there was a match, we want Root to be the ABS node at the root of an
35277 // abs-diff pattern.
35278 if (!Root || Root.getOpcode() != ISD::ABS)
35279 return SDValue();
35281 // Check whether we have an abs-diff pattern feeding into the select.
35282 SDValue Zext0, Zext1;
35283 if (!detectZextAbsDiff(Root, Zext0, Zext1))
35284 return SDValue();
35286 // Create the SAD instruction.
35287 SDLoc DL(Extract);
35288 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
35290 // If the original vector was wider than 8 elements, sum over the results
35291 // in the SAD vector.
35292 unsigned Stages = Log2_32(VT.getVectorNumElements());
35293 MVT SadVT = SAD.getSimpleValueType();
35294 if (Stages > 3) {
35295 unsigned SadElems = SadVT.getVectorNumElements();
35297 for (unsigned i = Stages - 3; i > 0; --i) {
35298 SmallVector<int, 16> Mask(SadElems, -1);
35299 for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
35300 Mask[j] = MaskEnd + j;
35302 SDValue Shuffle =
35303 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
35304 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
35308 MVT Type = Extract->getSimpleValueType(0);
35309 unsigned TypeSizeInBits = Type.getSizeInBits();
35310 // Return the lowest TypeSizeInBits bits.
35311 MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
35312 SAD = DAG.getBitcast(ResVT, SAD);
35313 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
35314 Extract->getOperand(1));
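// The shuffle+ADD pyramid above, sketched over a plain array of the per-lane
// SAD sums (hypothetical helper; assumes NumElts is a power of two, as it is
// for the SAD vectors produced here): each round folds the upper half of the
// live elements onto the lower half.
static inline uint64_t shuffleAddReduceModel(uint64_t *Elts, unsigned NumElts) {
  for (unsigned Half = NumElts / 2; Half != 0; Half /= 2)
    for (unsigned I = 0; I != Half; ++I)
      Elts[I] += Elts[I + Half];
  return Elts[0];
}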
35317 // Attempt to peek through a target shuffle and extract the scalar from the
35318 // source.
35319 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
35320 TargetLowering::DAGCombinerInfo &DCI,
35321 const X86Subtarget &Subtarget) {
35322 if (DCI.isBeforeLegalizeOps())
35323 return SDValue();
35325 SDValue Src = N->getOperand(0);
35326 SDValue Idx = N->getOperand(1);
35328 EVT VT = N->getValueType(0);
35329 EVT SrcVT = Src.getValueType();
35330 EVT SrcSVT = SrcVT.getVectorElementType();
35331 unsigned NumSrcElts = SrcVT.getVectorNumElements();
35333 // Don't attempt this for boolean mask vectors or unknown extraction indices.
35334 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
35335 return SDValue();
35337 SDValue SrcBC = peekThroughBitcasts(Src);
35339 // Handle extract(broadcast(scalar_value)); the extraction index doesn't matter.
35340 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
35341 SDValue SrcOp = SrcBC.getOperand(0);
35342 if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
35343 return DAG.getBitcast(VT, SrcOp);
35346 // Resolve the target shuffle inputs and mask.
35347 SmallVector<int, 16> Mask;
35348 SmallVector<SDValue, 2> Ops;
35349 if (!resolveTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
35350 return SDValue();
35352 // Attempt to narrow/widen the shuffle mask to the correct size.
35353 if (Mask.size() != NumSrcElts) {
35354 if ((NumSrcElts % Mask.size()) == 0) {
35355 SmallVector<int, 16> ScaledMask;
35356 int Scale = NumSrcElts / Mask.size();
35357 scaleShuffleMask<int>(Scale, Mask, ScaledMask);
35358 Mask = std::move(ScaledMask);
35359 } else if ((Mask.size() % NumSrcElts) == 0) {
35360 // Simplify Mask based on demanded element.
35361 int ExtractIdx = (int)N->getConstantOperandVal(1);
35362 int Scale = Mask.size() / NumSrcElts;
35363 int Lo = Scale * ExtractIdx;
35364 int Hi = Scale * (ExtractIdx + 1);
35365 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
35366 if (i < Lo || Hi <= i)
35367 Mask[i] = SM_SentinelUndef;
35369 SmallVector<int, 16> WidenedMask;
35370 while (Mask.size() > NumSrcElts &&
35371 canWidenShuffleElements(Mask, WidenedMask))
35372 Mask = std::move(WidenedMask);
35373 // TODO - investigate support for wider shuffle masks with known upper
35374 // undef/zero elements for implicit zero-extension.
35378 // Check if narrowing/widening failed.
35379 if (Mask.size() != NumSrcElts)
35380 return SDValue();
35382 int SrcIdx = Mask[N->getConstantOperandVal(1)];
35383 SDLoc dl(N);
35385 // If the shuffle source element is undef/zero then we can just accept it.
35386 if (SrcIdx == SM_SentinelUndef)
35387 return DAG.getUNDEF(VT);
35389 if (SrcIdx == SM_SentinelZero)
35390 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
35391 : DAG.getConstant(0, dl, VT);
35393 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
35394 SrcIdx = SrcIdx % Mask.size();
35396 // We can only extract other elements from 128-bit vectors and in certain
35397 // circumstances, depending on SSE-level.
35398 // TODO: Investigate using extract_subvector for larger vectors.
35399 // TODO: Investigate float/double extraction if it will be just stored.
35400 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
35401 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
35402 assert(SrcSVT == VT && "Unexpected extraction type");
35403 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
35404 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
35405 DAG.getIntPtrConstant(SrcIdx, dl));
35408 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
35409 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
35410 assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
35411 "Unexpected extraction type");
35412 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
35413 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
35414 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
35415 DAG.getIntPtrConstant(SrcIdx, dl));
35416 return DAG.getZExtOrTrunc(ExtOp, dl, VT);
35419 return SDValue();
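// The index arithmetic at the heart of the combine above, standalone
// (hypothetical type/name; the undef/zero sentinels are handled separately
// before this point): extracting lane Elt through a shuffle of two inputs is
// an extract of lane Mask[Elt] % NumElts from input Mask[Elt] / NumElts.
struct ShuffleLaneSource { unsigned OpIdx, Lane; };
static inline ShuffleLaneSource traceShuffleLaneModel(const int *Mask,
                                                      unsigned NumElts,
                                                      unsigned Elt) {
  unsigned M = unsigned(Mask[Elt]); // assumed 0 <= Mask[Elt] < 2 * NumElts
  return {M / NumElts, M % NumElts};
}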
35422 /// Extracting a scalar FP value from vector element 0 is free, so extract each
35423 /// operand first, then perform the math as a scalar op.
35424 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
35425 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
35426 SDValue Vec = ExtElt->getOperand(0);
35427 SDValue Index = ExtElt->getOperand(1);
35428 EVT VT = ExtElt->getValueType(0);
35429 EVT VecVT = Vec.getValueType();
35431 // TODO: If this is a unary/expensive/expand op, allow extraction from a
35432 // non-zero element because the shuffle+scalar op will be cheaper?
35433 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
35434 return SDValue();
35436 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
35437 // extract, the condition code), so deal with those as a special case.
35438 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
35439 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
35440 if (OpVT != MVT::f32 && OpVT != MVT::f64)
35441 return SDValue();
35443 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
35444 SDLoc DL(ExtElt);
35445 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
35446 Vec.getOperand(0), Index);
35447 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
35448 Vec.getOperand(1), Index);
35449 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
35452 if (VT != MVT::f32 && VT != MVT::f64)
35453 return SDValue();
35455 // Vector FP selects don't fit the pattern of FP math ops (because the
35456 // condition has a different type and we have to change the opcode), so deal
35457 // with those here.
35458 // FIXME: This is restricted to pre type legalization by ensuring the setcc
35459 // has i1 elements. If we loosen this we need to convert vector bool to a
35460 // scalar bool.
35461 if (Vec.getOpcode() == ISD::VSELECT &&
35462 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
35463 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
35464 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
35465 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
35466 SDLoc DL(ExtElt);
35467 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
35468 Vec.getOperand(0).getValueType().getScalarType(),
35469 Vec.getOperand(0), Index);
35470 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
35471 Vec.getOperand(1), Index);
35472 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
35473 Vec.getOperand(2), Index);
35474 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
35477 // TODO: This switch could include FNEG and the x86-specific FP logic ops
35478 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
35479 // missed load folding and fma+fneg combining.
35480 switch (Vec.getOpcode()) {
35481 case ISD::FMA: // Begin 3 operands
35482 case ISD::FMAD:
35483 case ISD::FADD: // Begin 2 operands
35484 case ISD::FSUB:
35485 case ISD::FMUL:
35486 case ISD::FDIV:
35487 case ISD::FREM:
35488 case ISD::FCOPYSIGN:
35489 case ISD::FMINNUM:
35490 case ISD::FMAXNUM:
35491 case ISD::FMINNUM_IEEE:
35492 case ISD::FMAXNUM_IEEE:
35493 case ISD::FMAXIMUM:
35494 case ISD::FMINIMUM:
35495 case X86ISD::FMAX:
35496 case X86ISD::FMIN:
35497 case ISD::FABS: // Begin 1 operand
35498 case ISD::FSQRT:
35499 case ISD::FRINT:
35500 case ISD::FCEIL:
35501 case ISD::FTRUNC:
35502 case ISD::FNEARBYINT:
35503 case ISD::FROUND:
35504 case ISD::FFLOOR:
35505 case X86ISD::FRCP:
35506 case X86ISD::FRSQRT: {
35507 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
35508 SDLoc DL(ExtElt);
35509 SmallVector<SDValue, 4> ExtOps;
35510 for (SDValue Op : Vec->ops())
35511 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
35512 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
35514 default:
35515 return SDValue();
35517 llvm_unreachable("All opcodes should return within switch");
35520 /// Detect vector gather/scatter index generation and convert it from being a
35521 /// bunch of shuffles and extracts into a somewhat faster sequence.
35522 /// For i686, the best sequence is apparently storing the value and loading
35523 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
35524 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
35525 TargetLowering::DAGCombinerInfo &DCI,
35526 const X86Subtarget &Subtarget) {
35527 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
35528 return NewOp;
35530 SDValue InputVector = N->getOperand(0);
35531 SDValue EltIdx = N->getOperand(1);
35532 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
35534 EVT SrcVT = InputVector.getValueType();
35535 EVT VT = N->getValueType(0);
35536 SDLoc dl(InputVector);
35537 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
35539 if (CIdx && CIdx->getAPIntValue().uge(SrcVT.getVectorNumElements()))
35540 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
35542 // Integer Constant Folding.
35543 if (CIdx && VT.isInteger()) {
35544 APInt UndefVecElts;
35545 SmallVector<APInt, 16> EltBits;
35546 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
35547 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
35548 EltBits, true, false)) {
35549 uint64_t Idx = CIdx->getZExtValue();
35550 if (UndefVecElts[Idx])
35551 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
35552 return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
35553 dl, VT);
35557 // TODO - Remove this once we can handle the implicit zero-extension of
35558 // X86ISD::PEXTRW/X86ISD::PEXTRB in:
35559 // XFormVExtractWithShuffleIntoLoad, combineHorizontalPredicateResult and
35560 // combineBasicSADPattern.
35561 if (IsPextr) {
35562 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35563 if (TLI.SimplifyDemandedBits(
35564 SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
35565 return SDValue(N, 0);
35566 return SDValue();
35569 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
35570 return NewOp;
35572 // Detect MMX extraction of all bits as an i64. It works better as a bitcast.
35573 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
35574 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
35575 SDValue MMXSrc = InputVector.getOperand(0);
35577 // The bitcast source is a direct mmx result.
35578 if (MMXSrc.getValueType() == MVT::x86mmx)
35579 return DAG.getBitcast(VT, InputVector);
35582 // Detect mmx to i32 conversion through a v2i32 elt extract.
35583 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
35584 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
35585 SDValue MMXSrc = InputVector.getOperand(0);
35587 // The bitcast source is a direct mmx result.
35588 if (MMXSrc.getValueType() == MVT::x86mmx)
35589 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
35592 // Check whether this extract is the root of a sum of absolute differences
35593 // pattern. This has to be done here because we really want it to happen
35595 // pre-legalization.
35595 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
35596 return SAD;
35598 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
35599 if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
35600 return Cmp;
35602 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
35603 if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
35604 return MinMax;
35606 if (SDValue V = scalarizeExtEltFP(N, DAG))
35607 return V;
35609 // Attempt to extract an i1 element by using MOVMSK to extract the sign bits
35610 // and then testing the relevant element.
35611 if (CIdx && SrcVT.getScalarType() == MVT::i1) {
35612 SmallVector<SDNode *, 16> BoolExtracts;
35613 auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
35614 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
35615 isa<ConstantSDNode>(Use->getOperand(1)) &&
35616 Use->getValueType(0) == MVT::i1) {
35617 BoolExtracts.push_back(Use);
35618 return true;
35620 return false;
35622 if (all_of(InputVector->uses(), IsBoolExtract) &&
35623 BoolExtracts.size() > 1) {
35624 unsigned NumSrcElts = SrcVT.getVectorNumElements();
35625 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
35626 if (SDValue BC =
35627 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
35628 for (SDNode *Use : BoolExtracts) {
35629 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
35630 unsigned MaskIdx = Use->getConstantOperandVal(1);
35631 APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
35632 SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
35633 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
35634 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
35635 DCI.CombineTo(Use, Res);
35637 return SDValue(N, 0);
35642 return SDValue();
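// A standalone model of the vXi1 extract rewrite above (illustrative name):
// once the mask vector has been turned into a scalar via MOVMSK, each i1
// extract becomes a single bit test.
static inline bool extractBoolModel(uint64_t Movmsk, unsigned MaskIdx) {
  uint64_t MaskBit = uint64_t(1) << MaskIdx;
  // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
  return (Movmsk & MaskBit) == MaskBit;
}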
35645 /// If a vector select has an operand that is -1 or 0, try to simplify the
35646 /// select to a bitwise logic operation.
35647 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
35648 static SDValue
35649 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
35650 TargetLowering::DAGCombinerInfo &DCI,
35651 const X86Subtarget &Subtarget) {
35652 SDValue Cond = N->getOperand(0);
35653 SDValue LHS = N->getOperand(1);
35654 SDValue RHS = N->getOperand(2);
35655 EVT VT = LHS.getValueType();
35656 EVT CondVT = Cond.getValueType();
35657 SDLoc DL(N);
35658 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35660 if (N->getOpcode() != ISD::VSELECT)
35661 return SDValue();
35663 assert(CondVT.isVector() && "Vector select expects a vector selector!");
35665 // Check if the first operand is all zeros and Cond type is vXi1.
35666 // This situation only applies to avx512.
35667 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
35668 // TODO: Can we assert that both operands are not zeros (because that should
35669 // get simplified at node creation time)?
35670 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
35671 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
35672 if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
35673 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
35674 // Invert the cond to not(cond) : xor(op,allones)=not(op)
35675 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
35676 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
35677 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
35680 // To use the condition operand as a bitwise mask, it must have elements that
35681 // are the same size as the select elements. I.e., the condition operand must
35682 // have already been promoted from the IR select condition type <N x i1>.
35683 // Don't check if the types themselves are equal because that excludes
35684 // vector floating-point selects.
35685 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
35686 return SDValue();
35688 // Try to invert the condition if true value is not all 1s and false value is
35689 // not all 0s. Only do this if the condition has one use.
35690 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
35691 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
35692 // Check if the selector will be produced by CMPP*/PCMP*.
35693 Cond.getOpcode() == ISD::SETCC &&
35694 // Check if SETCC has already been promoted.
35695 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
35696 CondVT) {
35697 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
35699 if (TValIsAllZeros || FValIsAllOnes) {
35700 SDValue CC = Cond.getOperand(2);
35701 ISD::CondCode NewCC =
35702 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
35703 Cond.getOperand(0).getValueType().isInteger());
35704 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
35705 NewCC);
35706 std::swap(LHS, RHS);
35707 TValIsAllOnes = FValIsAllOnes;
35708 FValIsAllZeros = TValIsAllZeros;
35712 // Cond value must be 'sign splat' to be converted to a logical op.
35713 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
35714 return SDValue();
35716 // vselect Cond, 111..., 000... -> Cond
35717 if (TValIsAllOnes && FValIsAllZeros)
35718 return DAG.getBitcast(VT, Cond);
35720 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
35721 return SDValue();
35723 // vselect Cond, 111..., X -> or Cond, X
35724 if (TValIsAllOnes) {
35725 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
35726 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
35727 return DAG.getBitcast(VT, Or);
35730 // vselect Cond, X, 000... -> and Cond, X
35731 if (FValIsAllZeros) {
35732 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
35733 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
35734 return DAG.getBitcast(VT, And);
35737 // vselect Cond, 000..., X -> andn Cond, X
35738 if (TValIsAllZeros) {
35739 MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
35740 SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
35741 SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
35742 SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
35743 return DAG.getBitcast(VT, AndN);
35746 return SDValue();
35749 /// If both arms of a vector select are concatenated vectors, split the select,
35750 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
35751 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
35752 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
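/// For example, a v8f32 vselect whose true and false arms are both
/// (concat_vectors v4f32, v4f32) is rewritten as two v4f32 selects whose
/// results are re-concatenated, so no 256-bit blend is needed.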
35753 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
35754 const X86Subtarget &Subtarget) {
35755 unsigned Opcode = N->getOpcode();
35756 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
35757 return SDValue();
35759 // TODO: Split 512-bit vectors too?
35760 EVT VT = N->getValueType(0);
35761 if (!VT.is256BitVector())
35762 return SDValue();
35764 // TODO: Split as long as any 2 of the 3 operands are concatenated?
35765 SDValue Cond = N->getOperand(0);
35766 SDValue TVal = N->getOperand(1);
35767 SDValue FVal = N->getOperand(2);
35768 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
35769 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
35770 !collectConcatOps(TVal.getNode(), CatOpsT) ||
35771 !collectConcatOps(FVal.getNode(), CatOpsF))
35772 return SDValue();
35774 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
35775 ArrayRef<SDValue> Ops) {
35776 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
35778 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
35779 makeBlend, /*CheckBWI*/ false);
35782 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
35783 SDValue Cond = N->getOperand(0);
35784 SDValue LHS = N->getOperand(1);
35785 SDValue RHS = N->getOperand(2);
35786 SDLoc DL(N);
35788 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
35789 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
35790 if (!TrueC || !FalseC)
35791 return SDValue();
35793 // Don't do this for crazy integer types.
35794 EVT VT = N->getValueType(0);
35795 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
35796 return SDValue();
35798 // We're going to use the condition bit in math or logic ops. We could allow
35799 // this with a wider condition value (post-legalization it becomes an i8),
35800 // but if nothing is creating selects that late, it doesn't matter.
35801 if (Cond.getValueType() != MVT::i1)
35802 return SDValue();
35804 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
35805 // 3, 5, or 9 with i32/i64, so those get transformed too.
35806 // TODO: For constants that overflow or do not differ by power-of-2 or small
35807 // multiplier, convert to 'and' + 'add'.
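// For example, (select Cond, 13, 5) has AbsDiff == 8, so it lowers to
// (zext(Cond) * 8) + 5, where the multiply-by-8 is really a shift; for i32,
// (select Cond, 2, 7) first inverts the condition and becomes
// (zext(not Cond) * 5) + 2.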
35808 const APInt &TrueVal = TrueC->getAPIntValue();
35809 const APInt &FalseVal = FalseC->getAPIntValue();
35810 bool OV;
35811 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
35812 if (OV)
35813 return SDValue();
35815 APInt AbsDiff = Diff.abs();
35816 if (AbsDiff.isPowerOf2() ||
35817 ((VT == MVT::i32 || VT == MVT::i64) &&
35818 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
35820 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
35821 // of the condition can usually be folded into a compare predicate, but even
35822 // without that, the sequence should be cheaper than a CMOV alternative.
35823 if (TrueVal.slt(FalseVal)) {
35824 Cond = DAG.getNOT(DL, Cond, MVT::i1);
35825 std::swap(TrueC, FalseC);
35828 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
35829 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
35831 // Multiply condition by the difference if non-one.
35832 if (!AbsDiff.isOneValue())
35833 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
35835 // Add the base if non-zero.
35836 if (!FalseC->isNullValue())
35837 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
35839 return R;
35842 return SDValue();
35845 /// If this is a *dynamic* select (non-constant condition) and we can match
35846 /// this node with one of the variable blend instructions, restructure the
35847 /// condition so that blends can use the high (sign) bit of each element.
35848 /// This function will also call SimplifyDemandedBits on already created
35849 /// BLENDV to perform additional simplifications.
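/// For example, a v4i32 vselect lowered to a variable blend (e.g. BLENDVPS)
/// only reads bit 31 of each condition element, so only the sign bit of the
/// condition is demanded and the nodes feeding the other bits can be
/// simplified away.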
35850 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
35851 TargetLowering::DAGCombinerInfo &DCI,
35852 const X86Subtarget &Subtarget) {
35853 SDValue Cond = N->getOperand(0);
35854 if ((N->getOpcode() != ISD::VSELECT &&
35855 N->getOpcode() != X86ISD::BLENDV) ||
35856 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
35857 return SDValue();
35859 // Don't optimize before the condition has been transformed to a legal type
35860 // and don't ever optimize vector selects that map to AVX512 mask-registers.
35861 unsigned BitWidth = Cond.getScalarValueSizeInBits();
35862 if (BitWidth < 8 || BitWidth > 64)
35863 return SDValue();
35865 // We can only handle the cases where VSELECT is directly legal on the
35866 // subtarget. We custom lower VSELECT nodes with constant conditions and
35867 // this makes it hard to see whether a dynamic VSELECT will correctly
35868 // lower, so we both check the operation's status and explicitly handle the
35869 // cases where a *dynamic* blend will fail even though a constant-condition
35870 // blend could be custom lowered.
35871 // FIXME: We should find a better way to handle this class of problems.
35872 // Potentially, we should combine constant-condition vselect nodes
35873 // pre-legalization into shuffles and not mark as many types as custom
35874 // lowered.
35875 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35876 EVT VT = N->getValueType(0);
35877 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
35878 return SDValue();
35879 // FIXME: We don't support i16-element blends currently. We could and
35880 // should support them by making *all* the bits in the condition be set
35881 // rather than just the high bit and using an i8-element blend.
35882 if (VT.getVectorElementType() == MVT::i16)
35883 return SDValue();
35884 // Dynamic blending was only available from SSE4.1 onward.
35885 if (VT.is128BitVector() && !Subtarget.hasSSE41())
35886 return SDValue();
35887 // 256-bit byte blends (v32i8) are only available with AVX2.
35888 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
35889 return SDValue();
35890 // There are no 512-bit blend instructions that use sign bits.
35891 if (VT.is512BitVector())
35892 return SDValue();
35894 // TODO: Add other opcodes eventually lowered into BLEND.
35895 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
35896 UI != UE; ++UI)
35897 if ((UI->getOpcode() != ISD::VSELECT &&
35898 UI->getOpcode() != X86ISD::BLENDV) ||
35899 UI.getOperandNo() != 0)
35900 return SDValue();
35902 APInt DemandedMask(APInt::getSignMask(BitWidth));
35903 KnownBits Known;
35904 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
35905 !DCI.isBeforeLegalizeOps());
35906 if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
35907 return SDValue();
35909 // If we changed the computation somewhere in the DAG, this change will
35910 // affect all users of Cond. Update all the nodes so that we do not use
35911 // the generic VSELECT anymore. Otherwise, we may perform wrong
35912 // optimizations as we messed with the actual expectation for the vector
35913 // boolean values.
35914 for (SDNode *U : Cond->uses()) {
35915 if (U->getOpcode() == X86ISD::BLENDV)
35916 continue;
35918 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
35919 Cond, U->getOperand(1), U->getOperand(2));
35920 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
35921 DCI.AddToWorklist(U);
35923 DCI.CommitTargetLoweringOpt(TLO);
35924 return SDValue(N, 0);
35927 /// Do target-specific dag combines on SELECT and VSELECT nodes.
35928 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
35929 TargetLowering::DAGCombinerInfo &DCI,
35930 const X86Subtarget &Subtarget) {
35931 SDLoc DL(N);
35932 SDValue Cond = N->getOperand(0);
35933 SDValue LHS = N->getOperand(1);
35934 SDValue RHS = N->getOperand(2);
35936 // Try simplification again because we use this function to optimize
35937 // BLENDV nodes that are not handled by the generic combiner.
35938 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
35939 return V;
35941 EVT VT = LHS.getValueType();
35942 EVT CondVT = Cond.getValueType();
35943 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35945 // Convert vselects with constant condition into shuffles.
35946 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
35947 DCI.isBeforeLegalizeOps()) {
35948 SmallVector<int, 64> Mask;
35949 if (createShuffleMaskFromVSELECT(Mask, Cond))
35950 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
35953 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
35954 // instructions match the semantics of the common C idiom x<y?x:y but not
35955 // x<=y?x:y, because of how they handle negative zero (which can be
35956 // ignored in unsafe-math mode).
35957 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
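  // (MINPS, for example, returns its second operand when either input is a
  // NaN or when both inputs are zero of either sign, which is precisely
  // x < y ? x : y and not x <= y ? x : y.)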
35958 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
35959 VT != MVT::f80 && VT != MVT::f128 &&
35960 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
35961 (Subtarget.hasSSE2() ||
35962 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
35963 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
35965 unsigned Opcode = 0;
35966 // Check for x CC y ? x : y.
35967 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
35968 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
35969 switch (CC) {
35970 default: break;
35971 case ISD::SETULT:
35972 // Converting this to a min would handle NaNs incorrectly, and swapping
35973 // the operands would cause it to handle comparisons between positive
35974 // and negative zero incorrectly.
35975 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
35976 if (!DAG.getTarget().Options.UnsafeFPMath &&
35977 !(DAG.isKnownNeverZeroFloat(LHS) ||
35978 DAG.isKnownNeverZeroFloat(RHS)))
35979 break;
35980 std::swap(LHS, RHS);
35982 Opcode = X86ISD::FMIN;
35983 break;
35984 case ISD::SETOLE:
35985 // Converting this to a min would handle comparisons between positive
35986 // and negative zero incorrectly.
35987 if (!DAG.getTarget().Options.UnsafeFPMath &&
35988 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
35989 break;
35990 Opcode = X86ISD::FMIN;
35991 break;
35992 case ISD::SETULE:
35993 // Converting this to a min would handle both negative zeros and NaNs
35994 // incorrectly, but we can swap the operands to fix both.
35995 std::swap(LHS, RHS);
35996 LLVM_FALLTHROUGH;
35997 case ISD::SETOLT:
35998 case ISD::SETLT:
35999 case ISD::SETLE:
36000 Opcode = X86ISD::FMIN;
36001 break;
36003 case ISD::SETOGE:
36004 // Converting this to a max would handle comparisons between positive
36005 // and negative zero incorrectly.
36006 if (!DAG.getTarget().Options.UnsafeFPMath &&
36007 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36008 break;
36009 Opcode = X86ISD::FMAX;
36010 break;
36011 case ISD::SETUGT:
36012 // Converting this to a max would handle NaNs incorrectly, and swapping
36013 // the operands would cause it to handle comparisons between positive
36014 // and negative zero incorrectly.
36015 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36016 if (!DAG.getTarget().Options.UnsafeFPMath &&
36017 !(DAG.isKnownNeverZeroFloat(LHS) ||
36018 DAG.isKnownNeverZeroFloat(RHS)))
36019 break;
36020 std::swap(LHS, RHS);
36022 Opcode = X86ISD::FMAX;
36023 break;
36024 case ISD::SETUGE:
36025 // Converting this to a max would handle both negative zeros and NaNs
36026 // incorrectly, but we can swap the operands to fix both.
36027 std::swap(LHS, RHS);
36028 LLVM_FALLTHROUGH;
36029 case ISD::SETOGT:
36030 case ISD::SETGT:
36031 case ISD::SETGE:
36032 Opcode = X86ISD::FMAX;
36033 break;
36035 // Check for x CC y ? y : x -- a min/max with reversed arms.
36036 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
36037 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
36038 switch (CC) {
36039 default: break;
36040 case ISD::SETOGE:
36041 // Converting this to a min would handle comparisons between positive
36042 // and negative zero incorrectly, and swapping the operands would
36043 // cause it to handle NaNs incorrectly.
36044 if (!DAG.getTarget().Options.UnsafeFPMath &&
36045 !(DAG.isKnownNeverZeroFloat(LHS) ||
36046 DAG.isKnownNeverZeroFloat(RHS))) {
36047 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36048 break;
36049 std::swap(LHS, RHS);
36051 Opcode = X86ISD::FMIN;
36052 break;
36053 case ISD::SETUGT:
36054 // Converting this to a min would handle NaNs incorrectly.
36055 if (!DAG.getTarget().Options.UnsafeFPMath &&
36056 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
36057 break;
36058 Opcode = X86ISD::FMIN;
36059 break;
36060 case ISD::SETUGE:
36061 // Converting this to a min would handle both negative zeros and NaNs
36062 // incorrectly, but we can swap the operands to fix both.
36063 std::swap(LHS, RHS);
36064 LLVM_FALLTHROUGH;
36065 case ISD::SETOGT:
36066 case ISD::SETGT:
36067 case ISD::SETGE:
36068 Opcode = X86ISD::FMIN;
36069 break;
36071 case ISD::SETULT:
36072 // Converting this to a max would handle NaNs incorrectly.
36073 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36074 break;
36075 Opcode = X86ISD::FMAX;
36076 break;
36077 case ISD::SETOLE:
36078 // Converting this to a max would handle comparisons between positive
36079 // and negative zero incorrectly, and swapping the operands would
36080 // cause it to handle NaNs incorrectly.
36081 if (!DAG.getTarget().Options.UnsafeFPMath &&
36082 !DAG.isKnownNeverZeroFloat(LHS) &&
36083 !DAG.isKnownNeverZeroFloat(RHS)) {
36084 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36085 break;
36086 std::swap(LHS, RHS);
36088 Opcode = X86ISD::FMAX;
36089 break;
36090 case ISD::SETULE:
36091 // Converting this to a max would handle both negative zeros and NaNs
36092 // incorrectly, but we can swap the operands to fix both.
36093 std::swap(LHS, RHS);
36094 LLVM_FALLTHROUGH;
36095 case ISD::SETOLT:
36096 case ISD::SETLT:
36097 case ISD::SETLE:
36098 Opcode = X86ISD::FMAX;
36099 break;
36103 if (Opcode)
36104 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
36107 // Some mask scalar intrinsics rely on checking if only one bit is set
36108 // and implement it in C code like this:
36109 // A[0] = (U & 1) ? A[0] : W[0];
36110 // This creates some redundant instructions that break pattern matching.
36111 // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
36112 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
36113 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
36114 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36115 SDValue AndNode = Cond.getOperand(0);
36116 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
36117 isNullConstant(Cond.getOperand(1)) &&
36118 isOneConstant(AndNode.getOperand(1))) {
36119 // LHS and RHS swapped due to
36120 // setcc outputting 1 when AND resulted in 0 and vice versa.
36121 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
36122 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
36126 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
36127 // lowering on KNL. In this case we convert it to
36128 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
36129 // The same applies to all vectors of i8 and i16 elements without BWI.
36130 // Make sure we extend these even before type legalization gets a chance to
36131 // split wide vectors.
36132 // Since SKX these selects have a proper lowering.
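  // For example, on an AVX512F-only target a (select v32i1, v32i8, v32i8) is
  // rewritten here as (select v32i8, v32i8, v32i8) by sign-extending the mask,
  // so it can later be lowered as a variable byte blend (e.g. vpblendvb)
  // instead of being split by type legalization.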
36133 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
36134 CondVT.getVectorElementType() == MVT::i1 &&
36135 (ExperimentalVectorWideningLegalization ||
36136 VT.getVectorNumElements() > 4) &&
36137 (VT.getVectorElementType() == MVT::i8 ||
36138 VT.getVectorElementType() == MVT::i16)) {
36139 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
36140 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
36143 // AVX512 - Extend select with zero to merge with target shuffle.
36144 // select(mask, extract_subvector(shuffle(x)), zero) -->
36145 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
36146 // TODO - support non target shuffles as well.
36147 if (Subtarget.hasAVX512() && CondVT.isVector() &&
36148 CondVT.getVectorElementType() == MVT::i1) {
36149 auto SelectableOp = [&TLI](SDValue Op) {
36150 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
36151 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
36152 isNullConstant(Op.getOperand(1)) &&
36153 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
36154 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
36157 bool SelectableLHS = SelectableOp(LHS);
36158 bool SelectableRHS = SelectableOp(RHS);
36159 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
36160 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
36162 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
36163 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
36164 : RHS.getOperand(0).getValueType();
36165 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36166 EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
36167 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
36168 VT.getSizeInBits());
36169 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
36170 VT.getSizeInBits());
36171 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
36172 DAG.getUNDEF(SrcCondVT), Cond,
36173 DAG.getIntPtrConstant(0, DL));
36174 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
36175 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
36179 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
36180 return V;
36182 // Canonicalize max and min:
36183 // (x > y) ? x : y -> (x >= y) ? x : y
36184 // (x < y) ? x : y -> (x <= y) ? x : y
36185 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
36186 // the need for an extra compare
36187 // against zero. e.g.
36188 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
36189 // subl %esi, %edi
36190 // testl %edi, %edi
36191 // movl $0, %eax
36192 // cmovgl %edi, %eax
36193 // =>
36194 // xorl %eax, %eax
36195 // subl %esi, %edi
36196 // cmovsl %eax, %edi
36197 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
36198 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
36199 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
36200 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36201 switch (CC) {
36202 default: break;
36203 case ISD::SETLT:
36204 case ISD::SETGT: {
36205 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
36206 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
36207 Cond.getOperand(0), Cond.getOperand(1), NewCC);
36208 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
36213 // Match VSELECTs into subs with unsigned saturation.
36214 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
36215 // psubus is available in SSE2 for i8 and i16 vectors.
36216 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
36217 isPowerOf2_32(VT.getVectorNumElements()) &&
36218 (VT.getVectorElementType() == MVT::i8 ||
36219 VT.getVectorElementType() == MVT::i16)) {
36220 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36222 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
36223 // left side, invert the predicate to simplify the logic below.
36224 SDValue Other;
36225 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
36226 Other = RHS;
36227 CC = ISD::getSetCCInverse(CC, true);
36228 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
36229 Other = LHS;
36232 if (Other.getNode() && Other->getNumOperands() == 2 &&
36233 Other->getOperand(0) == Cond.getOperand(0)) {
36234 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
36235 SDValue CondRHS = Cond->getOperand(1);
36237 // Look for a general sub with unsigned saturation first.
36238 // x >= y ? x-y : 0 --> subus x, y
36239 // x > y ? x-y : 0 --> subus x, y
36240 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
36241 Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
36242 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
36244 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
36245 if (isa<BuildVectorSDNode>(CondRHS)) {
36246 // If the RHS is a constant we have to reverse the const
36247 // canonicalization.
36248 // x > C-1 ? x+-C : 0 --> subus x, C
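            // For example, for i8 elements, x >u 41 ? x + 214 : 0 (the DAG
            // form of x >u 41 ? x - 42 : 0, since 214 == -42 as i8) matches
            // with C == 42 because 41 == -(-42) - 1, so we emit usubsat(x, 42).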
36249 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
36250 return (!Op && !Cond) ||
36251 (Op && Cond &&
36252 Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
36254 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
36255 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
36256 /*AllowUndefs*/ true)) {
36257 OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
36258 OpRHS);
36259 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
36262 // Another special case: If C was a sign bit, the sub has been
36263 // canonicalized into a xor.
36264 // FIXME: Would it be better to use computeKnownBits to determine
36265 // whether it's safe to decanonicalize the xor?
36266 // x s< 0 ? x^C : 0 --> subus x, C
36267 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
36268 if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
36269 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
36270 OpRHSConst->getAPIntValue().isSignMask()) {
36271 // Note that we have to rebuild the RHS constant here to ensure we
36272 // don't rely on particular values of undef lanes.
36273 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
36274 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
36282 // Match VSELECTs into add with unsigned saturation.
36283 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
36284 // paddus is available in SSE2 for i8 and i16 vectors.
36285 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
36286 isPowerOf2_32(VT.getVectorNumElements()) &&
36287 (VT.getVectorElementType() == MVT::i8 ||
36288 VT.getVectorElementType() == MVT::i16)) {
36289 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36291 SDValue CondLHS = Cond->getOperand(0);
36292 SDValue CondRHS = Cond->getOperand(1);
36294 // Check if one of the arms of the VSELECT is a vector with all bits set.
36295 // If it's on the left side, invert the predicate to simplify the logic below.
36296 SDValue Other;
36297 if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
36298 Other = RHS;
36299 CC = ISD::getSetCCInverse(CC, true);
36300 } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
36301 Other = LHS;
36304 if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
36305 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
36307 // Canonicalize condition operands.
36308 if (CC == ISD::SETUGE) {
36309 std::swap(CondLHS, CondRHS);
36310 CC = ISD::SETULE;
36313 // We can test against either of the addition operands.
36314 // x <= x+y ? x+y : ~0 --> addus x, y
36315 // x+y >= x ? x+y : ~0 --> addus x, y
36316 if (CC == ISD::SETULE && Other == CondRHS &&
36317 (OpLHS == CondLHS || OpRHS == CondLHS))
36318 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
36320 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
36321 CondLHS == OpLHS) {
36322 // If the RHS is a constant we have to reverse the const
36323 // canonicalization.
36324 // x <= ~C ? x+C : ~0 --> addus x, C
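            // For example, for i8 elements, x <=u 213 ? x + 42 : ~0 matches
            // with C == 42, since 213 == ~42 and the add overflows exactly
            // when x >u 213, so we emit uaddsat(x, 42).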
36325 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
36326 return Cond->getAPIntValue() == ~Op->getAPIntValue();
36328 if (CC == ISD::SETULE &&
36329 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
36330 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
36335 // Early exit check
36336 if (!TLI.isTypeLegal(VT))
36337 return SDValue();
36339 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
36340 return V;
36342 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
36343 return V;
36345 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
36346 return V;
36348 // Custom action for SELECT MMX
36349 if (VT == MVT::x86mmx) {
36350 LHS = DAG.getBitcast(MVT::i64, LHS);
36351 RHS = DAG.getBitcast(MVT::i64, RHS);
36352 SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
36353 return DAG.getBitcast(VT, newSelect);
36356 return SDValue();
36359 /// Combine:
36360 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
36361 /// to:
36362 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
36363 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
36364 /// Note that this is only legal for some op/cc combinations.
36365 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
36366 SelectionDAG &DAG,
36367 const X86Subtarget &Subtarget) {
36368 // This combine only operates on CMP-like nodes.
36369 if (!(Cmp.getOpcode() == X86ISD::CMP ||
36370 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
36371 return SDValue();
36373 // Can't replace the cmp if it has more uses than the one we're looking at.
36374 // FIXME: We would like to be able to handle this, but would need to make sure
36375 // all uses were updated.
36376 if (!Cmp.hasOneUse())
36377 return SDValue();
36379 // This only applies to variations of the common case:
36380 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
36381 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
36382 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
36383 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
36384 // Using the proper condcodes (see below), overflow is checked for.
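  // For example, "if (atomic_fetch_add(&x, 1) < 0)" can then be compiled to
  // roughly "lock addl $1, (x)" followed by "jle", reusing the EFLAGS of the
  // locked add instead of keeping the old value around for a separate compare.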
36386 // FIXME: We can generalize both constraints:
36387 // - XOR/OR/AND (if they were made to survive AtomicExpand)
36388 // - LHS != 1
36389 // if the result is compared.
36391 SDValue CmpLHS = Cmp.getOperand(0);
36392 SDValue CmpRHS = Cmp.getOperand(1);
36394 if (!CmpLHS.hasOneUse())
36395 return SDValue();
36397 unsigned Opc = CmpLHS.getOpcode();
36398 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
36399 return SDValue();
36401 SDValue OpRHS = CmpLHS.getOperand(2);
36402 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
36403 if (!OpRHSC)
36404 return SDValue();
36406 APInt Addend = OpRHSC->getAPIntValue();
36407 if (Opc == ISD::ATOMIC_LOAD_SUB)
36408 Addend = -Addend;
36410 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
36411 if (!CmpRHSC)
36412 return SDValue();
36414 APInt Comparison = CmpRHSC->getAPIntValue();
36416 // If the addend is the negation of the comparison value, then we can do
36417 // a full comparison by emitting the atomic arithmetic as a locked sub.
36418 if (Comparison == -Addend) {
36419 // The CC is fine, but we need to rewrite the LHS of the comparison as an
36420 // atomic sub.
36421 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
36422 auto AtomicSub = DAG.getAtomic(
36423 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
36424 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
36425 /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
36426 AN->getMemOperand());
36427 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
36428 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
36429 DAG.getUNDEF(CmpLHS.getValueType()));
36430 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
36431 return LockOp;
36434 // We can handle comparisons with zero in a number of cases by manipulating
36435 // the CC used.
36436 if (!Comparison.isNullValue())
36437 return SDValue();
36439 if (CC == X86::COND_S && Addend == 1)
36440 CC = X86::COND_LE;
36441 else if (CC == X86::COND_NS && Addend == 1)
36442 CC = X86::COND_G;
36443 else if (CC == X86::COND_G && Addend == -1)
36444 CC = X86::COND_GE;
36445 else if (CC == X86::COND_LE && Addend == -1)
36446 CC = X86::COND_L;
36447 else
36448 return SDValue();
36450 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
36451 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
36452 DAG.getUNDEF(CmpLHS.getValueType()));
36453 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
36454 return LockOp;
36457 // Check whether a boolean test is testing a boolean value generated by
36458 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper
36459 // condition code.
36461 // Simplify the following patterns:
36462 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
36463 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
36464 // to (Op EFLAGS Cond)
36466 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
36467 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
36468 // to (Op EFLAGS !Cond)
36470 // where Op could be BRCOND or CMOV.
36472 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
36473 // This combine only operates on CMP-like nodes.
36474 if (!(Cmp.getOpcode() == X86ISD::CMP ||
36475 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
36476 return SDValue();
36478 // Quit if not used as a boolean value.
36479 if (CC != X86::COND_E && CC != X86::COND_NE)
36480 return SDValue();
36482 // Check CMP operands. One of them should be 0 or 1 and the other should be
36483 // a SETCC or a value extended from it.
36484 SDValue Op1 = Cmp.getOperand(0);
36485 SDValue Op2 = Cmp.getOperand(1);
36487 SDValue SetCC;
36488 const ConstantSDNode* C = nullptr;
36489 bool needOppositeCond = (CC == X86::COND_E);
36490 bool checkAgainstTrue = false; // Is it a comparison against 1?
36492 if ((C = dyn_cast<ConstantSDNode>(Op1)))
36493 SetCC = Op2;
36494 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
36495 SetCC = Op1;
36496 else // Quit if neither operand is a constant.
36497 return SDValue();
36499 if (C->getZExtValue() == 1) {
36500 needOppositeCond = !needOppositeCond;
36501 checkAgainstTrue = true;
36502 } else if (C->getZExtValue() != 0)
36503 // Quit if the constant is neither 0 nor 1.
36504 return SDValue();
36506 bool truncatedToBoolWithAnd = false;
36507 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
36508 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
36509 SetCC.getOpcode() == ISD::TRUNCATE ||
36510 SetCC.getOpcode() == ISD::AND) {
36511 if (SetCC.getOpcode() == ISD::AND) {
36512 int OpIdx = -1;
36513 if (isOneConstant(SetCC.getOperand(0)))
36514 OpIdx = 1;
36515 if (isOneConstant(SetCC.getOperand(1)))
36516 OpIdx = 0;
36517 if (OpIdx < 0)
36518 break;
36519 SetCC = SetCC.getOperand(OpIdx);
36520 truncatedToBoolWithAnd = true;
36521 } else
36522 SetCC = SetCC.getOperand(0);
36525 switch (SetCC.getOpcode()) {
36526 case X86ISD::SETCC_CARRY:
36527 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
36528 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
36529 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
36530 // truncated to i1 using 'and'.
36531 if (checkAgainstTrue && !truncatedToBoolWithAnd)
36532 break;
36533 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
36534 "Invalid use of SETCC_CARRY!");
36535 LLVM_FALLTHROUGH;
36536 case X86ISD::SETCC:
36537 // Set the condition code or opposite one if necessary.
36538 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
36539 if (needOppositeCond)
36540 CC = X86::GetOppositeBranchCondition(CC);
36541 return SetCC.getOperand(1);
36542 case X86ISD::CMOV: {
36543 // Check whether the false/true value is a canonical one, i.e. 0 or 1.
36544 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
36545 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
36546 // Quit if true value is not a constant.
36547 if (!TVal)
36548 return SDValue();
36549 // Quit if false value is not a constant.
36550 if (!FVal) {
36551 SDValue Op = SetCC.getOperand(0);
36552 // Skip 'zext' or 'trunc' node.
36553 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
36554 Op.getOpcode() == ISD::TRUNCATE)
36555 Op = Op.getOperand(0);
36556 // A special case for rdrand/rdseed, where 0 is set if the false cond is
36557 // found.
36558 if ((Op.getOpcode() != X86ISD::RDRAND &&
36559 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
36560 return SDValue();
36562 // Quit if false value is not the constant 0 or 1.
36563 bool FValIsFalse = true;
36564 if (FVal && FVal->getZExtValue() != 0) {
36565 if (FVal->getZExtValue() != 1)
36566 return SDValue();
36567 // If FVal is 1, opposite cond is needed.
36568 needOppositeCond = !needOppositeCond;
36569 FValIsFalse = false;
36571 // Quit if TVal is not the constant opposite of FVal.
36572 if (FValIsFalse && TVal->getZExtValue() != 1)
36573 return SDValue();
36574 if (!FValIsFalse && TVal->getZExtValue() != 0)
36575 return SDValue();
36576 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
36577 if (needOppositeCond)
36578 CC = X86::GetOppositeBranchCondition(CC);
36579 return SetCC.getOperand(3);
36583 return SDValue();
36586 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
36587 /// Match:
36588 /// (X86or (X86setcc) (X86setcc))
36589 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
36590 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
36591 X86::CondCode &CC1, SDValue &Flags,
36592 bool &isAnd) {
36593 if (Cond->getOpcode() == X86ISD::CMP) {
36594 if (!isNullConstant(Cond->getOperand(1)))
36595 return false;
36597 Cond = Cond->getOperand(0);
36600 isAnd = false;
36602 SDValue SetCC0, SetCC1;
36603 switch (Cond->getOpcode()) {
36604 default: return false;
36605 case ISD::AND:
36606 case X86ISD::AND:
36607 isAnd = true;
36608 LLVM_FALLTHROUGH;
36609 case ISD::OR:
36610 case X86ISD::OR:
36611 SetCC0 = Cond->getOperand(0);
36612 SetCC1 = Cond->getOperand(1);
36613 break;
36616 // Make sure we have SETCC nodes, using the same flags value.
36617 if (SetCC0.getOpcode() != X86ISD::SETCC ||
36618 SetCC1.getOpcode() != X86ISD::SETCC ||
36619 SetCC0->getOperand(1) != SetCC1->getOperand(1))
36620 return false;
36622 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
36623 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
36624 Flags = SetCC0->getOperand(1);
36625 return true;
36628 // When legalizing carry, we create carries via add X, -1
36629 // If that comes from an actual carry, via setcc, we use the
36630 // carry directly.
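// For example, when EFLAGS comes from (X86add (zext (X86setcc COND_B, F)), -1):
// the add carries exactly when the setcc produced 1, i.e. exactly when CF was
// set in F, so a COND_B user of the add's flags can read F directly.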
36631 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
36632 if (EFLAGS.getOpcode() == X86ISD::ADD) {
36633 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
36634 SDValue Carry = EFLAGS.getOperand(0);
36635 while (Carry.getOpcode() == ISD::TRUNCATE ||
36636 Carry.getOpcode() == ISD::ZERO_EXTEND ||
36637 Carry.getOpcode() == ISD::SIGN_EXTEND ||
36638 Carry.getOpcode() == ISD::ANY_EXTEND ||
36639 (Carry.getOpcode() == ISD::AND &&
36640 isOneConstant(Carry.getOperand(1))))
36641 Carry = Carry.getOperand(0);
36642 if (Carry.getOpcode() == X86ISD::SETCC ||
36643 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
36644 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
36645 uint64_t CarryCC = Carry.getConstantOperandVal(0);
36646 SDValue CarryOp1 = Carry.getOperand(1);
36647 if (CarryCC == X86::COND_B)
36648 return CarryOp1;
36649 if (CarryCC == X86::COND_A) {
36650 // Try to convert COND_A into COND_B in an attempt to facilitate
36651 // materializing "setb reg".
36653 // Do not flip "e > c", where "c" is a constant, because the CMP
36654 // instruction cannot take an immediate as its first operand.
36656 if (CarryOp1.getOpcode() == X86ISD::SUB &&
36657 CarryOp1.getNode()->hasOneUse() &&
36658 CarryOp1.getValueType().isInteger() &&
36659 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
36660 SDValue SubCommute =
36661 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
36662 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
36663 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
36666 // If this is a check of the z flag of an add with 1, switch to the
36667 // C flag.
36668 if (CarryCC == X86::COND_E &&
36669 CarryOp1.getOpcode() == X86ISD::ADD &&
36670 isOneConstant(CarryOp1.getOperand(1)))
36671 return CarryOp1;
36676 return SDValue();
36679 /// Optimize an EFLAGS definition used according to the condition code \p CC
36680 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
36681 /// uses of chain values.
36682 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
36683 SelectionDAG &DAG,
36684 const X86Subtarget &Subtarget) {
36685 if (CC == X86::COND_B)
36686 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
36687 return Flags;
36689 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
36690 return R;
36691 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
36694 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
36695 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
36696 TargetLowering::DAGCombinerInfo &DCI,
36697 const X86Subtarget &Subtarget) {
36698 SDLoc DL(N);
36700 SDValue FalseOp = N->getOperand(0);
36701 SDValue TrueOp = N->getOperand(1);
36702 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
36703 SDValue Cond = N->getOperand(3);
36705 // Try to simplify the EFLAGS and condition code operands.
36706 // We can't always do this as FCMOV only supports a subset of X86 cond.
36707 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
36708 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
36709 SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
36710 Flags};
36711 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
36715 // If this is a select between two integer constants, try to do some
36716 // optimizations. Note that the operands are ordered the opposite of SELECT
36717 // operands.
36718 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
36719 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
36720 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
36721 // larger than FalseC (the false value).
36722 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
36723 CC = X86::GetOppositeBranchCondition(CC);
36724 std::swap(TrueC, FalseC);
36725 std::swap(TrueOp, FalseOp);
36728 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
36729 // This is efficient for any integer data type (including i8/i16) and
36730 // shift amount.
36731 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
36732 Cond = getSETCC(CC, Cond, DL, DAG);
36734 // Zero extend the condition if needed.
36735 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
36737 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
36738 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
36739 DAG.getConstant(ShAmt, DL, MVT::i8));
36740 return Cond;
36743 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
36744 // for any integer data type, including i8/i16.
36745 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
36746 Cond = getSETCC(CC, Cond, DL, DAG);
36748 // Zero extend the condition if needed.
36749 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
36750 FalseC->getValueType(0), Cond);
36751 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
36752 SDValue(FalseC, 0));
36753 return Cond;
36756 // Optimize cases that will turn into an LEA instruction. This requires
36757 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
36758 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
36759 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
36760 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
36762 bool isFastMultiplier = false;
36763 if (Diff < 10) {
36764 switch ((unsigned char)Diff) {
36765 default: break;
36766 case 1: // result = add base, cond
36767 case 2: // result = lea base( , cond*2)
36768 case 3: // result = lea base(cond, cond*2)
36769 case 4: // result = lea base( , cond*4)
36770 case 5: // result = lea base(cond, cond*4)
36771 case 8: // result = lea base( , cond*8)
36772 case 9: // result = lea base(cond, cond*8)
36773 isFastMultiplier = true;
36774 break;
36778 if (isFastMultiplier) {
36779 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
36780 Cond = getSETCC(CC, Cond, DL ,DAG);
36781 // Zero extend the condition if needed.
36782 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
36783 Cond);
36784 // Scale the condition by the difference.
36785 if (Diff != 1)
36786 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
36787 DAG.getConstant(Diff, DL, Cond.getValueType()));
36789 // Add the base if non-zero.
36790 if (FalseC->getAPIntValue() != 0)
36791 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
36792 SDValue(FalseC, 0));
36793 return Cond;
36799 // Handle these cases:
36800 // (select (x != c), e, c) -> (select (x != c), e, x),
36801 // (select (x == c), c, e) -> (select (x == c), x, e)
36802 // where the c is an integer constant, and the "select" is the combination
36803 // of CMOV and CMP.
36805 // The rationale for this change is that a conditional-move from a constant
36806 // needs two instructions; however, a conditional-move from a register needs
36807 // only one instruction.
36809 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
36810 // some instruction-combining opportunities. This opt needs to be
36811 // postponed as late as possible.
36813 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
36814 // the DCI.xxxx conditions are provided to postpone the optimization as
36815 // late as possible.
36817 ConstantSDNode *CmpAgainst = nullptr;
36818 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
36819 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
36820 !isa<ConstantSDNode>(Cond.getOperand(0))) {
36822 if (CC == X86::COND_NE &&
36823 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
36824 CC = X86::GetOppositeBranchCondition(CC);
36825 std::swap(TrueOp, FalseOp);
36828 if (CC == X86::COND_E &&
36829 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
36830 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
36831 DAG.getConstant(CC, DL, MVT::i8), Cond };
36832 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
36837 // Fold and/or of setcc's to double CMOV:
36838 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
36839 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
36841 // This combine lets us generate:
36842 // cmovcc1 (jcc1 if we don't have CMOV)
36843 // cmovcc2 (same)
36844 // instead of:
36845 // setcc1
36846 // setcc2
36847 // and/or
36848 // cmovne (jne if we don't have CMOV)
36849 // When we can't use the CMOV instruction, it might increase branch
36850 // mispredicts.
36851 // When we can use CMOV, or when there is no mispredict, this improves
36852 // throughput and reduces register pressure.
36854 if (CC == X86::COND_NE) {
36855 SDValue Flags;
36856 X86::CondCode CC0, CC1;
36857 bool isAndSetCC;
36858 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
36859 if (isAndSetCC) {
36860 std::swap(FalseOp, TrueOp);
36861 CC0 = X86::GetOppositeBranchCondition(CC0);
36862 CC1 = X86::GetOppositeBranchCondition(CC1);
36865 SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
36866 Flags};
36867 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
36868 SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
36869 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
36870 return CMOV;
36874 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
36875 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
36876 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
36877 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
36878 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
36879 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
36880 SDValue Add = TrueOp;
36881 SDValue Const = FalseOp;
36882 // Canonicalize the condition code for easier matching and output.
36883 if (CC == X86::COND_E)
36884 std::swap(Add, Const);
36886 // We might have replaced the constant in the cmov with the LHS of the
36887 // compare. If so change it to the RHS of the compare.
36888 if (Const == Cond.getOperand(0))
36889 Const = Cond.getOperand(1);
36891 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
36892 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
36893 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
36894 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
36895 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
36896 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
36897 EVT VT = N->getValueType(0);
36898 // This should constant fold.
36899 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
36900 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
36901 DAG.getConstant(X86::COND_NE, DL, MVT::i8),
36902 Cond);
36903 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
36907 return SDValue();
36910 /// Different mul shrinking modes.
36911 enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
36913 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
36914 EVT VT = N->getOperand(0).getValueType();
36915 if (VT.getScalarSizeInBits() != 32)
36916 return false;
36918 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
36919 unsigned SignBits[2] = {1, 1};
36920 bool IsPositive[2] = {false, false};
36921 for (unsigned i = 0; i < 2; i++) {
36922 SDValue Opd = N->getOperand(i);
36924 SignBits[i] = DAG.ComputeNumSignBits(Opd);
36925 IsPositive[i] = DAG.SignBitIsZero(Opd);
36928 bool AllPositive = IsPositive[0] && IsPositive[1];
36929 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
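  // An i32 value known to lie in [-128, 127] has at least 32 - 8 + 1 == 25
  // sign bits; a non-negative value in [0, 255] has bits 31..8 clear, i.e. at
  // least 24 sign bits with a known-zero sign bit. The 17/16 thresholds below
  // are the analogous bounds for i16 ranges.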
36930 // When ranges are from -128 ~ 127, use MULS8 mode.
36931 if (MinSignBits >= 25)
36932 Mode = MULS8;
36933 // When ranges are from 0 ~ 255, use MULU8 mode.
36934 else if (AllPositive && MinSignBits >= 24)
36935 Mode = MULU8;
36936 // When ranges are from -32768 ~ 32767, use MULS16 mode.
36937 else if (MinSignBits >= 17)
36938 Mode = MULS16;
36939 // When ranges are from 0 ~ 65535, use MULU16 mode.
36940 else if (AllPositive && MinSignBits >= 16)
36941 Mode = MULU16;
36942 else
36943 return false;
36944 return true;
36947 /// When the operands of a vector mul are extended from smaller-sized values,
36948 /// like i8 and i16, the type of the mul may be shrunk to generate more
36949 /// efficient code. Two typical patterns are handled:
36950 /// Pattern1:
36951 /// %2 = sext/zext <N x i8> %1 to <N x i32>
36952 /// %4 = sext/zext <N x i8> %3 to <N x i32>
36953 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
36954 /// %5 = mul <N x i32> %2, %4
36956 /// Pattern2:
36957 /// %2 = zext/sext <N x i16> %1 to <N x i32>
36958 /// %4 = zext/sext <N x i16> %3 to <N x i32>
36959 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
36960 /// %5 = mul <N x i32> %2, %4
36962 /// There are four mul shrinking modes:
36963 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
36964 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
36965 /// generate pmullw+sext32 for it (MULS8 mode).
36966 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
36967 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
36968 /// generate pmullw+zext32 for it (MULU8 mode).
36969 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
36970 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
36971 /// generate pmullw+pmulhw for it (MULS16 mode).
36972 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
36973 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
36974 /// generate pmullw+pmulhuw for it (MULU16 mode).
36975 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
36976 const X86Subtarget &Subtarget) {
36977 // Check for legality
36978 // pmullw/pmulhw on XMM registers require SSE2.
36979 if (!Subtarget.hasSSE2())
36980 return SDValue();
36982 // Check for profitability
36983 // pmulld is supported since SSE41. It is better to use pmulld
36984 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
36985 // the expansion.
36986 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
36987 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
36988 return SDValue();
36990 ShrinkMode Mode;
36991 if (!canReduceVMulWidth(N, DAG, Mode))
36992 return SDValue();
36994 SDLoc DL(N);
36995 SDValue N0 = N->getOperand(0);
36996 SDValue N1 = N->getOperand(1);
36997 EVT VT = N->getOperand(0).getValueType();
36998 unsigned NumElts = VT.getVectorNumElements();
36999 if ((NumElts % 2) != 0)
37000 return SDValue();
37002 unsigned RegSize = 128;
37003 MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
37004 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
37006 // Shrink the operands of mul.
37007 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
37008 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
37010 if (ExperimentalVectorWideningLegalization ||
37011 NumElts >= OpsVT.getVectorNumElements()) {
37012 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
37013 // lower part is needed.
37014 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
37015 if (Mode == MULU8 || Mode == MULS8)
37016 return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
37017 DL, VT, MulLo);
37019 MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
37020 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
37021 // the higher part is also needed.
37022 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
37023 ReducedVT, NewN0, NewN1);
37025 // Repack the lower part and higher part result of mul into a wider
37026 // result.
37027 // Generate shuffle functioning as punpcklwd.
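    // For NumElts == 8 this builds the mask <0,8,1,9,2,10,3,11>, interleaving
    // the low halves of MulLo and MulHi; after the bitcast each i32 lane holds
    // (hi << 16) | lo, i.e. the full 32-bit product.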
37028 SmallVector<int, 16> ShuffleMask(NumElts);
37029 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37030 ShuffleMask[2 * i] = i;
37031 ShuffleMask[2 * i + 1] = i + NumElts;
37033 SDValue ResLo =
37034 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37035 ResLo = DAG.getBitcast(ResVT, ResLo);
37036 // Generate shuffle functioning as punpckhwd.
37037 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37038 ShuffleMask[2 * i] = i + NumElts / 2;
37039 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
37041 SDValue ResHi =
37042 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37043 ResHi = DAG.getBitcast(ResVT, ResHi);
37044 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
37047 // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
37048 // to legalize the mul explicitly because implicit legalization for type
37049 // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
37050 // instructions which will not exist when we explicitly legalize it by
37051 // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
37052 // <4 x i16> undef).
37054 // Legalize the operands of mul.
37055 // FIXME: We may be able to handle non-concatenated vectors by insertion.
37056 unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
37057 if ((RegSize % ReducedSizeInBits) != 0)
37058 return SDValue();
37060 SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
37061 DAG.getUNDEF(ReducedVT));
37062 Ops[0] = NewN0;
37063 NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
37064 Ops[0] = NewN1;
37065 NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
37067 if (Mode == MULU8 || Mode == MULS8) {
37068 // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
37069 // part is needed.
37070 SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
37072 // Convert the type of the mul result to VT.
37073 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
37074 SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
37075 : ISD::SIGN_EXTEND_VECTOR_INREG,
37076 DL, ResVT, Mul);
37077 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
37078 DAG.getIntPtrConstant(0, DL));
37081 // Generate the lower (pmullw) and higher (pmulhw/pmulhuw) parts of the mul.
37082 // For MULU16/MULS16, both parts are needed.
37083 SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
37084 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
37085 OpsVT, NewN0, NewN1);
37087 // Repack the lower part and higher part result of mul into a wider
37088 // result. Make sure the type of mul result is VT.
37089 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
37090 SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi);
37091 Res = DAG.getBitcast(ResVT, Res);
37092 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
37093 DAG.getIntPtrConstant(0, DL));
37096 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
37097 EVT VT, const SDLoc &DL) {
37099 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
37100 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37101 DAG.getConstant(Mult, DL, VT));
37102 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
37103 DAG.getConstant(Shift, DL, MVT::i8));
37104 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37105 N->getOperand(0));
37106 return Result;
37109 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
37110 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37111 DAG.getConstant(Mul1, DL, VT));
37112 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
37113 DAG.getConstant(Mul2, DL, VT));
37114 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37115 N->getOperand(0));
37116 return Result;
37119 switch (MulAmt) {
37120 default:
37121 break;
37122 case 11:
37123 // mul x, 11 => add ((shl (mul x, 5), 1), x)
37124 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
37125 case 21:
37126 // mul x, 21 => add ((shl (mul x, 5), 2), x)
37127 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
37128 case 41:
37129 // mul x, 41 => add ((shl (mul x, 5), 3), x)
37130 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
37131 case 22:
37132 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
37133 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37134 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
37135 case 19:
37136 // mul x, 19 => add ((shl (mul x, 9), 1), x)
37137 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
37138 case 37:
37139 // mul x, 37 => add ((shl (mul x, 9), 2), x)
37140 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
37141 case 73:
37142 // mul x, 73 => add ((shl (mul x, 9), 3), x)
37143 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
37144 case 13:
37145 // mul x, 13 => add ((shl (mul x, 3), 2), x)
37146 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
37147 case 23:
37148 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
37149 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
37150 case 26:
37151 // mul x, 26 => add ((mul (mul x, 5), 5), x)
37152 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
37153 case 28:
37154 // mul x, 28 => add ((mul (mul x, 9), 3), x)
37155 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
37156 case 29:
37157 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
37158 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37159 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
37162 // Another trick. If this is a power of 2 plus 2/4/8, we can use a shift
37163 // followed by a single LEA.
37164 // First check if this is a sum of two powers of 2 because that's easy. Then
37165 // count the number of trailing zeros up to the first set bit.
37166 // TODO: We can do this even without LEA at a cost of two shifts and an add.
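// e.g. MulAmt == 20 (0b10100): MulAmt & (MulAmt - 1) == 16 is a power of 2,
// ScaleShift == 2 and ShiftAmt == 4, so we emit (x << 4) + (x << 2), which
// instruction selection can typically fold into a shift plus a single LEA
// (base + index*4).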
37167 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
37168 unsigned ScaleShift = countTrailingZeros(MulAmt);
37169 if (ScaleShift >= 1 && ScaleShift < 4) {
37170 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
37171 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37172 DAG.getConstant(ShiftAmt, DL, MVT::i8));
37173 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37174 DAG.getConstant(ScaleShift, DL, MVT::i8));
37175 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
37179 return SDValue();
37182 // If the upper 17 bits of each element are zero then we can use PMADDWD,
37183 // which is always at least as quick as PMULLD, except on KNL.
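// With the upper 17 bits zero, each i32 element is a non-negative value that
// fits in the low i16. Viewed as vXi16, the odd (high) halves are all zero, so
// PMADDWD's per-pair sums reduce to lo*lo + 0*0, which equals the i32 multiply
// (no overflow, since both inputs are below 2^15).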
37184 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
37185 const X86Subtarget &Subtarget) {
37186 if (!Subtarget.hasSSE2())
37187 return SDValue();
37189 if (Subtarget.isPMADDWDSlow())
37190 return SDValue();
37192 EVT VT = N->getValueType(0);
37194 // Only support vXi32 vectors.
37195 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
37196 return SDValue();
37198 // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
37199 // Also allow v2i32 if it will be widened.
37200 MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
37201 if (!((ExperimentalVectorWideningLegalization && VT == MVT::v2i32) ||
37202 DAG.getTargetLoweringInfo().isTypeLegal(WVT)))
37203 return SDValue();
37205 SDValue N0 = N->getOperand(0);
37206 SDValue N1 = N->getOperand(1);
37208 // If we are zero-extending in two steps without SSE4.1, it's better to
37209 // reduce the vmul width instead.
37210 if (!Subtarget.hasSSE41() &&
37211 (N0.getOpcode() == ISD::ZERO_EXTEND &&
37212 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
37213 (N1.getOpcode() == ISD::ZERO_EXTEND &&
37214 N1.getOperand(0).getScalarValueSizeInBits() <= 8))
37215 return SDValue();
37217 APInt Mask17 = APInt::getHighBitsSet(32, 17);
37218 if (!DAG.MaskedValueIsZero(N1, Mask17) ||
37219 !DAG.MaskedValueIsZero(N0, Mask17))
37220 return SDValue();
37222 // Use SplitOpsAndApply to handle AVX splitting.
37223 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
37224 ArrayRef<SDValue> Ops) {
37225 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
37226 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
37228 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
37229 { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
37230 PMADDWDBuilder);
37233 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
37234 const X86Subtarget &Subtarget) {
37235 if (!Subtarget.hasSSE2())
37236 return SDValue();
37238 EVT VT = N->getValueType(0);
37240 // Only support vXi64 vectors.
37241 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
37242 VT.getVectorNumElements() < 2 ||
37243 !isPowerOf2_32(VT.getVectorNumElements()))
37244 return SDValue();
37246 SDValue N0 = N->getOperand(0);
37247 SDValue N1 = N->getOperand(1);
37249 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
37250 // 32 bits. We can lower with this if the sign bits stretch that far.
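// e.g. if both operands are sign-extended from i32 (more than 32 sign bits),
// the i64 product depends only on the low 32 bits of each operand, so a single
// PMULDQ per element pair gives the exact result.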
37251 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
37252 DAG.ComputeNumSignBits(N1) > 32) {
37253 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
37254 ArrayRef<SDValue> Ops) {
37255 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
37257 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
37258 PMULDQBuilder, /*CheckBWI*/false);
37261 // If the upper bits are zero we can use a single pmuludq.
37262 APInt Mask = APInt::getHighBitsSet(64, 32);
37263 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
37264 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
37265 ArrayRef<SDValue> Ops) {
37266 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
37268 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
37269 PMULUDQBuilder, /*CheckBWI*/false);
37272 return SDValue();
37275 /// Optimize a single multiply with constant into two operations in order to
37276 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
37277 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
37278 TargetLowering::DAGCombinerInfo &DCI,
37279 const X86Subtarget &Subtarget) {
37280 EVT VT = N->getValueType(0);
37282 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
37283 return V;
37285 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
37286 return V;
37288 if (DCI.isBeforeLegalize() && VT.isVector())
37289 return reduceVMULWidth(N, DAG, Subtarget);
37291 if (!MulConstantOptimization)
37292 return SDValue();
37293 // An imul is usually smaller than the alternative sequence.
37294 if (DAG.getMachineFunction().getFunction().hasMinSize())
37295 return SDValue();
37297 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
37298 return SDValue();
37300 if (VT != MVT::i64 && VT != MVT::i32)
37301 return SDValue();
37303 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
37304 if (!C)
37305 return SDValue();
37306 if (isPowerOf2_64(C->getZExtValue()))
37307 return SDValue();
37309 int64_t SignMulAmt = C->getSExtValue();
37310 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
37311 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
37313 SDLoc DL(N);
37314 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
37315 SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37316 DAG.getConstant(AbsMulAmt, DL, VT));
37317 if (SignMulAmt < 0)
37318 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
37319 NewMul);
37321 return NewMul;
37324 uint64_t MulAmt1 = 0;
37325 uint64_t MulAmt2 = 0;
37326 if ((AbsMulAmt % 9) == 0) {
37327 MulAmt1 = 9;
37328 MulAmt2 = AbsMulAmt / 9;
37329 } else if ((AbsMulAmt % 5) == 0) {
37330 MulAmt1 = 5;
37331 MulAmt2 = AbsMulAmt / 5;
37332 } else if ((AbsMulAmt % 3) == 0) {
37333 MulAmt1 = 3;
37334 MulAmt2 = AbsMulAmt / 3;
37337 SDValue NewMul;
37338 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
37339 if (MulAmt2 &&
37340 (isPowerOf2_64(MulAmt2) ||
37341 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
37343 if (isPowerOf2_64(MulAmt2) &&
37344 !(SignMulAmt >= 0 && N->hasOneUse() &&
37345 N->use_begin()->getOpcode() == ISD::ADD))
37346 // If the second multiplier is a power of 2, issue it first. We want the
37347 // multiply by 3, 5, or 9 to be folded into the addressing mode unless the
37348 // lone use is an add. Only do this for positive multiply amounts since the
37349 // negate would prevent it from being used as an addressing mode anyway.
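// e.g. AbsMulAmt == 40 == 5 * 8: swapping gives MulAmt1 == 8, MulAmt2 == 5, so
// we emit (shl x, 3) followed by a multiply by 5 that can become a single LEA
// (assuming the lone user is not an add, in which case we skip the swap).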
37350 std::swap(MulAmt1, MulAmt2);
37352 if (isPowerOf2_64(MulAmt1))
37353 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37354 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
37355 else
37356 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37357 DAG.getConstant(MulAmt1, DL, VT));
37359 if (isPowerOf2_64(MulAmt2))
37360 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
37361 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
37362 else
37363 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
37364 DAG.getConstant(MulAmt2, DL, VT));
37366 // Negate the result.
37367 if (SignMulAmt < 0)
37368 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
37369 NewMul);
37370 } else if (!Subtarget.slowLEA())
37371 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
37373 if (!NewMul) {
37374 assert(C->getZExtValue() != 0 &&
37375 C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
37376 "Both cases that could cause potential overflows should have "
37377 "already been handled.");
37378 if (isPowerOf2_64(AbsMulAmt - 1)) {
37379 // (mul x, 2^N + 1) => (add (shl x, N), x)
37380 NewMul = DAG.getNode(
37381 ISD::ADD, DL, VT, N->getOperand(0),
37382 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37383 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
37384 MVT::i8)));
37385 // To negate, subtract the number from zero
37386 if (SignMulAmt < 0)
37387 NewMul = DAG.getNode(ISD::SUB, DL, VT,
37388 DAG.getConstant(0, DL, VT), NewMul);
37389 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
37390 // (mul x, 2^N - 1) => (sub (shl x, N), x)
37391 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37392 DAG.getConstant(Log2_64(AbsMulAmt + 1),
37393 DL, MVT::i8));
37394 // To negate, reverse the operands of the subtract.
37395 if (SignMulAmt < 0)
37396 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
37397 else
37398 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
37399 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
37400 // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
37401 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37402 DAG.getConstant(Log2_64(AbsMulAmt - 2),
37403 DL, MVT::i8));
37404 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
37405 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
37406 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
37407 // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
37408 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37409 DAG.getConstant(Log2_64(AbsMulAmt + 2),
37410 DL, MVT::i8));
37411 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
37412 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
37416 return NewMul;
37419 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
37420 SDValue N0 = N->getOperand(0);
37421 SDValue N1 = N->getOperand(1);
37422 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
37423 EVT VT = N0.getValueType();
37425 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
37426 // since the result of setcc_c is all zeros or all ones.
37427 if (VT.isInteger() && !VT.isVector() &&
37428 N1C && N0.getOpcode() == ISD::AND &&
37429 N0.getOperand(1).getOpcode() == ISD::Constant) {
37430 SDValue N00 = N0.getOperand(0);
37431 APInt Mask = N0.getConstantOperandAPInt(1);
37432 Mask <<= N1C->getAPIntValue();
37433 bool MaskOK = false;
37434 // We can handle cases concerning bit-widening nodes containing setcc_c if
37435 // we carefully interrogate the mask to make sure the transform preserves
37436 // semantics.
37437 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
37438 // of the underlying setcc_c operation if the setcc_c was zero extended.
37439 // Consider the following example:
37440 // zext(setcc_c) -> i32 0x0000FFFF
37441 // c1 -> i32 0x0000FFFF
37442 // c2 -> i32 0x00000001
37443 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
37444 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
37445 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
37446 MaskOK = true;
37447 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
37448 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
37449 MaskOK = true;
37450 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
37451 N00.getOpcode() == ISD::ANY_EXTEND) &&
37452 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
37453 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
37455 if (MaskOK && Mask != 0) {
37456 SDLoc DL(N);
37457 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
37461 // Hardware support for vector shifts is sparse, which makes us scalarize
37462 // the vector operations in many cases. Also, on Sandy Bridge ADD is faster
37463 // than shl.
37464 // (shl V, 1) -> add V,V
37465 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
37466 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
37467 assert(N0.getValueType().isVector() && "Invalid vector shift type");
37468 // We shift all of the values by one. In many cases we do not have
37469 // hardware support for this operation. This is better expressed as an ADD
37470 // of two values.
37471 if (N1SplatC->getAPIntValue() == 1)
37472 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
37475 return SDValue();
37478 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
37479 SDValue N0 = N->getOperand(0);
37480 SDValue N1 = N->getOperand(1);
37481 EVT VT = N0.getValueType();
37482 unsigned Size = VT.getSizeInBits();
37484 // fold (ashr (shl a, [56,48,32,24,16]), SarConst)
37485 // into (shl (sext a), [56,48,32,24,16] - SarConst) or
37486 // into (lshr (sext a), SarConst - [56,48,32,24,16])
37487 // depending on the sign of (SarConst - [56,48,32,24,16]).
37489 // sexts on X86 are MOVs. The MOVs have the same code size
37490 // as the above SHIFTs (only a SHIFT by 1 has smaller code size).
37491 // However, the MOVs have two advantages over a SHIFT:
37492 // 1. MOVs can write to a register that differs from the source.
37493 // 2. MOVs accept memory operands.
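// e.g. for i32: (ashr (shl x, 24), 25) has ShlConst == 24 == 32 - 8, so we use
// SVT == i8 and SarConst becomes 25 - 24 == 1, giving
// (sra (sign_extend_inreg x, i8), 1).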
37495 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
37496 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
37497 N0.getOperand(1).getOpcode() != ISD::Constant)
37498 return SDValue();
37500 SDValue N00 = N0.getOperand(0);
37501 SDValue N01 = N0.getOperand(1);
37502 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
37503 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
37504 EVT CVT = N1.getValueType();
37506 if (SarConst.isNegative())
37507 return SDValue();
37509 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
37510 unsigned ShiftSize = SVT.getSizeInBits();
37511 // Skip types without a corresponding sext/zext and ShlConst values
37512 // that are not one of [56,48,32,24,16].
37513 if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
37514 continue;
37515 SDLoc DL(N);
37516 SDValue NN =
37517 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
37518 SarConst = SarConst - (Size - ShiftSize);
37519 if (SarConst == 0)
37520 return NN;
37521 else if (SarConst.isNegative())
37522 return DAG.getNode(ISD::SHL, DL, VT, NN,
37523 DAG.getConstant(-SarConst, DL, CVT));
37524 else
37525 return DAG.getNode(ISD::SRA, DL, VT, NN,
37526 DAG.getConstant(SarConst, DL, CVT));
37528 return SDValue();
37531 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
37532 TargetLowering::DAGCombinerInfo &DCI) {
37533 SDValue N0 = N->getOperand(0);
37534 SDValue N1 = N->getOperand(1);
37535 EVT VT = N0.getValueType();
37537 // Only do this on the last DAG combine as it can interfere with other
37538 // combines.
37539 if (!DCI.isAfterLegalizeDAG())
37540 return SDValue();
37542 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
37543 // TODO: This is a generic DAG combine that became an x86-only combine to
37544 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
37545 // and-not ('andn').
37546 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
37547 return SDValue();
37549 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
37550 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
37551 if (!ShiftC || !AndC)
37552 return SDValue();
37554 // If we can shrink the constant mask below 8 bits or 32 bits, then this
37555 // transform should reduce code size. It may also enable secondary transforms
37556 // from improved known-bits analysis or instruction selection.
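// e.g. (srl (and X, 0x3F80), 7) becomes (and (srl X, 7), 0x7F): the new mask
// fits in a sign-extended 8-bit immediate, which the old one did not.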
37557 APInt MaskVal = AndC->getAPIntValue();
37559 // If this can be matched by a zero extend, don't optimize.
37560 if (MaskVal.isMask()) {
37561 unsigned TO = MaskVal.countTrailingOnes();
37562 if (TO >= 8 && isPowerOf2_32(TO))
37563 return SDValue();
37566 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
37567 unsigned OldMaskSize = MaskVal.getMinSignedBits();
37568 unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
37569 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
37570 (OldMaskSize > 32 && NewMaskSize <= 32)) {
37571 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
37572 SDLoc DL(N);
37573 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
37574 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
37575 return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
37577 return SDValue();
37580 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
37581 TargetLowering::DAGCombinerInfo &DCI,
37582 const X86Subtarget &Subtarget) {
37583 unsigned Opcode = N->getOpcode();
37584 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
37585 "Unexpected shift opcode");
37587 EVT VT = N->getValueType(0);
37588 SDValue N0 = N->getOperand(0);
37589 SDValue N1 = N->getOperand(1);
37590 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
37591 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
37592 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
37593 N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
37594 "Unexpected PACKSS/PACKUS input type");
37596 bool IsSigned = (X86ISD::PACKSS == Opcode);
37598 // Constant Folding.
37599 APInt UndefElts0, UndefElts1;
37600 SmallVector<APInt, 32> EltBits0, EltBits1;
37601 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
37602 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
37603 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
37604 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
37605 unsigned NumLanes = VT.getSizeInBits() / 128;
37606 unsigned NumDstElts = VT.getVectorNumElements();
37607 unsigned NumSrcElts = NumDstElts / 2;
37608 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
37609 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
37611 APInt Undefs(NumDstElts, 0);
37612 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
37613 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
37614 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
37615 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
37616 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
37617 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
37619 if (UndefElts[SrcIdx]) {
37620 Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
37621 continue;
37624 APInt &Val = EltBits[SrcIdx];
37625 if (IsSigned) {
37626 // PACKSS: Truncate signed value with signed saturation.
37627 // Source values less than dst minint are saturated to minint.
37628 // Source values greater than dst maxint are saturated to maxint.
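// e.g. for PACKSSWB (i16 -> i8): 300 -> 127, -200 -> -128, 42 -> 42.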
37629 if (Val.isSignedIntN(DstBitsPerElt))
37630 Val = Val.trunc(DstBitsPerElt);
37631 else if (Val.isNegative())
37632 Val = APInt::getSignedMinValue(DstBitsPerElt);
37633 else
37634 Val = APInt::getSignedMaxValue(DstBitsPerElt);
37635 } else {
37636 // PACKUS: Truncate signed value with unsigned saturation.
37637 // Source values less than zero are saturated to zero.
37638 // Source values greater than dst maxuint are saturated to maxuint.
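// e.g. for PACKUSWB (i16 -> i8): 300 -> 255, -200 -> 0, 42 -> 42.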
37639 if (Val.isIntN(DstBitsPerElt))
37640 Val = Val.trunc(DstBitsPerElt);
37641 else if (Val.isNegative())
37642 Val = APInt::getNullValue(DstBitsPerElt);
37643 else
37644 Val = APInt::getAllOnesValue(DstBitsPerElt);
37646 Bits[Lane * NumDstEltsPerLane + Elt] = Val;
37650 return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
37653 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
37654 // truncate to create a larger truncate.
37655 if (Subtarget.hasAVX512() &&
37656 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
37657 N0.getOperand(0).getValueType() == MVT::v8i32) {
37658 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
37659 (!IsSigned &&
37660 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
37661 if (Subtarget.hasVLX())
37662 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
37664 // Widen input to v16i32 so we can truncate that.
37665 SDLoc dl(N);
37666 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
37667 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
37668 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
37672 // Attempt to combine as shuffle.
37673 SDValue Op(N, 0);
37674 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
37675 return Res;
37677 return SDValue();
37680 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
37681 TargetLowering::DAGCombinerInfo &DCI,
37682 const X86Subtarget &Subtarget) {
37683 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
37684 X86ISD::VSRL == N->getOpcode()) &&
37685 "Unexpected shift opcode");
37686 EVT VT = N->getValueType(0);
37687 SDValue N0 = N->getOperand(0);
37688 SDValue N1 = N->getOperand(1);
37690 // Shift zero -> zero.
37691 if (ISD::isBuildVectorAllZeros(N0.getNode()))
37692 return DAG.getConstant(0, SDLoc(N), VT);
37694 // Detect constant shift amounts.
37695 APInt UndefElts;
37696 SmallVector<APInt, 32> EltBits;
37697 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
37698 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
37699 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
37700 EltBits[0].getZExtValue(), DAG);
37703 APInt KnownUndef, KnownZero;
37704 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37705 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
37706 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
37707 KnownZero, DCI))
37708 return SDValue(N, 0);
37710 return SDValue();
37713 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
37714 TargetLowering::DAGCombinerInfo &DCI,
37715 const X86Subtarget &Subtarget) {
37716 unsigned Opcode = N->getOpcode();
37717 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
37718 X86ISD::VSRLI == Opcode) &&
37719 "Unexpected shift opcode");
37720 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
37721 EVT VT = N->getValueType(0);
37722 SDValue N0 = N->getOperand(0);
37723 SDValue N1 = N->getOperand(1);
37724 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
37725 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
37726 "Unexpected value type");
37727 assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
37729 // Out of range logical bit shifts are guaranteed to be zero.
37730 // Out of range arithmetic bit shifts splat the sign bit.
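// e.g. a VSRLI of a v8i16 element by 16 or more yields 0, while a VSRAI by 16
// or more is clamped to a shift by 15 and just replicates the sign bit.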
37731 unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue();
37732 if (ShiftVal >= NumBitsPerElt) {
37733 if (LogicalShift)
37734 return DAG.getConstant(0, SDLoc(N), VT);
37735 else
37736 ShiftVal = NumBitsPerElt - 1;
37739 // Shift N0 by zero -> N0.
37740 if (!ShiftVal)
37741 return N0;
37743 // Shift zero -> zero.
37744 if (ISD::isBuildVectorAllZeros(N0.getNode()))
37745 return DAG.getConstant(0, SDLoc(N), VT);
37747 // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
37748 // clamped to (NumBitsPerElt - 1).
37749 if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
37750 unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
37751 unsigned NewShiftVal = ShiftVal + ShiftVal2;
37752 if (NewShiftVal >= NumBitsPerElt)
37753 NewShiftVal = NumBitsPerElt - 1;
37754 return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
37755 DAG.getConstant(NewShiftVal, SDLoc(N), MVT::i8));
37758 // We can decode 'whole byte' logical bit shifts as shuffles.
37759 if (LogicalShift && (ShiftVal % 8) == 0) {
37760 SDValue Op(N, 0);
37761 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
37762 return Res;
37765 // Constant Folding.
37766 APInt UndefElts;
37767 SmallVector<APInt, 32> EltBits;
37768 if (N->isOnlyUserOf(N0.getNode()) &&
37769 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
37770 assert(EltBits.size() == VT.getVectorNumElements() &&
37771 "Unexpected shift value type");
37772 for (APInt &Elt : EltBits) {
37773 if (X86ISD::VSHLI == Opcode)
37774 Elt <<= ShiftVal;
37775 else if (X86ISD::VSRAI == Opcode)
37776 Elt.ashrInPlace(ShiftVal);
37777 else
37778 Elt.lshrInPlace(ShiftVal);
37780 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
37783 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37784 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
37785 APInt::getAllOnesValue(NumBitsPerElt), DCI))
37786 return SDValue(N, 0);
37788 return SDValue();
37791 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
37792 TargetLowering::DAGCombinerInfo &DCI,
37793 const X86Subtarget &Subtarget) {
37794 EVT VT = N->getValueType(0);
37795 assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
37796 (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
37797 "Unexpected vector insertion");
37799 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
37800 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37801 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
37802 APInt::getAllOnesValue(NumBitsPerElt), DCI))
37803 return SDValue(N, 0);
37805 // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
37806 SDValue Op(N, 0);
37807 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
37808 return Res;
37810 return SDValue();
37813 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
37814 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
37815 /// OR -> CMPNEQSS.
37816 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
37817 TargetLowering::DAGCombinerInfo &DCI,
37818 const X86Subtarget &Subtarget) {
37819 unsigned opcode;
37821 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
37822 // we're requiring SSE2 for both.
37823 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
37824 SDValue N0 = N->getOperand(0);
37825 SDValue N1 = N->getOperand(1);
37826 SDValue CMP0 = N0.getOperand(1);
37827 SDValue CMP1 = N1.getOperand(1);
37828 SDLoc DL(N);
37830 // The SETCCs should both refer to the same CMP.
37831 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
37832 return SDValue();
37834 SDValue CMP00 = CMP0->getOperand(0);
37835 SDValue CMP01 = CMP0->getOperand(1);
37836 EVT VT = CMP00.getValueType();
37838 if (VT == MVT::f32 || VT == MVT::f64) {
37839 bool ExpectingFlags = false;
37840 // Check for any users that want flags:
37841 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
37842 !ExpectingFlags && UI != UE; ++UI)
37843 switch (UI->getOpcode()) {
37844 default:
37845 case ISD::BR_CC:
37846 case ISD::BRCOND:
37847 case ISD::SELECT:
37848 ExpectingFlags = true;
37849 break;
37850 case ISD::CopyToReg:
37851 case ISD::SIGN_EXTEND:
37852 case ISD::ZERO_EXTEND:
37853 case ISD::ANY_EXTEND:
37854 break;
37857 if (!ExpectingFlags) {
37858 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
37859 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
37861 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
37862 X86::CondCode tmp = cc0;
37863 cc0 = cc1;
37864 cc1 = tmp;
37867 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
37868 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
37869 // FIXME: need symbolic constants for these magic numbers.
37870 // See X86ATTInstPrinter.cpp:printSSECC().
37871 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
37872 if (Subtarget.hasAVX512()) {
37873 SDValue FSetCC =
37874 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
37875 DAG.getConstant(x86cc, DL, MVT::i8));
37876 // Need to fill with zeros to ensure the bitcast will produce zeroes
37877 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
37878 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
37879 DAG.getConstant(0, DL, MVT::v16i1),
37880 FSetCC, DAG.getIntPtrConstant(0, DL));
37881 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
37882 N->getSimpleValueType(0));
37884 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
37885 CMP00.getValueType(), CMP00, CMP01,
37886 DAG.getConstant(x86cc, DL,
37887 MVT::i8));
37889 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
37890 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
37892 if (is64BitFP && !Subtarget.is64Bit()) {
37893 // On a 32-bit target, we cannot bitcast the 64-bit float to a
37894 // 64-bit integer, since that's not a legal type. Since
37895 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
37896 // bits, but can do this little dance to extract the lowest 32 bits
37897 // and work with those going forward.
37898 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
37899 OnesOrZeroesF);
37900 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
37901 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
37902 Vector32, DAG.getIntPtrConstant(0, DL));
37903 IntVT = MVT::i32;
37906 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
37907 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
37908 DAG.getConstant(1, DL, IntVT));
37909 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
37910 ANDed);
37911 return OneBitOfTruth;
37916 return SDValue();
37919 // Match (xor X, -1) -> X.
37920 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
37921 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
37922 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
37923 V = peekThroughBitcasts(V);
37924 if (V.getOpcode() == ISD::XOR &&
37925 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
37926 return V.getOperand(0);
37927 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
37928 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
37929 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
37930 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
37931 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
37932 Not, V.getOperand(1));
37935 SmallVector<SDValue, 2> CatOps;
37936 if (collectConcatOps(V.getNode(), CatOps)) {
37937 for (SDValue &CatOp : CatOps) {
37938 SDValue NotCat = IsNOT(CatOp, DAG);
37939 if (!NotCat) return SDValue();
37940 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
37942 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
37944 return SDValue();
37947 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
37948 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
37949 assert(N->getOpcode() == ISD::AND);
37951 MVT VT = N->getSimpleValueType(0);
37952 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
37953 return SDValue();
37955 SDValue X, Y;
37956 SDValue N0 = N->getOperand(0);
37957 SDValue N1 = N->getOperand(1);
37959 if (SDValue Not = IsNOT(N0, DAG)) {
37960 X = Not;
37961 Y = N1;
37962 } else if (SDValue Not = IsNOT(N1, DAG)) {
37963 X = Not;
37964 Y = N0;
37965 } else
37966 return SDValue();
37968 X = DAG.getBitcast(VT, X);
37969 Y = DAG.getBitcast(VT, Y);
37970 return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
37973 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM-sized
37974 // register. In most cases we actually compare or select YMM-sized registers
37975 // and mixing the two types creates horrible code. This method optimizes
37976 // some of the transition sequences.
37977 // Even with AVX-512 this is still useful for removing casts around logical
37978 // operations on vXi1 mask types.
37979 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
37980 const X86Subtarget &Subtarget) {
37981 EVT VT = N->getValueType(0);
37982 assert(VT.isVector() && "Expected vector type");
37984 assert((N->getOpcode() == ISD::ANY_EXTEND ||
37985 N->getOpcode() == ISD::ZERO_EXTEND ||
37986 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
37988 SDValue Narrow = N->getOperand(0);
37989 EVT NarrowVT = Narrow.getValueType();
37991 if (Narrow->getOpcode() != ISD::XOR &&
37992 Narrow->getOpcode() != ISD::AND &&
37993 Narrow->getOpcode() != ISD::OR)
37994 return SDValue();
37996 SDValue N0 = Narrow->getOperand(0);
37997 SDValue N1 = Narrow->getOperand(1);
37998 SDLoc DL(Narrow);
38001 // The left side has to be a trunc.
38001 if (N0.getOpcode() != ISD::TRUNCATE)
38002 return SDValue();
38004 // The type of the truncated inputs.
38005 if (N0.getOperand(0).getValueType() != VT)
38006 return SDValue();
38008 // The right side has to be a 'trunc' or a constant vector.
38009 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
38010 N1.getOperand(0).getValueType() == VT;
38011 if (!RHSTrunc &&
38012 !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
38013 return SDValue();
38015 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38017 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
38018 return SDValue();
38020 // Set N0 and N1 to hold the inputs to the new wide operation.
38021 N0 = N0.getOperand(0);
38022 if (RHSTrunc)
38023 N1 = N1.getOperand(0);
38024 else
38025 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
38027 // Generate the wide operation.
38028 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
38029 unsigned Opcode = N->getOpcode();
38030 switch (Opcode) {
38031 default: llvm_unreachable("Unexpected opcode");
38032 case ISD::ANY_EXTEND:
38033 return Op;
38034 case ISD::ZERO_EXTEND:
38035 return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
38036 case ISD::SIGN_EXTEND:
38037 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
38038 Op, DAG.getValueType(NarrowVT));
38042 /// If both input operands of a logic op are being cast from floating point
38043 /// types, try to convert this into a floating point logic node to avoid
38044 /// unnecessary moves from SSE to integer registers.
38045 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
38046 const X86Subtarget &Subtarget) {
38047 EVT VT = N->getValueType(0);
38048 SDValue N0 = N->getOperand(0);
38049 SDValue N1 = N->getOperand(1);
38050 SDLoc DL(N);
38052 if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
38053 return SDValue();
38055 SDValue N00 = N0.getOperand(0);
38056 SDValue N10 = N1.getOperand(0);
38057 EVT N00Type = N00.getValueType();
38058 EVT N10Type = N10.getValueType();
38060 // Ensure that both types are the same and are legal scalar fp types.
38061 if (N00Type != N10Type ||
38062 !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
38063 (Subtarget.hasSSE2() && N00Type == MVT::f64)))
38064 return SDValue();
38066 unsigned FPOpcode;
38067 switch (N->getOpcode()) {
38068 default: llvm_unreachable("Unexpected input node for FP logic conversion");
38069 case ISD::AND: FPOpcode = X86ISD::FAND; break;
38070 case ISD::OR: FPOpcode = X86ISD::FOR; break;
38071 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
38074 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
38075 return DAG.getBitcast(VT, FPLogic);
38078 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
38079 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
38080 /// with a shift-right to eliminate loading the vector constant mask value.
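/// e.g. if X is a vXi32 value known to be all-ones/all-zeros per element (a
/// vector setcc result), then (and X, splat(1)) can be rewritten as
/// (X86ISD::VSRLI X, 31), avoiding a constant-pool load for the mask.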
38081 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
38082 const X86Subtarget &Subtarget) {
38083 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
38084 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
38085 EVT VT0 = Op0.getValueType();
38086 EVT VT1 = Op1.getValueType();
38088 if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
38089 return SDValue();
38091 APInt SplatVal;
38092 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
38093 !SplatVal.isMask())
38094 return SDValue();
38096 // Don't prevent creation of ANDN.
38097 if (isBitwiseNot(Op0))
38098 return SDValue();
38100 if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
38101 return SDValue();
38103 unsigned EltBitWidth = VT0.getScalarSizeInBits();
38104 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
38105 return SDValue();
38107 SDLoc DL(N);
38108 unsigned ShiftVal = SplatVal.countTrailingOnes();
38109 SDValue ShAmt = DAG.getConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
38110 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
38111 return DAG.getBitcast(N->getValueType(0), Shift);
38114 // Get the index node from the lowered DAG of a GEP IR instruction with one
38115 // indexing dimension.
38116 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
38117 if (Ld->isIndexed())
38118 return SDValue();
38120 SDValue Base = Ld->getBasePtr();
38122 if (Base.getOpcode() != ISD::ADD)
38123 return SDValue();
38125 SDValue ShiftedIndex = Base.getOperand(0);
38127 if (ShiftedIndex.getOpcode() != ISD::SHL)
38128 return SDValue();
38130 return ShiftedIndex.getOperand(0);
38134 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
38135 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
38136 switch (VT.getSizeInBits()) {
38137 default: return false;
38138 case 64: return Subtarget.is64Bit();
38139 case 32: return true;
38142 return false;
38145 // This function recognizes cases where the X86 bzhi instruction can replace an
38146 // 'and-load' sequence.
38147 // Consider loading an integer value from an array of constants defined
38148 // as follows:
38150 // int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF, ..., 2^(SIZE-1) - 1}
38152 // and then applying a bitwise and between the loaded value and another input.
38153 // That is equivalent to performing bzhi (zero high bits) on the input, using
38154 // the same index as the load.
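// e.g. with array[i] == (1 << i) - 1, the sequence
//   (and (load array[idx]), inp)
// keeps only the low 'idx' bits of 'inp', which is exactly
//   (bzhi inp, idx).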
38155 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
38156 const X86Subtarget &Subtarget) {
38157 MVT VT = Node->getSimpleValueType(0);
38158 SDLoc dl(Node);
38160 // Check if subtarget has BZHI instruction for the node's type
38161 if (!hasBZHI(Subtarget, VT))
38162 return SDValue();
38164 // Try matching the pattern for both operands.
38165 for (unsigned i = 0; i < 2; i++) {
38166 SDValue N = Node->getOperand(i);
38167 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
38169 // Bail out if the operand is not a load instruction.
38170 if (!Ld)
38171 return SDValue();
38173 const Value *MemOp = Ld->getMemOperand()->getValue();
38175 if (!MemOp)
38176 return SDValue();
38178 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
38179 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
38180 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
38182 Constant *Init = GV->getInitializer();
38183 Type *Ty = Init->getType();
38184 if (!isa<ConstantDataArray>(Init) ||
38185 !Ty->getArrayElementType()->isIntegerTy() ||
38186 Ty->getArrayElementType()->getScalarSizeInBits() !=
38187 VT.getSizeInBits() ||
38188 Ty->getArrayNumElements() >
38189 Ty->getArrayElementType()->getScalarSizeInBits())
38190 continue;
38192 // Check if the array's constant elements are suitable to our case.
38193 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
38194 bool ConstantsMatch = true;
38195 for (uint64_t j = 0; j < ArrayElementCount; j++) {
38196 ConstantInt *Elem =
38197 dyn_cast<ConstantInt>(Init->getAggregateElement(j));
38198 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
38199 ConstantsMatch = false;
38200 break;
38203 if (!ConstantsMatch)
38204 continue;
38206 // Do the transformation (for a 32-bit type):
38207 // (and (load arr[idx]), inp)
38208 // -> (and inp, (srl 0xFFFFFFFF, (sub 32, idx)))
38209 // which will then be selected as a single bzhi instruction.
38210 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
38211 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
38213 // Get the Node which indexes into the array.
38214 SDValue Index = getIndexFromUnindexedLoad(Ld);
38215 if (!Index)
38216 return SDValue();
38217 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
38219 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
38220 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
38222 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
38223 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
38225 return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
38230 return SDValue();
38233 // Look for (and (ctpop X), 1), which is the IR form of __builtin_parity.
38234 // Turn it into a series of XORs and a setnp.
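// e.g. X == 0x12345678 has 13 set bits (odd parity). Folding the halves,
// 0x1234 ^ 0x5678 == 0x444C and 0x44 ^ 0x4C == 0x08, which still has odd
// parity, so PF is clear and SETNP produces 1.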
38235 static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
38236 const X86Subtarget &Subtarget) {
38237 EVT VT = N->getValueType(0);
38239 // We only support 64-bit and 32-bit. 64-bit requires special handling
38240 // unless the 64-bit popcnt instruction is legal.
38241 if (VT != MVT::i32 && VT != MVT::i64)
38242 return SDValue();
38244 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38245 if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
38246 return SDValue();
38248 SDValue N0 = N->getOperand(0);
38249 SDValue N1 = N->getOperand(1);
38251 // LHS needs to be a single use CTPOP.
38252 if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
38253 return SDValue();
38255 // RHS needs to be 1.
38256 if (!isOneConstant(N1))
38257 return SDValue();
38259 SDLoc DL(N);
38260 SDValue X = N0.getOperand(0);
38262 // If this is 64-bit, it's always best to xor the two 32-bit pieces together
38263 // even if we have popcnt.
38264 if (VT == MVT::i64) {
38265 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
38266 DAG.getNode(ISD::SRL, DL, VT, X,
38267 DAG.getConstant(32, DL, MVT::i8)));
38268 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
38269 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
38270 // Generate a 32-bit parity idiom. This will bring us back here if we need
38271 // to expand it too.
38272 SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
38273 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
38274 DAG.getConstant(1, DL, MVT::i32));
38275 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
38277 assert(VT == MVT::i32 && "Unexpected VT!");
38279 // Xor the high and low 16-bits together using a 32-bit operation.
38280 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
38281 DAG.getConstant(16, DL, MVT::i8));
38282 X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
38284 // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
38285 // This should allow an h-reg to be used to save a shift.
38286 // FIXME: We only get an h-reg in 32-bit mode.
38287 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
38288 DAG.getNode(ISD::SRL, DL, VT, X,
38289 DAG.getConstant(8, DL, MVT::i8)));
38290 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
38291 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
38292 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
38294 // Copy the inverse of the parity flag into a register with setcc.
38295 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
38296 // Zero extend to original type.
38297 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
38300 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
38301 TargetLowering::DAGCombinerInfo &DCI,
38302 const X86Subtarget &Subtarget) {
38303 EVT VT = N->getValueType(0);
38305 // If this is SSE1 only convert to FAND to avoid scalarization.
38306 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
38307 return DAG.getBitcast(
38308 MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
38309 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
38310 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
38313 // Use a 32-bit and+zext if the upper bits are known zero.
38314 if (VT == MVT::i64 && Subtarget.is64Bit() &&
38315 !isa<ConstantSDNode>(N->getOperand(1))) {
38316 APInt HiMask = APInt::getHighBitsSet(64, 32);
38317 if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
38318 DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
38319 SDLoc dl(N);
38320 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
38321 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
38322 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
38323 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
38327 // This must be done before legalization has expanded the ctpop.
38328 if (SDValue V = combineParity(N, DAG, Subtarget))
38329 return V;
38331 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
38332 // TODO: Support multiple SrcOps.
38333 if (VT == MVT::i1) {
38334 SmallVector<SDValue, 2> SrcOps;
38335 if (matchBitOpReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
38336 SrcOps.size() == 1) {
38337 SDLoc dl(N);
38338 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
38339 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
38340 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
38341 if (Mask) {
38342 APInt AllBits = APInt::getAllOnesValue(NumElts);
38343 return DAG.getSetCC(dl, MVT::i1, Mask,
38344 DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
38349 if (DCI.isBeforeLegalizeOps())
38350 return SDValue();
38352 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
38353 return R;
38355 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
38356 return FPLogic;
38358 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
38359 return R;
38361 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
38362 return ShiftRight;
38364 if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
38365 return R;
38367 // Attempt to recursively combine a bitmask AND with shuffles.
38368 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
38369 SDValue Op(N, 0);
38370 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38371 return Res;
38374 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
38375 if ((VT.getScalarSizeInBits() % 8) == 0 &&
38376 N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
38377 isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
38378 SDValue BitMask = N->getOperand(1);
38379 SDValue SrcVec = N->getOperand(0).getOperand(0);
38380 EVT SrcVecVT = SrcVec.getValueType();
38382 // Check that the constant bitmask masks whole bytes.
38383 APInt UndefElts;
38384 SmallVector<APInt, 64> EltBits;
38385 if (VT == SrcVecVT.getScalarType() &&
38386 N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
38387 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
38388 llvm::all_of(EltBits, [](APInt M) {
38389 return M.isNullValue() || M.isAllOnesValue();
38390 })) {
38391 unsigned NumElts = SrcVecVT.getVectorNumElements();
38392 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
38393 unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
38395 // Create a root shuffle mask from the byte mask and the extracted index.
38396 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
38397 for (unsigned i = 0; i != Scale; ++i) {
38398 if (UndefElts[i])
38399 continue;
38400 int VecIdx = Scale * Idx + i;
38401 ShuffleMask[VecIdx] =
38402 EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
38405 if (SDValue Shuffle = combineX86ShufflesRecursively(
38406 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 2,
38407 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
38408 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
38409 N->getOperand(0).getOperand(1));
38413 return SDValue();
38416 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
38417 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
38418 const X86Subtarget &Subtarget) {
38419 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
38421 EVT VT = N->getValueType(0);
38422 if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
38423 return SDValue();
38425 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
38426 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
38427 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
38428 return SDValue();
38430 // On XOP we'll lower to PCMOV so accept one use. Otherwise, only
38431 // do this if either mask already has multiple uses.
38432 if (!(Subtarget.hasXOP() || !N0.getOperand(1).hasOneUse() ||
38433 !N1.getOperand(1).hasOneUse()))
38434 return SDValue();
38436 // Attempt to extract constant byte masks.
38437 APInt UndefElts0, UndefElts1;
38438 SmallVector<APInt, 32> EltBits0, EltBits1;
38439 if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
38440 false, false))
38441 return SDValue();
38442 if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
38443 false, false))
38444 return SDValue();
38446 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
38447 // TODO - add UNDEF elts support.
38448 if (UndefElts0[i] || UndefElts1[i])
38449 return SDValue();
38450 if (EltBits0[i] != ~EltBits1[i])
38451 return SDValue();
38454 SDLoc DL(N);
38455 SDValue X = N->getOperand(0);
38456 SDValue Y =
38457 DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
38458 DAG.getBitcast(VT, N1.getOperand(0)));
38459 return DAG.getNode(ISD::OR, DL, VT, X, Y);
38462 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
38463 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
38464 if (N->getOpcode() != ISD::OR)
38465 return false;
38467 SDValue N0 = N->getOperand(0);
38468 SDValue N1 = N->getOperand(1);
38470 // Canonicalize AND to LHS.
38471 if (N1.getOpcode() == ISD::AND)
38472 std::swap(N0, N1);
38474 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
38475 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
38476 return false;
38478 Mask = N1.getOperand(0);
38479 X = N1.getOperand(1);
38481 // Check to see if the mask appeared in both the AND and ANDNP.
38482 if (N0.getOperand(0) == Mask)
38483 Y = N0.getOperand(1);
38484 else if (N0.getOperand(1) == Mask)
38485 Y = N0.getOperand(0);
38486 else
38487 return false;
38489 // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
38490 // ANDNP combine allows other combines to happen that prevent matching.
38491 return true;
38494 // Try to match:
38495 // (or (and (M, (sub 0, X)), (pandn M, X)))
38496 // which is a special case of vselect:
38497 // (vselect M, (sub 0, X), X)
38498 // Per:
38499 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
38500 // We know that, if fNegate is 0 or 1:
38501 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
38503 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
38504 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
38505 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
38506 // This lets us transform our vselect to:
38507 // (add (xor X, M), (and M, 1))
38508 // And further to:
38509 // (sub (xor X, M), M)
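// e.g. with X == 5: if M is all-ones (-1), (xor 5, -1) == -6 and
// (sub -6, -1) == -5; if M is 0, (xor 5, 0) == 5 and (sub 5, 0) == 5.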
38510 static SDValue combineLogicBlendIntoConditionalNegate(
38511 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
38512 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
38513 EVT MaskVT = Mask.getValueType();
38514 assert(MaskVT.isInteger() &&
38515 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
38516 "Mask must be zero/all-bits");
38518 if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
38519 return SDValue();
38520 if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
38521 return SDValue();
38523 auto IsNegV = [](SDNode *N, SDValue V) {
38524 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
38525 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
38528 SDValue V;
38529 if (IsNegV(Y.getNode(), X))
38530 V = X;
38531 else if (IsNegV(X.getNode(), Y))
38532 V = Y;
38533 else
38534 return SDValue();
38536 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
38537 SDValue SubOp2 = Mask;
38539 // If the negate was on the false side of the select, then
38540 // the operands of the SUB need to be swapped. PR 27251.
38541 // This is because the pattern being matched above is
38542 // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
38543 // but if the pattern matched was
38544 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
38545 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
38546 // pattern also needs to be a negation of the replacement pattern above.
38547 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
38548 // sub accomplishes the negation of the replacement pattern.
38549 if (V == Y)
38550 std::swap(SubOp1, SubOp2);
38552 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
38553 return DAG.getBitcast(VT, Res);
38556 // Try to fold:
38557 // (or (and (m, y), (pandn m, x)))
38558 // into:
38559 // (vselect m, x, y)
38560 // As a special case, try to fold:
38561 // (or (and (m, (sub 0, x)), (pandn m, x)))
38562 // into:
38563 // (sub (xor X, M), M)
38564 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
38565 const X86Subtarget &Subtarget) {
38566 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
38568 EVT VT = N->getValueType(0);
38569 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
38570 (VT.is256BitVector() && Subtarget.hasInt256())))
38571 return SDValue();
38573 SDValue X, Y, Mask;
38574 if (!matchLogicBlend(N, X, Y, Mask))
38575 return SDValue();
38577 // Validate that X, Y, and Mask are bitcasts, and see through them.
38578 Mask = peekThroughBitcasts(Mask);
38579 X = peekThroughBitcasts(X);
38580 Y = peekThroughBitcasts(Y);
38582 EVT MaskVT = Mask.getValueType();
38583 unsigned EltBits = MaskVT.getScalarSizeInBits();
38585 // TODO: Attempt to handle floating point cases as well?
38586 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
38587 return SDValue();
38589 SDLoc DL(N);
38591 // Attempt to combine to conditional negate: (sub (xor X, M), M)
38592 if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
38593 DAG, Subtarget))
38594 return Res;
38596 // PBLENDVB is only available on SSE 4.1.
38597 if (!Subtarget.hasSSE41())
38598 return SDValue();
38600 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
38602 X = DAG.getBitcast(BlendVT, X);
38603 Y = DAG.getBitcast(BlendVT, Y);
38604 Mask = DAG.getBitcast(BlendVT, Mask);
38605 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
38606 return DAG.getBitcast(VT, Mask);
38609 // Helper function for combineOrCmpEqZeroToCtlzSrl
38610 // Transforms:
38611 // seteq(cmp x, 0)
38612 // into:
38613 // srl(ctlz x), log2(bitsize(x))
38614 // Input pattern is checked by caller.
38615 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
38616 SelectionDAG &DAG) {
38617 SDValue Cmp = Op.getOperand(1);
38618 EVT VT = Cmp.getOperand(0).getValueType();
38619 unsigned Log2b = Log2_32(VT.getSizeInBits());
38620 SDLoc dl(Op);
38621 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
38622 // The result of the shift is true or false, and on X86, the 32-bit
38623 // encoding of shr and lzcnt is more desirable.
38624 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
38625 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
38626 DAG.getConstant(Log2b, dl, MVT::i8));
38627 return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
38630 // Try to transform:
38631 // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
38632 // into:
38633 // srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
38634 // Will also attempt to match more generic cases, e.g.:
38635 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
38636 // Only applies if the target supports the FastLZCNT feature.
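// e.g. for i32 operands, ctlz(x) == 32 exactly when x == 0, and 32 is the only
// possible ctlz result with bit 5 set, so (srl (ctlz x), 5) computes (x == 0).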
38637 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
38638 TargetLowering::DAGCombinerInfo &DCI,
38639 const X86Subtarget &Subtarget) {
38640 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
38641 return SDValue();
38643 auto isORCandidate = [](SDValue N) {
38644 return (N->getOpcode() == ISD::OR && N->hasOneUse());
38647 // Check that the zero extend is extending to 32 bits or more. The code generated by
38648 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
38649 // instructions to clear the upper bits.
38650 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
38651 !isORCandidate(N->getOperand(0)))
38652 return SDValue();
38654 // Check the node matches: setcc(eq, cmp 0)
38655 auto isSetCCCandidate = [](SDValue N) {
38656 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
38657 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
38658 N->getOperand(1).getOpcode() == X86ISD::CMP &&
38659 isNullConstant(N->getOperand(1).getOperand(1)) &&
38660 N->getOperand(1).getValueType().bitsGE(MVT::i32);
38663 SDNode *OR = N->getOperand(0).getNode();
38664 SDValue LHS = OR->getOperand(0);
38665 SDValue RHS = OR->getOperand(1);
38667 // Save nodes matching or(or, setcc(eq, cmp 0)).
38668 SmallVector<SDNode *, 2> ORNodes;
38669 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
38670 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
38671 ORNodes.push_back(OR);
38672 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
38673 LHS = OR->getOperand(0);
38674 RHS = OR->getOperand(1);
38677 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
38678 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
38679 !isORCandidate(SDValue(OR, 0)))
38680 return SDValue();
38682 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
38683 // to
38684 // or(srl(ctlz),srl(ctlz)).
38685 // The dag combiner can then fold it into:
38686 // srl(or(ctlz, ctlz)).
38687 EVT VT = OR->getValueType(0);
38688 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
38689 SDValue Ret, NewRHS;
38690 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
38691 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
38693 if (!Ret)
38694 return SDValue();
38696 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
38697 while (ORNodes.size() > 0) {
38698 OR = ORNodes.pop_back_val();
38699 LHS = OR->getOperand(0);
38700 RHS = OR->getOperand(1);
38701 // Swap RHS with LHS to match or(setcc(eq, cmp 0), or).
38702 if (RHS->getOpcode() == ISD::OR)
38703 std::swap(LHS, RHS);
38704 NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
38705 if (!NewRHS)
38706 return SDValue();
38707 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
38710 if (Ret)
38711 Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
38713 return Ret;
38716 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
38717 TargetLowering::DAGCombinerInfo &DCI,
38718 const X86Subtarget &Subtarget) {
38719 SDValue N0 = N->getOperand(0);
38720 SDValue N1 = N->getOperand(1);
38721 EVT VT = N->getValueType(0);
38723 // If this is SSE1-only, convert to FOR to avoid scalarization.
38724 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
38725 return DAG.getBitcast(MVT::v4i32,
38726 DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
38727 DAG.getBitcast(MVT::v4f32, N0),
38728 DAG.getBitcast(MVT::v4f32, N1)));
38731 if (DCI.isBeforeLegalizeOps())
38732 return SDValue();
38734 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
38735 return R;
38737 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
38738 return FPLogic;
38740 if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
38741 return R;
38743 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
38744 return R;
38746 // Attempt to recursively combine an OR of shuffles.
38747 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
38748 SDValue Op(N, 0);
38749 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38750 return Res;
38753 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
38754 return SDValue();
38756 // fold (or (x << c), (y >> (64 - c))) ==> (shld64 x, y, c)
38757 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
38758 unsigned Bits = VT.getScalarSizeInBits();
38760 // SHLD/SHRD instructions have lower register pressure, but on some
38761 // platforms they have higher latency than the equivalent
38762 // series of shifts/or that would otherwise be generated.
38763 // Don't fold (or (x << c), (y >> (64 - c))) if SHLD/SHRD instructions
38764 // have higher latencies and we are not optimizing for size.
38765 if (!OptForSize && Subtarget.isSHLDSlow())
38766 return SDValue();
38768 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
38769 std::swap(N0, N1);
38770 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
38771 return SDValue();
38772 if (!N0.hasOneUse() || !N1.hasOneUse())
38773 return SDValue();
38775 SDValue ShAmt0 = N0.getOperand(1);
38776 if (ShAmt0.getValueType() != MVT::i8)
38777 return SDValue();
38778 SDValue ShAmt1 = N1.getOperand(1);
38779 if (ShAmt1.getValueType() != MVT::i8)
38780 return SDValue();
38782 // Peek through any modulo shift masks.
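// (Illustrative note: for an iN funnel shift the amount is only used modulo N,
// so e.g. a 32-bit amount of (C & 31) behaves exactly like C; the stripped mask
// is remembered in ShMsk0/ShMsk1 so it can still be matched against below.)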
38783 SDValue ShMsk0;
38784 if (ShAmt0.getOpcode() == ISD::AND &&
38785 isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
38786 ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
38787 ShMsk0 = ShAmt0;
38788 ShAmt0 = ShAmt0.getOperand(0);
38790 SDValue ShMsk1;
38791 if (ShAmt1.getOpcode() == ISD::AND &&
38792 isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
38793 ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
38794 ShMsk1 = ShAmt1;
38795 ShAmt1 = ShAmt1.getOperand(0);
38798 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
38799 ShAmt0 = ShAmt0.getOperand(0);
38800 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
38801 ShAmt1 = ShAmt1.getOperand(0);
38803 SDLoc DL(N);
38804 unsigned Opc = ISD::FSHL;
38805 SDValue Op0 = N0.getOperand(0);
38806 SDValue Op1 = N1.getOperand(0);
38807 if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
38808 Opc = ISD::FSHR;
38809 std::swap(Op0, Op1);
38810 std::swap(ShAmt0, ShAmt1);
38811 std::swap(ShMsk0, ShMsk1);
38814 auto GetFunnelShift = [&DAG, &DL, VT, Opc](SDValue Op0, SDValue Op1,
38815 SDValue Amt) {
38816 if (Opc == ISD::FSHR)
38817 std::swap(Op0, Op1);
38818 return DAG.getNode(Opc, DL, VT, Op0, Op1,
38819 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Amt));
38822 // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
38823 // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
38824 // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
38825 // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
38826 // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
38827 // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
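// For example (illustrative, i32 case): (X << 7) | (Y >> 25) matches the first
// line and becomes FSHL(X, Y, 7), since ISD::FSHL(X, Y, C) on i32 is defined as
// (X << C) | (Y >> (32 - C)) for a nonzero amount C.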
38828 if (ShAmt1.getOpcode() == ISD::SUB) {
38829 SDValue Sum = ShAmt1.getOperand(0);
38830 if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
38831 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
38832 if (ShAmt1Op1.getOpcode() == ISD::AND &&
38833 isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
38834 ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
38835 ShMsk1 = ShAmt1Op1;
38836 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
38838 if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
38839 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
38840 if ((SumC->getAPIntValue() == Bits ||
38841 (SumC->getAPIntValue() == 0 && ShMsk1)) &&
38842 ShAmt1Op1 == ShAmt0)
38843 return GetFunnelShift(Op0, Op1, ShAmt0);
38845 } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
38846 auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
38847 if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
38848 return GetFunnelShift(Op0, Op1, ShAmt0);
38849 } else if (ShAmt1.getOpcode() == ISD::XOR) {
38850 SDValue Mask = ShAmt1.getOperand(1);
38851 if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
38852 unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
38853 SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
38854 if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
38855 ShAmt1Op0 = ShAmt1Op0.getOperand(0);
38856 if (MaskC->getSExtValue() == (Bits - 1) &&
38857 (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
38858 if (Op1.getOpcode() == InnerShift &&
38859 isa<ConstantSDNode>(Op1.getOperand(1)) &&
38860 Op1.getConstantOperandAPInt(1) == 1) {
38861 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
38863 // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
38864 if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
38865 Op1.getOperand(0) == Op1.getOperand(1)) {
38866 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
38872 return SDValue();
38875 /// Try to turn tests against the signbit in the form of:
38876 /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
38877 /// into:
38878 /// SETGT(X, -1)
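/// For example (illustrative), with X of type i64: (X >> 63) is 1 exactly when
/// X is negative, so xor'ing the truncated bit with 1 yields 1 exactly when
/// X >= 0, which is the same predicate as the signed comparison X > -1.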
38879 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
38880 // This is only worth doing if the output type is i8 or i1.
38881 EVT ResultType = N->getValueType(0);
38882 if (ResultType != MVT::i8 && ResultType != MVT::i1)
38883 return SDValue();
38885 SDValue N0 = N->getOperand(0);
38886 SDValue N1 = N->getOperand(1);
38888 // We should be performing an xor against a truncated shift.
38889 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
38890 return SDValue();
38892 // Make sure we are performing an xor against one.
38893 if (!isOneConstant(N1))
38894 return SDValue();
38896 // SetCC on x86 zero extends so only act on this if it's a logical shift.
38897 SDValue Shift = N0.getOperand(0);
38898 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
38899 return SDValue();
38901 // Make sure we are truncating from one of i16, i32 or i64.
38902 EVT ShiftTy = Shift.getValueType();
38903 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
38904 return SDValue();
38906 // Make sure the shift amount extracts the sign bit.
38907 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
38908 Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
38909 return SDValue();
38911 // Create a greater-than comparison against -1.
38912 // N.B. Using SETGE against 0 works, but we want a canonical-looking
38913 // comparison; using SETGT matches up with what TranslateX86CC expects.
38914 SDLoc DL(N);
38915 SDValue ShiftOp = Shift.getOperand(0);
38916 EVT ShiftOpTy = ShiftOp.getValueType();
38917 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38918 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
38919 *DAG.getContext(), ResultType);
38920 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
38921 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
38922 if (SetCCResultType != ResultType)
38923 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
38924 return Cond;
38927 /// Turn vector tests of the signbit in the form of:
38928 /// xor (sra X, elt_size(X)-1), -1
38929 /// into:
38930 /// pcmpgt X, -1
38932 /// This should be called before type legalization because the pattern may not
38933 /// persist after that.
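/// For example (illustrative), for v4i32: (sra X, 31) is all-ones in each lane
/// where X is negative and zero elsewhere; xor'ing with all-ones inverts that,
/// which is exactly the per-lane result of pcmpgt X, <-1,-1,-1,-1>.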
38934 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
38935 const X86Subtarget &Subtarget) {
38936 EVT VT = N->getValueType(0);
38937 if (!VT.isSimple())
38938 return SDValue();
38940 switch (VT.getSimpleVT().SimpleTy) {
38941 default: return SDValue();
38942 case MVT::v16i8:
38943 case MVT::v8i16:
38944 case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
38945 case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
38946 case MVT::v32i8:
38947 case MVT::v16i16:
38948 case MVT::v8i32:
38949 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
38952 // There must be a shift right algebraic before the xor, and the xor must be a
38953 // 'not' operation.
38954 SDValue Shift = N->getOperand(0);
38955 SDValue Ones = N->getOperand(1);
38956 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
38957 !ISD::isBuildVectorAllOnes(Ones.getNode()))
38958 return SDValue();
38960 // The shift should be smearing the sign bit across each vector element.
38961 auto *ShiftAmt =
38962 isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
38963 if (!ShiftAmt ||
38964 ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
38965 return SDValue();
38967 // Create a greater-than comparison against -1. We don't use the more obvious
38968 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
38969 return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
38972 /// Check if truncation with saturation from type \p SrcVT to \p DstVT
38973 /// is valid for the given \p Subtarget.
38974 static bool isSATValidOnAVX512Subtarget(EVT SrcVT, EVT DstVT,
38975 const X86Subtarget &Subtarget) {
38976 if (!Subtarget.hasAVX512())
38977 return false;
38979 // FIXME: Scalar type may be supported if we move it to vector register.
38980 if (!SrcVT.isVector())
38981 return false;
38983 EVT SrcElVT = SrcVT.getScalarType();
38984 EVT DstElVT = DstVT.getScalarType();
38985 if (DstElVT != MVT::i8 && DstElVT != MVT::i16 && DstElVT != MVT::i32)
38986 return false;
38987 if (SrcVT.is512BitVector() || Subtarget.hasVLX())
38988 return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI();
38989 return false;
38992 /// Detect patterns of truncation with unsigned saturation:
38994 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
38995 /// Return the source value x to be truncated or SDValue() if the pattern was
38996 /// not matched.
38998 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
38999 /// where C1 >= 0 and C2 is unsigned max of destination type.
39001 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
39002 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
39004 /// These two patterns are equivalent to:
39005 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
39006 /// So return the smax(x, C1) value to be truncated or SDValue() if the
39007 /// pattern was not matched.
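/// For example (illustrative), truncating i32 elements to i16 with unsigned
/// saturation matches pattern 1 as (trunc (umin x, 65535)), where 65535 is the
/// 16-bit mask checked by C2.isMask(16).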
39008 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39009 const SDLoc &DL) {
39010 EVT InVT = In.getValueType();
39012 // Saturation with truncation. We truncate from InVT to VT.
39013 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
39014 "Unexpected types for truncate operation");
39016 // Match min/max and return limit value as a parameter.
39017 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
39018 if (V.getOpcode() == Opcode &&
39019 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
39020 return V.getOperand(0);
39021 return SDValue();
39024 APInt C1, C2;
39025 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
39026 // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
39027 // the element size of the destination type.
39028 if (C2.isMask(VT.getScalarSizeInBits()))
39029 return UMin;
39031 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
39032 if (MatchMinMax(SMin, ISD::SMAX, C1))
39033 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
39034 return SMin;
39036 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
39037 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
39038 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
39039 C2.uge(C1)) {
39040 return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
39043 return SDValue();
39046 /// Detect patterns of truncation with signed saturation:
39047 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
39048 /// signed_max_of_dest_type)) to dest_type)
39049 /// or:
39050 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
39051 /// signed_min_of_dest_type)) to dest_type).
39052 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
39053 /// Return the source value to be truncated or SDValue() if the pattern was not
39054 /// matched.
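/// For example (illustrative), truncating i16 elements to i8 with signed
/// saturation matches (trunc (smin (smax x, -128), 127)); with MatchPackUS the
/// clamp constants are 0 and 255 instead, matching what PACKUS can produce.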
39055 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
39056 unsigned NumDstBits = VT.getScalarSizeInBits();
39057 unsigned NumSrcBits = In.getScalarValueSizeInBits();
39058 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
39060 auto MatchMinMax = [](SDValue V, unsigned Opcode,
39061 const APInt &Limit) -> SDValue {
39062 APInt C;
39063 if (V.getOpcode() == Opcode &&
39064 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
39065 return V.getOperand(0);
39066 return SDValue();
39069 APInt SignedMax, SignedMin;
39070 if (MatchPackUS) {
39071 SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
39072 SignedMin = APInt(NumSrcBits, 0);
39073 } else {
39074 SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
39075 SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
39078 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
39079 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
39080 return SMax;
39082 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
39083 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
39084 return SMin;
39086 return SDValue();
39089 /// Detect a pattern of truncation with signed saturation.
39090 /// The types should allow using the VPMOVS* instructions on AVX512.
39091 /// Return the source value to be truncated or SDValue() if the pattern was not
39092 /// matched.
39093 static SDValue detectAVX512SSatPattern(SDValue In, EVT VT,
39094 const X86Subtarget &Subtarget,
39095 const TargetLowering &TLI) {
39096 if (!TLI.isTypeLegal(In.getValueType()))
39097 return SDValue();
39098 if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
39099 return SDValue();
39100 return detectSSatPattern(In, VT);
39103 /// Detect a pattern of truncation with saturation:
39104 /// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
39105 /// The types should allow using the VPMOVUS* instructions on AVX512.
39106 /// Return the source value to be truncated or SDValue() if the pattern was not
39107 /// matched.
39108 static SDValue detectAVX512USatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39109 const SDLoc &DL,
39110 const X86Subtarget &Subtarget,
39111 const TargetLowering &TLI) {
39112 if (!TLI.isTypeLegal(In.getValueType()))
39113 return SDValue();
39114 if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
39115 return SDValue();
39116 return detectUSatPattern(In, VT, DAG, DL);
39119 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
39120 SelectionDAG &DAG,
39121 const X86Subtarget &Subtarget) {
39122 EVT SVT = VT.getScalarType();
39123 EVT InVT = In.getValueType();
39124 EVT InSVT = InVT.getScalarType();
39125 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39126 if (TLI.isTypeLegal(InVT) && TLI.isTypeLegal(VT) &&
39127 isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
39128 if (auto SSatVal = detectSSatPattern(In, VT))
39129 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
39130 if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
39131 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
39133 if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
39134 !Subtarget.hasAVX512() &&
39135 (SVT == MVT::i8 || SVT == MVT::i16) &&
39136 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
39137 if (auto USatVal = detectSSatPattern(In, VT, true)) {
39138 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
39139 if (SVT == MVT::i8 && InSVT == MVT::i32) {
39140 EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
39141 VT.getVectorNumElements());
39142 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
39143 DAG, Subtarget);
39144 if (Mid)
39145 return truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
39146 Subtarget);
39147 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
39148 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
39149 Subtarget);
39151 if (auto SSatVal = detectSSatPattern(In, VT))
39152 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
39153 Subtarget);
39155 return SDValue();
39158 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
39159 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
39160 /// X86ISD::AVG instruction.
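/// For example (illustrative), for i8 elements a = 254 and b = 255 the result
/// is (254 + 255 + 1) / 2 = 255; the intermediate sum needs more than 8 bits,
/// which is why the matched pattern zero-extends before adding and shifting.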
39161 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39162 const X86Subtarget &Subtarget,
39163 const SDLoc &DL) {
39164 if (!VT.isVector())
39165 return SDValue();
39166 EVT InVT = In.getValueType();
39167 unsigned NumElems = VT.getVectorNumElements();
39169 EVT ScalarVT = VT.getVectorElementType();
39170 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
39171 NumElems >= 2 && isPowerOf2_32(NumElems)))
39172 return SDValue();
39174 // InScalarVT is the intermediate type in AVG pattern and it should be greater
39175 // than the original input type (i8/i16).
39176 EVT InScalarVT = InVT.getVectorElementType();
39177 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
39178 return SDValue();
39180 if (!Subtarget.hasSSE2())
39181 return SDValue();
39183 // Detect the following pattern:
39185 // %1 = zext <N x i8> %a to <N x i32>
39186 // %2 = zext <N x i8> %b to <N x i32>
39187 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
39188 // %4 = add nuw nsw <N x i32> %3, %2
39189 // %5 = lshr <N x i32> %4, <i32 1 x N>
39190 // %6 = trunc <N x i32> %5 to <N x i8>
39192 // In AVX512, the last instruction can also be a trunc store.
39193 if (In.getOpcode() != ISD::SRL)
39194 return SDValue();
39196 // A lambda checking that the given SDValue is a constant vector and that each
39197 // element is in the range [Min, Max].
39198 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
39199 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
39200 if (!BV || !BV->isConstant())
39201 return false;
39202 for (SDValue Op : V->ops()) {
39203 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
39204 if (!C)
39205 return false;
39206 const APInt &Val = C->getAPIntValue();
39207 if (Val.ult(Min) || Val.ugt(Max))
39208 return false;
39210 return true;
39213 // Check that each element of the vector is shifted right by one.
39214 auto LHS = In.getOperand(0);
39215 auto RHS = In.getOperand(1);
39216 if (!IsConstVectorInRange(RHS, 1, 1))
39217 return SDValue();
39218 if (LHS.getOpcode() != ISD::ADD)
39219 return SDValue();
39221 // Detect a pattern of a + b + 1 where the order doesn't matter.
39222 SDValue Operands[3];
39223 Operands[0] = LHS.getOperand(0);
39224 Operands[1] = LHS.getOperand(1);
39226 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39227 ArrayRef<SDValue> Ops) {
39228 return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
39231 // Take care of the case when one of the operands is a constant vector whose
39232 // element is in the range [1, 256].
39233 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
39234 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
39235 Operands[0].getOperand(0).getValueType() == VT) {
39236 // The pattern is detected. Subtract one from the constant vector, then
39237 // demote it and emit X86ISD::AVG instruction.
39238 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
39239 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
39240 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
39241 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
39242 { Operands[0].getOperand(0), Operands[1] },
39243 AVGBuilder);
39246 // Matches 'add like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
39247 // Match the or case only if it's 'add-like', i.e. it can be replaced by an add.
39248 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
39249 if (ISD::ADD == V.getOpcode()) {
39250 Op0 = V.getOperand(0);
39251 Op1 = V.getOperand(1);
39252 return true;
39254 if (ISD::ZERO_EXTEND != V.getOpcode())
39255 return false;
39256 V = V.getOperand(0);
39257 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
39258 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
39259 return false;
39260 Op0 = V.getOperand(0);
39261 Op1 = V.getOperand(1);
39262 return true;
39265 SDValue Op0, Op1;
39266 if (FindAddLike(Operands[0], Op0, Op1))
39267 std::swap(Operands[0], Operands[1]);
39268 else if (!FindAddLike(Operands[1], Op0, Op1))
39269 return SDValue();
39270 Operands[2] = Op0;
39271 Operands[1] = Op1;
39273 // Now we have three operands of two additions. Check that one of them is a
39274 // constant vector with ones, and the other two can be promoted from i8/i16.
39275 for (int i = 0; i < 3; ++i) {
39276 if (!IsConstVectorInRange(Operands[i], 1, 1))
39277 continue;
39278 std::swap(Operands[i], Operands[2]);
39280 // Check if Operands[0] and Operands[1] are results of type promotion.
39281 for (int j = 0; j < 2; ++j)
39282 if (Operands[j].getValueType() != VT) {
39283 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
39284 Operands[j].getOperand(0).getValueType() != VT)
39285 return SDValue();
39286 Operands[j] = Operands[j].getOperand(0);
39289 // The pattern is detected, emit X86ISD::AVG instruction(s).
39290 return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
39291 AVGBuilder);
39294 return SDValue();
39297 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
39298 TargetLowering::DAGCombinerInfo &DCI,
39299 const X86Subtarget &Subtarget) {
39300 LoadSDNode *Ld = cast<LoadSDNode>(N);
39301 EVT RegVT = Ld->getValueType(0);
39302 EVT MemVT = Ld->getMemoryVT();
39303 SDLoc dl(Ld);
39304 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39306 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
39307 // into two 16-byte operations. Also split non-temporal aligned loads on
39308 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
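// For example (illustrative), a 32-byte v8f32 load at Ptr is rebuilt as two
// 16-byte loads at Ptr and Ptr + 16, concatenated with CONCAT_VECTORS and with
// their chains merged through a TokenFactor.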
39309 ISD::LoadExtType Ext = Ld->getExtensionType();
39310 bool Fast;
39311 unsigned Alignment = Ld->getAlignment();
39312 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
39313 Ext == ISD::NON_EXTLOAD &&
39314 ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
39315 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
39316 *Ld->getMemOperand(), &Fast) &&
39317 !Fast))) {
39318 unsigned NumElems = RegVT.getVectorNumElements();
39319 if (NumElems < 2)
39320 return SDValue();
39322 unsigned HalfAlign = 16;
39323 SDValue Ptr1 = Ld->getBasePtr();
39324 SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
39325 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
39326 NumElems / 2);
39327 SDValue Load1 =
39328 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
39329 Alignment, Ld->getMemOperand()->getFlags());
39330 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
39331 Ld->getPointerInfo().getWithOffset(HalfAlign),
39332 MinAlign(Alignment, HalfAlign),
39333 Ld->getMemOperand()->getFlags());
39334 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
39335 Load1.getValue(1), Load2.getValue(1));
39337 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
39338 return DCI.CombineTo(N, NewVec, TF, true);
39341 // Bool vector load - attempt to cast to an integer, as we have good
39342 // (vXiY *ext(vXi1 bitcast(iX))) handling.
39343 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
39344 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
39345 unsigned NumElts = RegVT.getVectorNumElements();
39346 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
39347 if (TLI.isTypeLegal(IntVT)) {
39348 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
39349 Ld->getPointerInfo(), Alignment,
39350 Ld->getMemOperand()->getFlags());
39351 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
39352 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
39356 return SDValue();
39359 /// If V is a build vector of boolean constants and exactly one of those
39360 /// constants is true, return the operand index of that true element.
39361 /// Otherwise, return -1.
39362 static int getOneTrueElt(SDValue V) {
39363 // This needs to be a build vector of booleans.
39364 // TODO: Checking for the i1 type matches the IR definition for the mask,
39365 // but the mask check could be loosened to i8 or other types. That might
39366 // also require checking more than 'allOnesValue'; eg, the x86 HW
39367 // instructions only require that the MSB is set for each mask element.
39368 // The ISD::MSTORE comments/definition do not specify how the mask operand
39369 // is formatted.
39370 auto *BV = dyn_cast<BuildVectorSDNode>(V);
39371 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
39372 return -1;
39374 int TrueIndex = -1;
39375 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
39376 for (unsigned i = 0; i < NumElts; ++i) {
39377 const SDValue &Op = BV->getOperand(i);
39378 if (Op.isUndef())
39379 continue;
39380 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
39381 if (!ConstNode)
39382 return -1;
39383 if (ConstNode->getAPIntValue().isAllOnesValue()) {
39384 // If we already found a one, this is too many.
39385 if (TrueIndex >= 0)
39386 return -1;
39387 TrueIndex = i;
39390 return TrueIndex;
39393 /// Given a masked memory load/store operation, return true if it has one mask
39394 /// bit set. If it has one mask bit set, then also return the memory address of
39395 /// the scalar element to load/store, the vector index to insert/extract that
39396 /// scalar element, and the alignment for the scalar memory access.
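/// For example (illustrative), a v4f32 masked op with mask <0,0,1,0> yields
/// Addr = BasePtr + 2 * 4 bytes, Index = 2, and Alignment clamped to
/// MinAlign(original alignment, 4).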
39397 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
39398 SelectionDAG &DAG, SDValue &Addr,
39399 SDValue &Index, unsigned &Alignment) {
39400 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
39401 if (TrueMaskElt < 0)
39402 return false;
39404 // Get the address of the one scalar element that is specified by the mask
39405 // using the appropriate offset from the base pointer.
39406 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
39407 Addr = MaskedOp->getBasePtr();
39408 if (TrueMaskElt != 0) {
39409 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
39410 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
39413 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
39414 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
39415 return true;
39418 /// If exactly one element of the mask is set for a non-extending masked load,
39419 /// it is a scalar load and vector insert.
39420 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
39421 /// mask have already been optimized in IR, so we don't bother with those here.
39422 static SDValue
39423 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
39424 TargetLowering::DAGCombinerInfo &DCI) {
39425 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
39426 // However, some target hooks may need to be added to know when the transform
39427 // is profitable. Endianness would also have to be considered.
39429 SDValue Addr, VecIndex;
39430 unsigned Alignment;
39431 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
39432 return SDValue();
39434 // Load the one scalar element that is specified by the mask using the
39435 // appropriate offset from the base pointer.
39436 SDLoc DL(ML);
39437 EVT VT = ML->getValueType(0);
39438 EVT EltVT = VT.getVectorElementType();
39439 SDValue Load =
39440 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
39441 Alignment, ML->getMemOperand()->getFlags());
39443 // Insert the loaded element into the appropriate place in the vector.
39444 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
39445 ML->getPassThru(), Load, VecIndex);
39446 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
39449 static SDValue
39450 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
39451 TargetLowering::DAGCombinerInfo &DCI) {
39452 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
39453 return SDValue();
39455 SDLoc DL(ML);
39456 EVT VT = ML->getValueType(0);
39458 // If we are loading the first and last elements of a vector, it is safe and
39459 // always faster to load the whole vector. Replace the masked load with a
39460 // vector load and select.
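// For example (illustrative), a v4f32 masked load with mask <1,0,0,1> already
// dereferences the first and last 4-byte elements, so reading the full 16 bytes
// cannot fault on any new memory; the blend then keeps the pass-through values
// in the disabled lanes.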
39461 unsigned NumElts = VT.getVectorNumElements();
39462 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
39463 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
39464 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
39465 if (LoadFirstElt && LoadLastElt) {
39466 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
39467 ML->getMemOperand());
39468 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
39469 ML->getPassThru());
39470 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
39473 // Convert a masked load with a constant mask into a masked load and a select.
39474 // This allows the select operation to use a faster kind of select instruction
39475 // (for example, vblendvps -> vblendps).
39477 // Don't try this if the pass-through operand is already undefined. That would
39478 // cause an infinite loop because that's what we're about to create.
39479 if (ML->getPassThru().isUndef())
39480 return SDValue();
39482 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
39483 return SDValue();
39485 // The new masked load has an undef pass-through operand. The select uses the
39486 // original pass-through operand.
39487 SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
39488 ML->getMask(), DAG.getUNDEF(VT),
39489 ML->getMemoryVT(), ML->getMemOperand(),
39490 ML->getExtensionType());
39491 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
39492 ML->getPassThru());
39494 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
39497 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
39498 TargetLowering::DAGCombinerInfo &DCI,
39499 const X86Subtarget &Subtarget) {
39500 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
39502 // TODO: Expanding load with constant mask may be optimized as well.
39503 if (Mld->isExpandingLoad())
39504 return SDValue();
39506 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
39507 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
39508 return ScalarLoad;
39509 // TODO: Do some AVX512 subsets benefit from this transform?
39510 if (!Subtarget.hasAVX512())
39511 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
39512 return Blend;
39515 if (Mld->getExtensionType() != ISD::EXTLOAD)
39516 return SDValue();
39518 // Resolve extending loads.
39519 EVT VT = Mld->getValueType(0);
39520 unsigned NumElems = VT.getVectorNumElements();
39521 EVT LdVT = Mld->getMemoryVT();
39522 SDLoc dl(Mld);
39524 assert(LdVT != VT && "Cannot extend to the same type");
39525 unsigned ToSz = VT.getScalarSizeInBits();
39526 unsigned FromSz = LdVT.getScalarSizeInBits();
39527 // From/To sizes and ElemCount must be pow of two.
39528 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
39529 "Unexpected size for extending masked load");
39531 unsigned SizeRatio = ToSz / FromSz;
39532 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
39534 // Create a type on which we perform the shuffle.
39535 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
39536 LdVT.getScalarType(), NumElems*SizeRatio);
39537 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
39539 // Convert PassThru value.
39540 SDValue WidePassThru = DAG.getBitcast(WideVecVT, Mld->getPassThru());
39541 if (!Mld->getPassThru().isUndef()) {
39542 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39543 for (unsigned i = 0; i != NumElems; ++i)
39544 ShuffleVec[i] = i * SizeRatio;
39546 // Can't shuffle using an illegal type.
39547 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
39548 "WideVecVT should be legal");
39549 WidePassThru = DAG.getVectorShuffle(WideVecVT, dl, WidePassThru,
39550 DAG.getUNDEF(WideVecVT), ShuffleVec);
39553 // Prepare the new mask.
39554 SDValue NewMask;
39555 SDValue Mask = Mld->getMask();
39556 if (Mask.getValueType() == VT) {
39557 // Mask and original value have the same type.
39558 NewMask = DAG.getBitcast(WideVecVT, Mask);
39559 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39560 for (unsigned i = 0; i != NumElems; ++i)
39561 ShuffleVec[i] = i * SizeRatio;
39562 for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
39563 ShuffleVec[i] = NumElems * SizeRatio;
39564 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
39565 DAG.getConstant(0, dl, WideVecVT),
39566 ShuffleVec);
39567 } else {
39568 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
39569 unsigned WidenNumElts = NumElems*SizeRatio;
39570 unsigned MaskNumElts = VT.getVectorNumElements();
39571 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
39572 WidenNumElts);
39574 unsigned NumConcat = WidenNumElts / MaskNumElts;
39575 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
39576 SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
39577 Ops[0] = Mask;
39578 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
39581 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
39582 Mld->getBasePtr(), NewMask, WidePassThru,
39583 Mld->getMemoryVT(), Mld->getMemOperand(),
39584 ISD::NON_EXTLOAD);
39586 SDValue SlicedVec = DAG.getBitcast(WideVecVT, WideLd);
39587 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39588 for (unsigned i = 0; i != NumElems; ++i)
39589 ShuffleVec[i * SizeRatio] = i;
39591 // Can't shuffle using an illegal type.
39592 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
39593 "WideVecVT should be legal");
39594 SlicedVec = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
39595 DAG.getUNDEF(WideVecVT), ShuffleVec);
39596 SlicedVec = DAG.getBitcast(VT, SlicedVec);
39598 return DCI.CombineTo(N, SlicedVec, WideLd.getValue(1), true);
39601 /// If exactly one element of the mask is set for a non-truncating masked store,
39602 /// it is a vector extract and scalar store.
39603 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
39604 /// mask have already been optimized in IR, so we don't bother with those here.
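/// For example (illustrative), a masked store of v4i32 V with mask <0,0,1,0>
/// becomes a plain i32 store of (extractelement V, 2) at BasePtr + 8.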
39605 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
39606 SelectionDAG &DAG) {
39607 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
39608 // However, some target hooks may need to be added to know when the transform
39609 // is profitable. Endianness would also have to be considered.
39611 SDValue Addr, VecIndex;
39612 unsigned Alignment;
39613 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
39614 return SDValue();
39616 // Extract the one scalar element that is actually being stored.
39617 SDLoc DL(MS);
39618 EVT VT = MS->getValue().getValueType();
39619 EVT EltVT = VT.getVectorElementType();
39620 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
39621 MS->getValue(), VecIndex);
39623 // Store that element at the appropriate offset from the base pointer.
39624 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
39625 Alignment, MS->getMemOperand()->getFlags());
39628 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
39629 TargetLowering::DAGCombinerInfo &DCI,
39630 const X86Subtarget &Subtarget) {
39631 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
39632 if (Mst->isCompressingStore())
39633 return SDValue();
39635 EVT VT = Mst->getValue().getValueType();
39636 EVT StVT = Mst->getMemoryVT();
39637 SDLoc dl(Mst);
39638 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39640 if (!Mst->isTruncatingStore()) {
39641 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
39642 return ScalarStore;
39644 // If the mask value has been legalized to a non-boolean vector, try to
39645 // simplify ops leading up to it. We only demand the MSB of each lane.
39646 SDValue Mask = Mst->getMask();
39647 if (Mask.getScalarValueSizeInBits() != 1) {
39648 APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
39649 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
39650 return SDValue(N, 0);
39653 // TODO: AVX512 targets should also be able to simplify something like the
39654 // pattern above, but that pattern will be different. It will either need to
39655 // match setcc more generally or match PCMPGTM later (in tablegen?).
39657 SDValue Value = Mst->getValue();
39658 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
39659 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
39660 Mst->getMemoryVT())) {
39661 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
39662 Mst->getBasePtr(), Mask,
39663 Mst->getMemoryVT(), Mst->getMemOperand(), true);
39666 return SDValue();
39669 // Resolve truncating stores.
39670 unsigned NumElems = VT.getVectorNumElements();
39672 assert(StVT != VT && "Cannot truncate to the same type");
39673 unsigned FromSz = VT.getScalarSizeInBits();
39674 unsigned ToSz = StVT.getScalarSizeInBits();
39676 // The truncating store is legal in some cases. For example
39677 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
39678 // are designated for truncate store.
39679 // In this case we don't need any further transformations.
39680 if (TLI.isTruncStoreLegal(VT, StVT))
39681 return SDValue();
39683 // From/To sizes and ElemCount must be pow of two.
39684 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
39685 "Unexpected size for truncating masked store");
39686 // We are going to use the original vector elt for storing.
39687 // Accumulated smaller vector elements must be a multiple of the store size.
39688 assert (((NumElems * FromSz) % ToSz) == 0 &&
39689 "Unexpected ratio for truncating masked store");
39691 unsigned SizeRatio = FromSz / ToSz;
39692 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
39694 // Create a type on which we perform the shuffle.
39695 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
39696 StVT.getScalarType(), NumElems*SizeRatio);
39698 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
39700 SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
39701 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39702 for (unsigned i = 0; i != NumElems; ++i)
39703 ShuffleVec[i] = i * SizeRatio;
39705 // Can't shuffle using an illegal type.
39706 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
39707 "WideVecVT should be legal");
39709 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
39710 DAG.getUNDEF(WideVecVT),
39711 ShuffleVec);
39713 SDValue NewMask;
39714 SDValue Mask = Mst->getMask();
39715 if (Mask.getValueType() == VT) {
39716 // Mask and original value have the same type.
39717 NewMask = DAG.getBitcast(WideVecVT, Mask);
39718 for (unsigned i = 0; i != NumElems; ++i)
39719 ShuffleVec[i] = i * SizeRatio;
39720 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
39721 ShuffleVec[i] = NumElems*SizeRatio;
39722 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
39723 DAG.getConstant(0, dl, WideVecVT),
39724 ShuffleVec);
39725 } else {
39726 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
39727 unsigned WidenNumElts = NumElems*SizeRatio;
39728 unsigned MaskNumElts = VT.getVectorNumElements();
39729 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
39730 WidenNumElts);
39732 unsigned NumConcat = WidenNumElts / MaskNumElts;
39733 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
39734 SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
39735 Ops[0] = Mask;
39736 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
39739 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
39740 Mst->getBasePtr(), NewMask, StVT,
39741 Mst->getMemOperand(), false);
39744 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
39745 TargetLowering::DAGCombinerInfo &DCI,
39746 const X86Subtarget &Subtarget) {
39747 StoreSDNode *St = cast<StoreSDNode>(N);
39748 EVT VT = St->getValue().getValueType();
39749 EVT StVT = St->getMemoryVT();
39750 SDLoc dl(St);
39751 unsigned Alignment = St->getAlignment();
39752 SDValue StoredVal = St->getOperand(1);
39753 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39755 // Convert a store of vXi1 into a store of iX and a bitcast.
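// For example (illustrative), a store of a v16i1 value becomes a bitcast to i16
// followed by an ordinary 2-byte scalar store.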
39756 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
39757 VT.getVectorElementType() == MVT::i1) {
39759 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
39760 StoredVal = DAG.getBitcast(NewVT, StoredVal);
39762 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
39763 St->getPointerInfo(), St->getAlignment(),
39764 St->getMemOperand()->getFlags());
39767 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
39768 // This will avoid a copy to k-register.
39769 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
39770 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39771 StoredVal.getOperand(0).getValueType() == MVT::i8) {
39772 return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
39773 St->getBasePtr(), St->getPointerInfo(),
39774 St->getAlignment(), St->getMemOperand()->getFlags());
39777 // Widen v2i1/v4i1 stores to v8i1.
39778 if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
39779 Subtarget.hasAVX512()) {
39780 unsigned NumConcats = 8 / VT.getVectorNumElements();
39781 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
39782 Ops[0] = StoredVal;
39783 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
39784 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
39785 St->getPointerInfo(), St->getAlignment(),
39786 St->getMemOperand()->getFlags());
39789 // Turn vXi1 stores of constants into a scalar store.
39790 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
39791 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
39792 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
39793 // If it's a v64i1 store without 64-bit support, we need two stores.
39794 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
39795 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
39796 StoredVal->ops().slice(0, 32));
39797 Lo = combinevXi1ConstantToInteger(Lo, DAG);
39798 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
39799 StoredVal->ops().slice(32, 32));
39800 Hi = combinevXi1ConstantToInteger(Hi, DAG);
39802 SDValue Ptr0 = St->getBasePtr();
39803 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
39805 SDValue Ch0 =
39806 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
39807 Alignment, St->getMemOperand()->getFlags());
39808 SDValue Ch1 =
39809 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
39810 St->getPointerInfo().getWithOffset(4),
39811 MinAlign(Alignment, 4U),
39812 St->getMemOperand()->getFlags());
39813 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
39816 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
39817 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
39818 St->getPointerInfo(), St->getAlignment(),
39819 St->getMemOperand()->getFlags());
39822 // If we are saving a concatenation of two XMM registers and 32-byte stores
39823 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
39824 bool Fast;
39825 if (VT.is256BitVector() && StVT == VT &&
39826 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
39827 *St->getMemOperand(), &Fast) &&
39828 !Fast) {
39829 unsigned NumElems = VT.getVectorNumElements();
39830 if (NumElems < 2)
39831 return SDValue();
39833 return splitVectorStore(St, DAG);
39836 // Split under-aligned vector non-temporal stores.
39837 if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
39838 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
39839 // vectors or the legalizer can scalarize it to use MOVNTI.
39840 if (VT.is256BitVector() || VT.is512BitVector()) {
39841 unsigned NumElems = VT.getVectorNumElements();
39842 if (NumElems < 2)
39843 return SDValue();
39844 return splitVectorStore(St, DAG);
39847 // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
39848 // to use MOVNTI.
39849 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
39850 MVT NTVT = Subtarget.hasSSE4A()
39851 ? MVT::v2f64
39852 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
39853 return scalarizeVectorStore(St, NTVT, DAG);
39857 // Try to optimize v16i16->v16i8 truncating stores when BWI is not
39858 // supported but AVX512F is, by extending to v16i32 and truncating.
39859 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
39860 St->getValue().getOpcode() == ISD::TRUNCATE &&
39861 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
39862 TLI.isTruncStoreLegalOrCustom(MVT::v16i32, MVT::v16i8) &&
39863 !DCI.isBeforeLegalizeOps()) {
39864 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
39865 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
39866 MVT::v16i8, St->getMemOperand());
39869 // Optimize trunc store (of multiple scalars) to shuffle and store.
39870 // First, pack all of the elements in one place. Next, store to memory
39871 // in fewer chunks.
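// For example (illustrative), a v8i32->v8i16 truncating store bitcasts the
// source to v16i16, shuffles the eight low halves into the first lanes, and
// then writes the low 128 bits with one or more wide scalar stores.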
39872 if (St->isTruncatingStore() && VT.isVector()) {
39873 // Check if we can detect an AVG pattern from the truncation. If yes,
39874 // replace the trunc store by a normal store with the result of X86ISD::AVG
39875 // instruction.
39876 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
39877 Subtarget, dl))
39878 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
39879 St->getPointerInfo(), St->getAlignment(),
39880 St->getMemOperand()->getFlags());
39882 if (SDValue Val =
39883 detectAVX512SSatPattern(St->getValue(), St->getMemoryVT(), Subtarget,
39884 TLI))
39885 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
39886 dl, Val, St->getBasePtr(),
39887 St->getMemoryVT(), St->getMemOperand(), DAG);
39888 if (SDValue Val = detectAVX512USatPattern(St->getValue(), St->getMemoryVT(),
39889 DAG, dl, Subtarget, TLI))
39890 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
39891 dl, Val, St->getBasePtr(),
39892 St->getMemoryVT(), St->getMemOperand(), DAG);
39894 unsigned NumElems = VT.getVectorNumElements();
39895 assert(StVT != VT && "Cannot truncate to the same type");
39896 unsigned FromSz = VT.getScalarSizeInBits();
39897 unsigned ToSz = StVT.getScalarSizeInBits();
39899 // The truncating store is legal in some cases. For example
39900 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
39901 // are designated for truncate store.
39902 // In this case we don't need any further transformations.
39903 if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
39904 return SDValue();
39906 // From/To sizes and ElemCount must be a power of two.
39907 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
39908 // We are going to use the original vector elt for storing.
39909 // Accumulated smaller vector elements must be a multiple of the store size.
39910 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
39912 unsigned SizeRatio = FromSz / ToSz;
39914 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
39916 // Create a type on which we perform the shuffle
39917 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
39918 StVT.getScalarType(), NumElems*SizeRatio);
39920 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
39922 SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
39923 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
39924 for (unsigned i = 0; i != NumElems; ++i)
39925 ShuffleVec[i] = i * SizeRatio;
39927 // Can't shuffle using an illegal type.
39928 if (!TLI.isTypeLegal(WideVecVT))
39929 return SDValue();
39931 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
39932 DAG.getUNDEF(WideVecVT),
39933 ShuffleVec);
39934 // At this point all of the data is stored at the bottom of the
39935 // register. We now need to save it to mem.
39937 // Find the largest store unit
39938 MVT StoreType = MVT::i8;
39939 for (MVT Tp : MVT::integer_valuetypes()) {
39940 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
39941 StoreType = Tp;
39944 // On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
39945 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
39946 (64 <= NumElems * ToSz))
39947 StoreType = MVT::f64;
39949 // Bitcast the original vector into a vector of store-size units
39950 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
39951 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
39952 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
39953 SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
39954 SmallVector<SDValue, 8> Chains;
39955 SDValue Ptr = St->getBasePtr();
39957 // Perform one or more big stores into memory.
39958 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
39959 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
39960 StoreType, ShuffWide,
39961 DAG.getIntPtrConstant(i, dl));
39962 SDValue Ch =
39963 DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
39964 St->getAlignment(), St->getMemOperand()->getFlags());
39965 Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
39966 Chains.push_back(Ch);
39969 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
39972 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
39973 // the FP state in cases where an emms may be missing.
39974 // A preferable solution to the general problem is to figure out the right
39975 // places to insert EMMS. This qualifies as a quick hack.
39977 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
39978 if (VT.getSizeInBits() != 64)
39979 return SDValue();
39981 const Function &F = DAG.getMachineFunction().getFunction();
39982 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
39983 bool F64IsLegal =
39984 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
39985 if ((VT.isVector() ||
39986 (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
39987 isa<LoadSDNode>(St->getValue()) &&
39988 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
39989 St->getChain().hasOneUse() && !St->isVolatile()) {
39990 LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
39991 SmallVector<SDValue, 8> Ops;
39993 if (!ISD::isNormalLoad(Ld))
39994 return SDValue();
39996 // If this is not the MMX case, i.e. we are just turning i64 load/store
39997 // into f64 load/store, avoid the transformation if there are multiple
39998 // uses of the loaded value.
39999 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
40000 return SDValue();
40002 SDLoc LdDL(Ld);
40003 SDLoc StDL(N);
40004 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
40005 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
40006 // pair instead.
40007 if (Subtarget.is64Bit() || F64IsLegal) {
40008 MVT LdVT = (Subtarget.is64Bit() &&
40009 (!VT.isFloatingPoint() || !F64IsLegal)) ? MVT::i64 : MVT::f64;
40010 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
40011 Ld->getMemOperand());
40013 // Make sure new load is placed in same chain order.
40014 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
40015 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
40016 St->getMemOperand());
40019 // Otherwise, lower to two pairs of 32-bit loads / stores.
40020 SDValue LoAddr = Ld->getBasePtr();
40021 SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
40023 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
40024 Ld->getPointerInfo(), Ld->getAlignment(),
40025 Ld->getMemOperand()->getFlags());
40026 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
40027 Ld->getPointerInfo().getWithOffset(4),
40028 MinAlign(Ld->getAlignment(), 4),
40029 Ld->getMemOperand()->getFlags());
40030 // Make sure new loads are placed in same chain order.
40031 DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
40032 DAG.makeEquivalentMemoryOrdering(Ld, HiLd);
40034 LoAddr = St->getBasePtr();
40035 HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
40037 SDValue LoSt =
40038 DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
40039 St->getAlignment(), St->getMemOperand()->getFlags());
40040 SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
40041 St->getPointerInfo().getWithOffset(4),
40042 MinAlign(St->getAlignment(), 4),
40043 St->getMemOperand()->getFlags());
40044 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
40047 // This is similar to the above case, but here we handle a scalar 64-bit
40048 // integer store that is extracted from a vector on a 32-bit target.
40049 // If we have SSE2, then we can treat it like a floating-point double
40050 // to get past legalization. The execution dependencies fixup pass will
40051 // choose the optimal machine instruction for the store if this really is
40052 // an integer or v2f32 rather than an f64.
40053 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
40054 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
40055 SDValue OldExtract = St->getOperand(1);
40056 SDValue ExtOp0 = OldExtract.getOperand(0);
40057 unsigned VecSize = ExtOp0.getValueSizeInBits();
40058 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
40059 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
40060 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
40061 BitCast, OldExtract.getOperand(1));
40062 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
40063 St->getPointerInfo(), St->getAlignment(),
40064 St->getMemOperand()->getFlags());
40067 return SDValue();
40070 /// Return 'true' if this vector operation is "horizontal"
40071 /// and return the operands for the horizontal operation in LHS and RHS. A
40072 /// horizontal operation performs the binary operation on successive elements
40073 /// of its first operand, then on successive elements of its second operand,
40074 /// returning the resulting values in a vector. For example, if
40075 /// A = < float a0, float a1, float a2, float a3 >
40076 /// and
40077 /// B = < float b0, float b1, float b2, float b3 >
40078 /// then the result of doing a horizontal operation on A and B is
40079 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
40080 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
40081 /// A horizontal-op B, for some already available A and B, and if so then LHS is
40082 /// set to A, RHS to B, and the routine returns 'true'.
40083 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
40084 const X86Subtarget &Subtarget,
40085 bool IsCommutative) {
40086 // If either operand is undef, bail out. The binop should be simplified.
40087 if (LHS.isUndef() || RHS.isUndef())
40088 return false;
40090 // Look for the following pattern:
40091 // A = < float a0, float a1, float a2, float a3 >
40092 // B = < float b0, float b1, float b2, float b3 >
40093 // and
40094 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
40095 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
40096 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
40097 // which is A horizontal-op B.
40099 MVT VT = LHS.getSimpleValueType();
40100 assert((VT.is128BitVector() || VT.is256BitVector()) &&
40101 "Unsupported vector type for horizontal add/sub");
40102 unsigned NumElts = VT.getVectorNumElements();
40104 // TODO - can we make a general helper method that does all of this for us?
40105 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
40106 SmallVectorImpl<int> &ShuffleMask) {
40107 if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
40108 if (!Op.getOperand(0).isUndef())
40109 N0 = Op.getOperand(0);
40110 if (!Op.getOperand(1).isUndef())
40111 N1 = Op.getOperand(1);
40112 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
40113 ShuffleMask.append(Mask.begin(), Mask.end());
40114 return;
40116 bool UseSubVector = false;
40117 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40118 Op.getOperand(0).getValueType().is256BitVector() &&
40119 llvm::isNullConstant(Op.getOperand(1))) {
40120 Op = Op.getOperand(0);
40121 UseSubVector = true;
40123 bool IsUnary;
40124 SmallVector<SDValue, 2> SrcOps;
40125 SmallVector<int, 16> SrcShuffleMask;
40126 SDValue BC = peekThroughBitcasts(Op);
40127 if (isTargetShuffle(BC.getOpcode()) &&
40128 getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
40129 SrcOps, SrcShuffleMask, IsUnary)) {
40130 if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
40131 SrcOps.size() <= 2) {
40132 N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
40133 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
40134 ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
40136 if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
40137 SrcOps.size() == 1) {
40138 N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
40139 N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
40140 ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
40141 ShuffleMask.append(Mask.begin(), Mask.end());
40146 // View LHS in the form
40147 // LHS = VECTOR_SHUFFLE A, B, LMask
40148 // If LHS is not a shuffle, then pretend it is the identity shuffle:
40149 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
40150 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
40151 SDValue A, B;
40152 SmallVector<int, 16> LMask;
40153 GetShuffle(LHS, A, B, LMask);
40155 // Likewise, view RHS in the form
40156 // RHS = VECTOR_SHUFFLE C, D, RMask
40157 SDValue C, D;
40158 SmallVector<int, 16> RMask;
40159 GetShuffle(RHS, C, D, RMask);
40161 // At least one of the operands should be a vector shuffle.
40162 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
40163 if (NumShuffles == 0)
40164 return false;
40166 if (LMask.empty()) {
40167 A = LHS;
40168 for (unsigned i = 0; i != NumElts; ++i)
40169 LMask.push_back(i);
40172 if (RMask.empty()) {
40173 C = RHS;
40174 for (unsigned i = 0; i != NumElts; ++i)
40175 RMask.push_back(i);
40178 // If A and B occur in reverse order in RHS, then canonicalize by commuting
40179 // RHS operands and shuffle mask.
40180 if (A != C) {
40181 std::swap(C, D);
40182 ShuffleVectorSDNode::commuteMask(RMask);
40184 // Check that the shuffles are both shuffling the same vectors.
40185 if (!(A == C && B == D))
40186 return false;
40188 // LHS and RHS are now:
40189 // LHS = shuffle A, B, LMask
40190 // RHS = shuffle A, B, RMask
40191 // Check that the masks correspond to performing a horizontal operation.
40192 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
40193 // so we just repeat the inner loop if this is a 256-bit op.
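// For example, for a v8f32 horizontal add the (already canonicalized) masks
// would be expected to look roughly like:
//   LMask = <0, 2, 8, 10, 4, 6, 12, 14>
//   RMask = <1, 3, 9, 11, 5, 7, 13, 15>
// i.e. within each 128-bit lane, LHS selects the even elements and RHS the
// following odd elements, first from A and then from B.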
40194 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
40195 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
40196 assert((NumEltsPer128BitChunk % 2 == 0) &&
40197 "Vector type should have an even number of elements in each lane");
40198 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
40199 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
40200 // Ignore undefined components.
40201 int LIdx = LMask[i + j], RIdx = RMask[i + j];
40202 if (LIdx < 0 || RIdx < 0 ||
40203 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
40204 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
40205 continue;
40207 // The low half of the 128-bit result must choose from A.
40208 // The high half of the 128-bit result must choose from B,
40209 // unless B is undef. In that case, we are always choosing from A.
40210 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
40211 unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
40213 // Check that successive elements are being operated on. If not, this is
40214 // not a horizontal operation.
40215 int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
40216 if (!(LIdx == Index && RIdx == Index + 1) &&
40217 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
40218 return false;
40222 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
40223 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
40225 if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
40226 return false;
40228 LHS = DAG.getBitcast(VT, LHS);
40229 RHS = DAG.getBitcast(VT, RHS);
40230 return true;
40233 /// Do target-specific dag combines on floating-point adds/subs.
40234 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
40235 const X86Subtarget &Subtarget) {
40236 EVT VT = N->getValueType(0);
40237 SDValue LHS = N->getOperand(0);
40238 SDValue RHS = N->getOperand(1);
40239 bool IsFadd = N->getOpcode() == ISD::FADD;
40240 auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
40241 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
40243 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
40244 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
40245 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
40246 isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
40247 return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
40249 return SDValue();
40252 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
40253 /// the codegen.
40254 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
40255 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
40256 /// anything that is guaranteed to be transformed by DAGCombiner.
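/// A rough example of the kind of case this catches:
///   (v4i32 (trunc (and (v4i64 X), (v4i64 build_vector C))))
///     -> (and (v4i32 (trunc X)), (v4i32 (trunc (build_vector C))))
/// where the truncate of the constant operand is expected to fold away for
/// free.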
40257 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
40258 const X86Subtarget &Subtarget,
40259 const SDLoc &DL) {
40260 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
40261 SDValue Src = N->getOperand(0);
40262 unsigned SrcOpcode = Src.getOpcode();
40263 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40265 EVT VT = N->getValueType(0);
40266 EVT SrcVT = Src.getValueType();
40268 auto IsFreeTruncation = [VT](SDValue Op) {
40269 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
40271 // See if this has been extended from a smaller/equal size to
40272 // the truncation size, allowing a truncation to combine with the extend.
40273 unsigned Opcode = Op.getOpcode();
40274 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
40275 Opcode == ISD::ZERO_EXTEND) &&
40276 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
40277 return true;
40279 // See if this is a single use constant which can be constant folded.
40280 // NOTE: We don't peek through bitcasts here because there is currently
40281 // no support for constant folding truncate+bitcast+vector_of_constants, so
40282 // we'll just end up with a truncate on both operands, which will
40283 // get turned back into (truncate (binop)) causing an infinite loop.
40284 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
40287 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
40288 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
40289 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
40290 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
40293 // Don't combine if the operation has other uses.
40294 if (!Src.hasOneUse())
40295 return SDValue();
40297 // Only support vector truncation for now.
40298 // TODO: i64 scalar math would benefit as well.
40299 if (!VT.isVector())
40300 return SDValue();
40302 // In most cases it's only worth pre-truncating if we're only facing the cost
40303 // of one truncation.
40304 // i.e. if one of the inputs will constant fold or the input is repeated.
40305 switch (SrcOpcode) {
40306 case ISD::AND:
40307 case ISD::XOR:
40308 case ISD::OR: {
40309 SDValue Op0 = Src.getOperand(0);
40310 SDValue Op1 = Src.getOperand(1);
40311 if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
40312 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
40313 return TruncateArithmetic(Op0, Op1);
40314 break;
40317 case ISD::MUL:
40318 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
40319 // better to truncate if we have the chance.
40320 if (SrcVT.getScalarType() == MVT::i64 &&
40321 TLI.isOperationLegal(SrcOpcode, VT) &&
40322 !TLI.isOperationLegal(SrcOpcode, SrcVT))
40323 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
40324 LLVM_FALLTHROUGH;
40325 case ISD::ADD: {
40326 SDValue Op0 = Src.getOperand(0);
40327 SDValue Op1 = Src.getOperand(1);
40328 if (TLI.isOperationLegal(SrcOpcode, VT) &&
40329 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
40330 return TruncateArithmetic(Op0, Op1);
40331 break;
40333 case ISD::SUB: {
40334 // TODO: ISD::SUB - We are conservative and require both sides to be freely
40335 // truncatable to avoid interfering with combineSubToSubus.
40336 SDValue Op0 = Src.getOperand(0);
40337 SDValue Op1 = Src.getOperand(1);
40338 if (TLI.isOperationLegal(SrcOpcode, VT) &&
40339 (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
40340 return TruncateArithmetic(Op0, Op1);
40341 break;
40345 return SDValue();
40348 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
40349 /// e.g. trunc <8 x i32> X to <8 x i16> -->
40350 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
40351 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
40352 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
40353 const X86Subtarget &Subtarget,
40354 SelectionDAG &DAG) {
40355 SDValue In = N->getOperand(0);
40356 EVT InVT = In.getValueType();
40357 EVT OutVT = N->getValueType(0);
40359 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
40360 OutVT.getScalarSizeInBits());
40361 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
40362 return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
40365 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
40366 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
40367 const X86Subtarget &Subtarget,
40368 SelectionDAG &DAG) {
40369 SDValue In = N->getOperand(0);
40370 EVT InVT = In.getValueType();
40371 EVT OutVT = N->getValueType(0);
40372 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
40373 DAG.getValueType(OutVT));
40374 return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
40377 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
40378 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
40379 /// legalization the truncation will be translated into a BUILD_VECTOR with each
40380 /// element that is extracted from a vector and then truncated, and it is
40381 /// difficult to do this optimization based on them.
40382 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
40383 const X86Subtarget &Subtarget) {
40384 EVT OutVT = N->getValueType(0);
40385 if (!OutVT.isVector())
40386 return SDValue();
40388 SDValue In = N->getOperand(0);
40389 if (!In.getValueType().isSimple())
40390 return SDValue();
40392 EVT InVT = In.getValueType();
40393 unsigned NumElems = OutVT.getVectorNumElements();
40395 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
40396 // SSE2, and we need to take care of it specially.
40397 // AVX512 provides vpmovdb.
40398 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
40399 return SDValue();
40401 EVT OutSVT = OutVT.getVectorElementType();
40402 EVT InSVT = InVT.getVectorElementType();
40403 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
40404 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
40405 NumElems >= 8))
40406 return SDValue();
40408 // SSSE3's pshufb results in fewer instructions in the cases below.
40409 if (Subtarget.hasSSSE3() && NumElems == 8 &&
40410 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
40411 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
40412 return SDValue();
40414 SDLoc DL(N);
40415 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
40416 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
40417 // truncate 2 x v4i32 to v8i16.
40418 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
40419 return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
40420 if (InSVT == MVT::i32)
40421 return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
40423 return SDValue();
40426 /// This function transforms vector truncation of 'extended sign-bits' or
40427 /// 'extended zero-bits' values (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into
40428 /// X86ISD::PACKSS/PACKUS operations.
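/// For example, a v8i32 comparison result (all-ones or all-zeros elements,
/// i.e. 32 sign bits per element) truncated to v8i16 can typically be lowered
/// with a PACKSSDW of its two v4i32 halves rather than a generic truncation.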
40429 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
40430 SelectionDAG &DAG,
40431 const X86Subtarget &Subtarget) {
40432 // Requires SSE2 but AVX512 has fast truncate.
40433 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
40434 return SDValue();
40436 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
40437 return SDValue();
40439 SDValue In = N->getOperand(0);
40440 if (!In.getValueType().isSimple())
40441 return SDValue();
40443 MVT VT = N->getValueType(0).getSimpleVT();
40444 MVT SVT = VT.getScalarType();
40446 MVT InVT = In.getValueType().getSimpleVT();
40447 MVT InSVT = InVT.getScalarType();
40449 // Check we have a truncation suited for PACKSS/PACKUS.
40450 if (!VT.is128BitVector() && !VT.is256BitVector())
40451 return SDValue();
40452 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
40453 return SDValue();
40454 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
40455 return SDValue();
40457 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
40458 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
40460 // Use PACKUS if the input has zero-bits that extend all the way to the
40461 // packed/truncated value. e.g. masks, zext_in_reg, etc.
40462 KnownBits Known = DAG.computeKnownBits(In);
40463 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
40464 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
40465 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
40467 // Use PACKSS if the input has sign-bits that extend all the way to the
40468 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
40469 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
40470 if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
40471 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
40473 return SDValue();
40476 // Try to form a MULHU or MULHS node by looking for
40477 // (trunc (srl (mul ext, ext), 16))
40478 // TODO: This is X86 specific because we want to be able to handle wide types
40479 // before type legalization. But we can only do it if the vector will be
40480 // legalized via widening/splitting. Type legalization can't handle promotion
40481 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
40482 // combiner.
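// Concretely, the pattern handled here looks roughly like:
//   (v8i16 (trunc (srl (mul (zext v8i16 A to v8i32),
//                           (zext v8i16 B to v8i32)), 16)))
//     -> (v8i16 (mulhu A, B))
// with the sign-extended form mapping to mulhs instead.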
40483 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
40484 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
40485 // First instruction should be a right shift of a multiply.
40486 if (Src.getOpcode() != ISD::SRL ||
40487 Src.getOperand(0).getOpcode() != ISD::MUL)
40488 return SDValue();
40490 if (!Subtarget.hasSSE2())
40491 return SDValue();
40493 // Only handle vXi16 types that are at least 128-bits unless they will be
40494 // widened.
40495 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
40496 (!ExperimentalVectorWideningLegalization &&
40497 VT.getVectorNumElements() < 8))
40498 return SDValue();
40500 // Input type should be vXi32.
40501 EVT InVT = Src.getValueType();
40502 if (InVT.getVectorElementType() != MVT::i32)
40503 return SDValue();
40505 // Need a shift by 16.
40506 APInt ShiftAmt;
40507 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
40508 ShiftAmt != 16)
40509 return SDValue();
40511 SDValue LHS = Src.getOperand(0).getOperand(0);
40512 SDValue RHS = Src.getOperand(0).getOperand(1);
40514 unsigned ExtOpc = LHS.getOpcode();
40515 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
40516 RHS.getOpcode() != ExtOpc)
40517 return SDValue();
40519 // Peek through the extends.
40520 LHS = LHS.getOperand(0);
40521 RHS = RHS.getOperand(0);
40523 // Ensure the input types match.
40524 if (LHS.getValueType() != VT || RHS.getValueType() != VT)
40525 return SDValue();
40527 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
40528 return DAG.getNode(Opc, DL, VT, LHS, RHS);
40531 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
40532 // from one vector with signed bytes from another vector, adds together
40533 // adjacent pairs of 16-bit products, and saturates the result before
40534 // truncating to 16-bits.
40536 // Which looks something like this:
40537 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
40538 // (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
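// In element-wise terms, for each output element i this computes roughly:
//   Out[i] = sat_i16(zext(A[2*i])   * sext(B[2*i]) +
//                    zext(A[2*i+1]) * sext(B[2*i+1]))
// which matches the unsigned-times-signed, pairwise saturating-add behavior
// of PMADDUBSW.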
40539 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
40540 const X86Subtarget &Subtarget,
40541 const SDLoc &DL) {
40542 if (!VT.isVector() || !Subtarget.hasSSSE3())
40543 return SDValue();
40545 unsigned NumElems = VT.getVectorNumElements();
40546 EVT ScalarVT = VT.getVectorElementType();
40547 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
40548 return SDValue();
40550 SDValue SSatVal = detectSSatPattern(In, VT);
40551 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
40552 return SDValue();
40554 // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
40555 // of multiplies from even/odd elements.
40556 SDValue N0 = SSatVal.getOperand(0);
40557 SDValue N1 = SSatVal.getOperand(1);
40559 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
40560 return SDValue();
40562 SDValue N00 = N0.getOperand(0);
40563 SDValue N01 = N0.getOperand(1);
40564 SDValue N10 = N1.getOperand(0);
40565 SDValue N11 = N1.getOperand(1);
40567 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
40568 // Canonicalize zero_extend to LHS.
40569 if (N01.getOpcode() == ISD::ZERO_EXTEND)
40570 std::swap(N00, N01);
40571 if (N11.getOpcode() == ISD::ZERO_EXTEND)
40572 std::swap(N10, N11);
40574 // Ensure we have a zero_extend and a sign_extend.
40575 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
40576 N01.getOpcode() != ISD::SIGN_EXTEND ||
40577 N10.getOpcode() != ISD::ZERO_EXTEND ||
40578 N11.getOpcode() != ISD::SIGN_EXTEND)
40579 return SDValue();
40581 // Peek through the extends.
40582 N00 = N00.getOperand(0);
40583 N01 = N01.getOperand(0);
40584 N10 = N10.getOperand(0);
40585 N11 = N11.getOperand(0);
40587 // Ensure the extend is from vXi8.
40588 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
40589 N01.getValueType().getVectorElementType() != MVT::i8 ||
40590 N10.getValueType().getVectorElementType() != MVT::i8 ||
40591 N11.getValueType().getVectorElementType() != MVT::i8)
40592 return SDValue();
40594 // All inputs should be build_vectors.
40595 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
40596 N01.getOpcode() != ISD::BUILD_VECTOR ||
40597 N10.getOpcode() != ISD::BUILD_VECTOR ||
40598 N11.getOpcode() != ISD::BUILD_VECTOR)
40599 return SDValue();
40601 // N00/N10 are zero extended. N01/N11 are sign extended.
40603 // For each element, we need to ensure we have an odd element from one vector
40604 // multiplied by the odd element of another vector and the even element from
40605 // one of the same vectors being multiplied by the even element from the
40606 // other vector. So we need to make sure for each element i, this operator
40607 // is being performed:
40608 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
40609 SDValue ZExtIn, SExtIn;
40610 for (unsigned i = 0; i != NumElems; ++i) {
40611 SDValue N00Elt = N00.getOperand(i);
40612 SDValue N01Elt = N01.getOperand(i);
40613 SDValue N10Elt = N10.getOperand(i);
40614 SDValue N11Elt = N11.getOperand(i);
40615 // TODO: Be more tolerant to undefs.
40616 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
40617 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
40618 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
40619 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
40620 return SDValue();
40621 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
40622 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
40623 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
40624 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
40625 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
40626 return SDValue();
40627 unsigned IdxN00 = ConstN00Elt->getZExtValue();
40628 unsigned IdxN01 = ConstN01Elt->getZExtValue();
40629 unsigned IdxN10 = ConstN10Elt->getZExtValue();
40630 unsigned IdxN11 = ConstN11Elt->getZExtValue();
40631 // Add is commutative so indices can be reordered.
40632 if (IdxN00 > IdxN10) {
40633 std::swap(IdxN00, IdxN10);
40634 std::swap(IdxN01, IdxN11);
40636 // N0 indices must be the even element. N1 indices must be the next odd element.
40637 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
40638 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
40639 return SDValue();
40640 SDValue N00In = N00Elt.getOperand(0);
40641 SDValue N01In = N01Elt.getOperand(0);
40642 SDValue N10In = N10Elt.getOperand(0);
40643 SDValue N11In = N11Elt.getOperand(0);
40644 // The first time we find an input, capture it.
40645 if (!ZExtIn) {
40646 ZExtIn = N00In;
40647 SExtIn = N01In;
40649 if (ZExtIn != N00In || SExtIn != N01In ||
40650 ZExtIn != N10In || SExtIn != N11In)
40651 return SDValue();
40654 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
40655 ArrayRef<SDValue> Ops) {
40656 // Shrink by adding truncate nodes and let DAGCombine fold with the
40657 // sources.
40658 EVT InVT = Ops[0].getValueType();
40659 assert(InVT.getScalarType() == MVT::i8 &&
40660 "Unexpected scalar element type");
40661 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
40662 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
40663 InVT.getVectorNumElements() / 2);
40664 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
40666 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
40667 PMADDBuilder);
40670 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
40671 const X86Subtarget &Subtarget) {
40672 EVT VT = N->getValueType(0);
40673 SDValue Src = N->getOperand(0);
40674 SDLoc DL(N);
40676 // Attempt to pre-truncate inputs to arithmetic ops instead.
40677 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
40678 return V;
40680 // Try to detect AVG pattern first.
40681 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
40682 return Avg;
40684 // Try to detect PMADDUBSW.
40685 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
40686 return PMAdd;
40688 // Try to combine truncation with signed/unsigned saturation.
40689 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
40690 return Val;
40692 // Try to combine PMULHUW/PMULHW for vXi16.
40693 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
40694 return V;
40696 // Detect a truncate to i32 of a bitcast whose source is a direct MMX
40697 // result (i.e. a bitcast from x86mmx).
40698 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
40699 SDValue BCSrc = Src.getOperand(0);
40700 if (BCSrc.getValueType() == MVT::x86mmx)
40701 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
40704 // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
40705 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
40706 return V;
40708 return combineVectorTruncation(N, DAG, Subtarget);
40711 /// Returns the negated value if the node \p N flips the sign of an FP value.
40713 /// An FP-negation node may have different forms: FNEG(x), FXOR(x, 0x80000000)
40714 /// or FSUB(0, x).
40715 /// AVX512F does not have FXOR, so FNEG is lowered as
40716 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
40717 /// In this case we go through all bitcasts.
40718 /// This also recognizes a splat of a negated value and returns the splat of
40719 /// that value.
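/// For example, each of the following is recognized here as a negation of X
/// (for f32, where 0x80000000 is the sign-bit mask):
///   (fneg X)
///   (fsub -0.0, X)
///   (fxor X, 0x80000000)
///   (bitcast (xor (bitcast X), 0x80000000))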
40720 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N) {
40721 if (N->getOpcode() == ISD::FNEG)
40722 return N->getOperand(0);
40724 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
40726 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
40727 EVT VT = Op->getValueType(0);
40728 // Make sure the element size doesn't change.
40729 if (VT.getScalarSizeInBits() != ScalarSize)
40730 return SDValue();
40732 if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) {
40733 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
40734 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
40735 if (!SVOp->getOperand(1).isUndef())
40736 return SDValue();
40737 if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode()))
40738 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
40739 return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT),
40740 SVOp->getMask());
40741 return SDValue();
40743 unsigned Opc = Op.getOpcode();
40744 if (Opc == ISD::INSERT_VECTOR_ELT) {
40745 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
40746 // -V, INDEX).
40747 SDValue InsVector = Op.getOperand(0);
40748 SDValue InsVal = Op.getOperand(1);
40749 if (!InsVector.isUndef())
40750 return SDValue();
40751 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode()))
40752 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
40753 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
40754 NegInsVal, Op.getOperand(2));
40755 return SDValue();
40758 if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB)
40759 return SDValue();
40761 SDValue Op1 = Op.getOperand(1);
40762 SDValue Op0 = Op.getOperand(0);
40764 // For XOR and FXOR, we want to check if constant bits of Op1 are sign bit
40765 // masks. For FSUB, we have to check if constant bits of Op0 are sign bit
40766 // masks and hence we swap the operands.
40767 if (Opc == ISD::FSUB)
40768 std::swap(Op0, Op1);
40770 APInt UndefElts;
40771 SmallVector<APInt, 16> EltBits;
40772 // Extract constant bits and see if they are all sign bit masks. Ignore the
40773 // undef elements.
40774 if (getTargetConstantBitsFromNode(Op1, ScalarSize,
40775 UndefElts, EltBits,
40776 /* AllowWholeUndefs */ true,
40777 /* AllowPartialUndefs */ false)) {
40778 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
40779 if (!UndefElts[I] && !EltBits[I].isSignMask())
40780 return SDValue();
40782 return peekThroughBitcasts(Op0);
40785 return SDValue();
40788 /// Do target-specific dag combines on floating point negations.
40789 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
40790 const X86Subtarget &Subtarget) {
40791 EVT OrigVT = N->getValueType(0);
40792 SDValue Arg = isFNEG(DAG, N);
40793 if (!Arg)
40794 return SDValue();
40796 EVT VT = Arg.getValueType();
40797 EVT SVT = VT.getScalarType();
40798 SDLoc DL(N);
40800 // Let legalize expand this if it isn't a legal type yet.
40801 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
40802 return SDValue();
40804 // If we're negating an FMUL node on a target with FMA, then we can avoid the
40805 // use of a constant by performing (-0 - A*B) instead.
40806 // FIXME: Check rounding control flags as well once it becomes available.
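// e.g. (fneg (fmul A, B)) becomes an FNMSUB with a zero addend, i.e. roughly
// -(A*B) - 0.0, so no sign-mask constant needs to be loaded.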
40807 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
40808 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
40809 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
40810 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
40811 Arg.getOperand(1), Zero);
40812 return DAG.getBitcast(OrigVT, NewNode);
40815 // If we're negating an FMA node, then we can adjust the
40816 // instruction to include the extra negation.
40817 unsigned NewOpcode = 0;
40818 if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
40819 switch (Arg.getOpcode()) {
40820 case ISD::FMA: NewOpcode = X86ISD::FNMSUB; break;
40821 case X86ISD::FMSUB: NewOpcode = X86ISD::FNMADD; break;
40822 case X86ISD::FNMADD: NewOpcode = X86ISD::FMSUB; break;
40823 case X86ISD::FNMSUB: NewOpcode = ISD::FMA; break;
40824 case X86ISD::FMADD_RND: NewOpcode = X86ISD::FNMSUB_RND; break;
40825 case X86ISD::FMSUB_RND: NewOpcode = X86ISD::FNMADD_RND; break;
40826 case X86ISD::FNMADD_RND: NewOpcode = X86ISD::FMSUB_RND; break;
40827 case X86ISD::FNMSUB_RND: NewOpcode = X86ISD::FMADD_RND; break;
40828 // We can't handle scalar intrinsic node here because it would only
40829 // invert one element and not the whole vector. But we could try to handle
40830 // a negation of the lower element only.
40833 if (NewOpcode)
40834 return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT,
40835 Arg.getNode()->ops()));
40837 return SDValue();
40840 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
40841 const X86Subtarget &Subtarget) {
40842 MVT VT = N->getSimpleValueType(0);
40843 // If we have integer vector types available, use the integer opcodes.
40844 if (!VT.isVector() || !Subtarget.hasSSE2())
40845 return SDValue();
40847 SDLoc dl(N);
40849 unsigned IntBits = VT.getScalarSizeInBits();
40850 MVT IntSVT = MVT::getIntegerVT(IntBits);
40851 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
40853 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
40854 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
40855 unsigned IntOpcode;
40856 switch (N->getOpcode()) {
40857 default: llvm_unreachable("Unexpected FP logic op");
40858 case X86ISD::FOR: IntOpcode = ISD::OR; break;
40859 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
40860 case X86ISD::FAND: IntOpcode = ISD::AND; break;
40861 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
40863 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
40864 return DAG.getBitcast(VT, IntOp);
40868 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
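/// e.g. (xor (X86ISD::SETCC COND_E, EFLAGS), 1) -->
///      (X86ISD::SETCC COND_NE, EFLAGS)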
40869 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
40870 if (N->getOpcode() != ISD::XOR)
40871 return SDValue();
40873 SDValue LHS = N->getOperand(0);
40874 auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
40875 if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
40876 return SDValue();
40878 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
40879 X86::CondCode(LHS->getConstantOperandVal(0)));
40880 SDLoc DL(N);
40881 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
40884 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
40885 TargetLowering::DAGCombinerInfo &DCI,
40886 const X86Subtarget &Subtarget) {
40887 // If this is SSE1 only, convert to FXOR to avoid scalarization.
40888 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
40889 N->getValueType(0) == MVT::v4i32) {
40890 return DAG.getBitcast(
40891 MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
40892 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
40893 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
40896 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
40897 return Cmp;
40899 if (DCI.isBeforeLegalizeOps())
40900 return SDValue();
40902 if (SDValue SetCC = foldXor1SetCC(N, DAG))
40903 return SetCC;
40905 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
40906 return RV;
40908 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40909 return FPLogic;
40911 return combineFneg(N, DAG, Subtarget);
40914 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
40915 TargetLowering::DAGCombinerInfo &DCI,
40916 const X86Subtarget &Subtarget) {
40917 SDValue Op0 = N->getOperand(0);
40918 SDValue Op1 = N->getOperand(1);
40919 EVT VT = N->getValueType(0);
40920 unsigned NumBits = VT.getSizeInBits();
40922 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40924 // TODO - Constant Folding.
40925 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
40926 // Reduce Cst1 to the bottom 16-bits.
40927 // NOTE: SimplifyDemandedBits won't do this for constants.
40928 const APInt &Val1 = Cst1->getAPIntValue();
40929 APInt MaskedVal1 = Val1 & 0xFFFF;
40930 if (MaskedVal1 != Val1)
40931 return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
40932 DAG.getConstant(MaskedVal1, SDLoc(N), VT));
40935 // Only bottom 16-bits of the control bits are required.
40936 APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
40937 if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
40938 return SDValue(N, 0);
40940 return SDValue();
40943 static bool isNullFPScalarOrVectorConst(SDValue V) {
40944 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
40947 /// If a value is a scalar FP zero or a vector FP zero (potentially including
40948 /// undefined elements), return a zero constant that may be used to fold away
40949 /// that value. In the case of a vector, the returned constant will not contain
40950 /// undefined elements even if the input parameter does. This makes it suitable
40951 /// to be used as a replacement operand with operations (e.g., bitwise-and) where
40952 /// an undef should not propagate.
40953 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
40954 const X86Subtarget &Subtarget) {
40955 if (!isNullFPScalarOrVectorConst(V))
40956 return SDValue();
40958 if (V.getValueType().isVector())
40959 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
40961 return V;
40964 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
40965 const X86Subtarget &Subtarget) {
40966 SDValue N0 = N->getOperand(0);
40967 SDValue N1 = N->getOperand(1);
40968 EVT VT = N->getValueType(0);
40969 SDLoc DL(N);
40971 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
40972 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
40973 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
40974 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
40975 return SDValue();
40977 auto isAllOnesConstantFP = [](SDValue V) {
40978 if (V.getSimpleValueType().isVector())
40979 return ISD::isBuildVectorAllOnes(V.getNode());
40980 auto *C = dyn_cast<ConstantFPSDNode>(V);
40981 return C && C->getConstantFPValue()->isAllOnesValue();
40984 // fand (fxor X, -1), Y --> fandn X, Y
40985 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
40986 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
40988 // fand X, (fxor Y, -1) --> fandn Y, X
40989 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
40990 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
40992 return SDValue();
40995 /// Do target-specific dag combines on X86ISD::FAND nodes.
40996 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
40997 const X86Subtarget &Subtarget) {
40998 // FAND(0.0, x) -> 0.0
40999 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
41000 return V;
41002 // FAND(x, 0.0) -> 0.0
41003 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
41004 return V;
41006 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
41007 return V;
41009 return lowerX86FPLogicOp(N, DAG, Subtarget);
41012 /// Do target-specific dag combines on X86ISD::FANDN nodes.
41013 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
41014 const X86Subtarget &Subtarget) {
41015 // FANDN(0.0, x) -> x
41016 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
41017 return N->getOperand(1);
41019 // FANDN(x, 0.0) -> 0.0
41020 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
41021 return V;
41023 return lowerX86FPLogicOp(N, DAG, Subtarget);
41026 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
41027 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
41028 const X86Subtarget &Subtarget) {
41029 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
41031 // F[X]OR(0.0, x) -> x
41032 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
41033 return N->getOperand(1);
41035 // F[X]OR(x, 0.0) -> x
41036 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
41037 return N->getOperand(0);
41039 if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
41040 return NewVal;
41042 return lowerX86FPLogicOp(N, DAG, Subtarget);
41045 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
41046 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
41047 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
41049 // Only perform optimizations if UnsafeMath is used.
41050 if (!DAG.getTarget().Options.UnsafeFPMath)
41051 return SDValue();
41053 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
41054 // into FMINC and FMAXC, which are Commutative operations.
41055 unsigned NewOp = 0;
41056 switch (N->getOpcode()) {
41057 default: llvm_unreachable("unknown opcode");
41058 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
41059 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
41062 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
41063 N->getOperand(0), N->getOperand(1));
41066 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
41067 const X86Subtarget &Subtarget) {
41068 if (Subtarget.useSoftFloat())
41069 return SDValue();
41071 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41073 EVT VT = N->getValueType(0);
41074 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
41075 (Subtarget.hasSSE2() && VT == MVT::f64) ||
41076 (VT.isVector() && TLI.isTypeLegal(VT))))
41077 return SDValue();
41079 SDValue Op0 = N->getOperand(0);
41080 SDValue Op1 = N->getOperand(1);
41081 SDLoc DL(N);
41082 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
41084 // If we don't have to respect NaN inputs, this is a direct translation to x86
41085 // min/max instructions.
41086 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
41087 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
41089 // If one of the operands is known non-NaN, use the native min/max
41090 // instructions with the non-NaN input as the second operand.
41091 if (DAG.isKnownNeverNaN(Op1))
41092 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
41093 if (DAG.isKnownNeverNaN(Op0))
41094 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
41096 // If we have to respect NaN inputs, this takes at least 3 instructions.
41097 // Favor a library call when operating on a scalar and minimizing code size.
41098 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
41099 return SDValue();
41101 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
41102 VT);
41104 // There are 4 possibilities involving NaN inputs, and these are the required
41105 // outputs:
41106 // Op1
41107 // Num NaN
41108 // ----------------
41109 // Num | Max | Op0 |
41110 // Op0 ----------------
41111 // NaN | Op1 | NaN |
41112 // ----------------
41114 // The SSE FP max/min instructions were not designed for this case, but rather
41115 // to implement:
41116 // Min = Op1 < Op0 ? Op1 : Op0
41117 // Max = Op1 > Op0 ? Op1 : Op0
41119 // So they always return Op0 if either input is a NaN. However, we can still
41120 // use those instructions for fmaxnum by selecting away a NaN input.
41122 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
41123 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
41124 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
41126 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
41127 // are NaN, the NaN value of Op1 is the result.
41128 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
41131 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
41132 TargetLowering::DAGCombinerInfo &DCI) {
41133 EVT VT = N->getValueType(0);
41134 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41136 APInt KnownUndef, KnownZero;
41137 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
41138 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
41139 KnownZero, DCI))
41140 return SDValue(N, 0);
41142 // Convert a full vector load into vzload when not all bits are needed.
41143 SDValue In = N->getOperand(0);
41144 MVT InVT = In.getSimpleValueType();
41145 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
41146 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
41147 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
41148 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
41149 // Unless the load is volatile.
41150 if (!LN->isVolatile()) {
41151 SDLoc dl(N);
41152 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
41153 MVT MemVT = MVT::getIntegerVT(NumBits);
41154 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
41155 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
41156 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41157 SDValue VZLoad =
41158 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
41159 LN->getPointerInfo(),
41160 LN->getAlignment(),
41161 LN->getMemOperand()->getFlags());
41162 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
41163 DAG.getBitcast(InVT, VZLoad));
41164 DCI.CombineTo(N, Convert);
41165 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41166 return SDValue(N, 0);
41170 return SDValue();
41173 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
41174 TargetLowering::DAGCombinerInfo &DCI) {
41175 EVT VT = N->getValueType(0);
41177 // Convert a full vector load into vzload when not all bits are needed.
41178 SDValue In = N->getOperand(0);
41179 MVT InVT = In.getSimpleValueType();
41180 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
41181 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
41182 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
41183 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
41184 // Unless the load is volatile.
41185 if (!LN->isVolatile()) {
41186 SDLoc dl(N);
41187 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
41188 MVT MemVT = MVT::getFloatingPointVT(NumBits);
41189 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
41190 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
41191 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41192 SDValue VZLoad =
41193 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
41194 LN->getPointerInfo(),
41195 LN->getAlignment(),
41196 LN->getMemOperand()->getFlags());
41197 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
41198 DAG.getBitcast(InVT, VZLoad));
41199 DCI.CombineTo(N, Convert);
41200 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41201 return SDValue(N, 0);
41205 return SDValue();
41208 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
41209 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
41210 TargetLowering::DAGCombinerInfo &DCI,
41211 const X86Subtarget &Subtarget) {
41212 MVT VT = N->getSimpleValueType(0);
41214 // ANDNP(0, x) -> x
41215 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
41216 return N->getOperand(1);
41218 // ANDNP(x, 0) -> 0
41219 if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
41220 return DAG.getConstant(0, SDLoc(N), VT);
41222 // Turn ANDNP back to AND if input is inverted.
41223 if (SDValue Not = IsNOT(N->getOperand(0), DAG))
41224 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
41225 N->getOperand(1));
41227 // Attempt to recursively combine a bitmask ANDNP with shuffles.
41228 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
41229 SDValue Op(N, 0);
41230 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
41231 return Res;
41234 return SDValue();
41237 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
41238 TargetLowering::DAGCombinerInfo &DCI) {
41239 SDValue N0 = N->getOperand(0);
41240 SDValue N1 = N->getOperand(1);
41242 // BT ignores high bits in the bit index operand.
41243 unsigned BitWidth = N1.getValueSizeInBits();
41244 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
41245 if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
41246 return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
41248 return SDValue();
41251 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
41252 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
41253 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
41255 EVT DstVT = N->getValueType(0);
41257 SDValue N0 = N->getOperand(0);
41258 SDValue N1 = N->getOperand(1);
41259 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
41261 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
41262 return SDValue();
41264 // Look through single use any_extends / truncs.
41265 SDValue IntermediateBitwidthOp;
41266 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
41267 N0.hasOneUse()) {
41268 IntermediateBitwidthOp = N0;
41269 N0 = N0.getOperand(0);
41272 // See if we have a single use cmov.
41273 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
41274 return SDValue();
41276 SDValue CMovOp0 = N0.getOperand(0);
41277 SDValue CMovOp1 = N0.getOperand(1);
41279 // Make sure both operands are constants.
41280 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
41281 !isa<ConstantSDNode>(CMovOp1.getNode()))
41282 return SDValue();
41284 SDLoc DL(N);
41286 // If we looked through an any_extend/trunc above, apply it to the constants too.
41287 if (IntermediateBitwidthOp) {
41288 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
41289 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
41290 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
41293 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
41294 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
41296 EVT CMovVT = DstVT;
41297 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
41298 if (DstVT == MVT::i16) {
41299 CMovVT = MVT::i32;
41300 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
41301 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
41304 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
41305 N0.getOperand(2), N0.getOperand(3));
41307 if (CMovVT != DstVT)
41308 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
41310 return CMov;
41313 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
41314 const X86Subtarget &Subtarget) {
41315 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
41317 if (SDValue V = combineSextInRegCmov(N, DAG))
41318 return V;
41320 EVT VT = N->getValueType(0);
41321 SDValue N0 = N->getOperand(0);
41322 SDValue N1 = N->getOperand(1);
41323 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
41324 SDLoc dl(N);
41326 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
41327 // SSE and AVX2 since there is no sign-extended shift right
41328 // operation on a vector with 64-bit elements.
41329 // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
41330 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
41331 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
41332 N0.getOpcode() == ISD::SIGN_EXTEND)) {
41333 SDValue N00 = N0.getOperand(0);
41335 // EXTLOAD has a better solution on AVX2: it may be replaced with an
41336 // X86ISD::VSEXT node.
41337 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
41338 if (!ISD::isNormalLoad(N00.getNode()))
41339 return SDValue();
41341 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
41342 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
41343 N00, N1);
41344 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
41347 return SDValue();
41350 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
41351 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
41352 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
41353 /// opportunities to combine math ops, use an LEA, or use a complex addressing
41354 /// mode. This can eliminate extend, add, and shift instructions.
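/// For example, on x86-64 something like:
///   (i64 (sext (add nsw (i32 X), 42))) --> (i64 (add (sext X), 42))
/// lets the wider add feed an LEA or a complex addressing mode directly.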
41355 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
41356 const X86Subtarget &Subtarget) {
41357 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
41358 Ext->getOpcode() != ISD::ZERO_EXTEND)
41359 return SDValue();
41361 // TODO: This should be valid for other integer types.
41362 EVT VT = Ext->getValueType(0);
41363 if (VT != MVT::i64)
41364 return SDValue();
41366 SDValue Add = Ext->getOperand(0);
41367 if (Add.getOpcode() != ISD::ADD)
41368 return SDValue();
41370 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
41371 bool NSW = Add->getFlags().hasNoSignedWrap();
41372 bool NUW = Add->getFlags().hasNoUnsignedWrap();
41374 // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
41375 // into the 'zext'
41376 if ((Sext && !NSW) || (!Sext && !NUW))
41377 return SDValue();
41379 // Having a constant operand to the 'add' ensures that we are not increasing
41380 // the instruction count because the constant is extended for free below.
41381 // A constant operand can also become the displacement field of an LEA.
41382 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
41383 if (!AddOp1)
41384 return SDValue();
41386 // Don't make the 'add' bigger if there's no hope of combining it with some
41387 // other 'add' or 'shl' instruction.
41388 // TODO: It may be profitable to generate simpler LEA instructions in place
41389 // of single 'add' instructions, but the cost model for selecting an LEA
41390 // currently has a high threshold.
41391 bool HasLEAPotential = false;
41392 for (auto *User : Ext->uses()) {
41393 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
41394 HasLEAPotential = true;
41395 break;
41398 if (!HasLEAPotential)
41399 return SDValue();
41401 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
41402 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
41403 SDValue AddOp0 = Add.getOperand(0);
41404 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
41405 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
41407 // The wider add is guaranteed to not wrap because both operands are
41408 // sign-extended.
41409 SDNodeFlags Flags;
41410 Flags.setNoSignedWrap(NSW);
41411 Flags.setNoUnsignedWrap(NUW);
41412 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
41415 // If we encounter an {ANY,SIGN,ZERO}_EXTEND applied to a CMOV with constant
41416 // operands, and the result of the CMOV is not used anywhere else, promote the
41417 // CMOV itself instead of promoting its result. This can be beneficial because:
41418 // 1) X86TargetLowering::EmitLoweredSelect can later merge two (or more)
41419 // pseudo-CMOVs only when they appear one after another, and getting rid of
41420 // the result-extension code after the CMOV helps with that.
41421 // 2) Promotion of constant CMOV arguments is free, so the
41422 // {ANY,SIGN,ZERO}_EXTEND will simply be deleted.
41423 // 3) A 16-bit CMOV encoding is 4 bytes and a 32-bit CMOV is 3 bytes, so this
41424 // promotion is also good in terms of code size.
41425 // (A 64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
41426 // promotion.)
41427 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
41428 SDValue CMovN = Extend->getOperand(0);
41429 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
41430 return SDValue();
41432 EVT TargetVT = Extend->getValueType(0);
41433 unsigned ExtendOpcode = Extend->getOpcode();
41434 SDLoc DL(Extend);
41436 EVT VT = CMovN.getValueType();
41437 SDValue CMovOp0 = CMovN.getOperand(0);
41438 SDValue CMovOp1 = CMovN.getOperand(1);
41440 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
41441 !isa<ConstantSDNode>(CMovOp1.getNode()))
41442 return SDValue();
41444 // Only extend to i32 or i64.
41445 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
41446 return SDValue();
41448 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
41449 // are free.
41450 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
41451 return SDValue();
41453 // If this is a zero extend to i64, we should only extend to i32 and use a free
41454 // zero extend to finish.
41455 EVT ExtendVT = TargetVT;
41456 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
41457 ExtendVT = MVT::i32;
41459 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
41460 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
41462 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
41463 CMovN.getOperand(2), CMovN.getOperand(3));
41465 // Finish extending if needed.
41466 if (ExtendVT != TargetVT)
41467 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
41469 return Res;
41472 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
41473 // This is more or less the reverse of combineBitcastvxi1.
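// For example, (v8i16 (zext (v8i1 (bitcast (i8 X))))) ends up roughly as:
// broadcast X to all 8 lanes, AND each lane with its bit mask
// <1,2,4,...,128>, compare for equality against that same mask, sign-extend
// the result, and finally shift right to produce 0/1 values for the zext.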
41474 static SDValue
41475 combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
41476 TargetLowering::DAGCombinerInfo &DCI,
41477 const X86Subtarget &Subtarget) {
41478 unsigned Opcode = N->getOpcode();
41479 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
41480 Opcode != ISD::ANY_EXTEND)
41481 return SDValue();
41482 if (!DCI.isBeforeLegalizeOps())
41483 return SDValue();
41484 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
41485 return SDValue();
41487 SDValue N0 = N->getOperand(0);
41488 EVT VT = N->getValueType(0);
41489 EVT SVT = VT.getScalarType();
41490 EVT InSVT = N0.getValueType().getScalarType();
41491 unsigned EltSizeInBits = SVT.getSizeInBits();
41493 // We must be extending a bool vector (bit-cast from a scalar integer) to
41494 // legal integer types.
41495 if (!VT.isVector())
41496 return SDValue();
41497 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
41498 return SDValue();
41499 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
41500 return SDValue();
41502 SDValue N00 = N0.getOperand(0);
41503 EVT SclVT = N0.getOperand(0).getValueType();
41504 if (!SclVT.isScalarInteger())
41505 return SDValue();
41507 SDLoc DL(N);
41508 SDValue Vec;
41509 SmallVector<int, 32> ShuffleMask;
41510 unsigned NumElts = VT.getVectorNumElements();
41511 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
41513 // Broadcast the scalar integer to the vector elements.
41514 if (NumElts > EltSizeInBits) {
41515 // If the scalar integer is greater than the vector element size, then we
41516 // must split it down into sub-sections for broadcasting. For example:
41517 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
41518 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
41519 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
41520 unsigned Scale = NumElts / EltSizeInBits;
41521 EVT BroadcastVT =
41522 EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
41523 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
41524 Vec = DAG.getBitcast(VT, Vec);
41526 for (unsigned i = 0; i != Scale; ++i)
41527 ShuffleMask.append(EltSizeInBits, i);
41528 } else {
41529 // For a smaller scalar integer, we can simply any-extend it to the vector
41530 // element size (we don't care about the upper bits) and broadcast it to all
41531 // elements.
41532 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
41533 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
41534 ShuffleMask.append(NumElts, 0);
41536 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
41538 // Now, mask the relevant bit in each element.
41539 SmallVector<SDValue, 32> Bits;
41540 for (unsigned i = 0; i != NumElts; ++i) {
41541 int BitIdx = (i % EltSizeInBits);
41542 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
41543 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
41545 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
41546 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
41548 // Compare against the bitmask and extend the result.
41549 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
41550 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
41551 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
41553 // For SEXT we are now done; otherwise shift the result down for
41554 // zero-extension.
41555 if (Opcode == ISD::SIGN_EXTEND)
41556 return Vec;
41557 return DAG.getNode(ISD::SRL, DL, VT, Vec,
41558 DAG.getConstant(EltSizeInBits - 1, DL, VT));
41561 /// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
41562 /// ZERO_EXTEND_VECTOR_INREG. This requires splitting (or concatenating
41563 /// with UNDEFs) the input into vectors of the same size as the target type,
41564 /// which then extend the lowest elements.
41565 static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
41566 TargetLowering::DAGCombinerInfo &DCI,
41567 const X86Subtarget &Subtarget) {
41568 if (ExperimentalVectorWideningLegalization)
41569 return SDValue();
41571 unsigned Opcode = N->getOpcode();
41572 // TODO - add ANY_EXTEND support.
41573 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
41574 return SDValue();
41575 if (!DCI.isBeforeLegalizeOps())
41576 return SDValue();
41577 if (!Subtarget.hasSSE2())
41578 return SDValue();
41580 SDValue N0 = N->getOperand(0);
41581 EVT VT = N->getValueType(0);
41582 EVT SVT = VT.getScalarType();
41583 EVT InVT = N0.getValueType();
41584 EVT InSVT = InVT.getScalarType();
41586 // FIXME: Generic DAGCombiner previously had a bug that would cause a
41587 // sign_extend of setcc to sometimes return the original node and tricked it
41588 // into thinking CombineTo was used which prevented the target combines from
41589 // running.
41590 // Early out here to avoid regressions like this:
41591 // (v4i32 (sext (v4i1 (setcc (v4i16)))))
41592 // Becomes
41593 // (v4i32 (sext_invec (v8i16 (concat (v4i16 (setcc (v4i16))), undef))))
41594 // Type legalized to
41595 // (v4i32 (sext_invec (v8i16 (trunc_invec (v4i32 (setcc (v4i32)))))))
41596 // Leading to a packssdw+pmovsxwd
41597 // We could write a DAG combine to fix this, but really we shouldn't be
41598 // creating sext_invec that's forcing v8i16 into the DAG.
41599 if (N0.getOpcode() == ISD::SETCC)
41600 return SDValue();
41602 // Input type must be a vector and we must be extending legal integer types.
41603 if (!VT.isVector() || VT.getVectorNumElements() < 2)
41604 return SDValue();
41605 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
41606 return SDValue();
41607 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
41608 return SDValue();
41610 // If the input/output types are both legal then we have at least AVX1 and
41611 // we will be able to use SIGN_EXTEND/ZERO_EXTEND directly.
41612 if (DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
41613 DAG.getTargetLoweringInfo().isTypeLegal(InVT))
41614 return SDValue();
41616 SDLoc DL(N);
41618 auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
41619 EVT SrcVT = N.getValueType();
41620 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
41621 Size / SrcVT.getScalarSizeInBits());
41622 SmallVector<SDValue, 8> Opnds(Size / SrcVT.getSizeInBits(),
41623 DAG.getUNDEF(SrcVT));
41624 Opnds[0] = N;
41625 return DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Opnds);
41628 // If the target size is less than 128 bits, widen to a type that would extend
41629 // to 128 bits, extend that, and extract the original target vector.
41630 if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
41631 unsigned Scale = 128 / VT.getSizeInBits();
41632 EVT ExVT =
41633 EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
41634 SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
41635 SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
41636 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
41637 DAG.getIntPtrConstant(0, DL));
41640 // If the target size is 128 bits (or 256 bits on an AVX target), then convert
41641 // to ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
41642 // Also use this if we don't have SSE41 to allow the legalizer to do its job.
41643 if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
41644 (VT.is256BitVector() && Subtarget.hasAVX()) ||
41645 (VT.is512BitVector() && Subtarget.useAVX512Regs())) {
41646 SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
41647 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
41648 return DAG.getNode(Opcode, DL, VT, ExOp);
41651 auto SplitAndExtendInReg = [&](unsigned SplitSize) {
41652 unsigned NumVecs = VT.getSizeInBits() / SplitSize;
41653 unsigned NumSubElts = SplitSize / SVT.getSizeInBits();
41654 EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
41655 EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
41657 unsigned IROpc = getOpcode_EXTEND_VECTOR_INREG(Opcode);
41658 SmallVector<SDValue, 8> Opnds;
41659 for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
41660 SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
41661 DAG.getIntPtrConstant(Offset, DL));
41662 SrcVec = ExtendVecSize(DL, SrcVec, SplitSize);
41663 SrcVec = DAG.getNode(IROpc, DL, SubVT, SrcVec);
41664 Opnds.push_back(SrcVec);
41666 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
41669 // On pre-AVX targets, split into 128-bit nodes of
41670 // ISD::*_EXTEND_VECTOR_INREG.
41671 if (!Subtarget.hasAVX() && !(VT.getSizeInBits() % 128))
41672 return SplitAndExtendInReg(128);
41674 // On pre-AVX512 targets, split into 256-bit nodes of
41675 // ISD::*_EXTEND_VECTOR_INREG.
41676 if (!Subtarget.useAVX512Regs() && !(VT.getSizeInBits() % 256))
41677 return SplitAndExtendInReg(256);
41679 return SDValue();
41682 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
41683 // result type.
41684 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
41685 const X86Subtarget &Subtarget) {
41686 SDValue N0 = N->getOperand(0);
41687 EVT VT = N->getValueType(0);
41688 SDLoc dl(N);
41690 // Only do this combine with AVX512 for vector extends.
41691 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
41692 return SDValue();
41694 // Only combine legal element types.
41695 EVT SVT = VT.getVectorElementType();
41696 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
41697 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
41698 return SDValue();
41700 // We can only do this if the vector size is 256 bits or less.
41701 unsigned Size = VT.getSizeInBits();
41702 if (Size > 256)
41703 return SDValue();
41705 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
41706 // those are the only integer compares we have.
41707 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
41708 if (ISD::isUnsignedIntSetCC(CC))
41709 return SDValue();
41711 // Only do this combine if the extension will be fully consumed by the setcc.
41712 EVT N00VT = N0.getOperand(0).getValueType();
41713 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
41714 if (Size != MatchingVecType.getSizeInBits())
41715 return SDValue();
41717 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
41719 if (N->getOpcode() == ISD::ZERO_EXTEND)
41720 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
41722 return Res;
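// Combine a SIGN_EXTEND node: promote constant CMOVs, fold extends of setcc,
// rewrite sext(xor Bool, -1) as sub(zext Bool, 1), and try the vector
// in-reg extension and add-promotion combines.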
41725 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
41726 TargetLowering::DAGCombinerInfo &DCI,
41727 const X86Subtarget &Subtarget) {
41728 SDValue N0 = N->getOperand(0);
41729 EVT VT = N->getValueType(0);
41730 EVT InVT = N0.getValueType();
41731 SDLoc DL(N);
41733 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
41734 return NewCMov;
41736 if (!DCI.isBeforeLegalizeOps())
41737 return SDValue();
41739 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
41740 return V;
41742 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
41743 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
41744 // Inverting and sign-extending a boolean is the same as zero-extending and
41745 // subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is
41746 // efficiently lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
41747 // sext (xor Bool, -1) --> sub (zext Bool), 1
41748 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
41749 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
41752 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
41753 return V;
41755 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
41756 return V;
41758 if (VT.isVector())
41759 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
41760 return R;
41762 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
41763 return NewAdd;
41765 return SDValue();
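// Return the FMA-family opcode with the multiply result and/or the accumulator
// negated, e.g. NegMul turns ISD::FMA into X86ISD::FNMADD.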
41768 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
41769 if (NegMul) {
41770 switch (Opcode) {
41771 default: llvm_unreachable("Unexpected opcode");
41772 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
41773 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
41774 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
41775 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
41776 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
41777 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
41778 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
41779 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
41783 if (NegAcc) {
41784 switch (Opcode) {
41785 default: llvm_unreachable("Unexpected opcode");
41786 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
41787 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
41788 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
41789 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
41790 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
41791 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
41792 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
41793 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
41797 return Opcode;
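// Fold FNEG operands of FMA-family nodes into the opcode itself,
// e.g. fma(-A, B, C) --> X86ISD::FNMADD(A, B, C).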
41800 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
41801 const X86Subtarget &Subtarget) {
41802 SDLoc dl(N);
41803 EVT VT = N->getValueType(0);
41805 // Let legalize expand this if it isn't a legal type yet.
41806 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
41807 return SDValue();
41809 EVT ScalarVT = VT.getScalarType();
41810 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
41811 return SDValue();
41813 SDValue A = N->getOperand(0);
41814 SDValue B = N->getOperand(1);
41815 SDValue C = N->getOperand(2);
41817 auto invertIfNegative = [&DAG](SDValue &V) {
41818 if (SDValue NegVal = isFNEG(DAG, V.getNode())) {
41819 V = DAG.getBitcast(V.getValueType(), NegVal);
41820 return true;
41822 // Look through extract_vector_elts. If it comes from an FNEG, create a
41823 // new extract from the FNEG input.
41824 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
41825 isNullConstant(V.getOperand(1))) {
41826 if (SDValue NegVal = isFNEG(DAG, V.getOperand(0).getNode())) {
41827 NegVal = DAG.getBitcast(V.getOperand(0).getValueType(), NegVal);
41828 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
41829 NegVal, V.getOperand(1));
41830 return true;
41834 return false;
41837 // Do not convert the passthru input of scalar intrinsics.
41838 // FIXME: We could allow negations of the lower element only.
41839 bool NegA = invertIfNegative(A);
41840 bool NegB = invertIfNegative(B);
41841 bool NegC = invertIfNegative(C);
41843 if (!NegA && !NegB && !NegC)
41844 return SDValue();
41846 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
41848 if (N->getNumOperands() == 4)
41849 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
41850 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
41853 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
41854 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
41855 const X86Subtarget &Subtarget) {
41856 SDLoc dl(N);
41857 EVT VT = N->getValueType(0);
41859 SDValue NegVal = isFNEG(DAG, N->getOperand(2).getNode());
41860 if (!NegVal)
41861 return SDValue();
41863 // FIXME: Should we bitcast instead?
41864 if (NegVal.getValueType() != VT)
41865 return SDValue();
41867 unsigned NewOpcode;
41868 switch (N->getOpcode()) {
41869 default: llvm_unreachable("Unexpected opcode!");
41870 case X86ISD::FMADDSUB: NewOpcode = X86ISD::FMSUBADD; break;
41871 case X86ISD::FMADDSUB_RND: NewOpcode = X86ISD::FMSUBADD_RND; break;
41872 case X86ISD::FMSUBADD: NewOpcode = X86ISD::FMADDSUB; break;
41873 case X86ISD::FMSUBADD_RND: NewOpcode = X86ISD::FMADDSUB_RND; break;
41876 if (N->getNumOperands() == 4)
41877 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
41878 NegVal, N->getOperand(3));
41879 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
41880 NegVal);
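// Combine a ZERO_EXTEND node: eliminate extends of X86ISD::SETCC_CARRY,
// promote constant CMOVs, fold extends of setcc, and try the vector in-reg
// extension combines.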
41883 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
41884 TargetLowering::DAGCombinerInfo &DCI,
41885 const X86Subtarget &Subtarget) {
41886 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
41887 // (and (i32 x86isd::setcc_carry), 1)
41888 // This eliminates the zext. This transformation is necessary because
41889 // ISD::SETCC is always legalized to i8.
41890 SDLoc dl(N);
41891 SDValue N0 = N->getOperand(0);
41892 EVT VT = N->getValueType(0);
41894 if (N0.getOpcode() == ISD::AND &&
41895 N0.hasOneUse() &&
41896 N0.getOperand(0).hasOneUse()) {
41897 SDValue N00 = N0.getOperand(0);
41898 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
41899 if (!isOneConstant(N0.getOperand(1)))
41900 return SDValue();
41901 return DAG.getNode(ISD::AND, dl, VT,
41902 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
41903 N00.getOperand(0), N00.getOperand(1)),
41904 DAG.getConstant(1, dl, VT));
41908 if (N0.getOpcode() == ISD::TRUNCATE &&
41909 N0.hasOneUse() &&
41910 N0.getOperand(0).hasOneUse()) {
41911 SDValue N00 = N0.getOperand(0);
41912 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
41913 return DAG.getNode(ISD::AND, dl, VT,
41914 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
41915 N00.getOperand(0), N00.getOperand(1)),
41916 DAG.getConstant(1, dl, VT));
41920 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
41921 return NewCMov;
41923 if (DCI.isBeforeLegalizeOps())
41924 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
41925 return V;
41927 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
41928 return V;
41930 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
41931 return V;
41933 if (VT.isVector())
41934 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
41935 return R;
41937 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
41938 return NewAdd;
41940 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
41941 return R;
41943 // TODO: Combine with any target/faux shuffle.
41944 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
41945 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
41946 SDValue N00 = N0.getOperand(0);
41947 SDValue N01 = N0.getOperand(1);
41948 unsigned NumSrcElts = N00.getValueType().getVectorNumElements();
41949 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
41950 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
41951 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
41952 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
41953 return concatSubVectors(N00, N01, VT, NumSrcElts * 2, DAG, dl, 128);
41957 return SDValue();
41960 /// Try to map a 128-bit or larger integer comparison to vector instructions
41961 /// before type legalization splits it up into chunks.
41962 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
41963 const X86Subtarget &Subtarget) {
41964 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
41965 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
41967 // We're looking for an oversized integer equality comparison.
41968 SDValue X = SetCC->getOperand(0);
41969 SDValue Y = SetCC->getOperand(1);
41970 EVT OpVT = X.getValueType();
41971 unsigned OpSize = OpVT.getSizeInBits();
41972 if (!OpVT.isScalarInteger() || OpSize < 128)
41973 return SDValue();
41975 // Ignore a comparison with zero because that gets special treatment in
41976 // EmitTest(). But make an exception for the special case of a pair of
41977 // logically-combined vector-sized operands compared to zero. This pattern may
41978 // be generated by the memcmp expansion pass with oversized integer compares
41979 // (see PR33325).
41980 bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
41981 X.getOperand(0).getOpcode() == ISD::XOR &&
41982 X.getOperand(1).getOpcode() == ISD::XOR;
41983 if (isNullConstant(Y) && !IsOrXorXorCCZero)
41984 return SDValue();
41986 // Don't perform this combine if constructing the vector will be expensive.
41987 auto IsVectorBitCastCheap = [](SDValue X) {
41988 X = peekThroughBitcasts(X);
41989 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
41990 X.getOpcode() == ISD::LOAD;
41992 if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
41993 !IsOrXorXorCCZero)
41994 return SDValue();
41996 // TODO: Use PXOR + PTEST for SSE4.1 or later?
41997 EVT VT = SetCC->getValueType(0);
41998 SDLoc DL(SetCC);
41999 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
42000 (OpSize == 256 && Subtarget.hasAVX2()) ||
42001 (OpSize == 512 && Subtarget.useAVX512Regs())) {
42002 EVT VecVT = OpSize == 512 ? MVT::v16i32 :
42003 OpSize == 256 ? MVT::v32i8 :
42004 MVT::v16i8;
42005 EVT CmpVT = OpSize == 512 ? MVT::v16i1 : VecVT;
42006 SDValue Cmp;
42007 if (IsOrXorXorCCZero) {
42008 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
42009 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
42010 // Use 2 vector equality compares and 'and' the results before doing a
42011 // MOVMSK.
42012 SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
42013 SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
42014 SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
42015 SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
42016 SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
42017 SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
42018 Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
42019 } else {
42020 SDValue VecX = DAG.getBitcast(VecVT, X);
42021 SDValue VecY = DAG.getBitcast(VecVT, Y);
42022 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
42024 // For 512-bits we want to emit a setcc that will lower to kortest.
42025 if (OpSize == 512)
42026 return DAG.getSetCC(DL, VT, DAG.getBitcast(MVT::i16, Cmp),
42027 DAG.getConstant(0xFFFF, DL, MVT::i16), CC);
42028 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
42029 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
42030 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
42031 // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
42032 // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
42033 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
42034 SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
42035 MVT::i32);
42036 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
42039 return SDValue();
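// Combine an ISD::SETCC node: canonicalize compares against a negation,
// map oversized integer equality compares to vector code, and simplify
// compares of sign-extended vXi1 vectors.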
42042 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
42043 const X86Subtarget &Subtarget) {
42044 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
42045 SDValue LHS = N->getOperand(0);
42046 SDValue RHS = N->getOperand(1);
42047 EVT VT = N->getValueType(0);
42048 EVT OpVT = LHS.getValueType();
42049 SDLoc DL(N);
42051 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
42052 // 0-x == y --> x+y == 0
42053 // 0-x != y --> x+y != 0
42054 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
42055 LHS.hasOneUse()) {
42056 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
42057 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
42059 // x == 0-y --> x+y == 0
42060 // x != 0-y --> x+y != 0
42061 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
42062 RHS.hasOneUse()) {
42063 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
42064 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
42067 if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
42068 return V;
42071 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42072 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
42073 // Put build_vectors on the right.
42074 if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
42075 std::swap(LHS, RHS);
42076 CC = ISD::getSetCCSwappedOperands(CC);
42079 bool IsSEXT0 =
42080 (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
42081 (LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
42082 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
42084 if (IsSEXT0 && IsVZero1) {
42085 assert(VT == LHS.getOperand(0).getValueType() &&
42086 "Uexpected operand type");
42087 if (CC == ISD::SETGT)
42088 return DAG.getConstant(0, DL, VT);
42089 if (CC == ISD::SETLE)
42090 return DAG.getConstant(1, DL, VT);
42091 if (CC == ISD::SETEQ || CC == ISD::SETGE)
42092 return DAG.getNOT(DL, LHS.getOperand(0), VT);
42094 assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
42095 "Unexpected condition code!");
42096 return LHS.getOperand(0);
42100 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
42101 // pre-promote its result type since vXi1 vectors don't get promoted
42102 // during type legalization.
42103 // NOTE: The element count check is to ignore operand types that need to
42104 // go through type promotion to a 128-bit vector.
42105 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
42106 VT.getVectorElementType() == MVT::i1 &&
42107 (ExperimentalVectorWideningLegalization ||
42108 VT.getVectorNumElements() > 4) &&
42109 (OpVT.getVectorElementType() == MVT::i8 ||
42110 OpVT.getVectorElementType() == MVT::i16)) {
42111 SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
42112 N->getOperand(2));
42113 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
42116 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
42117 // to avoid scalarization via legalization because v4i32 is not a legal type.
42118 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
42119 LHS.getValueType() == MVT::v4f32)
42120 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
42122 return SDValue();
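// Combine an X86ISD::MOVMSK node: constant fold, look through int->fp
// bitcasts, fold movmsk(not(x)) -> not(movmsk(x)), and simplify demanded bits.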
42125 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
42126 TargetLowering::DAGCombinerInfo &DCI) {
42127 SDValue Src = N->getOperand(0);
42128 MVT SrcVT = Src.getSimpleValueType();
42129 MVT VT = N->getSimpleValueType(0);
42130 unsigned NumBits = VT.getScalarSizeInBits();
42131 unsigned NumElts = SrcVT.getVectorNumElements();
42133 // Perform constant folding.
42134 if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
42135 assert(VT == MVT::i32 && "Unexpected result type");
42136 APInt Imm(32, 0);
42137 for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
42138 if (!Src.getOperand(Idx).isUndef() &&
42139 Src.getConstantOperandAPInt(Idx).isNegative())
42140 Imm.setBit(Idx);
42142 return DAG.getConstant(Imm, SDLoc(N), VT);
42145 // Look through int->fp bitcasts that don't change the element width.
42146 unsigned EltWidth = SrcVT.getScalarSizeInBits();
42147 if (Src.getOpcode() == ISD::BITCAST &&
42148 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
42149 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
42151 // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
42152 // with scalar comparisons.
42153 if (SDValue NotSrc = IsNOT(Src, DAG)) {
42154 SDLoc DL(N);
42155 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
42156 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
42157 return DAG.getNode(ISD::XOR, DL, VT,
42158 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
42159 DAG.getConstant(NotMask, DL, VT));
42162 // Simplify the inputs.
42163 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42164 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
42165 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
42166 return SDValue(N, 0);
42168 return SDValue();
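// Combine gather/scatter nodes: strip unnecessary index extensions, legalize
// the index type to i32/i64, and (without AVX512) demand only the sign bit of
// each mask element.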
42171 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
42172 TargetLowering::DAGCombinerInfo &DCI,
42173 const X86Subtarget &Subtarget) {
42174 SDLoc DL(N);
42176 if (DCI.isBeforeLegalizeOps()) {
42177 SDValue Index = N->getOperand(4);
42178 // Remove any sign extends from types of 32 bits or smaller to types larger than 32 bits.
42179 // Only do this before LegalizeOps in case we need the sign extend for
42180 // legalization.
42181 if (Index.getOpcode() == ISD::SIGN_EXTEND) {
42182 if (Index.getScalarValueSizeInBits() > 32 &&
42183 Index.getOperand(0).getScalarValueSizeInBits() <= 32) {
42184 SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
42185 NewOps[4] = Index.getOperand(0);
42186 SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
42187 if (Res == N) {
42188 // The original sign extend now has fewer users; add it back to the
42189 // worklist in case it needs to be removed.
42190 DCI.AddToWorklist(Index.getNode());
42191 DCI.AddToWorklist(N);
42193 return SDValue(Res, 0);
42197 // Make sure the index is either i32 or i64
42198 unsigned ScalarSize = Index.getScalarValueSizeInBits();
42199 if (ScalarSize != 32 && ScalarSize != 64) {
42200 MVT EltVT = ScalarSize > 32 ? MVT::i64 : MVT::i32;
42201 EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
42202 Index.getValueType().getVectorNumElements());
42203 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
42204 SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
42205 NewOps[4] = Index;
42206 SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
42207 if (Res == N)
42208 DCI.AddToWorklist(N);
42209 return SDValue(Res, 0);
42212 // Try to remove zero extends from 32->64 if we know the sign bit of
42213 // the input is zero.
42214 if (Index.getOpcode() == ISD::ZERO_EXTEND &&
42215 Index.getScalarValueSizeInBits() == 64 &&
42216 Index.getOperand(0).getScalarValueSizeInBits() == 32) {
42217 if (DAG.SignBitIsZero(Index.getOperand(0))) {
42218 SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
42219 NewOps[4] = Index.getOperand(0);
42220 SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
42221 if (Res == N) {
42222 // The original zero extend now has fewer users; add it back to the
42223 // worklist in case it needs to be removed.
42224 DCI.AddToWorklist(Index.getNode());
42225 DCI.AddToWorklist(N);
42227 return SDValue(Res, 0);
42232 // With AVX2 we only demand the sign (upper) bit of each mask element.
42233 if (!Subtarget.hasAVX512()) {
42234 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42235 SDValue Mask = N->getOperand(2);
42236 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
42237 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
42238 return SDValue(N, 0);
42241 return SDValue();
42244 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
42245 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
42246 const X86Subtarget &Subtarget) {
42247 SDLoc DL(N);
42248 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
42249 SDValue EFLAGS = N->getOperand(1);
42251 // Try to simplify the EFLAGS and condition code operands.
42252 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
42253 return getSETCC(CC, Flags, DL, DAG);
42255 return SDValue();
42258 /// Optimize branch condition evaluation.
42259 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
42260 const X86Subtarget &Subtarget) {
42261 SDLoc DL(N);
42262 SDValue EFLAGS = N->getOperand(3);
42263 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
42265 // Try to simplify the EFLAGS and condition code operands.
42266 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
42267 // RAUW them under us.
42268 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
42269 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
42270 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
42271 N->getOperand(1), Cond, Flags);
42274 return SDValue();
42277 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
42278 SelectionDAG &DAG) {
42279 // Take advantage of vector comparisons producing 0 or -1 in each lane to
42280 // optimize away operation when it's from a constant.
42282 // The general transformation is:
42283 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
42284 // AND(VECTOR_CMP(x,y), constant2)
42285 // constant2 = UNARYOP(constant)
42287 // Early exit if this isn't a vector operation, the operand of the
42288 // unary operation isn't a bitwise AND, or if the sizes of the operations
42289 // aren't the same.
42290 EVT VT = N->getValueType(0);
42291 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
42292 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
42293 VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
42294 return SDValue();
42296 // Now check that the other operand of the AND is a constant. We could
42297 // make the transformation for non-constant splats as well, but it's unclear
42298 // that would be a benefit as it would not eliminate any operations, just
42299 // perform one more step in scalar code before moving to the vector unit.
42300 if (auto *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(0).getOperand(1))) {
42301 // Bail out if the vector isn't a constant.
42302 if (!BV->isConstant())
42303 return SDValue();
42305 // Everything checks out. Build up the new and improved node.
42306 SDLoc DL(N);
42307 EVT IntVT = BV->getValueType(0);
42308 // Create a new constant of the appropriate type for the transformed
42309 // DAG.
42310 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
42311 // The AND node needs bitcasts to/from an integer vector type around it.
42312 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
42313 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
42314 N->getOperand(0)->getOperand(0), MaskConst);
42315 SDValue Res = DAG.getBitcast(VT, NewAnd);
42316 return Res;
42319 return SDValue();
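// Combine a UINT_TO_FP node: widen narrow vector integer sources to i32 and
// use SINT_TO_FP, or switch directly to SINT_TO_FP when the sign bit of the
// input is known to be zero.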
42322 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
42323 const X86Subtarget &Subtarget) {
42324 SDValue Op0 = N->getOperand(0);
42325 EVT VT = N->getValueType(0);
42326 EVT InVT = Op0.getValueType();
42328 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
42329 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
42330 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
42331 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
42332 SDLoc dl(N);
42333 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
42334 InVT.getVectorNumElements());
42335 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
42337 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
42338 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
42341 // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
42342 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
42343 // the optimization here.
42344 if (DAG.SignBitIsZero(Op0))
42345 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
42347 return SDValue();
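// Combine a SINT_TO_FP node: fold conversions of masked vector compares,
// widen narrow vector integer sources, shrink wide sources whose upper bits
// are all sign bits, and build an x87 FILD for i64 loads on 32-bit targets.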
42350 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
42351 const X86Subtarget &Subtarget) {
42352 // First try to optimize away the conversion entirely when it's
42353 // conditionally from a constant. Vectors only.
42354 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
42355 return Res;
42357 // Now move on to more general possibilities.
42358 SDValue Op0 = N->getOperand(0);
42359 EVT VT = N->getValueType(0);
42360 EVT InVT = Op0.getValueType();
42362 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
42363 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
42364 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
42365 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
42366 SDLoc dl(N);
42367 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
42368 InVT.getVectorNumElements());
42369 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
42370 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
42373 // Without AVX512DQ we only support i64 to float scalar conversion. For both
42374 // vectors and scalars, see if we know that the upper bits are all the sign
42375 // bit, in which case we can truncate the input to i32 and convert from that.
42376 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
42377 unsigned BitWidth = InVT.getScalarSizeInBits();
42378 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
42379 if (NumSignBits >= (BitWidth - 31)) {
42380 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), 32);
42381 if (InVT.isVector())
42382 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
42383 InVT.getVectorNumElements());
42384 SDLoc dl(N);
42385 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
42386 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
42390 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
42391 // a 32-bit target where SSE doesn't support i64->FP operations.
42392 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
42393 Op0.getOpcode() == ISD::LOAD) {
42394 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
42395 EVT LdVT = Ld->getValueType(0);
42397 // This transformation is not supported if the result type is f16 or f128.
42398 if (VT == MVT::f16 || VT == MVT::f128)
42399 return SDValue();
42401 // If we have AVX512DQ we can use packed conversion instructions unless
42402 // the VT is f80.
42403 if (Subtarget.hasDQI() && VT != MVT::f80)
42404 return SDValue();
42406 if (!Ld->isVolatile() && !VT.isVector() &&
42407 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
42408 !Subtarget.is64Bit() && LdVT == MVT::i64) {
42409 SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
42410 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
42411 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
42412 return FILDChain;
42415 return SDValue();
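// Return true if any user of this EFLAGS value tests a condition that depends
// on the carry or overflow flag (conservatively true for unknown users).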
42418 static bool needCarryOrOverflowFlag(SDValue Flags) {
42419 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
42421 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
42422 UI != UE; ++UI) {
42423 SDNode *User = *UI;
42425 X86::CondCode CC;
42426 switch (User->getOpcode()) {
42427 default:
42428 // Be conservative.
42429 return true;
42430 case X86ISD::SETCC:
42431 case X86ISD::SETCC_CARRY:
42432 CC = (X86::CondCode)User->getConstantOperandVal(0);
42433 break;
42434 case X86ISD::BRCOND:
42435 CC = (X86::CondCode)User->getConstantOperandVal(2);
42436 break;
42437 case X86ISD::CMOV:
42438 CC = (X86::CondCode)User->getConstantOperandVal(2);
42439 break;
42442 switch (CC) {
42443 default: break;
42444 case X86::COND_A: case X86::COND_AE:
42445 case X86::COND_B: case X86::COND_BE:
42446 case X86::COND_O: case X86::COND_NO:
42447 case X86::COND_G: case X86::COND_GE:
42448 case X86::COND_L: case X86::COND_LE:
42449 return true;
42453 return false;
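// Return true if every user of this EFLAGS value only tests COND_E/COND_NE,
// i.e. only the zero flag is consumed.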
42456 static bool onlyZeroFlagUsed(SDValue Flags) {
42457 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
42459 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
42460 UI != UE; ++UI) {
42461 SDNode *User = *UI;
42463 unsigned CCOpNo;
42464 switch (User->getOpcode()) {
42465 default:
42466 // Be conservative.
42467 return false;
42468 case X86ISD::SETCC: CCOpNo = 0; break;
42469 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
42470 case X86ISD::BRCOND: CCOpNo = 2; break;
42471 case X86ISD::CMOV: CCOpNo = 2; break;
42474 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
42475 if (CC != X86::COND_E && CC != X86::COND_NE)
42476 return false;
42479 return true;
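// Combine an X86ISD::CMP against zero: turn constant logical shifts into an
// AND that can become a TEST, and narrow compares of truncated arithmetic to
// the truncated type.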
42482 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
42483 // Only handle test patterns.
42484 if (!isNullConstant(N->getOperand(1)))
42485 return SDValue();
42487 // If we have a CMP of a truncated binop, see if we can make a smaller binop
42488 // and use its flags directly.
42489 // TODO: Maybe we should try promoting compares that only use the zero flag
42490 // first if we can prove the upper bits with computeKnownBits?
42491 SDLoc dl(N);
42492 SDValue Op = N->getOperand(0);
42493 EVT VT = Op.getValueType();
42495 // If we have a constant logical shift that's only used in a comparison
42496 // against zero turn it into an equivalent AND. This allows turning it into
42497 // a TEST instruction later.
42498 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
42499 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
42500 onlyZeroFlagUsed(SDValue(N, 0))) {
42501 unsigned BitWidth = VT.getSizeInBits();
42502 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
42503 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
42504 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
42505 APInt Mask = Op.getOpcode() == ISD::SRL
42506 ? APInt::getHighBitsSet(BitWidth, MaskBits)
42507 : APInt::getLowBitsSet(BitWidth, MaskBits);
42508 if (Mask.isSignedIntN(32)) {
42509 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
42510 DAG.getConstant(Mask, dl, VT));
42511 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
42512 DAG.getConstant(0, dl, VT));
42517 // Look for a truncate with a single use.
42518 if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
42519 return SDValue();
42521 Op = Op.getOperand(0);
42523 // Arithmetic op can only have one use.
42524 if (!Op.hasOneUse())
42525 return SDValue();
42527 unsigned NewOpc;
42528 switch (Op.getOpcode()) {
42529 default: return SDValue();
42530 case ISD::AND:
42531 // Skip AND with a constant. We have special handling for AND with an
42532 // immediate during isel to generate TEST instructions.
42533 if (isa<ConstantSDNode>(Op.getOperand(1)))
42534 return SDValue();
42535 NewOpc = X86ISD::AND;
42536 break;
42537 case ISD::OR: NewOpc = X86ISD::OR; break;
42538 case ISD::XOR: NewOpc = X86ISD::XOR; break;
42539 case ISD::ADD:
42540 // If the carry or overflow flag is used, we can't truncate.
42541 if (needCarryOrOverflowFlag(SDValue(N, 0)))
42542 return SDValue();
42543 NewOpc = X86ISD::ADD;
42544 break;
42545 case ISD::SUB:
42546 // If the carry or overflow flag is used, we can't truncate.
42547 if (needCarryOrOverflowFlag(SDValue(N, 0)))
42548 return SDValue();
42549 NewOpc = X86ISD::SUB;
42550 break;
42553 // We found an op we can narrow. Truncate its inputs.
42554 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
42555 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
42557 // Use a X86 specific opcode to avoid DAG combine messing with it.
42558 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42559 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
42561 // For AND, keep a CMP so that we can match the test pattern.
42562 if (NewOpc == X86ISD::AND)
42563 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
42564 DAG.getConstant(0, dl, VT));
42566 // Return the flags.
42567 return Op.getValue(1);
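// Combine X86ISD::ADD/SUB: if the flag result is unused, lower back to the
// generic ISD::ADD/SUB, and fold matching generic nodes to reuse this
// flag-producing node.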
42570 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
42571 TargetLowering::DAGCombinerInfo &DCI) {
42572 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
42573 "Expected X86ISD::ADD or X86ISD::SUB");
42575 SDValue LHS = N->getOperand(0);
42576 SDValue RHS = N->getOperand(1);
42577 MVT VT = LHS.getSimpleValueType();
42578 unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
42580 // If we don't use the flag result, simplify back to a generic ADD/SUB.
42581 if (!N->hasAnyUseOfValue(1)) {
42582 SDLoc DL(N);
42583 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
42584 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
42587 // Fold any similar generic ADD/SUB opcodes to reuse this node.
42588 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
42589 // TODO: Add SUB(RHS, LHS) -> SUB(0, SUB(LHS, RHS)) negation support, this
42590 // currently causes regressions as we don't have broad x86sub combines.
42591 if (Negate)
42592 return;
42593 SDValue Ops[] = {N0, N1};
42594 SDVTList VTs = DAG.getVTList(N->getValueType(0));
42595 if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops))
42596 DCI.CombineTo(GenericAddSub, SDValue(N, 0));
42598 MatchGeneric(LHS, RHS, false);
42599 MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
42601 return SDValue();
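// Combine an X86ISD::SBB node: simplify the incoming carry and fold
// SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry) when the flag result is dead.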
42604 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
42605 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
42606 MVT VT = N->getSimpleValueType(0);
42607 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42608 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
42609 N->getOperand(0), N->getOperand(1),
42610 Flags);
42613 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
42614 // iff the flag result is dead.
42615 SDValue Op0 = N->getOperand(0);
42616 SDValue Op1 = N->getOperand(1);
42617 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
42618 !N->hasAnyUseOfValue(1))
42619 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
42620 Op0.getOperand(1), N->getOperand(2));
42622 return SDValue();
42625 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
42626 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
42627 TargetLowering::DAGCombinerInfo &DCI) {
42628 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
42629 // the result is either zero or one (depending on the input carry bit).
42630 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
42631 if (X86::isZeroNode(N->getOperand(0)) &&
42632 X86::isZeroNode(N->getOperand(1)) &&
42633 // We don't have a good way to replace an EFLAGS use, so only do this when
42634 // dead right now.
42635 SDValue(N, 1).use_empty()) {
42636 SDLoc DL(N);
42637 EVT VT = N->getValueType(0);
42638 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
42639 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
42640 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42641 DAG.getConstant(X86::COND_B, DL,
42642 MVT::i8),
42643 N->getOperand(2)),
42644 DAG.getConstant(1, DL, VT));
42645 return DCI.CombineTo(N, Res1, CarryOut);
42648 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
42649 MVT VT = N->getSimpleValueType(0);
42650 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42651 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
42652 N->getOperand(0), N->getOperand(1),
42653 Flags);
42656 return SDValue();
42659 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
42660 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
42661 /// with CMP+{ADC, SBB}.
42662 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
42663 bool IsSub = N->getOpcode() == ISD::SUB;
42664 SDValue X = N->getOperand(0);
42665 SDValue Y = N->getOperand(1);
42667 // If this is an add, canonicalize a zext operand to the RHS.
42668 // TODO: Incomplete? What if both sides are zexts?
42669 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
42670 Y.getOpcode() != ISD::ZERO_EXTEND)
42671 std::swap(X, Y);
42673 // Look through a one-use zext.
42674 bool PeekedThroughZext = false;
42675 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
42676 Y = Y.getOperand(0);
42677 PeekedThroughZext = true;
42680 // If this is an add, canonicalize a setcc operand to the RHS.
42681 // TODO: Incomplete? What if both sides are setcc?
42682 // TODO: Should we allow peeking through a zext of the other operand?
42683 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
42684 Y.getOpcode() != X86ISD::SETCC)
42685 std::swap(X, Y);
42687 if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
42688 return SDValue();
42690 SDLoc DL(N);
42691 EVT VT = N->getValueType(0);
42692 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
42694 // If X is -1 or 0, then we have an opportunity to avoid constants required in
42695 // the general case below.
42696 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
42697 if (ConstantX) {
42698 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
42699 (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
42700 // This is a complicated way to get -1 or 0 from the carry flag:
42701 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
42702 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
42703 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42704 DAG.getConstant(X86::COND_B, DL, MVT::i8),
42705 Y.getOperand(1));
42708 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
42709 (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
42710 SDValue EFLAGS = Y->getOperand(1);
42711 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
42712 EFLAGS.getValueType().isInteger() &&
42713 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
42714 // Swap the operands of a SUB, and we have the same pattern as above.
42715 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
42716 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
42717 SDValue NewSub = DAG.getNode(
42718 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
42719 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
42720 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
42721 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42722 DAG.getConstant(X86::COND_B, DL, MVT::i8),
42723 NewEFLAGS);
42728 if (CC == X86::COND_B) {
42729 // X + SETB Z --> adc X, 0
42730 // X - SETB Z --> sbb X, 0
42731 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
42732 DAG.getVTList(VT, MVT::i32), X,
42733 DAG.getConstant(0, DL, VT), Y.getOperand(1));
42736 if (CC == X86::COND_A) {
42737 SDValue EFLAGS = Y->getOperand(1);
42738 // Try to convert COND_A into COND_B in an attempt to facilitate
42739 // materializing "setb reg".
42741 // Do not flip "e > c", where "c" is a constant, because the Cmp instruction
42742 // cannot take an immediate as its first operand.
42744 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
42745 EFLAGS.getValueType().isInteger() &&
42746 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
42747 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
42748 EFLAGS.getNode()->getVTList(),
42749 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
42750 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
42751 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
42752 DAG.getVTList(VT, MVT::i32), X,
42753 DAG.getConstant(0, DL, VT), NewEFLAGS);
42757 if (CC != X86::COND_E && CC != X86::COND_NE)
42758 return SDValue();
42760 SDValue Cmp = Y.getOperand(1);
42761 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
42762 !X86::isZeroNode(Cmp.getOperand(1)) ||
42763 !Cmp.getOperand(0).getValueType().isInteger())
42764 return SDValue();
42766 SDValue Z = Cmp.getOperand(0);
42767 EVT ZVT = Z.getValueType();
42769 // If X is -1 or 0, then we have an opportunity to avoid constants required in
42770 // the general case below.
42771 if (ConstantX) {
42772 // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
42773 // fake operands:
42774 // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
42775 // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
42776 if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
42777 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
42778 SDValue Zero = DAG.getConstant(0, DL, ZVT);
42779 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
42780 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
42781 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42782 DAG.getConstant(X86::COND_B, DL, MVT::i8),
42783 SDValue(Neg.getNode(), 1));
42786 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
42787 // with fake operands:
42788 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
42789 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
42790 if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
42791 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
42792 SDValue One = DAG.getConstant(1, DL, ZVT);
42793 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
42794 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42795 DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp1);
42799 // (cmp Z, 1) sets the carry flag if Z is 0.
42800 SDValue One = DAG.getConstant(1, DL, ZVT);
42801 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
42803 // Add the flags type for ADC/SBB nodes.
42804 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42806 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
42807 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
42808 if (CC == X86::COND_NE)
42809 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
42810 DAG.getConstant(-1ULL, DL, VT), Cmp1);
42812 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
42813 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
42814 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
42815 DAG.getConstant(0, DL, VT), Cmp1);
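// Attempt to turn a vector reduction add whose multiply operands fit in i16
// into an X86ISD::VPMADDWD feeding the reduction.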
42818 static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
42819 const X86Subtarget &Subtarget) {
42820 if (!Subtarget.hasSSE2())
42821 return SDValue();
42823 SDValue Op0 = N->getOperand(0);
42824 SDValue Op1 = N->getOperand(1);
42826 EVT VT = N->getValueType(0);
42828 // If the vector size is less than 128 bits, or greater than the supported RegSize,
42829 // do not use PMADD.
42830 if (!VT.isVector() || VT.getVectorNumElements() < 8)
42831 return SDValue();
42833 if (Op0.getOpcode() != ISD::MUL)
42834 std::swap(Op0, Op1);
42835 if (Op0.getOpcode() != ISD::MUL)
42836 return SDValue();
42838 ShrinkMode Mode;
42839 if (!canReduceVMulWidth(Op0.getNode(), DAG, Mode) || Mode == MULU16)
42840 return SDValue();
42842 SDLoc DL(N);
42843 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
42844 VT.getVectorNumElements());
42845 EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
42846 VT.getVectorNumElements() / 2);
42848 // Madd vector size is half of the original vector size
42849 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
42850 ArrayRef<SDValue> Ops) {
42851 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
42852 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
42855 auto BuildPMADDWD = [&](SDValue Mul) {
42856 // Shrink the operands of mul.
42857 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(0));
42858 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(1));
42860 SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
42861 PMADDWDBuilder);
42862 // Fill the rest of the output with 0
42863 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd,
42864 DAG.getConstant(0, DL, MAddVT));
42867 Op0 = BuildPMADDWD(Op0);
42869 // It's possible that Op1 is also a mul we can reduce.
42870 if (Op1.getOpcode() == ISD::MUL &&
42871 canReduceVMulWidth(Op1.getNode(), DAG, Mode) && Mode != MULU16) {
42872 Op1 = BuildPMADDWD(Op1);
42875 return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
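// Attempt to turn a vector reduction add of absolute differences of
// zero-extended i8 values into an X86ISD::PSADBW feeding the reduction.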
42878 static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
42879 const X86Subtarget &Subtarget) {
42880 if (!Subtarget.hasSSE2())
42881 return SDValue();
42883 SDLoc DL(N);
42884 EVT VT = N->getValueType(0);
42885 SDValue Op0 = N->getOperand(0);
42886 SDValue Op1 = N->getOperand(1);
42888 // TODO: There's nothing special about i32, any integer type above i16 should
42889 // work just as well.
42890 if (!VT.isVector() || !VT.isSimple() ||
42891 !(VT.getVectorElementType() == MVT::i32))
42892 return SDValue();
42894 unsigned RegSize = 128;
42895 if (Subtarget.useBWIRegs())
42896 RegSize = 512;
42897 else if (Subtarget.hasAVX())
42898 RegSize = 256;
42900 // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
42901 // TODO: We should be able to handle larger vectors by splitting them before
42902 // feeding them into several SADs, and then reducing over those.
42903 if (VT.getSizeInBits() / 4 > RegSize)
42904 return SDValue();
42906 // We know N is a reduction add, which means one of its operands is a phi.
42907 // To match SAD, we need the other operand to be an ABS.
42908 if (Op0.getOpcode() != ISD::ABS)
42909 std::swap(Op0, Op1);
42910 if (Op0.getOpcode() != ISD::ABS)
42911 return SDValue();
42913 auto BuildPSADBW = [&](SDValue Op0, SDValue Op1) {
42914 // SAD pattern detected. Now build a SAD instruction and an addition for
42915 // reduction. Note that the number of elements of the result of SAD is less
42916 // than the number of elements of its input. Therefore, we can only update
42917 // part of the elements in the reduction vector.
42918 SDValue Sad = createPSADBW(DAG, Op0, Op1, DL, Subtarget);
42920 // The output of PSADBW is a vector of i64.
42921 // We need to turn the vector of i64 into a vector of i32.
42922 // If the reduction vector is at least as wide as the psadbw result, just
42923 // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
42924 // anyway.
42925 MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
42926 if (VT.getSizeInBits() >= ResVT.getSizeInBits())
42927 Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
42928 else
42929 Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
42931 if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
42932 // Fill the upper elements with zero to match the add width.
42933 SDValue Zero = DAG.getConstant(0, DL, VT);
42934 Sad = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Zero, Sad,
42935 DAG.getIntPtrConstant(0, DL));
42938 return Sad;
42941 // Check whether we have an abs-diff pattern feeding into the select.
42942 SDValue SadOp0, SadOp1;
42943 if (!detectZextAbsDiff(Op0, SadOp0, SadOp1))
42944 return SDValue();
42946 Op0 = BuildPSADBW(SadOp0, SadOp1);
42948 // It's possible we have a sad on the other side too.
42949 if (Op1.getOpcode() == ISD::ABS &&
42950 detectZextAbsDiff(Op1, SadOp0, SadOp1)) {
42951 Op1 = BuildPSADBW(SadOp0, SadOp1);
42954 return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
42957 /// Convert vector increment or decrement to sub/add with an all-ones constant:
42958 /// add X, <1, 1...> --> sub X, <-1, -1...>
42959 /// sub X, <1, 1...> --> add X, <-1, -1...>
42960 /// The all-ones vector constant can be materialized using a pcmpeq instruction
42961 /// that is commonly recognized as an idiom (has no register dependency), so
42962 /// that's better/smaller than loading a splat 1 constant.
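/// Illustrative lowering (a sketch, assuming v4i32 operands): the increment
///   (add X, <1,1,1,1>) --> (sub X, <-1,-1,-1,-1>)
/// can then be selected as roughly
///   pcmpeqd %xmm1, %xmm1 ; psubd %xmm1, %xmm0
/// instead of loading a <1,1,1,1> constant from memory.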
42963 static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) {
42964 assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
42965 "Unexpected opcode for increment/decrement transform");
42967 // Pseudo-legality check: getOnesVector() expects one of these types, so bail
42968 // out and wait for legalization if we have an unsupported vector length.
42969 EVT VT = N->getValueType(0);
42970 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
42971 return SDValue();
42973 APInt SplatVal;
42974 if (!isConstantSplat(N->getOperand(1), SplatVal) || !SplatVal.isOneValue())
42975 return SDValue();
42977 SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N));
42978 unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
42979 return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec);
42982 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
42983 const SDLoc &DL, EVT VT,
42984 const X86Subtarget &Subtarget) {
42985 // Example of pattern we try to detect:
42986 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
42987 //(add (build_vector (extract_elt t, 0),
42988 // (extract_elt t, 2),
42989 // (extract_elt t, 4),
42990 // (extract_elt t, 6)),
42991 // (build_vector (extract_elt t, 1),
42992 // (extract_elt t, 3),
42993 // (extract_elt t, 5),
42994 // (extract_elt t, 7)))
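// Illustrative result (sketch, assuming the v8i16 inputs above): each i32
// lane of the add computes x0[2i]*x1[2i] + x0[2i+1]*x1[2i+1], which is
// exactly (v4i32 X86ISD::VPMADDWD x0, x1).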
42996 if (!Subtarget.hasSSE2())
42997 return SDValue();
42999 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
43000 Op1.getOpcode() != ISD::BUILD_VECTOR)
43001 return SDValue();
43003 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43004 VT.getVectorNumElements() < 4 ||
43005 !isPowerOf2_32(VT.getVectorNumElements()))
43006 return SDValue();
43008 // Check if one of Op0,Op1 is of the form:
43009 // (build_vector (extract_elt Mul, 0),
43010 // (extract_elt Mul, 2),
43011 // (extract_elt Mul, 4),
43012 // ...
43013 // the other is of the form:
43014 // (build_vector (extract_elt Mul, 1),
43015 // (extract_elt Mul, 3),
43016 // (extract_elt Mul, 5),
43017 // ...
43018 // and identify Mul.
43019 SDValue Mul;
43020 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
43021 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
43022 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
43023 // TODO: Be more tolerant to undefs.
43024 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43025 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43026 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43027 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43028 return SDValue();
43029 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
43030 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
43031 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
43032 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
43033 if (!Const0L || !Const1L || !Const0H || !Const1H)
43034 return SDValue();
43035 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
43036 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
43037 // Commutativity of mul allows factors of a product to reorder.
43038 if (Idx0L > Idx1L)
43039 std::swap(Idx0L, Idx1L);
43040 if (Idx0H > Idx1H)
43041 std::swap(Idx0H, Idx1H);
43042 // Commutativity of add allows pairs of factors to reorder.
43043 if (Idx0L > Idx0H) {
43044 std::swap(Idx0L, Idx0H);
43045 std::swap(Idx1L, Idx1H);
43047 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
43048 Idx1H != 2 * i + 3)
43049 return SDValue();
43050 if (!Mul) {
43051 // First time an extract_elt's source vector is visited. Must be a MUL
43052 // with 2X the number of vector elements of the BUILD_VECTOR.
43053 // All extracts must be from the same MUL.
43054 Mul = Op0L->getOperand(0);
43055 if (Mul->getOpcode() != ISD::MUL ||
43056 Mul.getValueType().getVectorNumElements() != 2 * e)
43057 return SDValue();
43059 // Check that the extract is from the same MUL previously seen.
43060 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
43061 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
43062 return SDValue();
43065 // Check if the Mul source can be safely shrunk.
43066 ShrinkMode Mode;
43067 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
43068 return SDValue();
43070 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43071 ArrayRef<SDValue> Ops) {
43072 // Shrink by adding truncate nodes and let DAGCombine fold with the
43073 // sources.
43074 EVT InVT = Ops[0].getValueType();
43075 assert(InVT.getScalarType() == MVT::i32 &&
43076 "Unexpected scalar element type");
43077 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
43078 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43079 InVT.getVectorNumElements() / 2);
43080 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43081 InVT.getVectorNumElements());
43082 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
43083 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
43084 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
43086 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
43087 { Mul.getOperand(0), Mul.getOperand(1) },
43088 PMADDBuilder);
43091 // Attempt to turn this pattern into PMADDWD.
43092 // (add (mul (sext (build_vector)), (sext (build_vector))),
43093 //      (mul (sext (build_vector)), (sext (build_vector)))
43094 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
43095 const SDLoc &DL, EVT VT,
43096 const X86Subtarget &Subtarget) {
43097 if (!Subtarget.hasSSE2())
43098 return SDValue();
43100 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
43101 return SDValue();
43103 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43104 VT.getVectorNumElements() < 4 ||
43105 !isPowerOf2_32(VT.getVectorNumElements()))
43106 return SDValue();
43108 SDValue N00 = N0.getOperand(0);
43109 SDValue N01 = N0.getOperand(1);
43110 SDValue N10 = N1.getOperand(0);
43111 SDValue N11 = N1.getOperand(1);
43113 // All inputs need to be sign extends.
43114 // TODO: Support ZERO_EXTEND from known positive?
43115 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
43116 N01.getOpcode() != ISD::SIGN_EXTEND ||
43117 N10.getOpcode() != ISD::SIGN_EXTEND ||
43118 N11.getOpcode() != ISD::SIGN_EXTEND)
43119 return SDValue();
43121 // Peek through the extends.
43122 N00 = N00.getOperand(0);
43123 N01 = N01.getOperand(0);
43124 N10 = N10.getOperand(0);
43125 N11 = N11.getOperand(0);
43127 // Must be extending from vXi16.
43128 EVT InVT = N00.getValueType();
43129 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
43130 N10.getValueType() != InVT || N11.getValueType() != InVT)
43131 return SDValue();
43133 // All inputs should be build_vectors.
43134 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
43135 N01.getOpcode() != ISD::BUILD_VECTOR ||
43136 N10.getOpcode() != ISD::BUILD_VECTOR ||
43137 N11.getOpcode() != ISD::BUILD_VECTOR)
43138 return SDValue();
43140 // For each result element, we need the even element from one input vector
43141 // multiplied by the even element from the other vector, plus the odd element
43142 // from the same vector multiplied by the corresponding odd element from the
43143 // other vector. In other words, for each element i, this is the operation
43144 // being performed:
43145 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
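// (Illustrative note: this is precisely what PMADDWD computes per i32 lane
// for vXi16 inputs A and B, so the whole add-of-muls collapses into the
// X86ISD::VPMADDWD node built by PMADDBuilder below.)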
43146 SDValue In0, In1;
43147 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
43148 SDValue N00Elt = N00.getOperand(i);
43149 SDValue N01Elt = N01.getOperand(i);
43150 SDValue N10Elt = N10.getOperand(i);
43151 SDValue N11Elt = N11.getOperand(i);
43152 // TODO: Be more tolerant to undefs.
43153 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43154 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43155 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43156 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43157 return SDValue();
43158 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
43159 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
43160 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
43161 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
43162 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
43163 return SDValue();
43164 unsigned IdxN00 = ConstN00Elt->getZExtValue();
43165 unsigned IdxN01 = ConstN01Elt->getZExtValue();
43166 unsigned IdxN10 = ConstN10Elt->getZExtValue();
43167 unsigned IdxN11 = ConstN11Elt->getZExtValue();
43168 // Add is commutative so indices can be reordered.
43169 if (IdxN00 > IdxN10) {
43170 std::swap(IdxN00, IdxN10);
43171 std::swap(IdxN01, IdxN11);
43173 // N0 indices must be the even element. N1 indices must be the next odd element.
43174 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
43175 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
43176 return SDValue();
43177 SDValue N00In = N00Elt.getOperand(0);
43178 SDValue N01In = N01Elt.getOperand(0);
43179 SDValue N10In = N10Elt.getOperand(0);
43180 SDValue N11In = N11Elt.getOperand(0);
43181 // First time we find an input capture it.
43182 if (!In0) {
43183 In0 = N00In;
43184 In1 = N01In;
43186 // Mul is commutative so the input vectors can be in any order.
43187 // Canonicalize to make the compares easier.
43188 if (In0 != N00In)
43189 std::swap(N00In, N01In);
43190 if (In0 != N10In)
43191 std::swap(N10In, N11In);
43192 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
43193 return SDValue();
43196 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43197 ArrayRef<SDValue> Ops) {
43198 // Shrink by adding truncate nodes and let DAGCombine fold with the
43199 // sources.
43200 EVT OpVT = Ops[0].getValueType();
43201 assert(OpVT.getScalarType() == MVT::i16 &&
43202 "Unexpected scalar element type");
43203 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
43204 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43205 OpVT.getVectorNumElements() / 2);
43206 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
43208 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
43209 PMADDBuilder);
43212 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
43213 const X86Subtarget &Subtarget) {
43214 const SDNodeFlags Flags = N->getFlags();
43215 if (Flags.hasVectorReduction()) {
43216 if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
43217 return Sad;
43218 if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
43219 return MAdd;
43221 EVT VT = N->getValueType(0);
43222 SDValue Op0 = N->getOperand(0);
43223 SDValue Op1 = N->getOperand(1);
43225 if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43226 return MAdd;
43227 if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43228 return MAdd;
43230 // Try to synthesize horizontal adds from adds of shuffles.
43231 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
43232 VT == MVT::v8i32) &&
43233 Subtarget.hasSSSE3() &&
43234 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
43235 auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43236 ArrayRef<SDValue> Ops) {
43237 return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
43239 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
43240 HADDBuilder);
43243 if (SDValue V = combineIncDecVector(N, DAG))
43244 return V;
43246 return combineAddOrSubToADCOrSBB(N, DAG);
43249 static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
43250 const X86Subtarget &Subtarget) {
43251 SDValue Op0 = N->getOperand(0);
43252 SDValue Op1 = N->getOperand(1);
43253 EVT VT = N->getValueType(0);
43255 // PSUBUS is supported, starting from SSE2, but truncation for v8i32
43256 // is only worth it with SSSE3 (PSHUFB).
43257 if (!(Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) &&
43258 !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
43259 !(Subtarget.hasAVX() && (VT == MVT::v32i8 || VT == MVT::v16i16)) &&
43260 !(Subtarget.useBWIRegs() && (VT == MVT::v64i8 || VT == MVT::v32i16 ||
43261 VT == MVT::v16i32 || VT == MVT::v8i64)))
43262 return SDValue();
43264 SDValue SubusLHS, SubusRHS;
43265 // Try to find umax(a,b) - b or a - umin(a,b) patterns, as
43266 // they may be converted to subus(a,b).
43267 // TODO: Need to add IR canonicalization for this code.
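// Illustrative examples (sketch):
//   (sub (umax a, b), b) --> (usubsat a, b)
//   (sub a, (umin a, b)) --> (usubsat a, b)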
43268 if (Op0.getOpcode() == ISD::UMAX) {
43269 SubusRHS = Op1;
43270 SDValue MaxLHS = Op0.getOperand(0);
43271 SDValue MaxRHS = Op0.getOperand(1);
43272 if (MaxLHS == Op1)
43273 SubusLHS = MaxRHS;
43274 else if (MaxRHS == Op1)
43275 SubusLHS = MaxLHS;
43276 else
43277 return SDValue();
43278 } else if (Op1.getOpcode() == ISD::UMIN) {
43279 SubusLHS = Op0;
43280 SDValue MinLHS = Op1.getOperand(0);
43281 SDValue MinRHS = Op1.getOperand(1);
43282 if (MinLHS == Op0)
43283 SubusRHS = MinRHS;
43284 else if (MinRHS == Op0)
43285 SubusRHS = MinLHS;
43286 else
43287 return SDValue();
43288 } else
43289 return SDValue();
43291 auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43292 ArrayRef<SDValue> Ops) {
43293 return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops);
43296 // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
43297 // special preprocessing in some cases.
43298 if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64)
43299 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
43300 { SubusLHS, SubusRHS }, USUBSATBuilder);
43302 // The special preprocessing case can only be applied
43303 // if the value was zero extended from 16 bits,
43304 // so we require the upper 16 bits to be zero for 32 bit
43305 // values, or the upper 48 bits for 64 bit values.
43306 KnownBits Known = DAG.computeKnownBits(SubusLHS);
43307 unsigned NumZeros = Known.countMinLeadingZeros();
43308 if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
43309 return SDValue();
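// Illustrative sketch (assumed v8i32 operands known to be zero extended from
// 16 bits): clamp SubusRHS with umin, truncate both operands to v8i16, emit
// the v8i16 usubsat, then zero extend the result back to v8i32.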
43311 EVT ExtType = SubusLHS.getValueType();
43312 EVT ShrinkedType;
43313 if (VT == MVT::v8i32 || VT == MVT::v8i64)
43314 ShrinkedType = MVT::v8i16;
43315 else
43316 ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
43318 // If SubusLHS is zero extended, truncate SubusRHS to the same
43319 // size: SubusRHS = umin(0xFFF.., SubusRHS).
43320 SDValue SaturationConst =
43321 DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
43322 ShrinkedType.getScalarSizeInBits()),
43323 SDLoc(SubusLHS), ExtType);
43324 SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
43325 SaturationConst);
43326 SDValue NewSubusLHS =
43327 DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
43328 SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
43329 SDValue Psubus =
43330 SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType,
43331 { NewSubusLHS, NewSubusRHS }, USUBSATBuilder);
43332 // Zero extend the result; it may be used somewhere as 32 bit. If not, the
43333 // zext and the following trunc will be combined away.
43334 return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
43337 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
43338 const X86Subtarget &Subtarget) {
43339 SDValue Op0 = N->getOperand(0);
43340 SDValue Op1 = N->getOperand(1);
43342 // X86 can't encode an immediate LHS of a sub. See if we can push the
43343 // negation into a preceding instruction.
43344 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
43345 // If the RHS of the sub is a XOR with one use and a constant, invert the
43346 // immediate. Then add one to the LHS of the sub so we can turn
43347 // X-Y -> X+~Y+1, saving one register.
43348 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
43349 isa<ConstantSDNode>(Op1.getOperand(1))) {
43350 const APInt &XorC = Op1.getConstantOperandAPInt(1);
43351 EVT VT = Op0.getValueType();
43352 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
43353 Op1.getOperand(0),
43354 DAG.getConstant(~XorC, SDLoc(Op1), VT));
43355 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
43356 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
43360 // Try to synthesize horizontal subs from subs of shuffles.
43361 EVT VT = N->getValueType(0);
43362 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
43363 VT == MVT::v8i32) &&
43364 Subtarget.hasSSSE3() &&
43365 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
43366 auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43367 ArrayRef<SDValue> Ops) {
43368 return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
43370 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
43371 HSUBBuilder);
43374 if (SDValue V = combineIncDecVector(N, DAG))
43375 return V;
43377 // Try to create PSUBUS if SUB's argument is max/min
43378 if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
43379 return V;
43381 return combineAddOrSubToADCOrSBB(N, DAG);
43384 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
43385 const X86Subtarget &Subtarget) {
43386 MVT VT = N->getSimpleValueType(0);
43387 SDLoc DL(N);
43389 if (N->getOperand(0) == N->getOperand(1)) {
43390 if (N->getOpcode() == X86ISD::PCMPEQ)
43391 return DAG.getConstant(-1, DL, VT);
43392 if (N->getOpcode() == X86ISD::PCMPGT)
43393 return DAG.getConstant(0, DL, VT);
43396 return SDValue();
43399 /// Helper that combines an array of subvector ops as if they were the operands
43400 /// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
43401 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
43402 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
43403 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
43404 TargetLowering::DAGCombinerInfo &DCI,
43405 const X86Subtarget &Subtarget) {
43406 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
43408 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
43409 return DAG.getUNDEF(VT);
43411 if (llvm::all_of(Ops, [](SDValue Op) {
43412 return ISD::isBuildVectorAllZeros(Op.getNode());
43414 return getZeroVector(VT, Subtarget, DAG, DL);
43416 SDValue Op0 = Ops[0];
43418 // Fold subvector loads into one.
43419 // If needed, look through bitcasts to get to the load.
43420 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
43421 bool Fast;
43422 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
43423 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
43424 *FirstLd->getMemOperand(), &Fast) &&
43425 Fast) {
43426 if (SDValue Ld =
43427 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
43428 return Ld;
43432 // Repeated subvectors.
43433 if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
43434 // If this broadcast/subv_broadcast is inserted into both halves, use a
43435 // larger broadcast/subv_broadcast.
43436 if (Op0.getOpcode() == X86ISD::VBROADCAST ||
43437 Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
43438 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
43440 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
43441 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
43442 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
43443 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
43444 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
43445 Op0.getOperand(0),
43446 DAG.getIntPtrConstant(0, DL)));
43448 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
43449 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
43450 (Subtarget.hasAVX2() ||
43451 (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
43452 Op0.getOperand(0).getValueType() == VT.getScalarType())
43453 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
43456 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
43458 // Repeated opcode.
43459 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
43460 // but it currently struggles with different vector widths.
43461 if (llvm::all_of(Ops, [Op0](SDValue Op) {
43462 return Op.getOpcode() == Op0.getOpcode();
43463 })) {
43464 unsigned NumOps = Ops.size();
43465 switch (Op0.getOpcode()) {
43466 case X86ISD::PSHUFHW:
43467 case X86ISD::PSHUFLW:
43468 case X86ISD::PSHUFD:
43469 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
43470 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
43471 SmallVector<SDValue, 2> Src;
43472 for (unsigned i = 0; i != NumOps; ++i)
43473 Src.push_back(Ops[i].getOperand(0));
43474 return DAG.getNode(Op0.getOpcode(), DL, VT,
43475 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
43476 Op0.getOperand(1));
43478 break;
43479 case X86ISD::VPERMILPI:
43480 // TODO - AVX1 must use VPERMILPI + v8f32 for v8i32 shuffles.
43481 // TODO - add support for vXf64/vXi64 shuffles.
43482 if (!IsSplat && NumOps == 2 && VT == MVT::v8f32 && Subtarget.hasAVX() &&
43483 Op0.getOperand(1) == Ops[1].getOperand(1)) {
43484 SmallVector<SDValue, 2> Src;
43485 for (unsigned i = 0; i != NumOps; ++i)
43486 Src.push_back(Ops[i].getOperand(0));
43487 return DAG.getNode(Op0.getOpcode(), DL, VT,
43488 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
43489 Op0.getOperand(1));
43491 break;
43492 case X86ISD::PACKUS:
43493 if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
43494 SmallVector<SDValue, 2> LHS, RHS;
43495 for (unsigned i = 0; i != NumOps; ++i) {
43496 LHS.push_back(Ops[i].getOperand(0));
43497 RHS.push_back(Ops[i].getOperand(1));
43499 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
43500 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
43501 NumOps * SrcVT.getVectorNumElements());
43502 return DAG.getNode(Op0.getOpcode(), DL, VT,
43503 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
43504 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
43506 break;
43510 // If we're inserting all zeros into the upper half, change this to
43511 // an insert into an all zeros vector. We will match this to a move
43512 // with implicit upper bit zeroing during isel.
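// Illustrative example (sketch, assumed v8i32 result):
//   (v8i32 concat_vectors X, zero) --> (v8i32 insert_subvector (zero vector), X, 0)
// which isel can select as a 128-bit move that implicitly zeroes the upper
// half.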
43513 if (Ops.size() == 2 && ISD::isBuildVectorAllZeros(Ops[1].getNode()))
43514 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
43515 getZeroVector(VT, Subtarget, DAG, DL), Ops[0],
43516 DAG.getIntPtrConstant(0, DL));
43518 return SDValue();
43521 static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
43522 TargetLowering::DAGCombinerInfo &DCI,
43523 const X86Subtarget &Subtarget) {
43524 EVT VT = N->getValueType(0);
43525 EVT SrcVT = N->getOperand(0).getValueType();
43526 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43528 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
43529 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
43530 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
43531 DCI, Subtarget))
43532 return R;
43535 return SDValue();
43538 static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
43539 TargetLowering::DAGCombinerInfo &DCI,
43540 const X86Subtarget &Subtarget) {
43541 if (DCI.isBeforeLegalizeOps())
43542 return SDValue();
43544 MVT OpVT = N->getSimpleValueType(0);
43546 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
43548 SDLoc dl(N);
43549 SDValue Vec = N->getOperand(0);
43550 SDValue SubVec = N->getOperand(1);
43552 unsigned IdxVal = N->getConstantOperandVal(2);
43553 MVT SubVecVT = SubVec.getSimpleValueType();
43555 if (Vec.isUndef() && SubVec.isUndef())
43556 return DAG.getUNDEF(OpVT);
43558 // Inserting undefs/zeros into zeros/undefs is a zero vector.
43559 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
43560 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
43561 return getZeroVector(OpVT, Subtarget, DAG, dl);
43563 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
43564 // If we're inserting into a zero vector and then into a larger zero vector,
43565 // just insert into the larger zero vector directly.
43566 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
43567 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
43568 unsigned Idx2Val = SubVec.getConstantOperandVal(2);
43569 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
43570 getZeroVector(OpVT, Subtarget, DAG, dl),
43571 SubVec.getOperand(1),
43572 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
43575 // If we're inserting into a zero vector and our input was extracted from an
43576 // insert into a zero vector of the same type and the extraction was at
43577 // least as large as the original insertion, just insert the original
43578 // subvector into a zero vector.
43579 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
43580 SubVec.getConstantOperandAPInt(1) == 0 &&
43581 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
43582 SDValue Ins = SubVec.getOperand(0);
43583 if (Ins.getConstantOperandAPInt(2) == 0 &&
43584 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
43585 Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
43586 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
43587 getZeroVector(OpVT, Subtarget, DAG, dl),
43588 Ins.getOperand(1), N->getOperand(2));
43592 // Stop here if this is an i1 vector.
43593 if (IsI1Vector)
43594 return SDValue();
43596 // If this is an insert of an extract, combine to a shuffle. Don't do this
43597 // if the insert or extract can be represented with a subregister operation.
43598 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
43599 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
43600 (IdxVal != 0 || !Vec.isUndef())) {
43601 int ExtIdxVal = SubVec.getConstantOperandVal(1);
43602 if (ExtIdxVal != 0) {
43603 int VecNumElts = OpVT.getVectorNumElements();
43604 int SubVecNumElts = SubVecVT.getVectorNumElements();
43605 SmallVector<int, 64> Mask(VecNumElts);
43606 // First create an identity shuffle mask.
43607 for (int i = 0; i != VecNumElts; ++i)
43608 Mask[i] = i;
43609 // Now insert the extracted portion.
43610 for (int i = 0; i != SubVecNumElts; ++i)
43611 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
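// Worked example (illustrative, assumed v8i32 types): inserting a v4i32
// subvector that was extracted at ExtIdxVal == 4 into position IdxVal == 4
// yields Mask = <0,1,2,3,12,13,14,15>, i.e. the low half comes from Vec and
// the high half from the extract's source vector.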
43613 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
43617 // Match concat_vector style patterns.
43618 SmallVector<SDValue, 2> SubVectorOps;
43619 if (collectConcatOps(N, SubVectorOps))
43620 if (SDValue Fold =
43621 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
43622 return Fold;
43624 // If we are inserting into both halves of the vector, the starting vector
43625 // should be undef. If it isn't, make it so. Only do this if the early insert
43626 // has no other uses.
43627 // TODO: Should this be a generic DAG combine?
43628 // TODO: Why doesn't SimplifyDemandedVectorElts catch this?
43629 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
43630 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
43631 OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2 &&
43632 isNullConstant(Vec.getOperand(2)) && !Vec.getOperand(0).isUndef() &&
43633 Vec.hasOneUse()) {
43634 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT),
43635 Vec.getOperand(1), Vec.getOperand(2));
43636 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec, SubVec,
43637 N->getOperand(2));
43640 // If this is a broadcast insert into an upper undef, use a larger broadcast.
43641 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
43642 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
43644 return SDValue();
43647 /// If we are extracting a subvector of a vector select and the select condition
43648 /// is composed of concatenated vectors, try to narrow the select width. This
43649 /// is a common pattern for AVX1 integer code because 256-bit selects may be
43650 /// legal, but there is almost no integer math/logic available for 256-bit.
43651 /// This function should only be called with legal types (otherwise, the calls
43652 /// to get simple value types will assert).
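/// Illustrative example (sketch, with assumed AVX1-style types):
///   (v4i32 extract_subvector (v8i32 vselect (concat C0, C1), T, F), 0)
///   --> (v4i32 vselect C0, (extract_subvector T, 0), (extract_subvector F, 0))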
43653 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
43654 SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
43655 SmallVector<SDValue, 4> CatOps;
43656 if (Sel.getOpcode() != ISD::VSELECT ||
43657 !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
43658 return SDValue();
43660 // Note: We assume simple value types because this should only be called with
43661 // legal operations/types.
43662 // TODO: This can be extended to handle extraction to 256-bits.
43663 MVT VT = Ext->getSimpleValueType(0);
43664 if (!VT.is128BitVector())
43665 return SDValue();
43667 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
43668 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
43669 return SDValue();
43671 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
43672 MVT SelVT = Sel.getSimpleValueType();
43673 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
43674 "Unexpected vector type with legal operations");
43676 unsigned SelElts = SelVT.getVectorNumElements();
43677 unsigned CastedElts = WideVT.getVectorNumElements();
43678 unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
43679 if (SelElts % CastedElts == 0) {
43680 // The select has the same or more (narrower) elements than the extract
43681 // operand. The extraction index gets scaled by that factor.
43682 ExtIdx *= (SelElts / CastedElts);
43683 } else if (CastedElts % SelElts == 0) {
43684 // The select has fewer (wider) elements than the extract operand. Make sure
43685 // that the extraction index can be divided evenly.
43686 unsigned IndexDivisor = CastedElts / SelElts;
43687 if (ExtIdx % IndexDivisor != 0)
43688 return SDValue();
43689 ExtIdx /= IndexDivisor;
43690 } else {
43691 llvm_unreachable("Element count of simple vector types are not divisible?");
43694 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
43695 unsigned NarrowElts = SelElts / NarrowingFactor;
43696 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
43697 SDLoc DL(Ext);
43698 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
43699 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
43700 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
43701 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
43702 return DAG.getBitcast(VT, NarrowSel);
43705 static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
43706 TargetLowering::DAGCombinerInfo &DCI,
43707 const X86Subtarget &Subtarget) {
43708 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
43709 // eventually get combined/lowered into ANDNP) with a concatenated operand,
43710 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
43711 // We let generic combining take over from there to simplify the
43712 // insert/extract and 'not'.
43713 // This pattern emerges during AVX1 legalization. We handle it before lowering
43714 // to avoid complications like splitting constant vector loads.
43716 // Capture the original wide type in the likely case that we need to bitcast
43717 // back to this type.
43718 if (!N->getValueType(0).isSimple())
43719 return SDValue();
43721 MVT VT = N->getSimpleValueType(0);
43722 EVT WideVecVT = N->getOperand(0).getValueType();
43723 SDValue WideVec = peekThroughBitcasts(N->getOperand(0));
43724 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43725 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
43726 TLI.isTypeLegal(WideVecVT) &&
43727 WideVecVT.getSizeInBits() == 256 && WideVec.getOpcode() == ISD::AND) {
43728 auto isConcatenatedNot = [] (SDValue V) {
43729 V = peekThroughBitcasts(V);
43730 if (!isBitwiseNot(V))
43731 return false;
43732 SDValue NotOp = V->getOperand(0);
43733 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
43735 if (isConcatenatedNot(WideVec.getOperand(0)) ||
43736 isConcatenatedNot(WideVec.getOperand(1))) {
43737 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
43738 SDValue Concat = split256IntArith(WideVec, DAG);
43739 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
43740 DAG.getBitcast(WideVecVT, Concat), N->getOperand(1));
43744 if (DCI.isBeforeLegalizeOps())
43745 return SDValue();
43747 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
43748 return V;
43750 SDValue InVec = N->getOperand(0);
43751 unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
43753 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
43754 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
43756 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
43757 if (VT.getScalarType() == MVT::i1)
43758 return DAG.getConstant(1, SDLoc(N), VT);
43759 return getOnesVector(VT, DAG, SDLoc(N));
43762 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
43763 return DAG.getBuildVector(
43764 VT, SDLoc(N),
43765 InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
43767 // Try to move vector bitcast after extract_subv by scaling extraction index:
43768 // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
43769 // TODO: Move this to DAGCombiner::visitEXTRACT_SUBVECTOR
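// Worked example (illustrative, assumed types): extracting v4i32 at index 4
// from (v8i32 bitcast (v4i64 X)) has DestSrcRatio = 8/4 = 2, so it becomes
// (v4i32 bitcast (v2i64 extract_subvector X, 2)).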
43770 if (InVec.getOpcode() == ISD::BITCAST &&
43771 InVec.getOperand(0).getValueType().isVector()) {
43772 SDValue SrcOp = InVec.getOperand(0);
43773 EVT SrcVT = SrcOp.getValueType();
43774 unsigned SrcNumElts = SrcVT.getVectorNumElements();
43775 unsigned DestNumElts = InVec.getValueType().getVectorNumElements();
43776 if ((DestNumElts % SrcNumElts) == 0) {
43777 unsigned DestSrcRatio = DestNumElts / SrcNumElts;
43778 if ((VT.getVectorNumElements() % DestSrcRatio) == 0) {
43779 unsigned NewExtNumElts = VT.getVectorNumElements() / DestSrcRatio;
43780 EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(),
43781 SrcVT.getScalarType(), NewExtNumElts);
43782 if ((N->getConstantOperandVal(1) % DestSrcRatio) == 0 &&
43783 TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
43784 unsigned IndexValScaled = N->getConstantOperandVal(1) / DestSrcRatio;
43785 SDLoc DL(N);
43786 SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
43787 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
43788 SrcOp, NewIndex);
43789 return DAG.getBitcast(VT, NewExtract);
43795 // If we're extracting from a broadcast then we're better off just
43796 // broadcasting to the smaller type directly, assuming this is the only use.
43797 // As it's a broadcast we don't care about the extraction index.
43798 if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
43799 InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
43800 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
43802 // If we're extracting the lowest subvector and the source has only one use,
43803 // we may be able to perform this with a smaller vector width.
43804 if (IdxVal == 0 && InVec.hasOneUse()) {
43805 unsigned InOpcode = InVec.getOpcode();
43806 if (VT == MVT::v2f64 && InVec.getValueType() == MVT::v4f64) {
43807 // v2f64 CVTDQ2PD(v4i32).
43808 if (InOpcode == ISD::SINT_TO_FP &&
43809 InVec.getOperand(0).getValueType() == MVT::v4i32) {
43810 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
43812 // v2f64 CVTUDQ2PD(v4i32).
43813 if (InOpcode == ISD::UINT_TO_FP &&
43814 InVec.getOperand(0).getValueType() == MVT::v4i32) {
43815 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
43817 // v2f64 CVTPS2PD(v4f32).
43818 if (InOpcode == ISD::FP_EXTEND &&
43819 InVec.getOperand(0).getValueType() == MVT::v4f32) {
43820 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
43823 if ((InOpcode == ISD::ANY_EXTEND ||
43824 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
43825 InOpcode == ISD::ZERO_EXTEND ||
43826 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
43827 InOpcode == ISD::SIGN_EXTEND ||
43828 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
43829 VT.is128BitVector() &&
43830 InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
43831 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
43832 return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
43834 if (InOpcode == ISD::VSELECT &&
43835 InVec.getOperand(0).getValueType().is256BitVector() &&
43836 InVec.getOperand(1).getValueType().is256BitVector() &&
43837 InVec.getOperand(2).getValueType().is256BitVector()) {
43838 SDLoc DL(N);
43839 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
43840 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
43841 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
43842 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
43846 return SDValue();
43849 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
43850 EVT VT = N->getValueType(0);
43851 SDValue Src = N->getOperand(0);
43852 SDLoc DL(N);
43854 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
43855 // This occurs frequently in our masked scalar intrinsic code and our
43856 // floating point select lowering with AVX512.
43857 // TODO: SimplifyDemandedBits instead?
43858 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
43859 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
43860 if (C->getAPIntValue().isOneValue())
43861 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
43862 Src.getOperand(0));
43864 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
43865 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
43866 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
43867 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
43868 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
43869 if (C->isNullValue())
43870 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
43871 Src.getOperand(1));
43873 // Reduce v2i64 to v4i32 if we don't need the upper bits.
43874 // TODO: Move to DAGCombine?
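// Illustrative example (sketch):
//   (v2i64 scalar_to_vector (i64 any_extend X:i32))
//   --> (bitcast (v4i32 scalar_to_vector X))
// since the upper 32 bits of the element are not demanded.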
43875 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
43876 Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
43877 Src.getOperand(0).getScalarValueSizeInBits() <= 32)
43878 return DAG.getBitcast(
43879 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
43880 DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));
43882 return SDValue();
43885 // Simplify PMULDQ and PMULUDQ operations.
43886 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
43887 TargetLowering::DAGCombinerInfo &DCI) {
43888 SDValue LHS = N->getOperand(0);
43889 SDValue RHS = N->getOperand(1);
43891 // Canonicalize constant to RHS.
43892 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
43893 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
43894 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
43896 // Multiply by zero.
43897 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
43898 return RHS;
43900 // Aggressively peek through ops to get at the demanded low bits.
43901 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
43902 SDValue DemandedLHS = DAG.GetDemandedBits(LHS, DemandedMask);
43903 SDValue DemandedRHS = DAG.GetDemandedBits(RHS, DemandedMask);
43904 if (DemandedLHS || DemandedRHS)
43905 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
43906 DemandedLHS ? DemandedLHS : LHS,
43907 DemandedRHS ? DemandedRHS : RHS);
43909 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
43910 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43911 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
43912 return SDValue(N, 0);
43914 return SDValue();
43917 static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
43918 TargetLowering::DAGCombinerInfo &DCI,
43919 const X86Subtarget &Subtarget) {
43920 EVT VT = N->getValueType(0);
43921 SDValue In = N->getOperand(0);
43923 // Try to merge vector loads and extend_inreg to an extload.
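// Illustrative example (sketch, assumed types):
//   (v4i32 sign_extend_vector_inreg (v16i8 load p))
//   --> (v4i32 sextload p, memVT = v4i8)
// with the original load's chain uses replaced by the new load's chain.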
43924 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
43925 In.hasOneUse()) {
43926 auto *Ld = cast<LoadSDNode>(In);
43927 if (!Ld->isVolatile()) {
43928 MVT SVT = In.getSimpleValueType().getVectorElementType();
43929 ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
43930 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
43931 VT.getVectorNumElements());
43932 SDValue Load =
43933 DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
43934 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
43935 Ld->getMemOperand()->getFlags());
43936 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
43937 return Load;
43941 // The remaining combines are disabled under widening legalization for now.
43942 // We can enable them if we find a case that needs them. Otherwise they can
43943 // be deleted when we switch to widening legalization.
43944 if (ExperimentalVectorWideningLegalization)
43945 return SDValue();
43947 // Combine (ext_invec (ext_invec X)) -> (ext_invec X)
43948 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43949 if (In.getOpcode() == N->getOpcode() &&
43950 TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getOperand(0).getValueType()))
43951 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, In.getOperand(0));
43953 // Attempt to combine as a shuffle.
43954 // TODO: SSE41 support
43955 if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
43956 SDValue Op(N, 0);
43957 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
43958 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
43959 return Res;
43962 return SDValue();
43965 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
43966 DAGCombinerInfo &DCI) const {
43967 SelectionDAG &DAG = DCI.DAG;
43968 switch (N->getOpcode()) {
43969 default: break;
43970 case ISD::SCALAR_TO_VECTOR:
43971 return combineScalarToVector(N, DAG);
43972 case ISD::EXTRACT_VECTOR_ELT:
43973 case X86ISD::PEXTRW:
43974 case X86ISD::PEXTRB:
43975 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
43976 case ISD::CONCAT_VECTORS:
43977 return combineConcatVectors(N, DAG, DCI, Subtarget);
43978 case ISD::INSERT_SUBVECTOR:
43979 return combineInsertSubvector(N, DAG, DCI, Subtarget);
43980 case ISD::EXTRACT_SUBVECTOR:
43981 return combineExtractSubvector(N, DAG, DCI, Subtarget);
43982 case ISD::VSELECT:
43983 case ISD::SELECT:
43984 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
43985 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
43986 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
43987 case X86ISD::CMP: return combineCMP(N, DAG);
43988 case ISD::ADD: return combineAdd(N, DAG, Subtarget);
43989 case ISD::SUB: return combineSub(N, DAG, Subtarget);
43990 case X86ISD::ADD:
43991 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
43992 case X86ISD::SBB: return combineSBB(N, DAG);
43993 case X86ISD::ADC: return combineADC(N, DAG, DCI);
43994 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
43995 case ISD::SHL: return combineShiftLeft(N, DAG);
43996 case ISD::SRA: return combineShiftRightArithmetic(N, DAG);
43997 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI);
43998 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
43999 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
44000 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
44001 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
44002 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
44003 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
44004 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
44005 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
44006 case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
44007 case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
44008 case ISD::FADD:
44009 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
44010 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
44011 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
44012 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
44013 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
44014 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
44015 case X86ISD::FXOR:
44016 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
44017 case X86ISD::FMIN:
44018 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
44019 case ISD::FMINNUM:
44020 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
44021 case X86ISD::CVTSI2P:
44022 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
44023 case X86ISD::CVTP2SI:
44024 case X86ISD::CVTP2UI:
44025 case X86ISD::CVTTP2SI:
44026 case X86ISD::CVTTP2UI: return combineCVTP2I_CVTTP2I(N, DAG, DCI);
44027 case X86ISD::BT: return combineBT(N, DAG, DCI);
44028 case ISD::ANY_EXTEND:
44029 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
44030 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
44031 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
44032 case ISD::ANY_EXTEND_VECTOR_INREG:
44033 case ISD::SIGN_EXTEND_VECTOR_INREG:
44034 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
44035 Subtarget);
44036 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
44037 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
44038 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
44039 case X86ISD::PACKSS:
44040 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
44041 case X86ISD::VSHL:
44042 case X86ISD::VSRA:
44043 case X86ISD::VSRL:
44044 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
44045 case X86ISD::VSHLI:
44046 case X86ISD::VSRAI:
44047 case X86ISD::VSRLI:
44048 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
44049 case X86ISD::PINSRB:
44050 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
44051 case X86ISD::SHUFP: // Handle all target specific shuffles
44052 case X86ISD::INSERTPS:
44053 case X86ISD::EXTRQI:
44054 case X86ISD::INSERTQI:
44055 case X86ISD::PALIGNR:
44056 case X86ISD::VSHLDQ:
44057 case X86ISD::VSRLDQ:
44058 case X86ISD::BLENDI:
44059 case X86ISD::UNPCKH:
44060 case X86ISD::UNPCKL:
44061 case X86ISD::MOVHLPS:
44062 case X86ISD::MOVLHPS:
44063 case X86ISD::PSHUFB:
44064 case X86ISD::PSHUFD:
44065 case X86ISD::PSHUFHW:
44066 case X86ISD::PSHUFLW:
44067 case X86ISD::MOVSHDUP:
44068 case X86ISD::MOVSLDUP:
44069 case X86ISD::MOVDDUP:
44070 case X86ISD::MOVSS:
44071 case X86ISD::MOVSD:
44072 case X86ISD::VBROADCAST:
44073 case X86ISD::VPPERM:
44074 case X86ISD::VPERMI:
44075 case X86ISD::VPERMV:
44076 case X86ISD::VPERMV3:
44077 case X86ISD::VPERMIL2:
44078 case X86ISD::VPERMILPI:
44079 case X86ISD::VPERMILPV:
44080 case X86ISD::VPERM2X128:
44081 case X86ISD::SHUF128:
44082 case X86ISD::VZEXT_MOVL:
44083 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
44084 case X86ISD::FMADD_RND:
44085 case X86ISD::FMSUB:
44086 case X86ISD::FMSUB_RND:
44087 case X86ISD::FNMADD:
44088 case X86ISD::FNMADD_RND:
44089 case X86ISD::FNMSUB:
44090 case X86ISD::FNMSUB_RND:
44091 case ISD::FMA: return combineFMA(N, DAG, Subtarget);
44092 case X86ISD::FMADDSUB_RND:
44093 case X86ISD::FMSUBADD_RND:
44094 case X86ISD::FMADDSUB:
44095 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget);
44096 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI);
44097 case X86ISD::MGATHER:
44098 case X86ISD::MSCATTER:
44099 case ISD::MGATHER:
44100 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
44101 case X86ISD::PCMPEQ:
44102 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
44103 case X86ISD::PMULDQ:
44104 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
44107 return SDValue();
44110 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
44111 if (!isTypeLegal(VT))
44112 return false;
44114 // There are no vXi8 shifts.
44115 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
44116 return false;
44118 // TODO: Almost no 8-bit ops are desirable because they have no actual
44119 // size/speed advantages vs. 32-bit ops, but they do have a major
44120 // potential disadvantage by causing partial register stalls.
44122 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
44123 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
44124 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
44125 // check for a constant operand to the multiply.
44126 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
44127 return false;
44129 // i16 instruction encodings are longer and some i16 instructions are slow,
44130 // so those are not desirable.
44131 if (VT == MVT::i16) {
44132 switch (Opc) {
44133 default:
44134 break;
44135 case ISD::LOAD:
44136 case ISD::SIGN_EXTEND:
44137 case ISD::ZERO_EXTEND:
44138 case ISD::ANY_EXTEND:
44139 case ISD::SHL:
44140 case ISD::SRA:
44141 case ISD::SRL:
44142 case ISD::SUB:
44143 case ISD::ADD:
44144 case ISD::MUL:
44145 case ISD::AND:
44146 case ISD::OR:
44147 case ISD::XOR:
44148 return false;
44152 // Any legal type not explicitly accounted for above here is desirable.
44153 return true;
44156 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
44157 SDValue Value, SDValue Addr,
44158 SelectionDAG &DAG) const {
44159 const Module *M = DAG.getMachineFunction().getMMI().getModule();
44160 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
44161 if (IsCFProtectionSupported) {
44162 // In case control-flow branch protection is enabled, we need to add
44163 // notrack prefix to the indirect branch.
44164 // In order to do that we create NT_BRIND SDNode.
44165 // Upon ISEL, the pattern will convert it to jmp with NoTrack prefix.
44166 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
44169 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
44172 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
44173 EVT VT = Op.getValueType();
44174 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
44175 isa<ConstantSDNode>(Op.getOperand(1));
44177 // i16 is legal, but undesirable since i16 instruction encodings are longer
44178 // and some i16 instructions are slow.
44179 // 8-bit multiply-by-constant can usually be expanded to something cheaper
44180 // using LEA and/or other ALU ops.
44181 if (VT != MVT::i16 && !Is8BitMulByConstant)
44182 return false;
44184 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
44185 if (!Op.hasOneUse())
44186 return false;
44187 SDNode *User = *Op->use_begin();
44188 if (!ISD::isNormalStore(User))
44189 return false;
44190 auto *Ld = cast<LoadSDNode>(Load);
44191 auto *St = cast<StoreSDNode>(User);
44192 return Ld->getBasePtr() == St->getBasePtr();
44195 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
44196 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
44197 return false;
44198 if (!Op.hasOneUse())
44199 return false;
44200 SDNode *User = *Op->use_begin();
44201 if (User->getOpcode() != ISD::ATOMIC_STORE)
44202 return false;
44203 auto *Ld = cast<AtomicSDNode>(Load);
44204 auto *St = cast<AtomicSDNode>(User);
44205 return Ld->getBasePtr() == St->getBasePtr();
44208 bool Commute = false;
44209 switch (Op.getOpcode()) {
44210 default: return false;
44211 case ISD::SIGN_EXTEND:
44212 case ISD::ZERO_EXTEND:
44213 case ISD::ANY_EXTEND:
44214 break;
44215 case ISD::SHL:
44216 case ISD::SRA:
44217 case ISD::SRL: {
44218 SDValue N0 = Op.getOperand(0);
44219 // Look out for (store (shl (load), x)).
44220 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
44221 return false;
44222 break;
44224 case ISD::ADD:
44225 case ISD::MUL:
44226 case ISD::AND:
44227 case ISD::OR:
44228 case ISD::XOR:
44229 Commute = true;
44230 LLVM_FALLTHROUGH;
44231 case ISD::SUB: {
44232 SDValue N0 = Op.getOperand(0);
44233 SDValue N1 = Op.getOperand(1);
44234 // Avoid disabling potential load folding opportunities.
44235 if (MayFoldLoad(N1) &&
44236 (!Commute || !isa<ConstantSDNode>(N0) ||
44237 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
44238 return false;
44239 if (MayFoldLoad(N0) &&
44240 ((Commute && !isa<ConstantSDNode>(N1)) ||
44241 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
44242 return false;
44243 if (IsFoldableAtomicRMW(N0, Op) ||
44244 (Commute && IsFoldableAtomicRMW(N1, Op)))
44245 return false;
44249 PVT = MVT::i32;
44250 return true;
44253 bool X86TargetLowering::
44254 isDesirableToCombineBuildVectorToShuffleTruncate(
44255 ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
44257 assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
44258 "Element count mismatch");
44259 assert(
44260 Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
44261 "Shuffle Mask expected to be legal");
44263 // For 32-bit elements VPERMD is better than shuffle+truncate.
44264 // TODO: After we improve lowerBuildVector, add an exception for VPERMW.
44265 if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
44266 return false;
44268 if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
44269 return false;
44271 return true;
44274 //===----------------------------------------------------------------------===//
44275 // X86 Inline Assembly Support
44276 //===----------------------------------------------------------------------===//
44278 // Helper to match a string separated by whitespace.
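// For example (illustrative), matchAsm("bswap $0", {"bswap", "$0"}) returns
// true, while matchAsm("bswapper $0", {"bswap", "$0"}) returns false because
// "bswap" would only match a prefix of the first token.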
44279 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
44280 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
44282 for (StringRef Piece : Pieces) {
44283 if (!S.startswith(Piece)) // Check if the piece matches.
44284 return false;
44286 S = S.substr(Piece.size());
44287 StringRef::size_type Pos = S.find_first_not_of(" \t");
44288 if (Pos == 0) // We matched a prefix.
44289 return false;
44291 S = S.substr(Pos);
44294 return S.empty();
44297 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
44299 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
44300 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
44301 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
44302 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
44304 if (AsmPieces.size() == 3)
44305 return true;
44306 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
44307 return true;
44310 return false;
44313 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
44314 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
44316 const std::string &AsmStr = IA->getAsmString();
44318 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
44319 if (!Ty || Ty->getBitWidth() % 16 != 0)
44320 return false;
44322 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
44323 SmallVector<StringRef, 4> AsmPieces;
44324 SplitString(AsmStr, AsmPieces, ";\n");
44326 switch (AsmPieces.size()) {
44327 default: return false;
44328 case 1:
44329 // FIXME: this should verify that we are targeting a 486 or better. If not,
44330 // we will turn this bswap into something that will be lowered to logical
44331 // ops instead of emitting the bswap asm. For now, we don't support 486 or
44332 // lower so don't worry about this.
44333 // bswap $0
44334 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
44335 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
44336 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
44337 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
44338 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
44339 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
44340 // No need to check constraints, nothing other than the equivalent of
44341 // "=r,0" would be valid here.
44342 return IntrinsicLowering::LowerToByteSwap(CI);
44345 // rorw $$8, ${0:w} --> llvm.bswap.i16
44346 if (CI->getType()->isIntegerTy(16) &&
44347 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
44348 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
44349 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
44350 AsmPieces.clear();
44351 StringRef ConstraintsStr = IA->getConstraintString();
44352 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
44353 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
44354 if (clobbersFlagRegisters(AsmPieces))
44355 return IntrinsicLowering::LowerToByteSwap(CI);
44357 break;
44358 case 3:
44359 if (CI->getType()->isIntegerTy(32) &&
44360 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
44361 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
44362 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
44363 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
44364 AsmPieces.clear();
44365 StringRef ConstraintsStr = IA->getConstraintString();
44366 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
44367 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
44368 if (clobbersFlagRegisters(AsmPieces))
44369 return IntrinsicLowering::LowerToByteSwap(CI);
44372 if (CI->getType()->isIntegerTy(64)) {
44373 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
44374 if (Constraints.size() >= 2 &&
44375 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
44376 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
44377 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
44378 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
44379 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
44380 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
44381 return IntrinsicLowering::LowerToByteSwap(CI);
44384 break;
44386 return false;
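//
// For illustration, IR along the lines of (operand names are hypothetical):
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %v)
// is matched by the single-piece case above and rewritten into a call to
// @llvm.bswap.i32, and on 32-bit targets the classic i64 idiom
//   bswap %eax ; bswap %edx ; xchgl %eax, %edx
// written against an "A" constraint becomes a call to @llvm.bswap.i64.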
44389 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
44390 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
44391 .Case("{@cca}", X86::COND_A)
44392 .Case("{@ccae}", X86::COND_AE)
44393 .Case("{@ccb}", X86::COND_B)
44394 .Case("{@ccbe}", X86::COND_BE)
44395 .Case("{@ccc}", X86::COND_B)
44396 .Case("{@cce}", X86::COND_E)
44397 .Case("{@ccz}", X86::COND_E)
44398 .Case("{@ccg}", X86::COND_G)
44399 .Case("{@ccge}", X86::COND_GE)
44400 .Case("{@ccl}", X86::COND_L)
44401 .Case("{@ccle}", X86::COND_LE)
44402 .Case("{@ccna}", X86::COND_BE)
44403 .Case("{@ccnae}", X86::COND_B)
44404 .Case("{@ccnb}", X86::COND_AE)
44405 .Case("{@ccnbe}", X86::COND_A)
44406 .Case("{@ccnc}", X86::COND_AE)
44407 .Case("{@ccne}", X86::COND_NE)
44408 .Case("{@ccnz}", X86::COND_NE)
44409 .Case("{@ccng}", X86::COND_LE)
44410 .Case("{@ccnge}", X86::COND_L)
44411 .Case("{@ccnl}", X86::COND_GE)
44412 .Case("{@ccnle}", X86::COND_G)
44413 .Case("{@ccno}", X86::COND_NO)
44414 .Case("{@ccnp}", X86::COND_P)
44415 .Case("{@ccns}", X86::COND_NS)
44416 .Case("{@cco}", X86::COND_O)
44417 .Case("{@ccp}", X86::COND_P)
44418 .Case("{@ccs}", X86::COND_S)
44419 .Default(X86::COND_INVALID);
44420 return Cond;
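//
// For illustration: a flag-output operand written at the source level as,
// say, "=@ccz"(result) is expected here in its braced form "{@ccz}" (the
// form the cases above spell out) and maps to X86::COND_E; any string not
// listed above, e.g. "{@ccfoo}", yields X86::COND_INVALID.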
44423 /// Given a constraint letter, return the type of constraint for this target.
44424 X86TargetLowering::ConstraintType
44425 X86TargetLowering::getConstraintType(StringRef Constraint) const {
44426 if (Constraint.size() == 1) {
44427 switch (Constraint[0]) {
44428 case 'R':
44429 case 'q':
44430 case 'Q':
44431 case 'f':
44432 case 't':
44433 case 'u':
44434 case 'y':
44435 case 'x':
44436 case 'v':
44437 case 'Y':
44438 case 'l':
44439 case 'k': // AVX512 masking registers.
44440 return C_RegisterClass;
44441 case 'a':
44442 case 'b':
44443 case 'c':
44444 case 'd':
44445 case 'S':
44446 case 'D':
44447 case 'A':
44448 return C_Register;
44449 case 'I':
44450 case 'J':
44451 case 'K':
44452 case 'L':
44453 case 'M':
44454 case 'N':
44455 case 'G':
44456 case 'C':
44457 case 'e':
44458 case 'Z':
44459 return C_Other;
44460 default:
44461 break;
44464 else if (Constraint.size() == 2) {
44465 switch (Constraint[0]) {
44466 default:
44467 break;
44468 case 'Y':
44469 switch (Constraint[1]) {
44470 default:
44471 break;
44472 case 'z':
44473 case '0':
44474 return C_Register;
44475 case 'i':
44476 case 'm':
44477 case 'k':
44478 case 't':
44479 case '2':
44480 return C_RegisterClass;
44483 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
44484 return C_Other;
44485 return TargetLowering::getConstraintType(Constraint);
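//
// A few illustrative mappings (not exhaustive):
//   "x"       -> C_RegisterClass  (SSE/AVX register)
//   "a"       -> C_Register       (the AL/AX/EAX/RAX family)
//   "I"       -> C_Other          (immediate in [0, 31])
//   "Yz"      -> C_Register       (XMM0)
//   "{@ccne}" -> C_Other          (EFLAGS condition output)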
44488 /// Examine constraint type and operand type and determine a weight value.
44489 /// This object must already have been set up with the operand type
44490 /// and the current alternative constraint selected.
44491 TargetLowering::ConstraintWeight
44492 X86TargetLowering::getSingleConstraintMatchWeight(
44493 AsmOperandInfo &info, const char *constraint) const {
44494 ConstraintWeight weight = CW_Invalid;
44495 Value *CallOperandVal = info.CallOperandVal;
44496 // If we don't have a value, we can't do a match,
44497 // but allow it at the lowest weight.
44498 if (!CallOperandVal)
44499 return CW_Default;
44500 Type *type = CallOperandVal->getType();
44501 // Look at the constraint type.
44502 switch (*constraint) {
44503 default:
44504 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
44505 LLVM_FALLTHROUGH;
44506 case 'R':
44507 case 'q':
44508 case 'Q':
44509 case 'a':
44510 case 'b':
44511 case 'c':
44512 case 'd':
44513 case 'S':
44514 case 'D':
44515 case 'A':
44516 if (CallOperandVal->getType()->isIntegerTy())
44517 weight = CW_SpecificReg;
44518 break;
44519 case 'f':
44520 case 't':
44521 case 'u':
44522 if (type->isFloatingPointTy())
44523 weight = CW_SpecificReg;
44524 break;
44525 case 'y':
44526 if (type->isX86_MMXTy() && Subtarget.hasMMX())
44527 weight = CW_SpecificReg;
44528 break;
44529 case 'Y': {
44530 unsigned Size = StringRef(constraint).size();
44531 // When matching a bare 'Y', treat it as 'Yi'; the two constraints are synonymous.
44532 char NextChar = Size == 2 ? constraint[1] : 'i';
44533 if (Size > 2)
44534 break;
44535 switch (NextChar) {
44536 default:
44537 return CW_Invalid;
44538 // XMM0
44539 case 'z':
44540 case '0':
44541 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
44542 return CW_SpecificReg;
44543 return CW_Invalid;
44544 // Conditional OpMask regs (AVX512)
44545 case 'k':
44546 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
44547 return CW_Register;
44548 return CW_Invalid;
44549 // Any MMX reg
44550 case 'm':
44551 if (type->isX86_MMXTy() && Subtarget.hasMMX())
44552 return weight;
44553 return CW_Invalid;
44554 // Any SSE reg when ISA >= SSE2, same as 'Y'
44555 case 'i':
44556 case 't':
44557 case '2':
44558 if (!Subtarget.hasSSE2())
44559 return CW_Invalid;
44560 break;
44562 // Fall through (handle "Y" constraint).
44563 LLVM_FALLTHROUGH;
44565 case 'v':
44566 if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
44567 weight = CW_Register;
44568 LLVM_FALLTHROUGH;
44569 case 'x':
44570 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
44571 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
44572 weight = CW_Register;
44573 break;
44574 case 'k':
44575 // Enable conditional vector operations using %k<#> registers.
44576 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
44577 weight = CW_Register;
44578 break;
44579 case 'I':
44580 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
44581 if (C->getZExtValue() <= 31)
44582 weight = CW_Constant;
44584 break;
44585 case 'J':
44586 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44587 if (C->getZExtValue() <= 63)
44588 weight = CW_Constant;
44590 break;
44591 case 'K':
44592 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44593 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
44594 weight = CW_Constant;
44596 break;
44597 case 'L':
44598 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44599 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
44600 weight = CW_Constant;
44602 break;
44603 case 'M':
44604 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44605 if (C->getZExtValue() <= 3)
44606 weight = CW_Constant;
44608 break;
44609 case 'N':
44610 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44611 if (C->getZExtValue() <= 0xff)
44612 weight = CW_Constant;
44614 break;
44615 case 'G':
44616 case 'C':
44617 if (isa<ConstantFP>(CallOperandVal)) {
44618 weight = CW_Constant;
44620 break;
44621 case 'e':
44622 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44623 if ((C->getSExtValue() >= -0x80000000LL) &&
44624 (C->getSExtValue() <= 0x7fffffffLL))
44625 weight = CW_Constant;
44627 break;
44628 case 'Z':
44629 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44630 if (C->getZExtValue() <= 0xffffffff)
44631 weight = CW_Constant;
44633 break;
44635 return weight;
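//
// For illustration (assuming the subtarget has SSE1): an i32 operand under
// the 'a' constraint weighs CW_SpecificReg, a v4f32 operand under 'x' weighs
// CW_Register, and an 'I' operand weighs CW_Constant for the constant 7 but
// stays CW_Invalid for 100, which is outside the allowed [0, 31] range.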
44638 /// Try to replace an X constraint, which matches anything, with another that
44639 /// has more specific requirements based on the type of the corresponding
44640 /// operand.
44641 const char *X86TargetLowering::
44642 LowerXConstraint(EVT ConstraintVT) const {
44643 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
44644 // 'f' like normal targets.
44645 if (ConstraintVT.isFloatingPoint()) {
44646 if (Subtarget.hasSSE2())
44647 return "Y";
44648 if (Subtarget.hasSSE1())
44649 return "x";
44652 return TargetLowering::LowerXConstraint(ConstraintVT);
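//
// e.g. an "X" operand of type double is re-lowered to "Y" on SSE2 targets and
// to "x" on SSE1-only targets; non-FP operands are left to the generic
// TargetLowering handling.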
44655 // Lower @cc targets via setcc.
44656 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
44657 SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
44658 SelectionDAG &DAG) const {
44659 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
44660 if (Cond == X86::COND_INVALID)
44661 return SDValue();
44662 // Check that return type is valid.
44663 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
44664 OpInfo.ConstraintVT.getSizeInBits() < 8)
44665 report_fatal_error("Flag output operand is of invalid type");
44667 // Get EFLAGS register. Only update chain when copyfrom is glued.
44668 if (Flag.getNode()) {
44669 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
44670 Chain = Flag.getValue(1);
44671 } else
44672 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
44673 // Extract CC code.
44674 SDValue CC = getSETCC(Cond, Flag, DL, DAG);
44675 // Zero-extend the i8 flag result to the constraint's type.
44676 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
44678 return Result;
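//
// Roughly, for an i32 "={@ccz}" output the nodes built here are:
//   flags  = CopyFromReg(Chain, EFLAGS)    ; glued to Flag when one is given
//   setcc  = X86ISD::SETCC i8, COND_E, flags
//   result = zero_extend setcc to i32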
44681 /// Lower the specified operand into the Ops vector.
44682 /// If it is invalid, don't add anything to Ops.
44683 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
44684 std::string &Constraint,
44685 std::vector<SDValue>&Ops,
44686 SelectionDAG &DAG) const {
44687 SDValue Result;
44689 // Only support length 1 constraints for now.
44690 if (Constraint.length() > 1) return;
44692 char ConstraintLetter = Constraint[0];
44693 switch (ConstraintLetter) {
44694 default: break;
44695 case 'I':
44696 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44697 if (C->getZExtValue() <= 31) {
44698 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44699 Op.getValueType());
44700 break;
44703 return;
44704 case 'J':
44705 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44706 if (C->getZExtValue() <= 63) {
44707 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44708 Op.getValueType());
44709 break;
44712 return;
44713 case 'K':
44714 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44715 if (isInt<8>(C->getSExtValue())) {
44716 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44717 Op.getValueType());
44718 break;
44721 return;
44722 case 'L':
44723 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44724 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
44725 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
44726 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
44727 Op.getValueType());
44728 break;
44731 return;
44732 case 'M':
44733 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44734 if (C->getZExtValue() <= 3) {
44735 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44736 Op.getValueType());
44737 break;
44740 return;
44741 case 'N':
44742 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44743 if (C->getZExtValue() <= 255) {
44744 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44745 Op.getValueType());
44746 break;
44749 return;
44750 case 'O':
44751 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44752 if (C->getZExtValue() <= 127) {
44753 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44754 Op.getValueType());
44755 break;
44758 return;
44759 case 'e': {
44760 // 32-bit signed value
44761 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44762 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
44763 C->getSExtValue())) {
44764 // Widen to 64 bits here to get it sign extended.
44765 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
44766 break;
44768 // FIXME gcc accepts some relocatable values here too, but only in certain
44769 // memory models; it's complicated.
44771 return;
44773 case 'Z': {
44774 // 32-bit unsigned value
44775 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44776 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
44777 C->getZExtValue())) {
44778 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44779 Op.getValueType());
44780 break;
44783 // FIXME gcc accepts some relocatable values here too, but only in certain
44784 // memory models; it's complicated.
44785 return;
44787 case 'i': {
44788 // Literal immediates are always ok.
44789 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
44790 bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
44791 BooleanContent BCont = getBooleanContents(MVT::i64);
44792 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
44793 : ISD::SIGN_EXTEND;
44794 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
44795 : CST->getSExtValue();
44796 Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
44797 break;
44800 // In any sort of PIC mode addresses need to be computed at runtime by
44801 // adding in a register or some sort of table lookup. These can't
44802 // be used as immediates.
44803 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
44804 return;
44806 // If we are in non-pic codegen mode, we allow the address of a global (with
44807 // an optional displacement) to be used with 'i'.
44808 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
44809 // If we require an extra load to get this address, as in PIC mode, we
44810 // can't accept it.
44811 if (isGlobalStubReference(
44812 Subtarget.classifyGlobalReference(GA->getGlobal())))
44813 return;
44814 break;
44818 if (Result.getNode()) {
44819 Ops.push_back(Result);
44820 return;
44822 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
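//
// For illustration: under the 'I' constraint a constant operand of 5 is
// rewritten into a TargetConstant and pushed onto Ops, while a constant of 40
// (outside [0, 31]) adds nothing, which the caller treats as an unsatisfied
// constraint.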
44825 /// Check if \p RC is a general purpose register class.
44826 /// I.e., GR* or one of their variant.
44827 static bool isGRClass(const TargetRegisterClass &RC) {
44828 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
44829 RC.hasSuperClassEq(&X86::GR16RegClass) ||
44830 RC.hasSuperClassEq(&X86::GR32RegClass) ||
44831 RC.hasSuperClassEq(&X86::GR64RegClass) ||
44832 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
44835 /// Check if \p RC is a vector register class.
44836 /// I.e., FR* / VR* or one of their variant.
44837 static bool isFRClass(const TargetRegisterClass &RC) {
44838 return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
44839 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
44840 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
44841 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
44842 RC.hasSuperClassEq(&X86::VR512RegClass);
44845 /// Check if \p RC is a mask register class.
44846 /// I.e., VK* or one of their variant.
44847 static bool isVKClass(const TargetRegisterClass &RC) {
44848 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
44849 RC.hasSuperClassEq(&X86::VK2RegClass) ||
44850 RC.hasSuperClassEq(&X86::VK4RegClass) ||
44851 RC.hasSuperClassEq(&X86::VK8RegClass) ||
44852 RC.hasSuperClassEq(&X86::VK16RegClass) ||
44853 RC.hasSuperClassEq(&X86::VK32RegClass) ||
44854 RC.hasSuperClassEq(&X86::VK64RegClass);
44857 std::pair<unsigned, const TargetRegisterClass *>
44858 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
44859 StringRef Constraint,
44860 MVT VT) const {
44861 // First, see if this is a constraint that directly corresponds to an LLVM
44862 // register class.
44863 if (Constraint.size() == 1) {
44864 // GCC Constraint Letters
44865 switch (Constraint[0]) {
44866 default: break;
44867 // 'A' means [ER]AX + [ER]DX.
44868 case 'A':
44869 if (Subtarget.is64Bit())
44870 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
44871 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
44872 "Expecting 64, 32 or 16 bit subtarget");
44873 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
44875 // TODO: Slight differences here in allocation order and leaving
44876 // RIP in the class. Do they matter any more here than they do
44877 // in the normal allocation?
44878 case 'k':
44879 if (Subtarget.hasAVX512()) {
44880 if (VT == MVT::i1)
44881 return std::make_pair(0U, &X86::VK1RegClass);
44882 if (VT == MVT::i8)
44883 return std::make_pair(0U, &X86::VK8RegClass);
44884 if (VT == MVT::i16)
44885 return std::make_pair(0U, &X86::VK16RegClass);
44887 if (Subtarget.hasBWI()) {
44888 if (VT == MVT::i32)
44889 return std::make_pair(0U, &X86::VK32RegClass);
44890 if (VT == MVT::i64)
44891 return std::make_pair(0U, &X86::VK64RegClass);
44893 break;
44894 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
44895 if (Subtarget.is64Bit()) {
44896 if (VT == MVT::i32 || VT == MVT::f32)
44897 return std::make_pair(0U, &X86::GR32RegClass);
44898 if (VT == MVT::i16)
44899 return std::make_pair(0U, &X86::GR16RegClass);
44900 if (VT == MVT::i8 || VT == MVT::i1)
44901 return std::make_pair(0U, &X86::GR8RegClass);
44902 if (VT == MVT::i64 || VT == MVT::f64)
44903 return std::make_pair(0U, &X86::GR64RegClass);
44904 break;
44906 LLVM_FALLTHROUGH;
44907 // 32-bit fallthrough
44908 case 'Q': // Q_REGS
44909 if (VT == MVT::i32 || VT == MVT::f32)
44910 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
44911 if (VT == MVT::i16)
44912 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
44913 if (VT == MVT::i8 || VT == MVT::i1)
44914 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
44915 if (VT == MVT::i64)
44916 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
44917 break;
44918 case 'r': // GENERAL_REGS
44919 case 'l': // INDEX_REGS
44920 if (VT == MVT::i8 || VT == MVT::i1)
44921 return std::make_pair(0U, &X86::GR8RegClass);
44922 if (VT == MVT::i16)
44923 return std::make_pair(0U, &X86::GR16RegClass);
44924 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
44925 return std::make_pair(0U, &X86::GR32RegClass);
44926 return std::make_pair(0U, &X86::GR64RegClass);
44927 case 'R': // LEGACY_REGS
44928 if (VT == MVT::i8 || VT == MVT::i1)
44929 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
44930 if (VT == MVT::i16)
44931 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
44932 if (VT == MVT::i32 || !Subtarget.is64Bit())
44933 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
44934 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
44935 case 'f': // FP Stack registers.
44936 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
44937 // value to the correct fpstack register class.
44938 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
44939 return std::make_pair(0U, &X86::RFP32RegClass);
44940 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
44941 return std::make_pair(0U, &X86::RFP64RegClass);
44942 return std::make_pair(0U, &X86::RFP80RegClass);
44943 case 'y': // MMX_REGS if MMX allowed.
44944 if (!Subtarget.hasMMX()) break;
44945 return std::make_pair(0U, &X86::VR64RegClass);
44946 case 'Y': // SSE_REGS if SSE2 allowed
44947 if (!Subtarget.hasSSE2()) break;
44948 LLVM_FALLTHROUGH;
44949 case 'v':
44950 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
44951 if (!Subtarget.hasSSE1()) break;
44952 bool VConstraint = (Constraint[0] == 'v');
44954 switch (VT.SimpleTy) {
44955 default: break;
44956 // Scalar SSE types.
44957 case MVT::f32:
44958 case MVT::i32:
44959 if (VConstraint && Subtarget.hasVLX())
44960 return std::make_pair(0U, &X86::FR32XRegClass);
44961 return std::make_pair(0U, &X86::FR32RegClass);
44962 case MVT::f64:
44963 case MVT::i64:
44964 if (VConstraint && Subtarget.hasVLX())
44965 return std::make_pair(0U, &X86::FR64XRegClass);
44966 return std::make_pair(0U, &X86::FR64RegClass);
44967 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
44968 // Vector types.
44969 case MVT::v16i8:
44970 case MVT::v8i16:
44971 case MVT::v4i32:
44972 case MVT::v2i64:
44973 case MVT::v4f32:
44974 case MVT::v2f64:
44975 if (VConstraint && Subtarget.hasVLX())
44976 return std::make_pair(0U, &X86::VR128XRegClass);
44977 return std::make_pair(0U, &X86::VR128RegClass);
44978 // AVX types.
44979 case MVT::v32i8:
44980 case MVT::v16i16:
44981 case MVT::v8i32:
44982 case MVT::v4i64:
44983 case MVT::v8f32:
44984 case MVT::v4f64:
44985 if (VConstraint && Subtarget.hasVLX())
44986 return std::make_pair(0U, &X86::VR256XRegClass);
44987 if (Subtarget.hasAVX())
44988 return std::make_pair(0U, &X86::VR256RegClass);
44989 break;
44990 case MVT::v8f64:
44991 case MVT::v16f32:
44992 case MVT::v16i32:
44993 case MVT::v8i64:
44994 if (!Subtarget.hasAVX512()) break;
44995 if (VConstraint)
44996 return std::make_pair(0U, &X86::VR512RegClass);
44997 return std::make_pair(0U, &X86::VR512_0_15RegClass);
44999 break;
45001 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
45002 switch (Constraint[1]) {
45003 default:
45004 break;
45005 case 'i':
45006 case 't':
45007 case '2':
45008 return getRegForInlineAsmConstraint(TRI, "Y", VT);
45009 case 'm':
45010 if (!Subtarget.hasMMX()) break;
45011 return std::make_pair(0U, &X86::VR64RegClass);
45012 case 'z':
45013 case '0':
45014 if (!Subtarget.hasSSE1()) break;
45015 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
45016 case 'k':
45017 // These register classes don't allocate k0 for masked vector operations.
45018 if (Subtarget.hasAVX512()) {
45019 if (VT == MVT::i1)
45020 return std::make_pair(0U, &X86::VK1WMRegClass);
45021 if (VT == MVT::i8)
45022 return std::make_pair(0U, &X86::VK8WMRegClass);
45023 if (VT == MVT::i16)
45024 return std::make_pair(0U, &X86::VK16WMRegClass);
45026 if (Subtarget.hasBWI()) {
45027 if (VT == MVT::i32)
45028 return std::make_pair(0U, &X86::VK32WMRegClass);
45029 if (VT == MVT::i64)
45030 return std::make_pair(0U, &X86::VK64WMRegClass);
45032 break;
45036 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
45037 return std::make_pair(0U, &X86::GR32RegClass);
45039 // Use the default implementation in TargetLowering to convert the register
45040 // constraint into a member of a register class.
45041 std::pair<unsigned, const TargetRegisterClass*> Res;
45042 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
45044 // Not found as a standard register?
45045 if (!Res.second) {
45046 // Map st(0) .. st(7) to FP0 .. FP7.
45047 if (Constraint.size() == 7 && Constraint[0] == '{' &&
45048 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
45049 Constraint[3] == '(' &&
45050 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
45051 Constraint[5] == ')' && Constraint[6] == '}') {
45052 // st(7) is not allocatable and thus not a member of RFP80. Return
45053 // singleton class in cases where we have a reference to it.
45054 if (Constraint[4] == '7')
45055 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
45056 return std::make_pair(X86::FP0 + Constraint[4] - '0',
45057 &X86::RFP80RegClass);
45060 // GCC allows "st(0)" to be called just plain "st".
45061 if (StringRef("{st}").equals_lower(Constraint))
45062 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
45064 // flags -> EFLAGS
45065 if (StringRef("{flags}").equals_lower(Constraint))
45066 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
45068 // dirflag -> DF
45069 if (StringRef("{dirflag}").equals_lower(Constraint))
45070 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
45072 // fpsr -> FPSW
45073 if (StringRef("{fpsr}").equals_lower(Constraint))
45074 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
45076 return Res;
45079 // Make sure it isn't a register that requires 64-bit mode.
45080 if (!Subtarget.is64Bit() &&
45081 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
45082 TRI->getEncodingValue(Res.first) >= 8) {
45083 // Register requires REX prefix, but we're in 32-bit mode.
45084 return std::make_pair(0, nullptr);
45087 // Make sure it isn't a register that requires AVX512.
45088 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
45089 TRI->getEncodingValue(Res.first) & 0x10) {
45090 // Register requires EVEX prefix.
45091 return std::make_pair(0, nullptr);
45094 // Otherwise, check to see if this is a register class of the wrong value
45095 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
45096 // turn into {ax},{dx}.
45097 // MVT::Other is used to specify clobber names.
45098 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
45099 return Res; // Correct type already, nothing to do.
45101 // Get a matching integer of the correct size, i.e. "ax" with MVT::i32 should
45102 // return "eax". This should even work for things like getting 64-bit integer
45103 // registers when given an f64 type.
45104 const TargetRegisterClass *Class = Res.second;
45105 // The generic code will match the first register class that contains the
45106 // given register. Thus, based on the ordering of the tablegened file,
45107 // the "plain" GR classes might not come first.
45108 // Therefore, use a helper method.
45109 if (isGRClass(*Class)) {
45110 unsigned Size = VT.getSizeInBits();
45111 if (Size == 1) Size = 8;
45112 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
45113 if (DestReg > 0) {
45114 bool is64Bit = Subtarget.is64Bit();
45115 const TargetRegisterClass *RC =
45116 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
45117 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
45118 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
45119 : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
45120 : nullptr;
45121 if (Size == 64 && !is64Bit) {
45122 // Model GCC's behavior here and select a fixed pair of 32-bit
45123 // registers.
45124 switch (DestReg) {
45125 case X86::RAX:
45126 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45127 case X86::RDX:
45128 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
45129 case X86::RCX:
45130 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
45131 case X86::RBX:
45132 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
45133 case X86::RSI:
45134 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
45135 case X86::RDI:
45136 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
45137 case X86::RBP:
45138 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
45139 default:
45140 return std::make_pair(0, nullptr);
45143 if (RC && RC->contains(DestReg))
45144 return std::make_pair(DestReg, RC);
45145 return Res;
45147 // No register found/type mismatch.
45148 return std::make_pair(0, nullptr);
45149 } else if (isFRClass(*Class)) {
45150 // Handle references to XMM physical registers that got mapped into the
45151 // wrong class. This can happen with constraints like {xmm0} where the
45152 // target independent register mapper will just pick the first match it can
45153 // find, ignoring the required type.
45155 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
45156 if (VT == MVT::f32 || VT == MVT::i32)
45157 Res.second = &X86::FR32XRegClass;
45158 else if (VT == MVT::f64 || VT == MVT::i64)
45159 Res.second = &X86::FR64XRegClass;
45160 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
45161 Res.second = &X86::VR128XRegClass;
45162 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
45163 Res.second = &X86::VR256XRegClass;
45164 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
45165 Res.second = &X86::VR512RegClass;
45166 else {
45167 // Type mismatch and not a clobber: Return an error.
45168 Res.first = 0;
45169 Res.second = nullptr;
45171 } else if (isVKClass(*Class)) {
45172 if (VT == MVT::i1)
45173 Res.second = &X86::VK1RegClass;
45174 else if (VT == MVT::i8)
45175 Res.second = &X86::VK8RegClass;
45176 else if (VT == MVT::i16)
45177 Res.second = &X86::VK16RegClass;
45178 else if (VT == MVT::i32)
45179 Res.second = &X86::VK32RegClass;
45180 else if (VT == MVT::i64)
45181 Res.second = &X86::VK64RegClass;
45182 else {
45183 // Type mismatch and not a clobber: Return an error.
45184 Res.first = 0;
45185 Res.second = nullptr;
45189 return Res;
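//
// A few illustrative results (assuming a 64-bit subtarget with SSE enabled):
//   "r"    + MVT::i32   -> (0, &X86::GR32RegClass)
//   "{ax}" + MVT::i32   -> (X86::EAX, &X86::GR32RegClass)   ; resized from AX
//   "x"    + MVT::v4f32 -> (0, &X86::VR128RegClass)
//   "Yz"   + MVT::v4f32 -> (X86::XMM0, &X86::VR128RegClass)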
45192 int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
45193 const AddrMode &AM, Type *Ty,
45194 unsigned AS) const {
45195 // Scaling factors are not free at all.
45196 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
45197 // will take 2 allocations in the out-of-order engine instead of 1
45198 // for plain addressing mode, i.e. inst (reg1).
45199 // E.g.,
45200 // vaddps (%rsi,%rdx), %ymm0, %ymm1
45201 // Requires two allocations (one for the load, one for the computation)
45202 // whereas:
45203 // vaddps (%rsi), %ymm0, %ymm1
45204 // Requires just 1 allocation, i.e., freeing allocations for other operations
45205 // and having fewer micro-operations to execute.
45207 // For some X86 architectures, this is even worse because for instance for
45208 // stores, the complex addressing mode forces the instruction to use the
45209 // "load" ports instead of the dedicated "store" port.
45210 // E.g., on Haswell:
45211 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
45212 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
45213 if (isLegalAddressingMode(DL, AM, Ty, AS))
45214 // Scale represents reg2 * scale, thus account for 1
45215 // as soon as we use a second register.
45216 return AM.Scale != 0;
45217 return -1;
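//
// For illustration (hypothetical AddrMode contents):
//   base register only                  (Scale == 0)  -> cost 0
//   base register + 4 * index register  (Scale == 4)  -> cost 1
//   anything isLegalAddressingMode rejects            -> -1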
45220 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
45221 // Integer division on x86 is expensive. However, when aggressively optimizing
45222 // for code size, we prefer to use a div instruction, as it is usually smaller
45223 // than the alternative sequence.
45224 // The exception to this is vector division. Since x86 doesn't have vector
45225 // integer division, leaving the division as-is is a loss even in terms of
45226 // size, because it will have to be scalarized, while the alternative code
45227 // sequence can be performed in vector form.
45228 bool OptSize =
45229 Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
45230 return OptSize && !VT.isVector();
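//
// e.g. when the function is marked minsize, a scalar 'udiv i32 %x, 10' keeps
// the real division instruction, while a 'udiv <4 x i32>' by a constant is
// still treated as non-cheap so the usual multiply/shift expansion applies.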
45233 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
45234 if (!Subtarget.is64Bit())
45235 return;
45237 // Update IsSplitCSR in X86MachineFunctionInfo.
45238 X86MachineFunctionInfo *AFI =
45239 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
45240 AFI->setIsSplitCSR(true);
45243 void X86TargetLowering::insertCopiesSplitCSR(
45244 MachineBasicBlock *Entry,
45245 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
45246 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
45247 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
45248 if (!IStart)
45249 return;
45251 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
45252 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
45253 MachineBasicBlock::iterator MBBI = Entry->begin();
45254 for (const MCPhysReg *I = IStart; *I; ++I) {
45255 const TargetRegisterClass *RC = nullptr;
45256 if (X86::GR64RegClass.contains(*I))
45257 RC = &X86::GR64RegClass;
45258 else
45259 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
45261 unsigned NewVR = MRI->createVirtualRegister(RC);
45262 // Create copy from CSR to a virtual register.
45263 // FIXME: this currently does not emit CFI pseudo-instructions, it works
45264 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
45265 // nounwind. If we want to generalize this later, we may need to emit
45266 // CFI pseudo-instructions.
45267 assert(
45268 Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
45269 "Function should be nounwind in insertCopiesSplitCSR!");
45270 Entry->addLiveIn(*I);
45271 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
45272 .addReg(*I);
45274 // Insert the copy-back instructions right before the terminator.
45275 for (auto *Exit : Exits)
45276 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
45277 TII->get(TargetOpcode::COPY), *I)
45278 .addReg(NewVR);
45282 bool X86TargetLowering::supportSwiftError() const {
45283 return Subtarget.is64Bit();
45286 /// Returns the name of the symbol used to emit stack probes or the empty
45287 /// string if not applicable.
45288 StringRef
45289 X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
45290 // If the function specifically requests stack probes, emit them.
45291 if (MF.getFunction().hasFnAttribute("probe-stack"))
45292 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
45294 // Generally, if we aren't on Windows, the platform ABI does not include
45295 // support for stack probes, so don't emit them.
45296 if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
45297 MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
45298 return "";
45300 // We need a stack probe to conform to the Windows ABI. Choose the right
45301 // symbol.
45302 if (Subtarget.is64Bit())
45303 return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
45304 return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
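//
// Illustrative results: "__chkstk" for 64-bit MSVC targets, "___chkstk_ms"
// for 64-bit MinGW, "_chkstk" for 32-bit MSVC, "_alloca" for 32-bit
// cygwin/MinGW, "" elsewhere, and the verbatim value of a "probe-stack"
// function attribute when one is present.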