1 ;; Predicate definitions for IA-32 and x86-64.
2 ;; Copyright (C) 2004-2016 Free Software Foundation, Inc.
4 ;; This file is part of GCC.
6 ;; GCC is free software; you can redistribute it and/or modify
7 ;; it under the terms of the GNU General Public License as published by
8 ;; the Free Software Foundation; either version 3, or (at your option)
11 ;; GCC is distributed in the hope that it will be useful,
12 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ;; GNU General Public License for more details.
16 ;; You should have received a copy of the GNU General Public License
17 ;; along with GCC; see the file COPYING3. If not see
18 ;; <http://www.gnu.org/licenses/>.
;; Register-class predicates.  Each one accepts a bare REG rtx whose
;; register number satisfies the corresponding REGNO test macro.

20 ;; Return true if OP is either a i387 or SSE fp register.
21 (define_predicate "any_fp_register_operand"
22 (and (match_code "reg")
23 (match_test "ANY_FP_REGNO_P (REGNO (op))")))
25 ;; Return true if OP is an i387 fp register.
26 (define_predicate "fp_register_operand"
27 (and (match_code "reg")
28 (match_test "STACK_REGNO_P (REGNO (op))")))
30 ;; True if the operand is a GENERAL class register.
31 (define_predicate "general_reg_operand"
32 (and (match_code "reg")
33 (match_test "GENERAL_REGNO_P (REGNO (op))")))
35 ;; True if the operand is a nonimmediate operand with GENERAL class register.
;; For a REG this insists on GENERAL class; anything else falls back to
;; the generic nonimmediate_operand test.
36 (define_predicate "nonimmediate_gr_operand"
37 (if_then_else (match_code "reg")
38 (match_test "GENERAL_REGNO_P (REGNO (op))")
39 (match_operand 0 "nonimmediate_operand")))
41 ;; True if the operand is an MMX register.
42 (define_predicate "mmx_reg_operand"
43 (and (match_code "reg")
44 (match_test "MMX_REGNO_P (REGNO (op))")))
46 ;; True if the operand is an SSE register.
47 (define_predicate "sse_reg_operand"
48 (and (match_code "reg")
49 (match_test "SSE_REGNO_P (REGNO (op))")))
51 ;; True if the operand is an AVX-512 new register.
;; (NOTE(review): EXT_REX_SSE_REGNO_P presumably matches the EVEX-only
;; %xmm16-%xmm31 range -- confirm against i386.h.)
52 (define_predicate "ext_sse_reg_operand"
53 (and (match_code "reg")
54 (match_test "EXT_REX_SSE_REGNO_P (REGNO (op))")))
56 ;; True if the operand is an AVX-512 mask register.
57 (define_predicate "mask_reg_operand"
58 (and (match_code "reg")
59 (match_test "MASK_REGNO_P (REGNO (op))")))
61 ;; Return true if op is a QImode register.
62 (define_predicate "any_QIreg_operand"
63 (and (match_code "reg")
64 (match_test "ANY_QI_REGNO_P (REGNO (op))")))
66 ;; Return true if op is one of QImode registers: %[abcd][hl].
67 (define_predicate "QIreg_operand"
68 (and (match_code "reg")
69 (match_test "QI_REGNO_P (REGNO (op))")))
71 ;; Return true if op is a QImode register operand other than %[abcd][hl].
72 (define_predicate "ext_QIreg_operand"
73 (and (match_test "TARGET_64BIT")
75 (not (match_test "QI_REGNO_P (REGNO (op))"))))
77 ;; Return true if op is the AX register.
78 (define_predicate "ax_reg_operand"
79 (and (match_code "reg")
80 (match_test "REGNO (op) == AX_REG")))
82 ;; Return true if op is the flags register.
;; Used by patterns that explicitly consume or clobber EFLAGS.
83 (define_predicate "flags_reg_operand"
84 (and (match_code "reg")
85 (match_test "REGNO (op) == FLAGS_REG")))
87 ;; Match an SI or HImode register for a zero_extract.
88 (define_special_predicate "ext_register_operand"
89 (match_operand 0 "register_operand")
91 if ((!TARGET_64BIT || GET_MODE (op) != DImode)
92 && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
97 /* Be careful to accept only registers having upper parts. */
99 && (REGNO (op) > LAST_VIRTUAL_REGISTER || QI_REGNO_P (REGNO (op))));
102 ;; Match nonimmediate operands, but exclude memory operands on 64bit targets.
;; On TARGET_64BIT only registers are accepted; otherwise any
;; nonimmediate operand (register or memory) passes.
103 (define_predicate "nonimmediate_x64nomem_operand"
104 (if_then_else (match_test "TARGET_64BIT")
105 (match_operand 0 "register_operand")
106 (match_operand 0 "nonimmediate_operand")))
108 ;; Match general operands, but exclude memory operands on 64bit targets.
;; On TARGET_64BIT only nonmemory (register/constant) operands are
;; accepted; otherwise any general operand passes.
109 (define_predicate "general_x64nomem_operand"
110 (if_then_else (match_test "TARGET_64BIT")
111 (match_operand 0 "nonmemory_operand")
112 (match_operand 0 "general_operand")))
114 ;; Match register operands, but include memory operands for TARGET_SSE_MATH.
115 (define_predicate "register_ssemem_operand"
117 (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
118 (match_operand 0 "nonimmediate_operand")
119 (match_operand 0 "register_operand")))
121 ;; Match nonimmediate operands, but exclude memory operands
122 ;; for TARGET_SSE_MATH if TARGET_MIX_SSE_I387 is not enabled.
123 (define_predicate "nonimm_ssenomem_operand"
125 (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
126 (not (match_test "TARGET_MIX_SSE_I387")))
127 (match_operand 0 "register_operand")
128 (match_operand 0 "nonimmediate_operand")))
130 ;; The above predicate, suitable for x87 arithmetic operators.
131 (define_predicate "x87nonimm_ssenomem_operand"
133 (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
134 (not (match_test "TARGET_MIX_SSE_I387 && X87_ENABLE_ARITH (mode)")))
135 (match_operand 0 "register_operand")
136 (match_operand 0 "nonimmediate_operand")))
138 ;; Match register operands, include memory operand for TARGET_SSE4_1.
139 (define_predicate "register_sse4nonimm_operand"
140 (if_then_else (match_test "TARGET_SSE4_1")
141 (match_operand 0 "nonimmediate_operand")
142 (match_operand 0 "register_operand")))
144 ;; Return true if VALUE is a symbol reference.
145 (define_predicate "symbol_operand"
146 (match_code "symbol_ref"))
148 ;; Return true if VALUE can be stored in a sign extended immediate field.
149 (define_predicate "x86_64_immediate_operand"
150 (match_code "const_int,symbol_ref,label_ref,const")
153 return immediate_operand (op, mode);
155 switch (GET_CODE (op))
159 HOST_WIDE_INT val = INTVAL (op);
160 return trunc_int_for_mode (val, SImode) == val;
163 /* TLS symbols are not constant. */
164 if (SYMBOL_REF_TLS_MODEL (op))
167 /* Load the external function address via the GOT slot. */
168 if (ix86_force_load_from_GOT_p (op))
171 /* For certain code models, the symbolic references are known to fit.
172 in CM_SMALL_PIC model we know it fits if it is local to the shared
173 library. Don't count TLS SYMBOL_REFs here, since they should fit
174 only if inside of UNSPEC handled below. */
175 return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
176 || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));
179 /* For certain code models, the code is near as well. */
180 return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
181 || ix86_cmodel == CM_KERNEL);
184 /* We also may accept the offsetted memory references in certain
186 if (GET_CODE (XEXP (op, 0)) == UNSPEC)
187 switch (XINT (XEXP (op, 0), 1))
189 case UNSPEC_GOTPCREL:
191 case UNSPEC_GOTNTPOFF:
198 if (GET_CODE (XEXP (op, 0)) == PLUS)
200 rtx op1 = XEXP (XEXP (op, 0), 0);
201 rtx op2 = XEXP (XEXP (op, 0), 1);
203 if (ix86_cmodel == CM_LARGE)
205 if (!CONST_INT_P (op2))
208 HOST_WIDE_INT offset = INTVAL (op2);
209 if (trunc_int_for_mode (offset, SImode) != offset)
212 switch (GET_CODE (op1))
215 /* TLS symbols are not constant. */
216 if (SYMBOL_REF_TLS_MODEL (op1))
219 /* Load the external function address via the GOT slot. */
220 if (ix86_force_load_from_GOT_p (op1))
223 /* For CM_SMALL assume that latest object is 16MB before
224 end of 31bits boundary. We may also accept pretty
225 large negative constants knowing that all objects are
226 in the positive half of address space. */
227 if ((ix86_cmodel == CM_SMALL
228 || (ix86_cmodel == CM_MEDIUM
229 && !SYMBOL_REF_FAR_ADDR_P (op1)))
230 && offset < 16*1024*1024)
232 /* For CM_KERNEL we know that all object resist in the
233 negative half of 32bits address space. We may not
234 accept negative offsets, since they may be just off
235 and we may accept pretty large positive ones. */
236 if (ix86_cmodel == CM_KERNEL
242 /* These conditions are similar to SYMBOL_REF ones, just the
243 constraints for code models differ. */
244 if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
245 && offset < 16*1024*1024)
247 if (ix86_cmodel == CM_KERNEL
253 switch (XINT (op1, 1))
274 ;; Return true if VALUE can be stored in the zero extended immediate field.
275 (define_predicate "x86_64_zext_immediate_operand"
276 (match_code "const_int,symbol_ref,label_ref,const")
278 switch (GET_CODE (op))
281 return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);
284 /* TLS symbols are not constant. */
285 if (SYMBOL_REF_TLS_MODEL (op))
288 /* Load the external function address via the GOT slot. */
289 if (ix86_force_load_from_GOT_p (op))
292 /* For certain code models, the symbolic references are known to fit. */
293 return (ix86_cmodel == CM_SMALL
294 || (ix86_cmodel == CM_MEDIUM
295 && !SYMBOL_REF_FAR_ADDR_P (op)));
298 /* For certain code models, the code is near as well. */
299 return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;
302 /* We also may accept the offsetted memory references in certain
304 if (GET_CODE (XEXP (op, 0)) == PLUS)
306 rtx op1 = XEXP (XEXP (op, 0), 0);
307 rtx op2 = XEXP (XEXP (op, 0), 1);
309 if (ix86_cmodel == CM_LARGE)
311 if (!CONST_INT_P (op2))
314 HOST_WIDE_INT offset = INTVAL (op2);
315 if (trunc_int_for_mode (offset, SImode) != offset)
318 switch (GET_CODE (op1))
321 /* TLS symbols are not constant. */
322 if (SYMBOL_REF_TLS_MODEL (op1))
325 /* Load the external function address via the GOT slot. */
326 if (ix86_force_load_from_GOT_p (op1))
329 /* For small code model we may accept pretty large positive
330 offsets, since one bit is available for free. Negative
331 offsets are limited by the size of NULL pointer area
332 specified by the ABI. */
333 if ((ix86_cmodel == CM_SMALL
334 || (ix86_cmodel == CM_MEDIUM
335 && !SYMBOL_REF_FAR_ADDR_P (op1)))
336 && offset > -0x10000)
338 /* ??? For the kernel, we may accept adjustment of
339 -0x10000000, since we know that it will just convert
340 negative address space to positive, but perhaps this
341 is not worthwhile. */
345 /* These conditions are similar to SYMBOL_REF ones, just the
346 constraints for code models differ. */
347 if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
348 && offset > -0x10000)
364 ;; Return true if VALUE is a constant integer whose low and high words satisfy
365 ;; x86_64_immediate_operand.
366 (define_predicate "x86_64_hilo_int_operand"
367 (match_code "const_int,const_wide_int")
369 switch (GET_CODE (op))
372 return x86_64_immediate_operand (op, mode);
375 gcc_assert (CONST_WIDE_INT_NUNITS (op) == 2);
376 return (x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op, 0)),
378 && x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op,
387 ;; Return true if size of VALUE can be stored in a sign
388 ;; extended immediate field.
;; True on 32-bit targets unconditionally; on 64-bit only for the
;; small and kernel code models.
389 (define_predicate "x86_64_immediate_size_operand"
390 (and (match_code "symbol_ref")
391 (ior (not (match_test "TARGET_64BIT"))
392 (match_test "ix86_cmodel == CM_SMALL")
393 (match_test "ix86_cmodel == CM_KERNEL"))))
395 ;; Return true if OP is general operand representable on x86_64.
396 (define_predicate "x86_64_general_operand"
397 (if_then_else (match_test "TARGET_64BIT")
398 (ior (match_operand 0 "nonimmediate_operand")
399 (match_operand 0 "x86_64_immediate_operand"))
400 (match_operand 0 "general_operand")))
402 ;; Return true if OP's both words are general operands representable
;; on x86_64 (i.e. each word satisfies x86_64_immediate_operand).
404 (define_predicate "x86_64_hilo_general_operand"
405 (if_then_else (match_test "TARGET_64BIT")
406 (ior (match_operand 0 "nonimmediate_operand")
407 (match_operand 0 "x86_64_hilo_int_operand"))
408 (match_operand 0 "general_operand")))
410 ;; Return true if OP is non-VOIDmode general operand representable
411 ;; on x86_64. This predicate is used in sign-extending conversion
412 ;; operations that require non-VOIDmode immediate operands.
413 (define_predicate "x86_64_sext_operand"
414 (and (match_test "GET_MODE (op) != VOIDmode")
415 (match_operand 0 "x86_64_general_operand")))
417 ;; Return true if OP is non-VOIDmode general operand. This predicate
418 ;; is used in sign-extending conversion operations that require
419 ;; non-VOIDmode immediate operands.
420 (define_predicate "sext_operand"
421 (and (match_test "GET_MODE (op) != VOIDmode")
422 (match_operand 0 "general_operand")))
424 ;; Return true if OP is representable on x86_64 as zero-extended operand.
425 ;; This predicate is used in zero-extending conversion operations that
426 ;; require non-VOIDmode immediate operands.
427 (define_predicate "x86_64_zext_operand"
428 (if_then_else (match_test "TARGET_64BIT")
429 (ior (match_operand 0 "nonimmediate_operand")
430 (and (match_operand 0 "x86_64_zext_immediate_operand")
431 (match_test "GET_MODE (op) != VOIDmode")))
432 (match_operand 0 "nonimmediate_operand")))
434 ;; Return true if OP is general operand representable on x86_64
435 ;; as either sign extended or zero extended constant.
436 (define_predicate "x86_64_szext_general_operand"
437 (if_then_else (match_test "TARGET_64BIT")
438 (ior (match_operand 0 "nonimmediate_operand")
439 (match_operand 0 "x86_64_immediate_operand")
440 (match_operand 0 "x86_64_zext_immediate_operand"))
441 (match_operand 0 "general_operand")))
443 ;; Return true if OP is nonmemory operand representable on x86_64.
444 (define_predicate "x86_64_nonmemory_operand"
445 (if_then_else (match_test "TARGET_64BIT")
446 (ior (match_operand 0 "register_operand")
447 (match_operand 0 "x86_64_immediate_operand"))
448 (match_operand 0 "nonmemory_operand")))
450 ;; Return true if OP is nonmemory operand representable on x86_64
;; as either sign extended or zero extended constant.
451 (define_predicate "x86_64_szext_nonmemory_operand"
452 (if_then_else (match_test "TARGET_64BIT")
453 (ior (match_operand 0 "register_operand")
454 (match_operand 0 "x86_64_immediate_operand")
455 (match_operand 0 "x86_64_zext_immediate_operand"))
456 (match_operand 0 "nonmemory_operand")))
458 ;; Return true when operand is PIC expression that can be computed by lea
460 (define_predicate "pic_32bit_operand"
461 (match_code "const,symbol_ref,label_ref")
466 /* Rule out relocations that translate into 64bit constants. */
467 if (TARGET_64BIT && GET_CODE (op) == CONST)
470 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
472 if (GET_CODE (op) == UNSPEC
473 && (XINT (op, 1) == UNSPEC_GOTOFF
474 || XINT (op, 1) == UNSPEC_GOT))
478 return symbolic_operand (op, mode);
481 ;; Return true if OP is nonmemory operand acceptable by movabs patterns.
;; PIC expressions are rejected since movabs takes a plain 64-bit
;; absolute constant.
482 (define_predicate "x86_64_movabs_operand"
483 (and (match_operand 0 "nonmemory_operand")
484 (not (match_operand 0 "pic_32bit_operand"))))
486 ;; Return true if OP is either a symbol reference or a sum of a symbol
487 ;; reference and a constant.
488 (define_predicate "symbolic_operand"
489 (match_code "symbol_ref,label_ref,const")
491 switch (GET_CODE (op))
499 if (GET_CODE (op) == SYMBOL_REF
500 || GET_CODE (op) == LABEL_REF
501 || (GET_CODE (op) == UNSPEC
502 && (XINT (op, 1) == UNSPEC_GOT
503 || XINT (op, 1) == UNSPEC_GOTOFF
504 || XINT (op, 1) == UNSPEC_PCREL
505 || XINT (op, 1) == UNSPEC_GOTPCREL)))
507 if (GET_CODE (op) != PLUS
508 || !CONST_INT_P (XEXP (op, 1)))
512 if (GET_CODE (op) == SYMBOL_REF
513 || GET_CODE (op) == LABEL_REF)
515 /* Only @GOTOFF gets offsets. */
516 if (GET_CODE (op) != UNSPEC
517 || XINT (op, 1) != UNSPEC_GOTOFF)
520 op = XVECEXP (op, 0, 0);
521 if (GET_CODE (op) == SYMBOL_REF
522 || GET_CODE (op) == LABEL_REF)
531 ;; Return true if OP is a symbolic operand that resolves locally.
532 (define_predicate "local_symbolic_operand"
533 (match_code "const,label_ref,symbol_ref")
535 if (GET_CODE (op) == CONST
536 && GET_CODE (XEXP (op, 0)) == PLUS
537 && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
538 op = XEXP (XEXP (op, 0), 0);
540 if (GET_CODE (op) == LABEL_REF)
543 if (GET_CODE (op) != SYMBOL_REF)
546 if (SYMBOL_REF_TLS_MODEL (op))
549 /* Dll-imported symbols are always external. */
550 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
552 if (SYMBOL_REF_LOCAL_P (op))
555 /* There is, however, a not insubstantial body of code in the rest of
556 the compiler that assumes it can just stick the results of
557 ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done. */
558 /* ??? This is a hack. Should update the body of the compiler to
559 always create a DECL an invoke targetm.encode_section_info. */
560 if (strncmp (XSTR (op, 0), internal_label_prefix,
561 internal_label_prefix_len) == 0)
567 ;; Test for a legitimate @GOTOFF operand.
569 ;; VxWorks does not impose a fixed gap between segments; the run-time
570 ;; gap can be different from the object-file gap. We therefore can't
571 ;; use @GOTOFF unless we are absolutely sure that the symbol is in the
572 ;; same segment as the GOT. Unfortunately, the flexibility of linker
573 ;; scripts means that we can't be sure of that in general, so assume
574 ;; that @GOTOFF is never valid on VxWorks.
575 (define_predicate "gotoff_operand"
576 (and (not (match_test "TARGET_VXWORKS_RTP"))
577 (match_operand 0 "local_symbolic_operand")))
579 ;; Test for various thread-local symbols.
580 (define_special_predicate "tls_symbolic_operand"
581 (and (match_code "symbol_ref")
582 (match_test "SYMBOL_REF_TLS_MODEL (op)")))
;; Test for the special SYMBOL_REF used as the TLS module base.
584 (define_special_predicate "tls_modbase_operand"
585 (and (match_code "symbol_ref")
586 (match_test "op == ix86_tls_module_base ()")))
588 ;; Test for a pc-relative call operand
589 (define_predicate "constant_call_address_operand"
590 (match_code "symbol_ref")
592 if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
594 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
599 ;; P6 processors will jump to the address after the decrement when %esp
600 ;; is used as a call operand, so they will execute return address as a code.
601 ;; See Pentium Pro errata 70, Pentium 2 errata A33 and Pentium 3 errata E17.
603 (define_predicate "call_register_no_elim_operand"
604 (match_operand 0 "register_operand")
607 op = SUBREG_REG (op);
609 if (!TARGET_64BIT && op == stack_pointer_rtx)
612 return register_no_elim_operand (op, mode);
615 ;; True for any non-virtual or eliminable register. Used in places where
616 ;; instantiation of such a register may cause the pattern to not be recognized.
617 (define_predicate "register_no_elim_operand"
618 (match_operand 0 "register_operand")
621 op = SUBREG_REG (op);
622 return !(op == arg_pointer_rtx
623 || op == frame_pointer_rtx
624 || IN_RANGE (REGNO (op),
625 FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
628 ;; Similarly, but include the stack pointer. This is used to prevent esp
629 ;; from being used as an index reg.
630 (define_predicate "index_register_operand"
631 (match_operand 0 "register_operand")
634 op = SUBREG_REG (op);
635 if (reload_completed)
636 return REG_OK_FOR_INDEX_STRICT_P (op);
638 return REG_OK_FOR_INDEX_NONSTRICT_P (op);
641 ;; Return false if this is any eliminable register. Otherwise general_operand.
642 (define_predicate "general_no_elim_operand"
643 (if_then_else (match_code "reg,subreg")
644 (match_operand 0 "register_no_elim_operand")
645 (match_operand 0 "general_operand")))
647 ;; Return false if this is any eliminable register. Otherwise
648 ;; register_operand or a constant.
649 (define_predicate "nonmemory_no_elim_operand"
650 (ior (match_operand 0 "register_no_elim_operand")
651 (match_operand 0 "immediate_operand")))
653 ;; Test for a valid operand for indirect branch.
654 (define_predicate "indirect_branch_operand"
655 (ior (match_operand 0 "register_operand")
656 (and (not (match_test "TARGET_X32"))
657 (match_operand 0 "memory_operand"))))
659 ;; Return true if OP is a memory operands that can be used in sibcalls.
660 ;; Since sibcall never returns, we can only use call-clobbered register
661 ;; as GOT base. Allow GOT slot here only with pseudo register as GOT
662 ;; base. Properly handle sibcall over GOT slot with *sibcall_GOT_32
663 ;; and *sibcall_value_GOT_32 patterns.
664 (define_predicate "sibcall_memory_operand"
665 (match_operand 0 "memory_operand")
670 if (GET_CODE (op) == PLUS && REG_P (XEXP (op, 0)))
672 int regno = REGNO (XEXP (op, 0));
673 if (!HARD_REGISTER_NUM_P (regno) || call_used_regs[regno])
676 if (GOT32_symbol_operand (op, VOIDmode))
683 ;; Return true if OP is a GOT memory operand.
684 (define_predicate "GOT_memory_operand"
685 (match_operand 0 "memory_operand")
688 return (GET_CODE (op) == CONST
689 && GET_CODE (XEXP (op, 0)) == UNSPEC
690 && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL);
693 ;; Test for a valid operand for a call instruction.
694 ;; Allow constant call address operands in Pmode only.
;; On x32 (TARGET_X32) plain memory call operands are rejected, but a
;; GOT slot is allowed when Pmode is DImode.
695 (define_special_predicate "call_insn_operand"
696 (ior (match_test "constant_call_address_operand
697 (op, mode == VOIDmode ? mode : Pmode)")
698 (match_operand 0 "call_register_no_elim_operand")
699 (ior (and (not (match_test "TARGET_X32"))
700 (match_operand 0 "memory_operand"))
701 (and (match_test "TARGET_X32 && Pmode == DImode")
702 (match_operand 0 "GOT_memory_operand")))))
704 ;; Similarly, but for tail calls, in which we cannot allow memory references.
;; Note the register alternative uses register_no_elim_operand (not the
;; call_* variant) and memory is restricted to sibcall_memory_operand.
705 (define_special_predicate "sibcall_insn_operand"
706 (ior (match_test "constant_call_address_operand
707 (op, mode == VOIDmode ? mode : Pmode)")
708 (match_operand 0 "register_no_elim_operand")
709 (ior (and (not (match_test "TARGET_X32"))
710 (match_operand 0 "sibcall_memory_operand"))
711 (and (match_test "TARGET_X32 && Pmode == DImode")
712 (match_operand 0 "GOT_memory_operand")))))
714 ;; Return true if OP is a 32-bit GOT symbol operand,
;; i.e. (const (unspec ... UNSPEC_GOT)).
715 (define_predicate "GOT32_symbol_operand"
716 (match_test "GET_CODE (op) == CONST
717 && GET_CODE (XEXP (op, 0)) == UNSPEC
718 && XINT (XEXP (op, 0), 1) == UNSPEC_GOT"))
720 ;; Match exactly zero.
721 (define_predicate "const0_operand"
722 (match_code "const_int,const_double,const_vector")
724 if (mode == VOIDmode)
725 mode = GET_MODE (op);
726 return op == CONST0_RTX (mode);
729 ;; Match one or a vector with all elements equal to one.
730 (define_predicate "const1_operand"
731 (match_code "const_int,const_double,const_vector")
733 if (mode == VOIDmode)
734 mode = GET_MODE (op);
735 return op == CONST1_RTX (mode);
;; Match exactly -1.
739 (define_predicate "constm1_operand"
740 (and (match_code "const_int")
741 (match_test "op == constm1_rtx")))
743 ;; Match exactly eight.
744 (define_predicate "const8_operand"
745 (and (match_code "const_int")
746 (match_test "INTVAL (op) == 8")))
748 ;; Match exactly 128.
749 (define_predicate "const128_operand"
750 (and (match_code "const_int")
751 (match_test "INTVAL (op) == 128")))
753 ;; Match exactly 0x0FFFFFFFF in anddi as a zero-extension operation
;; (the trunc_int_for_mode call canonicalizes the constant first).
754 (define_predicate "const_32bit_mask"
755 (and (match_code "const_int")
756 (match_test "trunc_int_for_mode (INTVAL (op), DImode)
757 == (HOST_WIDE_INT) 0xffffffff")))
759 ;; Match 2, 4, or 8. Used for leal multiplicands.
760 (define_predicate "const248_operand"
761 (match_code "const_int")
763 HOST_WIDE_INT i = INTVAL (op);
764 return i == 2 || i == 4 || i == 8;
767 ;; Match 2, 3, 6, or 7
768 (define_predicate "const2367_operand"
769 (match_code "const_int")
771 HOST_WIDE_INT i = INTVAL (op);
772 return i == 2 || i == 3 || i == 6 || i == 7;
775 ;; Match 1, 2, 4, or 8
776 (define_predicate "const1248_operand"
777 (match_code "const_int")
779 HOST_WIDE_INT i = INTVAL (op);
780 return i == 1 || i == 2 || i == 4 || i == 8;
783 ;; Match 3, 5, or 9. Used for leal multiplicands.
784 (define_predicate "const359_operand"
785 (match_code "const_int")
787 HOST_WIDE_INT i = INTVAL (op);
788 return i == 3 || i == 5 || i == 9;
791 ;; Match 4 or 8 to 11. Used for embeded rounding.
792 (define_predicate "const_4_or_8_to_11_operand"
793 (match_code "const_int")
795 HOST_WIDE_INT i = INTVAL (op);
796 return i == 4 || (i >= 8 && i <= 11);
799 ;; Match 4 or 8. Used for SAE.
800 (define_predicate "const48_operand"
801 (match_code "const_int")
803 HOST_WIDE_INT i = INTVAL (op);
804 return i == 4 || i == 8;
;; Small immediate-range predicates: each matches a CONST_INT in the
;; inclusive range named by the predicate.

;; Match 0 or 1.
808 (define_predicate "const_0_to_1_operand"
809 (and (match_code "const_int")
810 (ior (match_test "op == const0_rtx")
811 (match_test "op == const1_rtx"))))
;; Match 0 to 3.
814 (define_predicate "const_0_to_3_operand"
815 (and (match_code "const_int")
816 (match_test "IN_RANGE (INTVAL (op), 0, 3)")))
;; Match 0 to 4.
819 (define_predicate "const_0_to_4_operand"
820 (and (match_code "const_int")
821 (match_test "IN_RANGE (INTVAL (op), 0, 4)")))
;; Match 0 to 5.
824 (define_predicate "const_0_to_5_operand"
825 (and (match_code "const_int")
826 (match_test "IN_RANGE (INTVAL (op), 0, 5)")))
;; Match 0 to 7.
829 (define_predicate "const_0_to_7_operand"
830 (and (match_code "const_int")
831 (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
;; Match 0 to 15.
834 (define_predicate "const_0_to_15_operand"
835 (and (match_code "const_int")
836 (match_test "IN_RANGE (INTVAL (op), 0, 15)")))
;; Match 0 to 31.
839 (define_predicate "const_0_to_31_operand"
840 (and (match_code "const_int")
841 (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
;; Match 0 to 63.
844 (define_predicate "const_0_to_63_operand"
845 (and (match_code "const_int")
846 (match_test "IN_RANGE (INTVAL (op), 0, 63)")))
;; Match 0 to 255.
849 (define_predicate "const_0_to_255_operand"
850 (and (match_code "const_int")
851 (match_test "IN_RANGE (INTVAL (op), 0, 255)")))
853 ;; Match (0 to 255) * 8
854 (define_predicate "const_0_to_255_mul_8_operand"
855 (match_code "const_int")
857 unsigned HOST_WIDE_INT val = INTVAL (op);
858 return val <= 255*8 && val % 8 == 0;
861 ;; Return true if OP is CONST_INT >= 1 and <= 31 (a valid operand
862 ;; for shift & compare patterns, as shifting by 0 does not change flags).
863 (define_predicate "const_1_to_31_operand"
864 (and (match_code "const_int")
865 (match_test "IN_RANGE (INTVAL (op), 1, 31)")))
867 ;; Return true if OP is CONST_INT >= 1 and <= 63 (a valid operand
868 ;; for 64bit shift & compare patterns, as shifting by 0 does not change flags).
869 (define_predicate "const_1_to_63_operand"
870 (and (match_code "const_int")
871 (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; The following range predicates each match a CONST_INT in the
;; inclusive range named by the predicate (used mostly for vector
;; element selection immediates).

;; Match 2 to 3.
874 (define_predicate "const_2_to_3_operand"
875 (and (match_code "const_int")
876 (match_test "IN_RANGE (INTVAL (op), 2, 3)")))
;; Match 4 to 5.
879 (define_predicate "const_4_to_5_operand"
880 (and (match_code "const_int")
881 (match_test "IN_RANGE (INTVAL (op), 4, 5)")))
;; Match 4 to 7.
884 (define_predicate "const_4_to_7_operand"
885 (and (match_code "const_int")
886 (match_test "IN_RANGE (INTVAL (op), 4, 7)")))
;; Match 6 to 7.
889 (define_predicate "const_6_to_7_operand"
890 (and (match_code "const_int")
891 (match_test "IN_RANGE (INTVAL (op), 6, 7)")))
;; Match 8 to 9.
894 (define_predicate "const_8_to_9_operand"
895 (and (match_code "const_int")
896 (match_test "IN_RANGE (INTVAL (op), 8, 9)")))
;; Match 8 to 11.
899 (define_predicate "const_8_to_11_operand"
900 (and (match_code "const_int")
901 (match_test "IN_RANGE (INTVAL (op), 8, 11)")))
;; Match 8 to 15.
904 (define_predicate "const_8_to_15_operand"
905 (and (match_code "const_int")
906 (match_test "IN_RANGE (INTVAL (op), 8, 15)")))
;; Match 10 to 11.
909 (define_predicate "const_10_to_11_operand"
910 (and (match_code "const_int")
911 (match_test "IN_RANGE (INTVAL (op), 10, 11)")))
;; Match 12 to 13.
914 (define_predicate "const_12_to_13_operand"
915 (and (match_code "const_int")
916 (match_test "IN_RANGE (INTVAL (op), 12, 13)")))
;; Match 12 to 15.
919 (define_predicate "const_12_to_15_operand"
920 (and (match_code "const_int")
921 (match_test "IN_RANGE (INTVAL (op), 12, 15)")))
;; Match 14 to 15.
924 (define_predicate "const_14_to_15_operand"
925 (and (match_code "const_int")
926 (match_test "IN_RANGE (INTVAL (op), 14, 15)")))
;; Match 16 to 19.
929 (define_predicate "const_16_to_19_operand"
930 (and (match_code "const_int")
931 (match_test "IN_RANGE (INTVAL (op), 16, 19)")))
;; Match 16 to 31.
934 (define_predicate "const_16_to_31_operand"
935 (and (match_code "const_int")
936 (match_test "IN_RANGE (INTVAL (op), 16, 31)")))
;; Match 20 to 23.
939 (define_predicate "const_20_to_23_operand"
940 (and (match_code "const_int")
941 (match_test "IN_RANGE (INTVAL (op), 20, 23)")))
;; Match 24 to 27.
944 (define_predicate "const_24_to_27_operand"
945 (and (match_code "const_int")
946 (match_test "IN_RANGE (INTVAL (op), 24, 27)")))
;; Match 28 to 31.
949 (define_predicate "const_28_to_31_operand"
950 (and (match_code "const_int")
951 (match_test "IN_RANGE (INTVAL (op), 28, 31)")))
953 ;; True if this is a constant appropriate for an increment or decrement.
954 (define_predicate "incdec_operand"
955 (match_code "const_int")
957 /* On Pentium4, the inc and dec operations causes extra dependency on flag
958 registers, since carry flag is not set. */
959 if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
961 return op == const1_rtx || op == constm1_rtx;
964 ;; True for registers, or 1 or -1. Used to optimize double-word shifts.
965 (define_predicate "reg_or_pm1_operand"
966 (ior (match_operand 0 "register_operand")
967 (and (match_code "const_int")
968 (ior (match_test "op == const1_rtx")
969 (match_test "op == constm1_rtx")))))
971 ;; True if OP is acceptable as operand of DImode shift expander.
972 (define_predicate "shiftdi_operand"
973 (if_then_else (match_test "TARGET_64BIT")
974 (match_operand 0 "nonimmediate_operand")
975 (match_operand 0 "register_operand")))
;; True if OP is acceptable as input of the DImode ashl expander:
;; on 32-bit targets registers plus the 1/-1 constants are allowed.
977 (define_predicate "ashldi_input_operand"
978 (if_then_else (match_test "TARGET_64BIT")
979 (match_operand 0 "nonimmediate_operand")
980 (match_operand 0 "reg_or_pm1_operand")))
982 ;; Return true if OP is a vector load from the constant pool with just
983 ;; the first element nonzero.
984 (define_predicate "zero_extended_scalar_load_operand"
988 op = maybe_get_pool_constant (op);
990 if (!(op && GET_CODE (op) == CONST_VECTOR))
993 n_elts = CONST_VECTOR_NUNITS (op);
995 for (n_elts--; n_elts > 0; n_elts--)
997 rtx elt = CONST_VECTOR_ELT (op, n_elts);
998 if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
1004 /* Return true if operand is a vector constant that is all ones. */
1005 (define_predicate "vector_all_ones_operand"
1006 (and (match_code "const_vector")
1007 (match_test "INTEGRAL_MODE_P (GET_MODE (op))")
1008 (match_test "op == CONSTM1_RTX (GET_MODE (op))")))
1010 ; Return true when OP is operand acceptable for vector memory operand.
1011 ; Only AVX can have misaligned memory operand.
1012 (define_predicate "vector_memory_operand"
1013 (and (match_operand 0 "memory_operand")
1014 (ior (match_test "TARGET_AVX")
1015 (match_test "MEM_ALIGN (op) >= GET_MODE_ALIGNMENT (mode)"))))
1017 ; Return true when OP is register_operand or vector_memory_operand.
1018 (define_predicate "vector_operand"
1019 (ior (match_operand 0 "register_operand")
1020 (match_operand 0 "vector_memory_operand")))
1022 ; Return true when OP is operand acceptable for standard SSE move.
; (Nonimmediate, or the all-zeros constant which has a standard encoding.)
1023 (define_predicate "vector_move_operand"
1024 (ior (match_operand 0 "nonimmediate_operand")
1025 (match_operand 0 "const0_operand")))
1027 ;; Return true when OP is either nonimmediate operand, or any
;; constant vector operand.
1029 (define_predicate "nonimmediate_or_const_vector_operand"
1030 (ior (match_operand 0 "nonimmediate_operand")
1031 (match_code "const_vector")))
1033 ;; Return true when OP is nonimmediate or standard SSE constant.
1034 (define_predicate "nonimmediate_or_sse_const_operand"
1035 (ior (match_operand 0 "nonimmediate_operand")
1036 (match_test "standard_sse_constant_p (op, mode)")))
1038 ;; Return true if OP is a register or a zero.
1039 (define_predicate "reg_or_0_operand"
1040 (ior (match_operand 0 "register_operand")
1041 (match_operand 0 "const0_operand")))
1043 ;; Return true for RTX codes that force SImode address.
1044 (define_predicate "SImode_address_operand"
1045 (match_code "subreg,zero_extend,and"))
1047 ;; Return true if op if a valid address for LEA, and does not contain
1048 ;; a segment override. Defined as a special predicate to allow
1049 ;; mode-less const_int operands pass to address_operand.
1050 (define_special_predicate "address_no_seg_operand"
1051 (match_test "address_operand (op, VOIDmode)")
1053 struct ix86_address parts;
1056 if (!CONST_INT_P (op)
1058 && GET_MODE (op) != mode)
1061 ok = ix86_decompose_address (op, &parts);
1063 return parts.seg == ADDR_SPACE_GENERIC;
1066 ;; Return true if op is a valid base register, displacement or
1067 ;; sum of base register and displacement for VSIB addressing.
;; VSIB (vector SIB) addresses supply the index from a vector register,
;; so a scalar index or a segment override is rejected below; the
;; RIP-relative form is rejected because VSIB cannot encode (%rip).
;; NOTE(review): interior lines are elided in this copy (original
;; numbering is non-contiguous; braces, `disp` initialization and the
;; final returns are missing) — restore from upstream predicates.md.
1068 (define_predicate "vsib_address_operand"
1069 (match_test "address_operand (op, VOIDmode)")
1071 struct ix86_address parts;
1075 ok = ix86_decompose_address (op, &parts);
1077 if (parts.index || parts.seg != ADDR_SPACE_GENERIC)
1080 /* VSIB addressing doesn't support (%rip). */
1084 if (GET_CODE (disp) == CONST)
1086 disp = XEXP (disp, 0);
1087 if (GET_CODE (disp) == PLUS)
1088 disp = XEXP (disp, 0);
1089 if (GET_CODE (disp) == UNSPEC)
1090 switch (XINT (disp, 1))
1092 case UNSPEC_GOTPCREL:
1094 case UNSPEC_GOTNTPOFF:
1100 && (GET_CODE (disp) == SYMBOL_REF
1101 || GET_CODE (disp) == LABEL_REF))
1108 ;; Return true if op is valid MPX address operand without base
;; NOTE(review): the comment's continuation (original line 1109's pair)
;; is truncated here — presumably "...without base register"; and interior
;; C-body lines (braces, returns) are elided in this copy. Restore from
;; upstream predicates.md. The visible logic rejects base+index together,
;; non-generic segments, and PIC symbolic displacements other than the
;; DTPOFF/NTPOFF TLS unspec forms.
1109 (define_predicate "address_mpx_no_base_operand"
1110 (match_test "address_operand (op, VOIDmode)")
1112 struct ix86_address parts;
1115 ok = ix86_decompose_address (op, &parts);
1118 if (parts.index && parts.base)
1121 if (parts.seg != ADDR_SPACE_GENERIC)
1124 /* Do not support (%rip). */
1125 if (parts.disp && flag_pic && TARGET_64BIT
1126 && SYMBOLIC_CONST (parts.disp))
1128 if (GET_CODE (parts.disp) != CONST
1129 || GET_CODE (XEXP (parts.disp, 0)) != PLUS
1130 || GET_CODE (XEXP (XEXP (parts.disp, 0), 0)) != UNSPEC
1131 || !CONST_INT_P (XEXP (XEXP (parts.disp, 0), 1))
1132 || (XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_DTPOFF
1133 && XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_NTPOFF))
1140 ;; Return true if op is valid MPX address operand without index
;; Mirrors address_mpx_no_base_operand above, but rejects an index
;; instead of a base/index combination.
;; NOTE(review): interior C-body lines (braces, the index check around
;; original lines 1148-1152, and the returns) are elided in this copy —
;; restore from upstream predicates.md before building.
1141 (define_predicate "address_mpx_no_index_operand"
1142 (match_test "address_operand (op, VOIDmode)")
1144 struct ix86_address parts;
1147 ok = ix86_decompose_address (op, &parts);
1153 if (parts.seg != ADDR_SPACE_GENERIC)
1156 /* Do not support (%rip). */
1157 if (parts.disp && flag_pic && TARGET_64BIT
1158 && SYMBOLIC_CONST (parts.disp)
1159 && (GET_CODE (parts.disp) != CONST
1160 || GET_CODE (XEXP (parts.disp, 0)) != PLUS
1161 || GET_CODE (XEXP (XEXP (parts.disp, 0), 0)) != UNSPEC
1162 || !CONST_INT_P (XEXP (XEXP (parts.disp, 0), 1))
1163 || (XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_DTPOFF
1164 && XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_NTPOFF)))
;; NOTE(review): the bodies of both predicates below (original lines
;; 1171-1172 and 1174) are elided in this copy — each should carry a
;; match_operator/match_code expression. Restore from upstream
;; gcc/config/i386/predicates.md; as written these forms are unterminated.
1170 (define_predicate "vsib_mem_operator"
1173 (define_predicate "bnd_mem_operator"
1176 ;; Return true if the rtx is known to be at least 32 bits aligned.
;; Strategy visible below: trust registers/immediates, use MEM_ALIGN when
;; recorded, otherwise decompose the address and require every component
;; (base, scaled index, displacement) to preserve 32-bit alignment.
;; NOTE(review): interior lines are elided in this copy (original
;; numbering is non-contiguous; braces, early `return true/false`
;; statements and the gcc_assert on `ok` are missing) — restore from
;; upstream predicates.md before building.
1177 (define_predicate "aligned_operand"
1178 (match_operand 0 "general_operand")
1180 struct ix86_address parts;
1183 /* Registers and immediate operands are always "aligned". */
1187 /* All patterns using aligned_operand on memory operands ends up
1188 in promoting memory operand to 64bit and thus causing memory mismatch. */
1189 if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
1192 /* Don't even try to do any aligned optimizations with volatiles. */
1193 if (MEM_VOLATILE_P (op))
1196 if (MEM_ALIGN (op) >= 32)
1201 /* Pushes and pops are only valid on the stack pointer. */
1202 if (GET_CODE (op) == PRE_DEC
1203 || GET_CODE (op) == POST_INC)
1206 /* Decode the address. */
1207 ok = ix86_decompose_address (op, &parts);
1210 if (parts.base && SUBREG_P (parts.base))
1211 parts.base = SUBREG_REG (parts.base);
1212 if (parts.index && SUBREG_P (parts.index))
1213 parts.index = SUBREG_REG (parts.index);
1215 /* Look for some component that isn't known to be aligned. */
1218 if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
1223 if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
1228 if (!CONST_INT_P (parts.disp)
1229 || (INTVAL (parts.disp) & 3))
1233 /* Didn't find one -- this must be an aligned address. */
1237 ;; Return true if OP is memory operand with a displacement.
;; Decomposes the MEM's address and checks that a displacement part
;; was found.
;; NOTE(review): the body's braces and the assert on `ok` (original lines
;; 1240, 1242-43, 1245, 1247) are elided in this copy — restore from
;; upstream predicates.md.
1238 (define_predicate "memory_displacement_operand"
1239 (match_operand 0 "memory_operand")
1241 struct ix86_address parts;
1244 ok = ix86_decompose_address (XEXP (op, 0), &parts);
1246 return parts.disp != NULL_RTX;
1249 ;; Return true if OP is memory operand with a displacement only.
;; Like memory_displacement_operand above, but additionally rejects any
;; address that also has a base or index register.
;; NOTE(review): interior lines (braces, a TARGET_64BIT check around
;; original lines 1254-1258, the assert on `ok`) are elided in this
;; copy — restore from upstream predicates.md.
1250 (define_predicate "memory_displacement_only_operand"
1251 (match_operand 0 "memory_operand")
1253 struct ix86_address parts;
1259 ok = ix86_decompose_address (XEXP (op, 0), &parts);
1262 if (parts.base || parts.index)
1265 return parts.disp != NULL_RTX;
1268 ;; Return true if OP is memory operand that cannot be represented
1269 ;; by the modRM array.
;; memory_address_length returning nonzero (extra address bytes needed)
;; is what classifies the operand as "long" here.
1270 (define_predicate "long_memory_operand"
1271 (and (match_operand 0 "memory_operand")
1272 (match_test "memory_address_length (op, false)")))
1274 ;; Return true if OP is a comparison operator that can be issued by fcmov.
;; FP comparisons are first mapped to integer condition codes via
;; ix86_fp_compare_code_to_integer, then checked against the limited
;; set of conditions fcmov supports.
;; NOTE(review): interior lines are elided in this copy (braces, the
;; switch head, `return true/false` statements and remaining cases are
;; missing) — restore from upstream predicates.md before building.
1275 (define_predicate "fcmov_comparison_operator"
1276 (match_operand 0 "comparison_operator")
1278 machine_mode inmode = GET_MODE (XEXP (op, 0));
1279 enum rtx_code code = GET_CODE (op);
1281 if (inmode == CCFPmode || inmode == CCFPUmode)
1283 if (!ix86_trivial_fp_comparison_operator (op, mode))
1285 code = ix86_fp_compare_code_to_integer (code);
1287 /* i387 supports just limited amount of conditional codes. */
1290 case LTU: case GTU: case LEU: case GEU:
1291 if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode
1292 || inmode == CCCmode)
1295 case ORDERED: case UNORDERED:
1303 ;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS insns.
1304 ;; The first set are supported directly; the second set can't be done with
1305 ;; full IEEE support, i.e. NaNs.
;; The second match_code set is only accepted when AVX is enabled.
1307 (define_predicate "sse_comparison_operator"
1308 (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
1309 (and (match_test "TARGET_AVX")
1310 (match_code "ge,gt,uneq,unle,unlt,ltgt"))))
;; True for equality and signed integer comparison codes.
1312 (define_predicate "ix86_comparison_int_operator"
1313 (match_code "ne,eq,ge,gt,le,lt"))
;; True for equality and unsigned integer comparison codes.
1315 (define_predicate "ix86_comparison_uns_operator"
1316 (match_code "ne,eq,geu,gtu,leu,ltu"))
;; True for the equality codes usable with bit-test (bt) patterns.
1318 (define_predicate "bt_comparison_operator"
1319 (match_code "ne,eq"))
1321 ;; Return true if OP is a valid comparison operator in valid mode.
;; Validity depends on the flags mode of the comparison's first operand:
;; FP flag modes defer to ix86_trivial_fp_comparison_operator; integer
;; flag modes (CC, CCGC, CCGOC, CCNO, CCC...) each permit a subset of codes.
;; NOTE(review): interior lines are elided in this copy (the switch head,
;; EQ/NE and sign-flag cases, returns, and closing braces are missing) —
;; restore from upstream predicates.md before building.
1322 (define_predicate "ix86_comparison_operator"
1323 (match_operand 0 "comparison_operator")
1325 machine_mode inmode = GET_MODE (XEXP (op, 0));
1326 enum rtx_code code = GET_CODE (op);
1328 if (inmode == CCFPmode || inmode == CCFPUmode)
1329 return ix86_trivial_fp_comparison_operator (op, mode);
1336 if (inmode == CCmode || inmode == CCGCmode
1337 || inmode == CCGOCmode || inmode == CCNOmode)
1340 case LTU: case GTU: case LEU: case GEU:
1341 if (inmode == CCmode || inmode == CCCmode)
1344 case ORDERED: case UNORDERED:
1345 if (inmode == CCmode)
1349 if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
1357 ;; Return true if OP is a valid comparison operator
1358 ;; testing carry flag to be set.
;; FP comparisons are translated to integer codes first; in CCCmode only
;; LTU/GTU test the carry flag, and any mode other than CCmode is rejected.
;; NOTE(review): interior lines are elided in this copy (braces, returns,
;; and the final code check after the CCmode test are missing) — restore
;; from upstream predicates.md before building.
1359 (define_predicate "ix86_carry_flag_operator"
1360 (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
1362 machine_mode inmode = GET_MODE (XEXP (op, 0));
1363 enum rtx_code code = GET_CODE (op);
1365 if (inmode == CCFPmode || inmode == CCFPUmode)
1367 if (!ix86_trivial_fp_comparison_operator (op, mode))
1369 code = ix86_fp_compare_code_to_integer (code);
1371 else if (inmode == CCCmode)
1372 return code == LTU || code == GTU;
1373 else if (inmode != CCmode)
1379 ;; Return true if this comparison only requires testing one flag bit.
;; Pure code match — no mode checks.
1380 (define_predicate "ix86_trivial_fp_comparison_operator"
1381 (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))
1383 ;; Return true if we know how to do this comparison. Others require
1384 ;; testing more than one flag bit, and we let the generic middle-end
;; NOTE(review): the comment's final line (original 1385) is elided in
;; this copy — presumably "...code handle it"; confirm against upstream.
;; When the chosen strategy is IX86_FPCMP_ARITH any comparison code is
;; acceptable; otherwise only the trivial single-flag set is.
1386 (define_predicate "ix86_fp_comparison_operator"
1387 (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
1388 == IX86_FPCMP_ARITH")
1389 (match_operand 0 "comparison_operator")
1390 (match_operand 0 "ix86_trivial_fp_comparison_operator")))
1392 ;; Same as above, but for swapped comparison used in *jcc<fp>_<int>_i387.
;; Temporarily swaps the comparison code in place, queries
;; ix86_fp_comparison_operator, then restores the original code.
;; NOTE(review): the body's braces, `bool ret;` declaration and the final
;; `return ret;` (original lines 1395, 1397-98, 1402-04) are elided in
;; this copy — restore from upstream predicates.md.
1393 (define_predicate "ix86_swapped_fp_comparison_operator"
1394 (match_operand 0 "comparison_operator")
1396 enum rtx_code code = GET_CODE (op);
1399 PUT_CODE (op, swap_condition (code));
1400 ret = ix86_fp_comparison_operator (op, mode);
1401 PUT_CODE (op, code);
1405 ;; Nearly general operand, but accept any const_double, since we wish
1406 ;; to be able to drop them into memory rather than have them get pulled
;; NOTE(review): the comment's last line (original 1407) is elided in
;; this copy — presumably "...into registers"; confirm against upstream.
1408 (define_predicate "cmp_fp_expander_operand"
1409 (ior (match_code "const_double")
1410 (match_operand 0 "general_operand")))
1412 ;; Return true if this is a valid binary floating-point operation.
1413 (define_predicate "binary_fp_operator"
1414 (match_code "plus,minus,mult,div"))
1416 ;; Return true if this is a multiply operation.
1417 (define_predicate "mult_operator"
1418 (match_code "mult"))
1420 ;; Return true if this is a division operation.
;; NOTE(review): div_operator's body (original line 1422, presumably
;; `(match_code "div")`) is elided in this copy — as written the form
;; below is unterminated; restore from upstream predicates.md.
1421 (define_predicate "div_operator"
1424 ;; Return true if this is a plus, minus, and, ior or xor operation.
1425 (define_predicate "plusminuslogic_operator"
1426 (match_code "plus,minus,and,ior,xor"))
1428 ;; Return true if this is a float extend operation.
1429 (define_predicate "float_operator"
1430 (match_code "float"))
1432 ;; Return true for ARITHMETIC_P.
1433 (define_predicate "arith_or_logical_operator"
1434 (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
1435 mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))
1437 ;; Return true for COMMUTATIVE_P.
1438 (define_predicate "commutative_operator"
1439 (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))
1441 ;; Return true if OP is a binary operator that can be promoted to wider mode.
;; mult is included only when the tuning flag says HImode imul should be
;; promoted.
1442 (define_predicate "promotable_binary_operator"
1443 (ior (match_code "plus,minus,and,ior,xor,ashift")
1444 (and (match_code "mult")
1445 (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))
;; True for a COMPARE rtx.
1447 (define_predicate "compare_operator"
1448 (match_code "compare"))
;; True for absolute-value or negation operations.
1450 (define_predicate "absneg_operator"
1451 (match_code "abs,neg"))
1453 ;; Return true if OP is a memory operand, aligned to
1454 ;; less than its natural alignment.
;; Compares the recorded MEM_ALIGN against the bit size of the
;; predicate's mode.
1455 (define_predicate "misaligned_operand"
1456 (and (match_code "mem")
1457 (match_test "MEM_ALIGN (op) < GET_MODE_BITSIZE (mode)")))
1459 ;; Return true if OP is a emms operation, known to be a PARALLEL.
;; Expects 17 vector elements: one head element plus clobbers of the
;; eight XFmode stack registers (slots 1..8) and the eight DImode MMX
;; registers (slots 9..16).
;; NOTE(review): interior lines are elided in this copy (braces, the
;; unsigned i declaration, returns) — restore from upstream predicates.md.
1460 (define_predicate "emms_operation"
1461 (match_code "parallel")
1465 if (XVECLEN (op, 0) != 17)
1468 for (i = 0; i < 8; i++)
1470 rtx elt = XVECEXP (op, 0, i+1);
1472 if (GET_CODE (elt) != CLOBBER
1473 || GET_CODE (SET_DEST (elt)) != REG
1474 || GET_MODE (SET_DEST (elt)) != XFmode
1475 || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
1478 elt = XVECEXP (op, 0, i+9);
1480 if (GET_CODE (elt) != CLOBBER
1481 || GET_CODE (SET_DEST (elt)) != REG
1482 || GET_MODE (SET_DEST (elt)) != DImode
1483 || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
1489 ;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
;; Expects 1 + nregs elements (nregs = 16 on 64-bit, else 8), each of
;; slots 1..nregs being a SET of the corresponding SSE register to the
;; V8SImode zero constant.
;; NOTE(review): interior lines are elided in this copy (braces, returns)
;; — restore from upstream predicates.md before building.
1490 (define_predicate "vzeroall_operation"
1491 (match_code "parallel")
1493 unsigned i, nregs = TARGET_64BIT ? 16 : 8;
1495 if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
1498 for (i = 0; i < nregs; i++)
1500 rtx elt = XVECEXP (op, 0, i+1);
1502 if (GET_CODE (elt) != SET
1503 || GET_CODE (SET_DEST (elt)) != REG
1504 || GET_MODE (SET_DEST (elt)) != V8SImode
1505 || REGNO (SET_DEST (elt)) != SSE_REGNO (i)
1506 || SET_SRC (elt) != CONST0_RTX (V8SImode))
1512 ;; Return true if OP is a vzeroupper operation.
;; Matches the UNSPECV_VZEROUPPER volatile unspec.
1513 (define_predicate "vzeroupper_operation"
1514 (and (match_code "unspec_volatile")
1515 (match_test "XINT (op, 1) == UNSPECV_VZEROUPPER")))
1517 ;; Return true if OP is an addsub vec_merge operation
;; Checks that the two merged operands are a PLUS/MINUS pair (in either
;; order, tracked by `swapped`) and that the merge mask alternates
;; between them element by element, as ADDSUB requires.
;; NOTE(review): interior lines are elided in this copy (op0/op1
;; extraction, swapped assignment, mask-width guard, braces and returns)
;; — restore from upstream predicates.md before building.
1518 (define_predicate "addsub_vm_operator"
1519 (match_code "vec_merge")
1530 if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
1532 else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
1537 mask = INTVAL (XEXP (op, 2));
1538 nunits = GET_MODE_NUNITS (mode);
1540 for (elt = 0; elt < nunits; elt++)
1542 /* bit clear: take from op0, set: take from op1 */
1543 int bit = !(mask & (HOST_WIDE_INT_1U << elt));
1545 if (bit != ((elt & 1) ^ swapped))
1552 ;; Return true if OP is an addsub vec_select/vec_concat operation
;; The vec_select must pick from a vec_concat of a PLUS/MINUS pair; only
;; the first element of the selection parallel is inspected, since the
;; parallel itself is validated by addsub_vs_parallel (below in upstream).
;; NOTE(review): interior lines are elided in this copy (declarations,
;; swapped assignment, braces, returns) — restore from upstream
;; predicates.md before building.
1553 (define_predicate "addsub_vs_operator"
1554 (and (match_code "vec_select")
1555 (match_code "vec_concat" "0"))
1561 op0 = XEXP (XEXP (op, 0), 0);
1562 op1 = XEXP (XEXP (op, 0), 1);
1565 if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
1567 else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
1572 nunits = GET_MODE_NUNITS (mode);
1573 if (XVECLEN (XEXP (op, 1), 0) != nunits)
1576 /* We already checked that permutation is suitable for addsub,
1577 so only look at the first element of the parallel. */
1578 elt = INTVAL (XVECEXP (XEXP (op, 1), 0, 0));
1580 return elt == (swapped ? nunits : 0);
1583 ;; Return true if OP is a parallel for an addsub vec_select.
;; The first element must be 0 or nelt; subsequent elements must follow
;; the alternating even/odd pattern shown in the example comment below.
;; NOTE(review): interior lines are elided in this copy (the `elt`/`i`
;; declarations, the elt == 0 branch head, braces and returns) — restore
;; from upstream predicates.md before building.
1584 (define_predicate "addsub_vs_parallel"
1585 (and (match_code "parallel")
1586 (match_code "const_int" "a"))
1588 int nelt = XVECLEN (op, 0);
1594 /* Check that the permutation is suitable for addsub.
1595 For example, { 0 9 2 11 4 13 6 15 } or { 8 1 10 3 12 5 14 7 }. */
1596 elt = INTVAL (XVECEXP (op, 0, 0));
1599 for (i = 1; i < nelt; ++i)
1600 if (INTVAL (XVECEXP (op, 0, i)) != (i + (i & 1) * nelt))
1603 else if (elt == nelt)
1605 for (i = 1; i < nelt; ++i)
1606 if (INTVAL (XVECEXP (op, 0, i)) != (elt + i - (i & 1) * nelt))
1615 ;; Return true if OP is a parallel for a vbroadcast permute.
;; All selector elements must be identical (pointer-identical const_int
;; rtxes, which is valid because const_ints are shared).
;; NOTE(review): the braces and final `return true;` (original lines
;; 1619, 1627-1630) are elided in this copy — restore from upstream
;; predicates.md.
1616 (define_predicate "avx_vbroadcast_operand"
1617 (and (match_code "parallel")
1618 (match_code "const_int" "a"))
1620 rtx elt = XVECEXP (op, 0, 0);
1621 int i, nelt = XVECLEN (op, 0);
1623 /* Don't bother checking there are the right number of operands,
1624 merely that they're all identical. */
1625 for (i = 1; i < nelt; ++i)
1626 if (XVECEXP (op, 0, i) != elt)
1631 ;; Return true if OP is a parallel for a palignr permute.
;; Selector must be a rotation: element i equals (elt + i) mod nelt.
;; NOTE(review): the braces, the `return false;` inside the loop and the
;; final `return true;` (original lines 1635, 1643-1646) are elided in
;; this copy — restore from upstream predicates.md.
1632 (define_predicate "palignr_operand"
1633 (and (match_code "parallel")
1634 (match_code "const_int" "a"))
1636 int elt = INTVAL (XVECEXP (op, 0, 0));
1637 int i, nelt = XVECLEN (op, 0);
1639 /* Check that an order in the permutation is suitable for palignr.
1640 For example, {5 6 7 0 1 2 3 4} is "palignr 5, xmm, xmm". */
1641 for (i = 1; i < nelt; ++i)
1642 if (INTVAL (XVECEXP (op, 0, i)) != ((elt + i) % nelt))
1647 ;; Return true if OP is a proper third operand to vpblendw256.
;; vpblendw's 8-bit immediate is replicated across both 128-bit lanes,
;; so a valid 16-bit mask must have identical low and high bytes.
;; NOTE(review): the body's enclosing braces (original lines 1650 and
;; 1654) are elided in this copy — restore from upstream predicates.md.
1648 (define_predicate "avx2_pblendw_operand"
1649 (match_code "const_int")
1651 HOST_WIDE_INT val = INTVAL (op);
1652 HOST_WIDE_INT low = val & 0xff;
1653 return val == ((low << 8) | low);
1656 ;; Return true if OP is vector_operand or CONST_VECTOR.
1657 (define_predicate "general_vector_operand"
1658 (ior (match_operand 0 "vector_operand")
1659 (match_code "const_vector")))
1661 ;; Return true if OP is either -1 constant or stored in register.
;; -1 is recognized by pointer identity with the shared constm1_rtx.
1662 (define_predicate "register_or_constm1_operand"
1663 (ior (match_operand 0 "register_operand")
1664 (and (match_code "const_int")
1665 (match_test "op == constm1_rtx"))))