1 ;;- Instruction patterns for the System z vector facility
2 ;; Copyright (C) 2015-2018 Free Software Foundation, Inc.
3 ;; Contributed by Andreas Krebbel (Andreas.Krebbel@de.ibm.com)
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it under
8 ;; the terms of the GNU General Public License as published by the Free
9 ;; Software Foundation; either version 3, or (at your option) any later
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 ;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 ;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
21 ; All vector modes supported in a vector register
22 (define_mode_iterator V
23 [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI V1SF
; Like V but additionally including the 128 bit integer modes V1TI and
; TI (see the mode list ending below).
25 (define_mode_iterator VT
26 [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI V1SF
27 V2SF V4SF V1DF V2DF V1TF V1TI TI])
29 ; All modes directly supported by the hardware having full vector reg size
30 ; V_HW2 is duplicate of V_HW for having two iterators expanding
31 ; independently e.g. vcond
32 (define_mode_iterator V_HW [V16QI V8HI V4SI V2DI V2DF (V4SF "TARGET_VXE") (V1TF "TARGET_VXE")])
33 (define_mode_iterator V_HW2 [V16QI V8HI V4SI V2DI V2DF (V4SF "TARGET_VXE") (V1TF "TARGET_VXE")])
; Full-size HW modes with 64 bit element size only.
35 (define_mode_iterator V_HW_64 [V2DI V2DF])
37 ; Including TI for instructions that support it (va, vn, ...)
38 (define_mode_iterator VT_HW [V16QI V8HI V4SI V2DI V2DF V1TI TI (V4SF "TARGET_VXE") (V1TF "TARGET_VXE")])
40 ; All full size integer vector modes supported in a vector register + TImode
41 (define_mode_iterator VIT_HW [V16QI V8HI V4SI V2DI V1TI TI])
42 (define_mode_iterator VI_HW [V16QI V8HI V4SI V2DI])
; Subsets of VI_HW by element size: Q = byte, H = halfword, S = word,
; D = doubleword.
43 (define_mode_iterator VI_HW_QHS [V16QI V8HI V4SI])
44 (define_mode_iterator VI_HW_HSD [V8HI V4SI V2DI])
45 (define_mode_iterator VI_HW_HS [V8HI V4SI])
46 (define_mode_iterator VI_HW_QH [V16QI V8HI])
; Full-size HW modes with four elements (int and fp).
47 (define_mode_iterator VI_HW_4 [V4SI V4SF])
49 ; All integer vector modes supported in a vector register + TImode
50 (define_mode_iterator VIT [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI V1TI TI])
51 (define_mode_iterator VI [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI])
52 (define_mode_iterator VI_QHS [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI])
; All floating point vector modes (single precision requires VXE).
54 (define_mode_iterator VFT [(V1SF "TARGET_VXE") (V2SF "TARGET_VXE") (V4SF "TARGET_VXE")
58 ; FP vector modes directly supported by the HW. This does not include
59 ; vector modes using only part of a vector register and should be used
60 ; for instructions which might trigger IEEE exceptions.
61 (define_mode_iterator VF_HW [(V4SF "TARGET_VXE") V2DF (V1TF "TARGET_VXE")])
; Vector modes grouped by their total size in bits.
63 (define_mode_iterator V_8 [V1QI])
64 (define_mode_iterator V_16 [V2QI V1HI])
65 (define_mode_iterator V_32 [V4QI V2HI V1SI V1SF])
66 (define_mode_iterator V_64 [V8QI V4HI V2SI V2SF V1DI V1DF])
67 (define_mode_iterator V_128 [V16QI V8HI V4SI V4SF V2DI V2DF V1TI V1TF])
; Like V_128 but excluding the single-element modes V1TI and V1TF.
69 (define_mode_iterator V_128_NOSINGLE [V16QI V8HI V4SI V4SF V2DI V2DF])
71 ; Empty string for all but TImode. This is used to hide the TImode
72 ; expander name in case it is defined already. See addti3 for an
74 (define_mode_attr ti* [(V1QI "") (V2QI "") (V4QI "") (V8QI "") (V16QI "")
75 (V1HI "") (V2HI "") (V4HI "") (V8HI "")
76 (V1SI "") (V2SI "") (V4SI "")
79 (V1SF "") (V2SF "") (V4SF "")
83 ; The element type of the vector.
84 (define_mode_attr non_vec[(V1QI "QI") (V2QI "QI") (V4QI "QI") (V8QI "QI") (V16QI "QI")
85 (V1HI "HI") (V2HI "HI") (V4HI "HI") (V8HI "HI")
86 (V1SI "SI") (V2SI "SI") (V4SI "SI")
87 (V1DI "DI") (V2DI "DI")
89 (V1SF "SF") (V2SF "SF") (V4SF "SF")
90 (V1DF "DF") (V2DF "DF")
91 (V1TF "TF") (TF "TF")])
93 ; Like above, but in lower case.  Used e.g. to build pattern names such
93 ; as vec_extract<mode><non_vec_l>.
94 (define_mode_attr non_vec_l[(V1QI "qi") (V2QI "qi") (V4QI "qi") (V8QI "qi")
96 (V1HI "hi") (V2HI "hi") (V4HI "hi") (V8HI "hi")
97 (V1SI "si") (V2SI "si") (V4SI "si")
98 (V1DI "di") (V2DI "di")
100 (V1SF "sf") (V2SF "sf") (V4SF "sf")
101 (V1DF "df") (V2DF "df")
102 (V1TF "tf") (TF "tf")])
104 ; The instruction suffix for integer instructions and instructions
105 ; which do not care about whether it is floating point or integer.
105 ; b = byte, h = halfword, f = word, g = doubleword element size.
106 (define_mode_attr bhfgq[(V1QI "b") (V2QI "b") (V4QI "b") (V8QI "b") (V16QI "b")
107 (V1HI "h") (V2HI "h") (V4HI "h") (V8HI "h")
108 (V1SI "f") (V2SI "f") (V4SI "f")
109 (V1DI "g") (V2DI "g")
111 (V1SF "f") (V2SF "f") (V4SF "f")
112 (V1DF "g") (V2DF "g")
115 ; This is for vmalhw. It gets a 'w' attached to avoid confusion with
116 ; multiply and add logical high vmalh.  Only the halfword modes expand
116 ; to "w"; all other element sizes expand to the empty string.
117 (define_mode_attr w [(V1QI "") (V2QI "") (V4QI "") (V8QI "") (V16QI "")
118 (V1HI "w") (V2HI "w") (V4HI "w") (V8HI "w")
119 (V1SI "") (V2SI "") (V4SI "")
120 (V1DI "") (V2DI "")])
122 ; Resulting mode of a vector comparison. For floating point modes an
123 ; integer vector mode with the same element size is picked.  Integer
123 ; vector modes map to themselves.
124 (define_mode_attr tointvec [(V1QI "V1QI") (V2QI "V2QI") (V4QI "V4QI") (V8QI "V8QI") (V16QI "V16QI")
125 (V1HI "V1HI") (V2HI "V2HI") (V4HI "V4HI") (V8HI "V8HI")
126 (V1SI "V1SI") (V2SI "V2SI") (V4SI "V4SI")
127 (V1DI "V1DI") (V2DI "V2DI")
129 (V1SF "V1SI") (V2SF "V2SI") (V4SF "V4SI")
130 (V1DF "V1DI") (V2DF "V2DI")
; Mnemonic suffix distinguishing the scalar ("w") from the full vector
; ("v") flavor of an fp operation for a given mode.
132 (define_mode_attr vw [(SF "w") (V1SF "w") (V2SF "v") (V4SF "v")
133 (DF "w") (V1DF "w") (V2DF "v")
134 (TF "w") (V1TF "w")])
; Mnemonic suffix for the fp precision: s = single, d = double,
; x = extended.
136 (define_mode_attr sdx [(SF "s") (V1SF "s") (V2SF "s") (V4SF "s")
137 (DF "d") (V1DF "d") (V2DF "d")
138 (TF "x") (V1TF "x")])
140 ; Vector with doubled element size.
141 (define_mode_attr vec_double [(V1QI "V1HI") (V2QI "V1HI") (V4QI "V2HI") (V8QI "V4HI") (V16QI "V8HI")
142 (V1HI "V1SI") (V2HI "V1SI") (V4HI "V2SI") (V8HI "V4SI")
143 (V1SI "V1DI") (V2SI "V1DI") (V4SI "V2DI")
144 (V1DI "V1TI") (V2DI "V1TI")
145 (V1SF "V1DF") (V2SF "V1DF") (V4SF "V2DF")])
147 ; Vector with half the element size.
148 (define_mode_attr vec_half [(V1HI "V2QI") (V2HI "V4QI") (V4HI "V8QI") (V8HI "V16QI")
149 (V1SI "V2HI") (V2SI "V4HI") (V4SI "V8HI")
150 (V1DI "V2SI") (V2DI "V4SI")
152 (V1DF "V2SF") (V2DF "V4SF")
155 ; Vector with half the element size AND half the number of elements.
156 (define_mode_attr vec_halfhalf
157 [(V2HI "V2QI") (V4HI "V4QI") (V8HI "V8QI")
158 (V2SI "V2HI") (V4SI "V4HI")
; Vector mode with half the number of elements but the same element size.
162 (define_mode_attr vec_halfnumelts
163 [(V4SF "V2SF") (V4SI "V2SI")])
165 ; The comparisons not setting CC iterate over the rtx code.
166 (define_code_iterator VFCMP_HW_OP [eq gt ge])
; Asm condition mnemonics for the fp compares: e = equal, h = high,
; he = high or equal.
167 (define_code_attr asm_fcmp_op [(eq "e") (gt "h") (ge "he")])
171 ; Comparison operators on int and fp compares which are directly
172 ; supported by the HW.
173 (define_code_iterator VICMP_HW_OP [eq gt gtu])
174 ; For int insn_cmp_op can be used in the insn name as well as in the asm output.
175 (define_code_attr insn_cmp_op [(eq "eq") (gt "h") (gtu "hl") (ge "he")])
177 ; Flags for vector string instructions (vfae all 4, vfee only ZS and CS, vstrc all 4)
; The flag values form a bit mask passed in the M5 field of the
; instructions.
179 [(VSTRING_FLAG_IN 8) ; invert result
180 (VSTRING_FLAG_RT 4) ; result type
181 (VSTRING_FLAG_ZS 2) ; zero search
182 (VSTRING_FLAG_CS 1)]) ; condition code set
; Patterns for the vector intrinsics/builtins live in a separate file.
184 (include "vx-builtins.md")
186 ; Full HW vector size moves
188 ; We don't use lm/stm for 128 bit moves since these are slower than
189 ; splitting it into separate moves.
191 ; FIXME: More constants are possible by enabling jxx, jyy constraints
192 ; for TImode (use double-int for the calculations)
194 ; vgmb, vgmh, vgmf, vgmg, vrepib, vrepih, vrepif, vrepig
; The alternatives cover VR<->VR/memory moves, several classes of
; immediate constants (j00, jm1, jyy, jxx, jKK) and GPR(-pair)
; fallbacks (*d/?o) for when no vector register is available.
195 (define_insn "mov<mode>"
196 [(set (match_operand:V_128 0 "nonimmediate_operand" "=v,v,R, v, v, v, v, v,v,*d,*d,?o")
197 (match_operand:V_128 1 "general_operand" " v,R,v,j00,jm1,jyy,jxx,jKK,d, v,dT,*d"))]
206 vgm<bhfgq>\t%v0,%s1,%e1
207 vrepi<bhfgq>\t%v0,%h1
212 [(set_attr "cpu_facility" "vx,vx,vx,vx,vx,vx,vx,vx,vx,vx,*,*")
213 (set_attr "op_type" "VRR,VRX,VRX,VRI,VRI,VRI,VRI,VRI,VRR,*,*,*")])
215 ; VR -> GPR, no instruction so split it into 64 element sets.
; Extracts doubleword element 0 and 1 of the source VR into the two
; word-mode subwords of the GPR destination.
217 [(set (match_operand:V_128 0 "register_operand" "")
218 (match_operand:V_128 1 "register_operand" ""))]
219 "TARGET_VX && GENERAL_REG_P (operands[0]) && VECTOR_REG_P (operands[1])"
221 (unspec:DI [(subreg:V2DI (match_dup 1) 0)
222 (const_int 0)] UNSPEC_VEC_EXTRACT))
224 (unspec:DI [(subreg:V2DI (match_dup 1) 0)
225 (const_int 1)] UNSPEC_VEC_EXTRACT))]
227 operands[2] = operand_subword (operands[0], 0, 0, <MODE>mode);
228 operands[3] = operand_subword (operands[0], 1, 0, <MODE>mode);
231 ; Split the 128 bit GPR move into two word mode moves
232 ; s390_split_ok_p decides which part needs to be moved first.
; First variant: move subword 0 before subword 1.
235 [(set (match_operand:V_128 0 "nonimmediate_operand" "")
236 (match_operand:V_128 1 "general_operand" ""))]
238 && s390_split_ok_p (operands[0], operands[1], <MODE>mode, 0)"
239 [(set (match_dup 2) (match_dup 4))
240 (set (match_dup 3) (match_dup 5))]
242 operands[2] = operand_subword (operands[0], 0, 0, <MODE>mode);
243 operands[3] = operand_subword (operands[0], 1, 0, <MODE>mode);
244 operands[4] = operand_subword (operands[1], 0, 0, <MODE>mode);
245 operands[5] = operand_subword (operands[1], 1, 0, <MODE>mode);
; Second variant: move subword 1 before subword 0 (note the swapped
; subword indices compared to the splitter above).
249 [(set (match_operand:V_128 0 "nonimmediate_operand" "")
250 (match_operand:V_128 1 "general_operand" ""))]
252 && s390_split_ok_p (operands[0], operands[1], <MODE>mode, 1)"
253 [(set (match_dup 2) (match_dup 4))
254 (set (match_dup 3) (match_dup 5))]
256 operands[2] = operand_subword (operands[0], 1, 0, <MODE>mode);
257 operands[3] = operand_subword (operands[0], 0, 0, <MODE>mode);
258 operands[4] = operand_subword (operands[1], 1, 0, <MODE>mode);
259 operands[5] = operand_subword (operands[1], 0, 0, <MODE>mode);
262 ; This is the vector equivalent to the TImode splitter in s390.md. It
263 ; is required if both target GPRs occur in the source address operand.
265 ; For non-s_operands at least one of the target GPRs does not conflict
266 ; with the address operand and one of the splitters above will take
; The address is first loaded into the second subword of the
; destination before the actual 128 bit load is performed from it.
269 [(set (match_operand:V_128 0 "register_operand" "")
270 (match_operand:V_128 1 "memory_operand" ""))]
271 "TARGET_ZARCH && reload_completed
272 && !VECTOR_REG_P (operands[0])
273 && !s_operand (operands[1], VOIDmode)"
274 [(set (match_dup 0) (match_dup 1))]
276 rtx addr = operand_subword (operands[0], 1, 0, <MODE>mode);
277 addr = gen_lowpart (Pmode, addr);
278 s390_load_address (addr, XEXP (operands[1], 0));
279 operands[1] = replace_equiv_address (operands[1], addr);
282 ; Moves for smaller vector modes.
284 ; In these patterns only the vlr, vone, and vzero instructions write
285 ; VR bytes outside the mode. This should be ok since we disallow
286 ; formerly bigger modes being accessed with smaller modes via
287 ; subreg. Note: The vone, vzero instructions could easily be replaced
288 ; with vlei which would only access the bytes belonging to the mode.
289 ; However, this would probably be slower.
; 8 bit vector moves (V1QI).
291 (define_insn "mov<mode>"
292 [(set (match_operand:V_8 0 "nonimmediate_operand" "=v,v,d,v,R, v, v, v, v,d, Q, S, Q, S, d, d,d,d,d,R,T")
293 (match_operand:V_8 1 "general_operand" " v,d,v,R,v,j00,jm1,jyy,jxx,d,j00,j00,jm1,jm1,j00,jm1,R,T,b,d,d"))]
317 [(set_attr "op_type" "VRR,VRS,VRS,VRX,VRX,VRI,VRI,VRI,VRI,RR,SI,SIY,SI,SIY,RI,RI,RX,RXY,RIL,RX,RXY")])
; 16 bit vector moves (V2QI, V1HI).
319 (define_insn "mov<mode>"
320 [(set (match_operand:V_16 0 "nonimmediate_operand" "=v,v,d,v,R, v, v, v, v,d, Q, Q, d, d,d,d,d,R,T,b")
321 (match_operand:V_16 1 "general_operand" " v,d,v,R,v,j00,jm1,jyy,jxx,d,j00,jm1,j00,jm1,R,T,b,d,d,d"))]
344 [(set_attr "op_type" "VRR,VRS,VRS,VRX,VRX,VRI,VRI,VRI,VRI,RR,SIL,SIL,RI,RI,RX,RXY,RIL,RX,RXY,RIL")])
; 32 bit vector moves; these also allow FPRs ("f" alternatives).
346 (define_insn "mov<mode>"
347 [(set (match_operand:V_32 0 "nonimmediate_operand" "=f,f,f,R,T,v,v,d,v,R, f, v, v, v, v, Q, Q, d, d,d,d,d,d,R,T,b")
348 (match_operand:V_32 1 "general_operand" " f,R,T,f,f,v,d,v,R,v,j00,j00,jm1,jyy,jxx,j00,jm1,j00,jm1,b,d,R,T,d,d,d"))]
377 [(set_attr "op_type" "RR,RXE,RXY,RX,RXY,VRR,VRS,VRS,VRX,VRX,RRE,VRI,VRI,VRI,VRI,SIL,SIL,RI,RI,
378 RIL,RR,RX,RXY,RX,RXY,RIL")])
; 64 bit vector moves; FPR, VR and GPR alternatives.
380 (define_insn "mov<mode>"
381 [(set (match_operand:V_64 0 "nonimmediate_operand"
382 "=f,f,f,R,T,v,v,d,v,R, f, v, v, v, v, Q, Q, d, d,f,d,d,d,d,T,b")
383 (match_operand:V_64 1 "general_operand"
384 " f,R,T,f,f,v,d,v,R,v,j00,j00,jm1,jyy,jxx,j00,jm1,j00,jm1,d,f,b,d,T,d,d"))]
413 [(set_attr "op_type" "RRE,RX,RXY,RX,RXY,VRR,VRS,VRS,VRX,VRX,RRE,VRI,VRI,VRI,VRI,
414 SIL,SIL,RI,RI,RRE,RRE,RIL,RR,RXY,RXY,RIL")])
421 ; vec_set is supposed to *modify* an existing vector so operand 0 is
422 ; duplicated as input operand.
423 (define_expand "vec_set<mode>"
424 [(set (match_operand:V 0 "register_operand" "")
425 (unspec:V [(match_operand:<non_vec> 1 "general_operand" "")
426 (match_operand:SI 2 "nonmemory_operand" "")
431 ; FIXME: Support also vector mode operands for 1
432 ; FIXME: A target memory operand seems to be useful otherwise we end
433 ; up with vl vlvgg vst. Shouldn't the middle-end be able to handle
435 ; vlvgb, vlvgh, vlvgf, vlvgg, vleb, vleh, vlef, vleg, vleib, vleih, vleif, vleig
; A constant element index must be in range of the number of elements
; of the mode (checked below); a register index goes through vlvg.
436 (define_insn "*vec_set<mode>"
437 [(set (match_operand:V 0 "register_operand" "=v,v,v")
438 (unspec:V [(match_operand:<non_vec> 1 "general_operand" "d,R,K")
439 (match_operand:SI 2 "nonmemory_operand" "an,I,I")
440 (match_operand:V 3 "register_operand" "0,0,0")]
443 && (!CONST_INT_P (operands[2])
444 || UINTVAL (operands[2]) < GET_MODE_NUNITS (<V:MODE>mode))"
446 vlvg<bhfgq>\t%v0,%1,%Y2
447 vle<bhfgq>\t%v0,%1,%2
448 vlei<bhfgq>\t%v0,%1,%2"
449 [(set_attr "op_type" "VRS,VRX,VRI")])
451 ; vlvgb, vlvgh, vlvgf, vlvgg
; Variant matching an element index of the form register + constant
; offset, mapped onto the base + displacement address of vlvg.
452 (define_insn "*vec_set<mode>_plus"
453 [(set (match_operand:V 0 "register_operand" "=v")
454 (unspec:V [(match_operand:<non_vec> 1 "general_operand" "d")
455 (plus:SI (match_operand:SI 2 "register_operand" "a")
456 (match_operand:SI 4 "const_int_operand" "n"))
457 (match_operand:V 3 "register_operand" "0")]
460 "vlvg<bhfgq>\t%v0,%1,%Y4(%2)"
461 [(set_attr "op_type" "VRS")])
464 ; FIXME: Support also vector mode operands for 0
465 ; FIXME: This should be (vec_select ..) or something but it does only allow constant selectors :(
466 ; This is used via RTL standard name as well as for expanding the builtin
467 (define_expand "vec_extract<mode><non_vec_l>"
468 [(set (match_operand:<non_vec> 0 "nonimmediate_operand" "")
469 (unspec:<non_vec> [(match_operand:V 1 "register_operand" "")
470 (match_operand:SI 2 "nonmemory_operand" "")]
471 UNSPEC_VEC_EXTRACT))]
474 ; vlgvb, vlgvh, vlgvf, vlgvg, vsteb, vsteh, vstef, vsteg
; Extract into a GPR (vlgv, register or variable index) or store a
; single element to memory (vste, constant index only).
475 (define_insn "*vec_extract<mode>"
476 [(set (match_operand:<non_vec> 0 "nonimmediate_operand" "=d,R")
477 (unspec:<non_vec> [(match_operand:V 1 "register_operand" "v,v")
478 (match_operand:SI 2 "nonmemory_operand" "an,I")]
479 UNSPEC_VEC_EXTRACT))]
481 && (!CONST_INT_P (operands[2])
482 || UINTVAL (operands[2]) < GET_MODE_NUNITS (<V:MODE>mode))"
484 vlgv<bhfgq>\t%0,%v1,%Y2
485 vste<bhfgq>\t%v1,%0,%2
486 [(set_attr "op_type" "VRS,VRX")])
488 ; vlgvb, vlgvh, vlgvf, vlgvg
; Variant matching an element index of the form register + constant
; offset, mapped onto the base + displacement address of vlgv.
489 (define_insn "*vec_extract<mode>_plus"
490 [(set (match_operand:<non_vec> 0 "nonimmediate_operand" "=d")
491 (unspec:<non_vec> [(match_operand:V 1 "register_operand" "v")
492 (plus:SI (match_operand:SI 2 "nonmemory_operand" "a")
493 (match_operand:SI 3 "const_int_operand" "n"))]
494 UNSPEC_VEC_EXTRACT))]
496 "vlgv<bhfgq>\t%0,%v1,%Y3(%2)"
497 [(set_attr "op_type" "VRS")])
; Initialize a full-size vector from the rtx constant or parallel in
; operand 1; the expansion work is done in s390.c.
499 (define_expand "vec_init<mode><non_vec_l>"
500 [(match_operand:V_128 0 "register_operand" "")
501 (match_operand:V_128 1 "nonmemory_operand" "")]
504 s390_expand_vec_init (operands[0], operands[1]);
; Load one word element from memory and combine it (via vec_concat)
; into a four-element vector.
508 (define_insn "*vec_vllezlf<mode>"
509 [(set (match_operand:VI_HW_4 0 "register_operand" "=v")
511 (vec_concat:<vec_halfnumelts>
512 (match_operand:<non_vec> 1 "memory_operand" "R")
514 (vec_concat:<vec_halfnumelts>
519 [(set_attr "op_type" "VRX")])
521 ; Replicate from vector element
522 ; vrepb, vreph, vrepf, vrepg
; Replicate element number operands[2] of operand 1 into every element
; of operand 0.  The constant index must be in range of the mode's
; number of elements (checked in the insn condition).
523 (define_insn "*vec_splat<mode>"
524 [(set (match_operand:V_128_NOSINGLE 0 "register_operand" "=v")
525 (vec_duplicate:V_128_NOSINGLE
526 (vec_select:<non_vec>
527 (match_operand:V_128_NOSINGLE 1 "register_operand" "v")
529 [(match_operand:QI 2 "const_mask_operand" "C")]))))]
530 "TARGET_VX && UINTVAL (operands[2]) < GET_MODE_NUNITS (<MODE>mode)"
531 "vrep<bhfgq>\t%v0,%v1,%2"
532 [(set_attr "op_type" "VRI")])
534 ; vlrepb, vlreph, vlrepf, vlrepg, vrepib, vrepih, vrepif, vrepig, vrepb, vreph, vrepf, vrepg
; Replicate a scalar into all elements: from memory (vlrep), from an
; immediate (vrepi), from a VR element (vrep) or - last alternative -
; from a GPR, which is handled by the splitter further down.
535 (define_insn "*vec_splats<mode>"
536 [(set (match_operand:V_128_NOSINGLE 0 "register_operand" "=v,v,v,v")
537 (vec_duplicate:V_128_NOSINGLE (match_operand:<non_vec> 1 "general_operand" " R,K,v,d")))]
541 vrepi<bhfgq>\t%v0,%h1
542 vrep<bhfgq>\t%v0,%v1,0
544 [(set_attr "op_type" "VRX,VRI,VRI,*")])
546 ; A TFmode operand resides in FPR register pairs while V1TF is in a
547 ; single vector register.
548 (define_insn "*vec_tf_to_v1tf"
549 [(set (match_operand:V1TF 0 "nonimmediate_operand" "=v,v,R,v,v")
550 (vec_duplicate:V1TF (match_operand:TF 1 "general_operand" "v,R,v,G,d")))]
558 [(set_attr "op_type" "VRR,VRX,VRX,VRI,VRR")])
; Same idea for TImode: a TI value in a GPR pair vs. V1TI in a single
; vector register.  Also accepts the 0/-1 immediates (j00/jm1).
560 (define_insn "*vec_ti_to_v1ti"
561 [(set (match_operand:V1TI 0 "nonimmediate_operand" "=v,v,R, v, v,v")
562 (vec_duplicate:V1TI (match_operand:TI 1 "general_operand" "v,R,v,j00,jm1,d")))]
571 [(set_attr "op_type" "VRR,VRX,VRX,VRI,VRI,VRR")])
573 ; vec_splats is supposed to replicate op1 into all elements of op0
574 ; This splitter first sets the rightmost element of op0 to op1 and
575 ; then does a vec_splat to replicate that element into all other
; elements.  Only applies when the scalar source lives in a GPR.
578 [(set (match_operand:V_128_NOSINGLE 0 "register_operand" "")
579 (vec_duplicate:V_128_NOSINGLE (match_operand:<non_vec> 1 "register_operand" "")))]
580 "TARGET_VX && GENERAL_REG_P (operands[1])"
582 (unspec:V_128_NOSINGLE [(match_dup 1) (match_dup 2) (match_dup 0)] UNSPEC_VEC_SET))
584 (vec_duplicate:V_128_NOSINGLE
585 (vec_select:<non_vec>
586 (match_dup 0) (parallel [(match_dup 2)]))))]
; Index of the rightmost element of the mode.
588 operands[2] = GEN_INT (GET_MODE_NUNITS (<MODE>mode) - 1);
; Signed vector conditional select.  Uses the two independent iterators
; V_HW/V_HW2 so that value mode and comparison mode may differ as long
; as the number of elements matches (enforced in the condition).
591 (define_expand "vcond<V_HW:mode><V_HW2:mode>"
592 [(set (match_operand:V_HW 0 "register_operand" "")
594 (match_operator 3 "comparison_operator"
595 [(match_operand:V_HW2 4 "register_operand" "")
596 (match_operand:V_HW2 5 "nonmemory_operand" "")])
597 (match_operand:V_HW 1 "nonmemory_operand" "")
598 (match_operand:V_HW 2 "nonmemory_operand" "")))]
599 "TARGET_VX && GET_MODE_NUNITS (<V_HW:MODE>mode) == GET_MODE_NUNITS (<V_HW2:MODE>mode)"
601 s390_expand_vcond (operands[0], operands[1], operands[2],
602 GET_CODE (operands[3]), operands[4], operands[5]);
; Unsigned variant; the expansion is identical, the comparison code
; carried in operands[3] distinguishes signed from unsigned.
606 (define_expand "vcondu<V_HW:mode><V_HW2:mode>"
607 [(set (match_operand:V_HW 0 "register_operand" "")
609 (match_operator 3 "comparison_operator"
610 [(match_operand:V_HW2 4 "register_operand" "")
611 (match_operand:V_HW2 5 "nonmemory_operand" "")])
612 (match_operand:V_HW 1 "nonmemory_operand" "")
613 (match_operand:V_HW 2 "nonmemory_operand" "")))]
614 "TARGET_VX && GET_MODE_NUNITS (<V_HW:MODE>mode) == GET_MODE_NUNITS (<V_HW2:MODE>mode)"
616 s390_expand_vcond (operands[0], operands[1], operands[2],
617 GET_CODE (operands[3]), operands[4], operands[5]);
621 ; We only have HW support for byte vectors. The middle-end is
622 ; supposed to lower the mode if required.
; vperm selects bytes from the concatenation of operands 1 and 2
; according to the byte indexes in operand 3.
623 (define_insn "vec_permv16qi"
624 [(set (match_operand:V16QI 0 "register_operand" "=v")
625 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
626 (match_operand:V16QI 2 "register_operand" "v")
627 (match_operand:V16QI 3 "register_operand" "v")]
630 "vperm\t%v0,%v1,%v2,%v3"
631 [(set_attr "op_type" "VRR")])
633 ; vec_perm_const for V2DI using vpdi?
636 ;; Vector integer arithmetic instructions
639 ; vab, vah, vaf, vag, vaq
641 ; We use nonimmediate_operand instead of register_operand since it is
642 ; better to have the reloads into VRs instead of splitting the
643 ; operation into two DImode ADDs.
; Vector add; commutative, hence the "%" on operand 1.  The <ti*>
; prefix hides the TImode expander name when addti3 already exists.
644 (define_insn "<ti*>add<mode>3"
645 [(set (match_operand:VIT 0 "nonimmediate_operand" "=v")
646 (plus:VIT (match_operand:VIT 1 "nonimmediate_operand" "%v")
647 (match_operand:VIT 2 "general_operand" "v")))]
649 "va<bhfgq>\t%v0,%v1,%v2"
650 [(set_attr "op_type" "VRR")])
652 ; vsb, vsh, vsf, vsg, vsq
; Vector subtract; not commutative, operand order matters.
653 (define_insn "<ti*>sub<mode>3"
654 [(set (match_operand:VIT 0 "nonimmediate_operand" "=v")
655 (minus:VIT (match_operand:VIT 1 "nonimmediate_operand" "v")
656 (match_operand:VIT 2 "general_operand" "v")))]
658 "vs<bhfgq>\t%v0,%v1,%v2"
659 [(set_attr "op_type" "VRR")])
; Vector multiply low; only byte/halfword/word element sizes (VI_QHS).
662 (define_insn "mul<mode>3"
663 [(set (match_operand:VI_QHS 0 "register_operand" "=v")
664 (mult:VI_QHS (match_operand:VI_QHS 1 "register_operand" "%v")
665 (match_operand:VI_QHS 2 "register_operand" "v")))]
667 "vml<bhfgq><w>\t%v0,%v1,%v2"
668 [(set_attr "op_type" "VRR")])
670 ; vlcb, vlch, vlcf, vlcg
; Vector negate (load complement).
671 (define_insn "neg<mode>2"
672 [(set (match_operand:VI 0 "register_operand" "=v")
673 (neg:VI (match_operand:VI 1 "register_operand" "v")))]
675 "vlc<bhfgq>\t%v0,%v1"
676 [(set_attr "op_type" "VRR")])
678 ; vlpb, vlph, vlpf, vlpg
; Vector absolute value (load positive).
679 (define_insn "abs<mode>2"
680 [(set (match_operand:VI 0 "register_operand" "=v")
681 (abs:VI (match_operand:VI 1 "register_operand" "v")))]
683 "vlp<bhfgq>\t%v0,%v1"
684 [(set_attr "op_type" "VRR")])
689 ; Sum across DImode parts of the 1st operand and add the rightmost
690 ; element of 2nd operand
; vsumg: result is a V2DI regardless of the V8HI/V4SI input mode.
692 (define_insn "*vec_sum2<mode>"
693 [(set (match_operand:V2DI 0 "register_operand" "=v")
694 (unspec:V2DI [(match_operand:VI_HW_HS 1 "register_operand" "v")
695 (match_operand:VI_HW_HS 2 "register_operand" "v")]
698 "vsumg<bhfgq>\t%v0,%v1,%v2"
699 [(set_attr "op_type" "VRR")])
; vsum: like above but summing across SImode parts of byte/halfword
; vectors; result is a V4SI.
702 (define_insn "*vec_sum4<mode>"
703 [(set (match_operand:V4SI 0 "register_operand" "=v")
704 (unspec:V4SI [(match_operand:VI_HW_QH 1 "register_operand" "v")
705 (match_operand:VI_HW_QH 2 "register_operand" "v")]
708 "vsum<bhfgq>\t%v0,%v1,%v2"
709 [(set_attr "op_type" "VRR")])
712 ;; Vector bit instructions (int + fp)
; Vector and (vn); iterates over all modes including TI.
717 (define_insn "and<mode>3"
718 [(set (match_operand:VT 0 "register_operand" "=v")
719 (and:VT (match_operand:VT 1 "register_operand" "%v")
720 (match_operand:VT 2 "register_operand" "v")))]
723 [(set_attr "op_type" "VRR")])
; Vector nand: expressed via De Morgan as (ior (not a) (not b)).
727 (define_insn "notand<mode>3"
728 [(set (match_operand:VT 0 "register_operand" "=v")
729 (ior:VT (not:VT (match_operand:VT 1 "register_operand" "%v"))
730 (not:VT (match_operand:VT 2 "register_operand" "v"))))]
733 [(set_attr "op_type" "VRR")])
; Vector or (vo).
737 (define_insn "ior<mode>3"
738 [(set (match_operand:VT 0 "register_operand" "=v")
739 (ior:VT (match_operand:VT 1 "register_operand" "%v")
740 (match_operand:VT 2 "register_operand" "v")))]
743 [(set_attr "op_type" "VRR")])
745 ; Vector or with complement
747 (define_insn "ior_not<mode>3"
748 [(set (match_operand:VT 0 "register_operand" "=v")
749 (ior:VT (not:VT (match_operand:VT 2 "register_operand" "v"))
750 (match_operand:VT 1 "register_operand" "%v")))]
753 [(set_attr "op_type" "VRR")])
; Vector exclusive or (vx).
757 (define_insn "xor<mode>3"
758 [(set (match_operand:VT 0 "register_operand" "=v")
759 (xor:VT (match_operand:VT 1 "register_operand" "%v")
760 (match_operand:VT 2 "register_operand" "v")))]
763 [(set_attr "op_type" "VRR")])
; Vector exclusive nor.
767 (define_insn "notxor<mode>3"
768 [(set (match_operand:VT 0 "register_operand" "=v")
769 (not:VT (xor:VT (match_operand:VT 1 "register_operand" "%v")
770 (match_operand:VT 2 "register_operand" "v"))))]
773 [(set_attr "op_type" "VRR")])
775 ; Bitwise inversion of a vector
776 (define_insn "one_cmpl<mode>2"
777 [(set (match_operand:VT 0 "register_operand" "=v")
778 (not:VT (match_operand:VT 1 "register_operand" "v")))]
781 [(set_attr "op_type" "VRR")])
783 ; Vector population count
; Dispatch on the facility level: with VXE, vpopct supports all element
; sizes directly; plain VX only counts byte elements, so the larger
; element sizes are emulated by the _vx expanders below.
785 (define_expand "popcount<mode>2"
786 [(set (match_operand:VI_HW 0 "register_operand" "=v")
787 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")]
792 emit_insn (gen_popcount<mode>2_vxe (operands[0], operands[1]));
794 emit_insn (gen_popcount<mode>2_vx (operands[0], operands[1]));
798 ; vpopctb, vpopcth, vpopctf, vpopctg
799 (define_insn "popcount<mode>2_vxe"
800 [(set (match_operand:VI_HW 0 "register_operand" "=v")
801 (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "v")]
804 "vpopct<bhfgq>\t%v0,%v1"
805 [(set_attr "op_type" "VRR")])
; Byte element popcount - directly supported even without VXE.
807 (define_insn "popcountv16qi2_vx"
808 [(set (match_operand:V16QI 0 "register_operand" "=v")
809 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
811 "TARGET_VX && !TARGET_VXE"
813 [(set_attr "op_type" "VRR")])
815 ; vpopct only counts bits in byte elements. Bigger element sizes need
816 ; to be emulated. Word and doubleword elements can use the sum across
817 ; instructions. For halfword sized elements we do a shift of a copy
818 ; of the result, add it to the result and extend it to halfword
819 ; element size (unpack).
821 (define_expand "popcountv8hi2_vx"
823 (unspec:V16QI [(subreg:V16QI (match_operand:V8HI 1 "register_operand" "v") 0)]
825 ; Make a copy of the result
826 (set (match_dup 3) (match_dup 2))
827 ; Generate the shift count operand in a VR (8->byte 7)
828 (set (match_dup 4) (match_dup 5))
829 (set (match_dup 4) (unspec:V16QI [(const_int 8)
831 (match_dup 4)] UNSPEC_VEC_SET))
832 ; Vector shift right logical by one byte
834 (unspec:V16QI [(match_dup 3) (match_dup 4)] UNSPEC_VEC_SRLB))
835 ; Add the shifted and the original result
837 (plus:V16QI (match_dup 2) (match_dup 3)))
838 ; Generate mask for the odd numbered byte elements
840 (const_vector:V16QI [(const_int 0) (const_int 255)
841 (const_int 0) (const_int 255)
842 (const_int 0) (const_int 255)
843 (const_int 0) (const_int 255)
844 (const_int 0) (const_int 255)
845 (const_int 0) (const_int 255)
846 (const_int 0) (const_int 255)
847 (const_int 0) (const_int 255)]))
848 ; Zero out the even indexed bytes
849 (set (match_operand:V8HI 0 "register_operand" "=v")
850 (and:V8HI (subreg:V8HI (match_dup 2) 0)
851 (subreg:V8HI (match_dup 3) 0)))
853 "TARGET_VX && !TARGET_VXE"
; Scratch registers for the intermediate byte-wise results.
855 operands[2] = gen_reg_rtx (V16QImode);
856 operands[3] = gen_reg_rtx (V16QImode);
857 operands[4] = gen_reg_rtx (V16QImode);
858 operands[5] = CONST0_RTX (V16QImode);
; Word elements: byte-wise popcount followed by a sum-across step.
861 (define_expand "popcountv4si2_vx"
863 (unspec:V16QI [(subreg:V16QI (match_operand:V4SI 1 "register_operand" "v") 0)]
865 (set (match_operand:V4SI 0 "register_operand" "=v")
866 (unspec:V4SI [(match_dup 2) (match_dup 3)]
868 "TARGET_VX && !TARGET_VXE"
870 operands[2] = gen_reg_rtx (V16QImode);
871 operands[3] = force_reg (V16QImode, CONST0_RTX (V16QImode));
; Doubleword elements: byte-wise popcount followed by two sum-across
; steps (bytes -> words -> doublewords).
874 (define_expand "popcountv2di2_vx"
876 (unspec:V16QI [(subreg:V16QI (match_operand:V2DI 1 "register_operand" "v") 0)]
879 (unspec:V4SI [(match_dup 2) (match_dup 4)]
881 (set (match_operand:V2DI 0 "register_operand" "=v")
882 (unspec:V2DI [(match_dup 3) (match_dup 5)]
884 "TARGET_VX && !TARGET_VXE"
886 operands[2] = gen_reg_rtx (V16QImode);
887 operands[3] = gen_reg_rtx (V4SImode);
888 operands[4] = force_reg (V16QImode, CONST0_RTX (V16QImode));
889 operands[5] = force_reg (V4SImode, CONST0_RTX (V4SImode));
892 ; Count leading zeros
893 ; vclzb, vclzh, vclzf, vclzg
894 (define_insn "clz<mode>2"
895 [(set (match_operand:V 0 "register_operand" "=v")
896 (clz:V (match_operand:V 1 "register_operand" "v")))]
898 "vclz<bhfgq>\t%v0,%v1"
899 [(set_attr "op_type" "VRR")])
901 ; Count trailing zeros
902 ; vctzb, vctzh, vctzf, vctzg
903 (define_insn "ctz<mode>2"
904 [(set (match_operand:V 0 "register_operand" "=v")
905 (ctz:V (match_operand:V 1 "register_operand" "v")))]
907 "vctz<bhfgq>\t%v0,%v1"
908 [(set_attr "op_type" "VRR")])
912 ; Each vector element rotated by the corresponding vector element
913 ; verllvb, verllvh, verllvf, verllvg
914 (define_insn "vrotl<mode>3"
915 [(set (match_operand:VI 0 "register_operand" "=v")
916 (rotate:VI (match_operand:VI 1 "register_operand" "v")
917 (match_operand:VI 2 "register_operand" "v")))]
919 "verllv<bhfgq>\t%v0,%v1,%v2"
920 [(set_attr "op_type" "VRR")])
923 ; Vector rotate and shift by scalar instructions
; The code iterator covers all four shift/rotate operations; the two
; code attributes supply the RTL pattern name part and the instruction
; mnemonic for each operation.
925 (define_code_iterator VEC_SHIFTS [ashift ashiftrt lshiftrt rotate])
926 (define_code_attr vec_shifts_name [(ashift "ashl") (ashiftrt "ashr")
927 (lshiftrt "lshr") (rotate "rotl")])
928 (define_code_attr vec_shifts_mnem [(ashift "vesl") (ashiftrt "vesra")
929 (lshiftrt "vesrl") (rotate "verll")])
931 ; Each vector element rotated by a scalar
932 (define_expand "<vec_shifts_name><mode>3"
933 [(set (match_operand:VI 0 "register_operand" "")
934 (VEC_SHIFTS:VI (match_operand:VI 1 "register_operand" "")
935 (match_operand:SI 2 "nonmemory_operand" "")))]
938 ; verllb, verllh, verllf, verllg
939 ; veslb, veslh, veslf, veslg
940 ; vesrab, vesrah, vesraf, vesrag
941 ; vesrlb, vesrlh, vesrlf, vesrlg
; All elements shifted/rotated by the same scalar amount (register or
; immediate, see the "an" constraint).
942 (define_insn "*<vec_shifts_name><mode>3<addr_style_op>"
943 [(set (match_operand:VI 0 "register_operand" "=v")
944 (VEC_SHIFTS:VI (match_operand:VI 1 "register_operand" "v")
945 (match_operand:SI 2 "nonmemory_operand" "an")))]
947 "<vec_shifts_mnem><bhfgq>\t%v0,%v1,%Y2"
948 [(set_attr "op_type" "VRS")])
950 ; Shift each element by corresponding vector element
952 ; veslvb, veslvh, veslvf, veslvg
953 (define_insn "vashl<mode>3"
954 [(set (match_operand:VI 0 "register_operand" "=v")
955 (ashift:VI (match_operand:VI 1 "register_operand" "v")
956 (match_operand:VI 2 "register_operand" "v")))]
958 "veslv<bhfgq>\t%v0,%v1,%v2"
959 [(set_attr "op_type" "VRR")])
961 ; vesravb, vesravh, vesravf, vesravg
962 (define_insn "vashr<mode>3"
963 [(set (match_operand:VI 0 "register_operand" "=v")
964 (ashiftrt:VI (match_operand:VI 1 "register_operand" "v")
965 (match_operand:VI 2 "register_operand" "v")))]
967 "vesrav<bhfgq>\t%v0,%v1,%v2"
968 [(set_attr "op_type" "VRR")])
970 ; vesrlvb, vesrlvh, vesrlvf, vesrlvg
971 (define_insn "vlshr<mode>3"
972 [(set (match_operand:VI 0 "register_operand" "=v")
973 (lshiftrt:VI (match_operand:VI 1 "register_operand" "v")
974 (match_operand:VI 2 "register_operand" "v")))]
976 "vesrlv<bhfgq>\t%v0,%v1,%v2"
977 [(set_attr "op_type" "VRR")])
979 ; Vector shift right logical by byte
981 ; Pattern used by e.g. popcount
; The shift amount comes from a byte of the V16QI operand 2.
982 (define_insn "*vec_srb<mode>"
983 [(set (match_operand:V_128 0 "register_operand" "=v")
984 (unspec:V_128 [(match_operand:V_128 1 "register_operand" "v")
985 (match_operand:V16QI 2 "register_operand" "v")]
989 [(set_attr "op_type" "VRR")])
992 ; Vector shift left by byte
994 (define_insn "*vec_slb<mode>"
995 [(set (match_operand:V_128 0 "register_operand" "=v")
996 (unspec:V_128 [(match_operand:V_128 1 "register_operand" "v")
997 (match_operand:V16QI 2 "register_operand" "v")]
1001 [(set_attr "op_type" "VRR")])
1003 ; vec_shr is defined as shift towards element 0
1004 ; this means it is a left shift on BE targets!
1005 (define_expand "vec_shr_<mode>"
1007 (unspec:V16QI [(match_operand:SI 2 "const_shift_by_byte_operand" "")
1011 (set (match_operand:V_128 0 "register_operand" "")
1012 (unspec:V_128 [(match_operand:V_128 1 "register_operand" "")
; Scratch VR holding the byte shift count.
1017 operands[3] = gen_reg_rtx(V16QImode);
1020 ; vmnb, vmnh, vmnf, vmng
; Signed minimum; commutative ("%" on operand 1), as are all four
; min/max patterns below.
1021 (define_insn "smin<mode>3"
1022 [(set (match_operand:VI 0 "register_operand" "=v")
1023 (smin:VI (match_operand:VI 1 "register_operand" "%v")
1024 (match_operand:VI 2 "register_operand" "v")))]
1026 "vmn<bhfgq>\t%v0,%v1,%v2"
1027 [(set_attr "op_type" "VRR")])
1029 ; vmxb, vmxh, vmxf, vmxg
1030 (define_insn "smax<mode>3"
1031 [(set (match_operand:VI 0 "register_operand" "=v")
1032 (smax:VI (match_operand:VI 1 "register_operand" "%v")
1033 (match_operand:VI 2 "register_operand" "v")))]
1035 "vmx<bhfgq>\t%v0,%v1,%v2"
1036 [(set_attr "op_type" "VRR")])
1038 ; vmnlb, vmnlh, vmnlf, vmnlg
1039 (define_insn "umin<mode>3"
1040 [(set (match_operand:VI 0 "register_operand" "=v")
1041 (umin:VI (match_operand:VI 1 "register_operand" "%v")
1042 (match_operand:VI 2 "register_operand" "v")))]
1044 "vmnl<bhfgq>\t%v0,%v1,%v2"
1045 [(set_attr "op_type" "VRR")])
1047 ; vmxlb, vmxlh, vmxlf, vmxlg
1048 (define_insn "umax<mode>3"
1049 [(set (match_operand:VI 0 "register_operand" "=v")
1050 (umax:VI (match_operand:VI 1 "register_operand" "%v")
1051 (match_operand:VI 2 "register_operand" "v")))]
1053 "vmxl<bhfgq>\t%v0,%v1,%v2"
1054 [(set_attr "op_type" "VRR")])
; Widening multiplies of the even-numbered elements; the result mode
; has doubled element size (<vec_double>).
1057 (define_insn "vec_widen_smult_even_<mode>"
1058 [(set (match_operand:<vec_double> 0 "register_operand" "=v")
1059 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1060 (match_operand:VI_QHS 2 "register_operand" "v")]
1061 UNSPEC_VEC_SMULT_EVEN))]
1063 "vme<bhfgq>\t%v0,%v1,%v2"
1064 [(set_attr "op_type" "VRR")])
1066 ; vmleb, vmleh, vmlef
1067 (define_insn "vec_widen_umult_even_<mode>"
1068 [(set (match_operand:<vec_double> 0 "register_operand" "=v")
1069 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1070 (match_operand:VI_QHS 2 "register_operand" "v")]
1071 UNSPEC_VEC_UMULT_EVEN))]
1073 "vmle<bhfgq>\t%v0,%v1,%v2"
1074 [(set_attr "op_type" "VRR")])
; Same for the odd-numbered elements.
1077 (define_insn "vec_widen_smult_odd_<mode>"
1078 [(set (match_operand:<vec_double> 0 "register_operand" "=v")
1079 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1080 (match_operand:VI_QHS 2 "register_operand" "v")]
1081 UNSPEC_VEC_SMULT_ODD))]
1083 "vmo<bhfgq>\t%v0,%v1,%v2"
1084 [(set_attr "op_type" "VRR")])
1086 ; vmlob, vmloh, vmlof
1087 (define_insn "vec_widen_umult_odd_<mode>"
1088 [(set (match_operand:<vec_double> 0 "register_operand" "=v")
1089 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1090 (match_operand:VI_QHS 2 "register_operand" "v")]
1091 UNSPEC_VEC_UMULT_ODD))]
1093 "vmlo<bhfgq>\t%v0,%v1,%v2"
1094 [(set_attr "op_type" "VRR")])
1097 ; Widening hi/lo multiplications
1099 ; The S/390 instructions vml and vmh return the low or high parts of
1100 ; the double sized result elements in the corresponding elements of
1101 ; the target register. That's NOT what the vec_widen_umult_lo/hi
1102 ; patterns are expected to do.
1104 ; We emulate the widening lo/hi multiplies with the even/odd versions
1105 ; followed by a vector merge
; lo variant: unsigned even/odd products are combined with MERGEL,
; which takes the low-half elements of both intermediate vectors.
1108 (define_expand "vec_widen_umult_lo_<mode>"
1110 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1111 (match_operand:VI_QHS 2 "register_operand" "v")]
1112 UNSPEC_VEC_UMULT_EVEN))
1114 (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1115 UNSPEC_VEC_UMULT_ODD))
1116 (set (match_operand:<vec_double> 0 "register_operand" "=v")
1117 (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1118 UNSPEC_VEC_MERGEL))]
; operands 3/4 are scratch double-width vectors for the even/odd products.
1121 operands[3] = gen_reg_rtx (<vec_double>mode);
1122 operands[4] = gen_reg_rtx (<vec_double>mode);
; hi variant: the same products combined with MERGEH (high-half elements).
1125 (define_expand "vec_widen_umult_hi_<mode>"
1127 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1128 (match_operand:VI_QHS 2 "register_operand" "v")]
1129 UNSPEC_VEC_UMULT_EVEN))
1131 (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1132 UNSPEC_VEC_UMULT_ODD))
1133 (set (match_operand:<vec_double> 0 "register_operand" "=v")
1134 (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1135 UNSPEC_VEC_MERGEH))]
1138 operands[3] = gen_reg_rtx (<vec_double>mode)
1139 operands[4] = gen_reg_rtx (<vec_double>mode);
; Signed lo variant: SMULT even/odd + MERGEL.
1142 (define_expand "vec_widen_smult_lo_<mode>"
1144 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1145 (match_operand:VI_QHS 2 "register_operand" "v")]
1146 UNSPEC_VEC_SMULT_EVEN))
1148 (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1149 UNSPEC_VEC_SMULT_ODD))
1150 (set (match_operand:<vec_double> 0 "register_operand" "=v")
1151 (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1152 UNSPEC_VEC_MERGEL))]
1155 operands[3] = gen_reg_rtx (<vec_double>mode);
1156 operands[4] = gen_reg_rtx (<vec_double>mode);
; Signed hi variant: SMULT even/odd + MERGEH.
1159 (define_expand "vec_widen_smult_hi_<mode>"
1161 (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "%v")
1162 (match_operand:VI_QHS 2 "register_operand" "v")]
1163 UNSPEC_VEC_SMULT_EVEN))
1165 (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1166 UNSPEC_VEC_SMULT_ODD))
1167 (set (match_operand:<vec_double> 0 "register_operand" "=v")
1168 (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1169 UNSPEC_VEC_MERGEH))]
1172 operands[3] = gen_reg_rtx (<vec_double>mode);
1173 operands[4] = gen_reg_rtx (<vec_double>mode);
1176 ; vec_widen_ushiftl_hi
1177 ; vec_widen_ushiftl_lo
1178 ; vec_widen_sshiftl_hi
1179 ; vec_widen_sshiftl_lo
1182 ;; Vector floating point arithmetic instructions
1185 ; vfasb, vfadb, wfasb, wfadb, wfaxb
; Element-wise FP addition (commutative, hence "%" on operand 1).
1186 (define_insn "add<mode>3"
1187 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1188 (plus:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1189 (match_operand:VF_HW 2 "register_operand" "v")))]
1191 "<vw>fa<sdx>b\t%v0,%v1,%v2"
1192 [(set_attr "op_type" "VRR")])
1194 ; vfssb, vfsdb, wfssb, wfsdb, wfsxb
; Element-wise FP subtraction.  Subtraction is NOT commutative, so
; operand 1 must not carry the "%" commutative-operands marker; with
; "%" present the compiler is allowed to swap operands 1 and 2, which
; would compute op2 - op1 instead of op1 - op2.
1195 (define_insn "sub<mode>3"
1196 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1197 (minus:VF_HW (match_operand:VF_HW 1 "register_operand" "v")
1198 (match_operand:VF_HW 2 "register_operand" "v")))]
1200 "<vw>fs<sdx>b\t%v0,%v1,%v2"
1201 [(set_attr "op_type" "VRR")])
1203 ; vfmsb, vfmdb, wfmsb, wfmdb, wfmxb
; Element-wise FP multiplication.
1204 (define_insn "mul<mode>3"
1205 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1206 (mult:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1207 (match_operand:VF_HW 2 "register_operand" "v")))]
1209 "<vw>fm<sdx>b\t%v0,%v1,%v2"
1210 [(set_attr "op_type" "VRR")])
1212 ; vfdsb, vfddb, wfdsb, wfddb, wfdxb
; Element-wise FP division (no "%": division is not commutative).
1213 (define_insn "div<mode>3"
1214 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1215 (div:VF_HW (match_operand:VF_HW 1 "register_operand" "v")
1216 (match_operand:VF_HW 2 "register_operand" "v")))]
1218 "<vw>fd<sdx>b\t%v0,%v1,%v2"
1219 [(set_attr "op_type" "VRR")])
1221 ; vfsqsb, vfsqdb, wfsqsb, wfsqdb, wfsqxb
; Element-wise FP square root.
1222 (define_insn "sqrt<mode>2"
1223 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1224 (sqrt:VF_HW (match_operand:VF_HW 1 "register_operand" "v")))]
1226 "<vw>fsq<sdx>b\t%v0,%v1"
1227 [(set_attr "op_type" "VRR")])
1229 ; vfmasb, vfmadb, wfmasb, wfmadb, wfmaxb
; Fused multiply-add: op0 = op1 * op2 + op3.
1230 (define_insn "fma<mode>4"
1231 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1232 (fma:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1233 (match_operand:VF_HW 2 "register_operand" "v")
1234 (match_operand:VF_HW 3 "register_operand" "v")))]
1236 "<vw>fma<sdx>b\t%v0,%v1,%v2,%v3"
1237 [(set_attr "op_type" "VRR")])
1239 ; vfmssb, vfmsdb, wfmssb, wfmsdb, wfmsxb
; Fused multiply-subtract: op0 = op1 * op2 - op3 (operand 3 negated).
1240 (define_insn "fms<mode>4"
1241 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1242 (fma:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1243 (match_operand:VF_HW 2 "register_operand" "v")
1244 (neg:VF_HW (match_operand:VF_HW 3 "register_operand" "v"))))]
1246 "<vw>fms<sdx>b\t%v0,%v1,%v2,%v3"
1247 [(set_attr "op_type" "VRR")])
1249 ; vfnmasb, vfnmadb, wfnmasb, wfnmadb, wfnmaxb
; Negated FMA: per the insn name/mnemonic, op0 = -(op1 * op2 + op3).
1250 (define_insn "neg_fma<mode>4"
1251 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1253 (fma:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1254 (match_operand:VF_HW 2 "register_operand" "v")
1255 (match_operand:VF_HW 3 "register_operand" "v"))))]
1257 "<vw>fnma<sdx>b\t%v0,%v1,%v2,%v3"
1258 [(set_attr "op_type" "VRR")])
1260 ; vfnmssb, vfnmsdb, wfnmssb, wfnmsdb, wfnmsxb
; Negated FMS: per the insn name/mnemonic, op0 = -(op1 * op2 - op3).
1261 (define_insn "neg_fms<mode>4"
1262 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1264 (fma:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1265 (match_operand:VF_HW 2 "register_operand" "v")
1266 (neg:VF_HW (match_operand:VF_HW 3 "register_operand" "v")))))]
1268 "<vw>fnms<sdx>b\t%v0,%v1,%v2,%v3"
1269 [(set_attr "op_type" "VRR")])
1271 ; vflcsb, vflcdb, wflcsb, wflcdb, wflcxb
; FP negate (load complement).
1272 (define_insn "neg<mode>2"
1273 [(set (match_operand:VFT 0 "register_operand" "=v")
1274 (neg:VFT (match_operand:VFT 1 "register_operand" "v")))]
1276 "<vw>flc<sdx>b\t%v0,%v1"
1277 [(set_attr "op_type" "VRR")])
1279 ; vflpsb, vflpdb, wflpsb, wflpdb, wflpxb
; FP absolute value (load positive).
1280 (define_insn "abs<mode>2"
1281 [(set (match_operand:VFT 0 "register_operand" "=v")
1282 (abs:VFT (match_operand:VFT 1 "register_operand" "v")))]
1284 "<vw>flp<sdx>b\t%v0,%v1"
1285 [(set_attr "op_type" "VRR")])
1287 ; vflnsb, vflndb, wflnsb, wflndb, wflnxb
; Negated absolute value (load negative): op0 = -|op1|.
1288 (define_insn "negabs<mode>2"
1289 [(set (match_operand:VFT 0 "register_operand" "=v")
1290 (neg:VFT (abs:VFT (match_operand:VFT 1 "register_operand" "v"))))]
1292 "<vw>fln<sdx>b\t%v0,%v1"
1293 [(set_attr "op_type" "VRR")])
; FP maximum expander; matched either by the VXE insn below or, for
; V2DF on plain VX, by the compare + select emulation.
1295 (define_expand "smax<mode>3"
1296 [(set (match_operand:VF_HW 0 "register_operand")
1297 (smax:VF_HW (match_operand:VF_HW 1 "register_operand")
1298 (match_operand:VF_HW 2 "register_operand")))]
1301 ; vfmaxsb, vfmaxdb, wfmaxsb, wfmaxdb, wfmaxxb
; VXE form; the trailing immediate "4" selects the vfmax function
; variant (M4 code -- see the z/Architecture PoP for its meaning).
1302 (define_insn "*smax<mode>3_vxe"
1303 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1304 (smax:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1305 (match_operand:VF_HW 2 "register_operand" "v")))]
1307 "<vw>fmax<sdx>b\t%v0,%v1,%v2,4"
1308 [(set_attr "op_type" "VRR")])
1310 ; Emulate with compare + select
; Splits into: op3 = element mask of (op1 > op2) in V2DI, then a
; select keyed on (op3 == op4) with op4 the all-zero vector.
1311 (define_insn_and_split "*smaxv2df3_vx"
1312 [(set (match_operand:V2DF 0 "register_operand" "=v")
1313 (smax:V2DF (match_operand:V2DF 1 "register_operand" "%v")
1314 (match_operand:V2DF 2 "register_operand" "v")))]
1315 "TARGET_VX && !TARGET_VXE"
1319 (gt:V2DI (match_dup 1) (match_dup 2)))
1322 (eq (match_dup 3) (match_dup 4))
1326 operands[3] = gen_reg_rtx (V2DImode);
1327 operands[4] = CONST0_RTX (V2DImode);
; FP minimum expander; matched either by the VXE insn below or, for
; V2DF on plain VX, by the compare + select emulation.
1330 (define_expand "smin<mode>3"
1331 [(set (match_operand:VF_HW 0 "register_operand")
1332 (smin:VF_HW (match_operand:VF_HW 1 "register_operand")
1333 (match_operand:VF_HW 2 "register_operand")))]
1336 ; vfminsb, vfmindb, wfminsb, wfmindb, wfminxb
; VXE form; the trailing immediate "4" selects the vfmin function
; variant (M4 code).
1337 (define_insn "*smin<mode>3_vxe"
1338 [(set (match_operand:VF_HW 0 "register_operand" "=v")
1339 (smin:VF_HW (match_operand:VF_HW 1 "register_operand" "%v")
1340 (match_operand:VF_HW 2 "register_operand" "v")))]
1342 "<vw>fmin<sdx>b\t%v0,%v1,%v2,4"
1343 [(set_attr "op_type" "VRR")])
1345 ; Emulate with compare + select
; Same (op1 > op2) mask as the smax split; NOTE(review): the select
; arms are not visible here -- presumably swapped vs. smax; confirm.
1346 (define_insn_and_split "*sminv2df3_vx"
1347 [(set (match_operand:V2DF 0 "register_operand" "=v")
1348 (smin:V2DF (match_operand:V2DF 1 "register_operand" "%v")
1349 (match_operand:V2DF 2 "register_operand" "v")))]
1350 "TARGET_VX && !TARGET_VXE"
1354 (gt:V2DI (match_dup 1) (match_dup 2)))
1357 (eq (match_dup 3) (match_dup 4))
1361 operands[3] = gen_reg_rtx (V2DImode);
1362 operands[4] = CONST0_RTX (V2DImode);
; Integer element compares.  NB: in this pattern operand 2 is the
; destination ("=v") while operands 0 and 1 are the inputs.
1370 (define_insn "*vec_cmp<VICMP_HW_OP:code><VI:mode>_nocc"
1371 [(set (match_operand:VI 2 "register_operand" "=v")
1372 (VICMP_HW_OP:VI (match_operand:VI 0 "register_operand" "v")
1373 (match_operand:VI 1 "register_operand" "v")))]
1375 "vc<VICMP_HW_OP:insn_cmp_op><VI:bhfgq>\t%v2,%v0,%v1"
1376 [(set_attr "op_type" "VRR")])
1380 ;; Floating point compares
1384 ; vfcesb, vfcedb, wfcexb, vfchsb, vfchdb, wfchxb, vfchesb, vfchedb, wfchexb
; FP element compares; the boolean result vector uses the matching
; integer element mode (<tointvec>).
1385 (define_insn "*vec_cmp<VFCMP_HW_OP:code><mode>_nocc"
1386 [(set (match_operand:<tointvec> 0 "register_operand" "=v")
1387 (VFCMP_HW_OP:<tointvec> (match_operand:VFT 1 "register_operand" "v")
1388 (match_operand:VFT 2 "register_operand" "v")))]
1390 "<vw>fc<VFCMP_HW_OP:asm_fcmp_op><sdx>b\t%v0,%v1,%v2"
1391 [(set_attr "op_type" "VRR")])
1393 ; Expanders for not directly supported comparisons
1395 ; UNEQ a u== b -> !(a > b | b > a)
1396 (define_expand "vec_cmpuneq<mode>"
1397 [(set (match_operand:<tointvec> 0 "register_operand" "=v")
1398 (gt:<tointvec> (match_operand:VFT 1 "register_operand" "v")
1399 (match_operand:VFT 2 "register_operand" "v")))
1401 (gt:<tointvec> (match_dup 2) (match_dup 1)))
1402 (set (match_dup 0) (ior:<tointvec> (match_dup 0) (match_dup 3)))
1403 (set (match_dup 0) (not:<tointvec> (match_dup 0)))]
; operand 3 is a scratch mask register for the reversed compare.
1406 operands[3] = gen_reg_rtx (<tointvec>mode);
; Mode-less wrapper: dispatches on the mode of operand 1.
1409 (define_expand "vec_cmpuneq"
1410 [(match_operand 0 "register_operand" "")
1411 (match_operand 1 "register_operand" "")
1412 (match_operand 2 "register_operand" "")]
1415 if (GET_MODE (operands[1]) == V4SFmode)
1416 emit_insn (gen_vec_cmpuneqv4sf (operands[0], operands[1], operands[2]));
1417 else if (GET_MODE (operands[1]) == V2DFmode)
1418 emit_insn (gen_vec_cmpuneqv2df (operands[0], operands[1], operands[2]));
1425 ; LTGT a <> b -> a > b | b > a
1426 (define_expand "vec_cmpltgt<mode>"
1427 [(set (match_operand:<tointvec> 0 "register_operand" "=v")
1428 (gt:<tointvec> (match_operand:VFT 1 "register_operand" "v")
1429 (match_operand:VFT 2 "register_operand" "v")))
1430 (set (match_dup 3) (gt:<tointvec> (match_dup 2) (match_dup 1)))
1431 (set (match_dup 0) (ior:<tointvec> (match_dup 0) (match_dup 3)))]
; operand 3 is a scratch mask register for the reversed compare.
1434 operands[3] = gen_reg_rtx (<tointvec>mode);
; Mode-less wrapper: dispatches on the mode of operand 1.
1437 (define_expand "vec_cmpltgt"
1438 [(match_operand 0 "register_operand" "")
1439 (match_operand 1 "register_operand" "")
1440 (match_operand 2 "register_operand" "")]
1443 if (GET_MODE (operands[1]) == V4SFmode)
1444 emit_insn (gen_vec_cmpltgtv4sf (operands[0], operands[1], operands[2]));
1445 else if (GET_MODE (operands[1]) == V2DFmode)
1446 emit_insn (gen_vec_cmpltgtv2df (operands[0], operands[1], operands[2]));
1453 ; ORDERED (a, b): a >= b | b > a
1454 (define_expand "vec_ordered<mode>"
1455 [(set (match_operand:<tointvec> 0 "register_operand" "=v")
1456 (ge:<tointvec> (match_operand:VFT 1 "register_operand" "v")
1457 (match_operand:VFT 2 "register_operand" "v")))
1458 (set (match_dup 3) (gt:<tointvec> (match_dup 2) (match_dup 1)))
1459 (set (match_dup 0) (ior:<tointvec> (match_dup 0) (match_dup 3)))]
; operand 3 is a scratch mask register for the reversed compare.
1462 operands[3] = gen_reg_rtx (<tointvec>mode);
; Mode-less wrapper: dispatches on the mode of operand 1.
1465 (define_expand "vec_ordered"
1466 [(match_operand 0 "register_operand" "")
1467 (match_operand 1 "register_operand" "")
1468 (match_operand 2 "register_operand" "")]
1471 if (GET_MODE (operands[1]) == V4SFmode)
1472 emit_insn (gen_vec_orderedv4sf (operands[0], operands[1], operands[2]));
1473 else if (GET_MODE (operands[1]) == V2DFmode)
1474 emit_insn (gen_vec_orderedv2df (operands[0], operands[1], operands[2]));
1481 ; UNORDERED (a, b): !ORDERED (a, b)
; Same sequence as vec_ordered plus a final complement of the mask.
1482 (define_expand "vec_unordered<mode>"
1483 [(set (match_operand:<tointvec> 0 "register_operand" "=v")
1484 (ge:<tointvec> (match_operand:VFT 1 "register_operand" "v")
1485 (match_operand:VFT 2 "register_operand" "v")))
1486 (set (match_dup 3) (gt:<tointvec> (match_dup 2) (match_dup 1)))
1487 (set (match_dup 0) (ior:<tointvec> (match_dup 0) (match_dup 3)))
1488 (set (match_dup 0) (not:<tointvec> (match_dup 0)))]
; operand 3 is a scratch mask register for the reversed compare.
1491 operands[3] = gen_reg_rtx (<tointvec>mode);
; Mode-less wrapper: dispatches on the mode of operand 1.
1494 (define_expand "vec_unordered"
1495 [(match_operand 0 "register_operand" "")
1496 (match_operand 1 "register_operand" "")
1497 (match_operand 2 "register_operand" "")]
1500 if (GET_MODE (operands[1]) == V4SFmode)
1501 emit_insn (gen_vec_unorderedv4sf (operands[0], operands[1], operands[2]));
1502 else if (GET_MODE (operands[1]) == V2DFmode)
1503 emit_insn (gen_vec_unorderedv2df (operands[0], operands[1], operands[2]));
; Build a 2-element 64-bit vector from two scalars; the first
; alternative takes GPR inputs ("d"), the second vector-register
; inputs ("v").
1510 (define_insn "*vec_load_pair<mode>"
1511 [(set (match_operand:V_HW_64 0 "register_operand" "=v,v")
1512 (vec_concat:V_HW_64 (match_operand:<non_vec> 1 "register_operand" "d,v")
1513 (match_operand:<non_vec> 2 "register_operand" "d,v")))]
1518 [(set_attr "op_type" "VRR,VRR")])
; vll: vector load with length from memory operand 2; operand 1 holds
; the length (NOTE(review): the PoP defines it as the highest byte
; index to load -- confirm which convention this operand uses).
1520 (define_insn "vllv16qi"
1521 [(set (match_operand:V16QI 0 "register_operand" "=v")
1522 (unspec:V16QI [(match_operand:SI 1 "register_operand" "d")
1523 (match_operand:BLK 2 "memory_operand" "Q")]
1524 UNSPEC_VEC_LOAD_LEN))]
1527 [(set_attr "op_type" "VRS")])
1529 ; vfenebs, vfenehs, vfenefs
1530 ; vfenezbs, vfenezhs, vfenezfs
; Find-element-not-equal, CC-setting form.  Operand 3 is a flags
; immediate: the ZS bit selects the zero-search ("vfenez") mnemonic;
; the CS bit is accepted but stripped since the "s" form of the
; template already sets the condition code.
1531 (define_insn "vec_vfenes<mode>"
1532 [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
1533 (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
1534 (match_operand:VI_HW_QHS 2 "register_operand" "v")
1535 (match_operand:QI 3 "const_mask_operand" "C")]
1537 (set (reg:CCRAW CC_REGNUM)
1538 (unspec:CCRAW [(match_dup 1)
1541 UNSPEC_VEC_VFENECC))]
1544 unsigned HOST_WIDE_INT flags = UINTVAL (operands[3]);
; Only the ZS and CS flag bits are legal here.
1546 gcc_assert (!(flags & ~(VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
1547 flags &= ~VSTRING_FLAG_CS;
1549 if (flags == VSTRING_FLAG_ZS)
1550 return "vfenez<bhfgq>s\t%v0,%v1,%v2";
1551 return "vfene<bhfgq>s\t%v0,%v1,%v2";
1553 [(set_attr "op_type" "VRR")])
1558 ; The following splitters simplify vec_sel for constant 0 or -1
1559 ; selection sources. This is required to generate efficient code for
; (mask == 0 ? 0 : -1) is the mask itself -- assumes operand 3 holds a
; comparison result whose elements are all-zeros or all-ones.
1564 [(set (match_operand:V 0 "register_operand" "")
1566 (eq (match_operand:<tointvec> 3 "register_operand" "")
1567 (match_operand:V 4 "const0_operand" ""))
1568 (match_operand:V 1 "const0_operand" "")
1569 (match_operand:V 2 "all_ones_operand" "")))]
1571 [(set (match_dup 0) (match_dup 3))]
; Re-label the mask register with the target vector mode before reuse.
1573 PUT_MODE (operands[3], <V:MODE>mode);
; (mask == 0 ? -1 : 0) is the bitwise complement of the mask.
1578 [(set (match_operand:V 0 "register_operand" "")
1580 (eq (match_operand:<tointvec> 3 "register_operand" "")
1581 (match_operand:V 4 "const0_operand" ""))
1582 (match_operand:V 1 "all_ones_operand" "")
1583 (match_operand:V 2 "const0_operand" "")))]
1585 [(set (match_dup 0) (not:V (match_dup 3)))]
1587 PUT_MODE (operands[3], <V:MODE>mode);
; (mask != 0 ? -1 : 0) is again just the mask.
1592 [(set (match_operand:V 0 "register_operand" "")
1594 (ne (match_operand:<tointvec> 3 "register_operand" "")
1595 (match_operand:V 4 "const0_operand" ""))
1596 (match_operand:V 1 "all_ones_operand" "")
1597 (match_operand:V 2 "const0_operand" "")))]
1599 [(set (match_dup 0) (match_dup 3))]
1601 PUT_MODE (operands[3], <V:MODE>mode);
; (mask != 0 ? 0 : -1) is the complement of the mask.
1606 [(set (match_operand:V 0 "register_operand" "")
1608 (ne (match_operand:<tointvec> 3 "register_operand" "")
1609 (match_operand:V 4 "const0_operand" ""))
1610 (match_operand:V 1 "const0_operand" "")
1611 (match_operand:V 2 "all_ones_operand" "")))]
1613 [(set (match_dup 0) (not:V (match_dup 3)))]
1615 PUT_MODE (operands[3], <V:MODE>mode);
; Four vsel-based selects.  They differ only in whether the mask
; (possibly complemented) is compared against 0 or -1, which in turn
; determines the order of the source registers in the vsel template.
1618 ; op0 = op3 == 0 ? op1 : op2
1619 (define_insn "*vec_sel0<mode>"
1620 [(set (match_operand:V 0 "register_operand" "=v")
1622 (eq (match_operand:<tointvec> 3 "register_operand" "v")
1623 (match_operand:<tointvec> 4 "const0_operand" ""))
1624 (match_operand:V 1 "register_operand" "v")
1625 (match_operand:V 2 "register_operand" "v")))]
1627 "vsel\t%v0,%2,%1,%3"
1628 [(set_attr "op_type" "VRR")])
1630 ; op0 = !op3 == 0 ? op1 : op2
1631 (define_insn "*vec_sel0<mode>"
1632 [(set (match_operand:V 0 "register_operand" "=v")
1634 (eq (not:<tointvec> (match_operand:<tointvec> 3 "register_operand" "v"))
1635 (match_operand:<tointvec> 4 "const0_operand" ""))
1636 (match_operand:V 1 "register_operand" "v")
1637 (match_operand:V 2 "register_operand" "v")))]
1639 "vsel\t%v0,%1,%2,%3"
1640 [(set_attr "op_type" "VRR")])
1642 ; op0 = op3 == -1 ? op1 : op2
1643 (define_insn "*vec_sel1<mode>"
1644 [(set (match_operand:V 0 "register_operand" "=v")
1646 (eq (match_operand:<tointvec> 3 "register_operand" "v")
1647 (match_operand:<tointvec> 4 "all_ones_operand" ""))
1648 (match_operand:V 1 "register_operand" "v")
1649 (match_operand:V 2 "register_operand" "v")))]
1651 "vsel\t%v0,%1,%2,%3"
1652 [(set_attr "op_type" "VRR")])
1654 ; op0 = !op3 == -1 ? op1 : op2
1655 (define_insn "*vec_sel1<mode>"
1656 [(set (match_operand:V 0 "register_operand" "=v")
1658 (eq (not:<tointvec> (match_operand:<tointvec> 3 "register_operand" "v"))
1659 (match_operand:<tointvec> 4 "all_ones_operand" ""))
1660 (match_operand:V 1 "register_operand" "v")
1661 (match_operand:V 2 "register_operand" "v")))]
1663 "vsel\t%v0,%2,%1,%3"
1664 [(set_attr "op_type" "VRR")])
; Modular (truncating) pack: two double-width vectors -> one narrow one.
1669 (define_insn "vec_pack_trunc_<mode>"
1670 [(set (match_operand:<vec_half> 0 "register_operand" "=v")
1671 (vec_concat:<vec_half>
1672 (truncate:<vec_halfhalf>
1673 (match_operand:VI_HW_HSD 1 "register_operand" "v"))
1674 (truncate:<vec_halfhalf>
1675 (match_operand:VI_HW_HSD 2 "register_operand" "v"))))]
1677 "vpk<bhfgq>\t%0,%1,%2"
1678 [(set_attr "op_type" "VRR")])
1680 ; vpksh, vpksf, vpksg
; Signed saturating pack.
1681 (define_insn "vec_pack_ssat_<mode>"
1682 [(set (match_operand:<vec_half> 0 "register_operand" "=v")
1683 (vec_concat:<vec_half>
1684 (ss_truncate:<vec_halfhalf>
1685 (match_operand:VI_HW_HSD 1 "register_operand" "v"))
1686 (ss_truncate:<vec_halfhalf>
1687 (match_operand:VI_HW_HSD 2 "register_operand" "v"))))]
1689 "vpks<bhfgq>\t%0,%1,%2"
1690 [(set_attr "op_type" "VRR")])
1692 ; vpklsh, vpklsf, vpklsg
; Unsigned ("logical") saturating pack.
1693 (define_insn "vec_pack_usat_<mode>"
1694 [(set (match_operand:<vec_half> 0 "register_operand" "=v")
1695 (vec_concat:<vec_half>
1696 (us_truncate:<vec_halfhalf>
1697 (match_operand:VI_HW_HSD 1 "register_operand" "v"))
1698 (us_truncate:<vec_halfhalf>
1699 (match_operand:VI_HW_HSD 2 "register_operand" "v"))))]
1701 "vpkls<bhfgq>\t%0,%1,%2"
1702 [(set_attr "op_type" "VRR")])
1704 ;; vector unpack v16qi
; Sign-extend ("s" suffix) the high eight byte elements (0-7) to V8HI.
1708 (define_insn "vec_unpacks_hi_v16qi"
1709 [(set (match_operand:V8HI 0 "register_operand" "=v")
1712 (match_operand:V16QI 1 "register_operand" "v")
1713 (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)
1714 (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
1717 [(set_attr "op_type" "VRR")])
; Sign-extend the low eight byte elements (8-15).
1719 (define_insn "vec_unpacks_lo_v16qi"
1720 [(set (match_operand:V8HI 0 "register_operand" "=v")
1723 (match_operand:V16QI 1 "register_operand" "v")
1724 (parallel [(const_int 8) (const_int 9) (const_int 10)(const_int 11)
1725 (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))))]
1728 [(set_attr "op_type" "VRR")])
; Zero-extend ("u" suffix) the high eight byte elements.
1732 (define_insn "vec_unpacku_hi_v16qi"
1733 [(set (match_operand:V8HI 0 "register_operand" "=v")
1736 (match_operand:V16QI 1 "register_operand" "v")
1737 (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)
1738 (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
1741 [(set_attr "op_type" "VRR")])
; Zero-extend the low eight byte elements.
1743 (define_insn "vec_unpacku_lo_v16qi"
1744 [(set (match_operand:V8HI 0 "register_operand" "=v")
1747 (match_operand:V16QI 1 "register_operand" "v")
1748 (parallel [(const_int 8) (const_int 9) (const_int 10)(const_int 11)
1749 (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))))]
1752 [(set_attr "op_type" "VRR")])
1754 ;; vector unpack v8hi
; Sign-extend the high four HI elements (0-3) to V4SI.
1758 (define_insn "vec_unpacks_hi_v8hi"
1759 [(set (match_operand:V4SI 0 "register_operand" "=v")
1762 (match_operand:V8HI 1 "register_operand" "v")
1763 (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))))]
1766 [(set_attr "op_type" "VRR")])
; Sign-extend the low four HI elements (4-7).
1768 (define_insn "vec_unpacks_lo_v8hi"
1769 [(set (match_operand:V4SI 0 "register_operand" "=v")
1772 (match_operand:V8HI 1 "register_operand" "v")
1773 (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
1776 [(set_attr "op_type" "VRR")])
; Zero-extend the high four HI elements.
1780 (define_insn "vec_unpacku_hi_v8hi"
1781 [(set (match_operand:V4SI 0 "register_operand" "=v")
1784 (match_operand:V8HI 1 "register_operand" "v")
1785 (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))))]
1788 [(set_attr "op_type" "VRR")])
; Zero-extend the low four HI elements.
1790 (define_insn "vec_unpacku_lo_v8hi"
1791 [(set (match_operand:V4SI 0 "register_operand" "=v")
1794 (match_operand:V8HI 1 "register_operand" "v")
1795 (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
1798 [(set_attr "op_type" "VRR")])
1800 ;; vector unpack v4si
; Sign-extend the high two SI elements (0-1) to V2DI.
1804 (define_insn "vec_unpacks_hi_v4si"
1805 [(set (match_operand:V2DI 0 "register_operand" "=v")
1808 (match_operand:V4SI 1 "register_operand" "v")
1809 (parallel [(const_int 0)(const_int 1)]))))]
1812 [(set_attr "op_type" "VRR")])
; Sign-extend the low two SI elements (2-3).
1814 (define_insn "vec_unpacks_lo_v4si"
1815 [(set (match_operand:V2DI 0 "register_operand" "=v")
1818 (match_operand:V4SI 1 "register_operand" "v")
1819 (parallel [(const_int 2)(const_int 3)]))))]
1822 [(set_attr "op_type" "VRR")])
; Zero-extend the high two SI elements.
1826 (define_insn "vec_unpacku_hi_v4si"
1827 [(set (match_operand:V2DI 0 "register_operand" "=v")
1830 (match_operand:V4SI 1 "register_operand" "v")
1831 (parallel [(const_int 0)(const_int 1)]))))]
1834 [(set_attr "op_type" "VRR")])
; Zero-extend the low two SI elements.
1836 (define_insn "vec_unpacku_lo_v4si"
1837 [(set (match_operand:V2DI 0 "register_operand" "=v")
1840 (match_operand:V4SI 1 "register_operand" "v")
1841 (parallel [(const_int 2)(const_int 3)]))))]
1844 [(set_attr "op_type" "VRR")])
1846 ;; vector load lengthened
1848 ; vflls float -> double
; Widen the even-indexed SF elements (0 and 2) of operand 1 to DF.
1849 (define_insn "*vec_extendv4sf"
1850 [(set (match_operand:V2DF 0 "register_operand" "=v")
1853 (match_operand:V4SF 1 "register_operand" "v")
1854 (parallel [(const_int 0) (const_int 2)]))))]
1857 [(set_attr "op_type" "VRR")])
; Expander: operand 2 is a scratch V4SF that receives a rearrangement
; of operand 1 (the unspec destination is not visible here --
; presumably the scratch) before lanes 0 and 2 are widened.
1859 (define_expand "vec_unpacks_lo_v4sf"
1861 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
1864 (set (match_operand:V2DF 0 "register_operand" "=v")
1868 (parallel [(const_int 0) (const_int 2)]))))]
1870 { operands[2] = gen_reg_rtx(V4SFmode); })
; Same scheme for the high half.
1872 (define_expand "vec_unpacks_hi_v4sf"
1874 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
1877 (set (match_operand:V2DF 0 "register_operand" "=v")
1881 (parallel [(const_int 0) (const_int 2)]))))]
1883 { operands[2] = gen_reg_rtx(V4SFmode); })
1886 ; double -> long double
; Widen DF element 0 of operand 1 to TF (single-element V1TF result).
1887 (define_insn "*vec_extendv2df"
1888 [(set (match_operand:V1TF 0 "register_operand" "=v")
1891 (match_operand:V2DF 1 "register_operand" "v")
1892 (parallel [(const_int 0)]))))]
1895 [(set_attr "op_type" "VRR")])
; Expander: operand 2 is a scratch V2DF that receives a rearrangement
; of operand 1 (unspec destination not visible here) before element 0
; is widened.
1897 (define_expand "vec_unpacks_lo_v2df"
1899 (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "v")
1902 (set (match_operand:V1TF 0 "register_operand" "=v")
1906 (parallel [(const_int 0)]))))]
1908 { operands[2] = gen_reg_rtx (V2DFmode); })
; Same scheme for the high element.
1910 (define_expand "vec_unpacks_hi_v2df"
1912 (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "v")
1915 (set (match_operand:V1TF 0 "register_operand" "=v")
1919 (parallel [(const_int 0)]))))]
1921 { operands[2] = gen_reg_rtx (V2DFmode); })
1924 ; 2 x v2df -> 1 x v4sf
; Convert each V2DF input to V4SF (operands 3/4), then use a byte
; permute to gather the converted SF values of both intermediates into
; the final V4SF result.
1925 (define_expand "vec_pack_trunc_v2df"
1927 (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "")
1928 (const_int VEC_INEXACT)
1929 (const_int VEC_RND_CURRENT)]
1932 (unspec:V4SF [(match_operand:V2DF 2 "register_operand" "")
1933 (const_int VEC_INEXACT)
1934 (const_int VEC_RND_CURRENT)]
1937 (unspec:V16QI [(subreg:V16QI (match_dup 3) 0)
1938 (subreg:V16QI (match_dup 4) 0)
1941 (set (match_operand:V4SF 0 "register_operand" "")
1942 (subreg:V4SF (match_dup 6) 0))]
1945 rtx constv, perm[16];
; Permute selects bytes 0-3 and 8-11 of each source vector, i.e. the
; SF elements at indices 0 and 2 of each converted input.
1948 for (i = 0; i < 4; ++i)
1950 perm[i] = GEN_INT (i);
1951 perm[i + 4] = GEN_INT (i + 8);
1952 perm[i + 8] = GEN_INT (i + 16);
1953 perm[i + 12] = GEN_INT (i + 24);
1955 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
; operands 3/4: converted halves; 5: permute pattern; 6: raw byte result.
1957 operands[3] = gen_reg_rtx (V4SFmode);
1958 operands[4] = gen_reg_rtx (V4SFmode);
1959 operands[5] = force_reg (V16QImode, constv);
1960 operands[6] = gen_reg_rtx (V16QImode);
1968 ; vec_pack_sfix_trunc: convert + pack ?
1969 ; vec_pack_ufix_trunc
1970 ; vec_unpacks_float_hi
1971 ; vec_unpacks_float_lo
1972 ; vec_unpacku_float_hi
1973 ; vec_unpacku_float_lo