1 ;;- Instruction patterns for the System z vector facility
2 ;;  Copyright (C) 2015-2020 Free Software Foundation, Inc.
3 ;;  Contributed by Andreas Krebbel (Andreas.Krebbel@de.ibm.com)
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it under
8 ;; the terms of the GNU General Public License as published by the Free
9 ;; Software Foundation; either version 3, or (at your option) any later
10 ;; version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 ;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 ;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 ;; for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3.  If not see
19 ;; <http://www.gnu.org/licenses/>.
21 ; All vector modes supported in a vector register
22 (define_mode_iterator V
23   [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI V1SF
24    V2SF V4SF V1DF V2DF])
25 (define_mode_iterator VT
26   [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI V1SF
27    V2SF V4SF V1DF V2DF V1TF V1TI TI])
29 ; All modes directly supported by the hardware having full vector reg size
30 ; V_HW2 exists so that two iterators can expand independently of each other, e.g. in vcond.
31 ; It's similar to V_HW, but not fully identical: V1TI is not included, because
32 ; there are no 128-bit compares.
33 (define_mode_iterator V_HW  [V16QI V8HI V4SI V2DI (V1TI "TARGET_VXE") V2DF
34                              (V4SF "TARGET_VXE") (V1TF "TARGET_VXE")
35                              (TF "TARGET_VXE")])
36 (define_mode_iterator V_HW2 [V16QI V8HI V4SI V2DI V2DF (V4SF "TARGET_VXE")
37                              (V1TF "TARGET_VXE") (TF "TARGET_VXE")])
39 (define_mode_iterator V_HW_64 [V2DI V2DF])
40 (define_mode_iterator VT_HW_HSDT [V8HI V4SI V4SF V2DI V2DF V1TI V1TF TI TF])
41 (define_mode_iterator V_HW_HSD [V8HI V4SI (V4SF "TARGET_VXE") V2DI V2DF])
43 ; Including TI for instructions that support it (va, vn, ...)
44 (define_mode_iterator VT_HW [V16QI V8HI V4SI V2DI V2DF V1TI TI (V4SF "TARGET_VXE") (V1TF "TARGET_VXE")])
46 ; All full size integer vector modes supported in a vector register + TImode
47 (define_mode_iterator VIT_HW    [V16QI V8HI V4SI V2DI V1TI TI])
48 (define_mode_iterator VI_HW     [V16QI V8HI V4SI V2DI])
49 (define_mode_iterator VI_HW_QHS [V16QI V8HI V4SI])
50 (define_mode_iterator VI_HW_HSD [V8HI  V4SI V2DI])
51 (define_mode_iterator VI_HW_HS  [V8HI  V4SI])
52 (define_mode_iterator VI_HW_QH  [V16QI V8HI])
53 (define_mode_iterator VI_HW_4   [V4SI V4SF])
55 ; All integer vector modes supported in a vector register + TImode
56 (define_mode_iterator VIT [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI V1TI TI])
57 (define_mode_iterator VI  [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI V1DI V2DI])
58 (define_mode_iterator VI_QHS [V1QI V2QI V4QI V8QI V16QI V1HI V2HI V4HI V8HI V1SI V2SI V4SI])
60 (define_mode_iterator VFT [(V1SF "TARGET_VXE") (V2SF "TARGET_VXE") (V4SF "TARGET_VXE")
61                            V1DF V2DF
62                            (V1TF "TARGET_VXE") (TF "TARGET_VXE")])
64 ; FP vector modes directly supported by the HW.  This does not include
65 ; vector modes using only part of a vector register.  This iterator should
66 ; be used for instructions which might trigger IEEE exceptions.
67 (define_mode_iterator VF_HW [(V4SF "TARGET_VXE") V2DF (V1TF "TARGET_VXE")
68                              (TF "TARGET_VXE")])
70 (define_mode_iterator V_8   [V1QI])
71 (define_mode_iterator V_16  [V2QI  V1HI])
72 (define_mode_iterator V_32  [V4QI  V2HI V1SI V1SF])
73 (define_mode_iterator V_64  [V8QI  V4HI V2SI V2SF V1DI V1DF])
74 (define_mode_iterator V_128 [V16QI V8HI V4SI V4SF V2DI V2DF V1TI V1TF
75                              (TF "TARGET_VXE")])
76 (define_mode_iterator V_128_NOSINGLE [V16QI V8HI V4SI V4SF V2DI V2DF])
78 ; 32 bit int<->fp vector conversion instructions are available since VXE2 (z15).
79 (define_mode_iterator VX_VEC_CONV_BFP [V2DF (V4SF "TARGET_VXE2")])
80 (define_mode_iterator VX_VEC_CONV_INT [V2DI (V4SI "TARGET_VXE2")])
82 ; Empty string for all but TImode.  This is used to hide the TImode
83 ; expander name in case it is defined already.  See addti3 for an
84 ; example.
85 (define_mode_attr ti* [(V1QI "")  (V2QI "") (V4QI "") (V8QI "") (V16QI "")
86                        (V1HI "")  (V2HI "") (V4HI "") (V8HI "")
87                        (V1SI "")  (V2SI "") (V4SI "")
88                        (V1DI "")  (V2DI "")
89                        (V1TI "")  (TI "*")
90                        (V1SF "")  (V2SF "") (V4SF "")
91                        (V1DF "")  (V2DF "")
92                        (V1TF "")  (TF "")])
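; For illustration: the pattern name <ti*>add<mode>3 below becomes "addv4si3"
; for V4SI but "*addti3" for TI, which keeps it from clashing with the addti3
; expander defined elsewhere.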
94 ;; Facilitate dispatching TFmode expanders on z14+.
95 (define_mode_attr tf_vr [(TF "_vr") (V4SF "") (V2DF "") (V1TF "") (V1SF "")
96                          (V2SF "") (V1DF "") (V16QI "") (V8HI "") (V4SI "")
97                          (V2DI "") (V1TI "")])
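; E.g. add<mode>3<tf_vr> below yields "addv2df3" for V2DF but "addtf3_vr" for
; TF; the separate addtf3 expander then dispatches to it via EXPAND_TF.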
99 ; The element type of the vector.
100 (define_mode_attr non_vec[(V1QI "QI") (V2QI "QI") (V4QI "QI") (V8QI "QI") (V16QI "QI")
101                           (V1HI "HI") (V2HI "HI") (V4HI "HI") (V8HI "HI")
102                           (V1SI "SI") (V2SI "SI") (V4SI "SI")
103                           (V1DI "DI") (V2DI "DI")
104                           (V1TI "TI") (TI "TI")
105                           (V1SF "SF") (V2SF "SF") (V4SF "SF")
106                           (V1DF "DF") (V2DF "DF")
107                           (V1TF "TF") (TF "TF")])
109 ; Like above, but in lower case.
110 (define_mode_attr non_vec_l[(V1QI "qi") (V2QI "qi") (V4QI "qi") (V8QI "qi")
111                             (V16QI "qi")
112                             (V1HI "hi") (V2HI "hi") (V4HI "hi") (V8HI "hi")
113                             (V1SI "si") (V2SI "si") (V4SI "si")
114                             (V1DI "di") (V2DI "di")
115                             (V1TI "ti") (TI "ti")
116                             (V1SF "sf") (V2SF "sf") (V4SF "sf")
117                             (V1DF "df") (V2DF "df")
118                             (V1TF "tf") (TF "tf")])
120 ; The instruction suffix for integer instructions and for instructions
121 ; which do not care whether the operands are floating point or integer.
122 (define_mode_attr bhfgq[(V1QI "b") (V2QI "b") (V4QI "b") (V8QI "b") (V16QI "b")
123                         (V1HI "h") (V2HI "h") (V4HI "h") (V8HI "h")
124                         (V1SI "f") (V2SI "f") (V4SI "f")
125                         (V1DI "g") (V2DI "g")
126                         (V1TI "q") (TI "q")
127                         (V1SF "f") (V2SF "f") (V4SF "f")
128                         (V1DF "g") (V2DF "g")
129                         (V1TF "q")])
131 ; This is for vmalhw. It gets a 'w' attached to avoid confusion with
132 ; multiply and add logical high vmalh.
133 (define_mode_attr w [(V1QI "")  (V2QI "")  (V4QI "")  (V8QI "") (V16QI "")
134                      (V1HI "w") (V2HI "w") (V4HI "w") (V8HI "w")
135                      (V1SI "")  (V2SI "")  (V4SI "")
136                      (V1DI "")  (V2DI "")])
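; E.g. mul<mode>3 below emits "vml<bhfgq><w>", i.e. vmlb for V16QI and vmlf
; for V4SI, but vmlhw for V8HI.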
138 ; Resulting mode of a vector comparison.  For floating point modes an
139 ; integer vector mode with the same element size is picked.
140 (define_mode_attr TOINTVEC [(V1QI "V1QI") (V2QI "V2QI") (V4QI "V4QI") (V8QI "V8QI") (V16QI "V16QI")
141                             (V1HI "V1HI") (V2HI "V2HI") (V4HI "V4HI") (V8HI "V8HI")
142                             (V1SI "V1SI") (V2SI "V2SI") (V4SI "V4SI")
143                             (V1DI "V1DI") (V2DI "V2DI")
144                             (V1TI "V1TI")
145                             (V1SF "V1SI") (V2SF "V2SI") (V4SF "V4SI")
146                             (V1DF "V1DI") (V2DF "V2DI")
147                             (V1TF "V1TI") (TF "V1TI")])
149 (define_mode_attr tointvec [(V1QI "v1qi") (V2QI "v2qi") (V4QI "v4qi") (V8QI "v8qi") (V16QI "v16qi")
150                             (V1HI "v1hi") (V2HI "v2hi") (V4HI "v4hi") (V8HI "v8hi")
151                             (V1SI "v1si") (V2SI "v2si") (V4SI "v4si")
152                             (V1DI "v1di") (V2DI "v2di")
153                             (V1TI "v1ti")
154                             (V1SF "v1si") (V2SF "v2si") (V4SF "v4si")
155                             (V1DF "v1di") (V2DF "v2di")
156                             (V1TF "v1ti") (TF   "v1ti")])
158 (define_mode_attr vw [(SF "w") (V1SF "w") (V2SF "v") (V4SF "v")
159                       (DF "w") (V1DF "w") (V2DF "v")
160                       (TF "w") (V1TF "w")])
162 (define_mode_attr sdx [(SF "s") (V1SF "s") (V2SF "s") (V4SF "s")
163                        (DF "d") (V1DF "d") (V2DF "d")
164                        (TF "x") (V1TF "x")])
166 ; Vector with doubled element size.
167 (define_mode_attr vec_double [(V1QI "V1HI") (V2QI "V1HI") (V4QI "V2HI") (V8QI "V4HI") (V16QI "V8HI")
168                               (V1HI "V1SI") (V2HI "V1SI") (V4HI "V2SI") (V8HI "V4SI")
169                               (V1SI "V1DI") (V2SI "V1DI") (V4SI "V2DI")
170                               (V1DI "V1TI") (V2DI "V1TI")
171                               (V1SF "V1DF") (V2SF "V1DF") (V4SF "V2DF")])
173 ; Vector with half the element size.
174 (define_mode_attr vec_half [(V1HI "V2QI") (V2HI "V4QI") (V4HI "V8QI") (V8HI "V16QI")
175                             (V1SI "V2HI") (V2SI "V4HI") (V4SI "V8HI")
176                             (V1DI "V2SI") (V2DI "V4SI")
177                             (V1TI "V2DI")
178                             (V1DF "V2SF") (V2DF "V4SF")
179                             (V1TF "V1DF")])
181 ; Vector with half the element size AND half the number of elements.
182 (define_mode_attr vec_halfhalf
183   [(V2HI "V2QI") (V4HI "V4QI") (V8HI "V8QI")
184    (V2SI "V2HI") (V4SI "V4HI")
185    (V2DI "V2SI")
186    (V2DF "V2SF")])
188 (define_mode_attr vec_halfnumelts
189   [(V4SF "V2SF") (V4SI "V2SI")])
193 ; Comparison operators on int and fp compares which are directly
194 ; supported by the HW.
195 (define_code_iterator VICMP_HW_OP [eq gt gtu])
196 ; For integer comparisons, insn_cmp_op can be used in the insn name as well as in the asm output.
197 (define_code_attr insn_cmp_op [(eq "eq") (gt "h") (gtu "hl") (ge "he")])
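; E.g. combined with <bhfgq> this yields mnemonics such as vceqb (eq), vchb
; (gt) and vchlb (gtu) for V16QI compares.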
199 ; Flags for vector string instructions (vfae all 4, vfee only ZS and CS, vstrc all 4)
200 (define_constants
201   [(VSTRING_FLAG_IN         8)   ; invert result
202    (VSTRING_FLAG_RT         4)   ; result type
203    (VSTRING_FLAG_ZS         2)   ; zero search
204    (VSTRING_FLAG_CS         1)]) ; condition code set
206 (include "vx-builtins.md")
208 ; Full HW vector size moves
210 ; We don't use lm/stm for 128 bit moves since they are slower than
211 ; splitting the move into separate moves.
213 ; FIXME: More constants are possible by enabling jxx, jyy constraints
214 ; for TImode (use double-int for the calculations)
216 ; vgmb, vgmh, vgmf, vgmg, vrepib, vrepih, vrepif, vrepig
217 (define_insn "mov<mode><tf_vr>"
218   [(set (match_operand:V_128 0 "nonimmediate_operand" "=v,v,R,  v,  v,  v,  v,  v,v,*d,*d,?o")
219         (match_operand:V_128 1 "general_operand"      " v,R,v,j00,jm1,jyy,jxx,jKK,d, v,dT,*d"))]
220   ""
221   "@
222    vlr\t%v0,%v1
223    vl\t%v0,%1%A1
224    vst\t%v1,%0%A0
225    vzero\t%v0
226    vone\t%v0
227    vgbm\t%v0,%t1
228    vgm<bhfgq>\t%v0,%s1,%e1
229    vrepi<bhfgq>\t%v0,%h1
230    vlvgp\t%v0,%1,%N1
231    #
232    #
233    #"
234   [(set_attr "cpu_facility" "vx,vx,vx,vx,vx,vx,vx,vx,vx,vx,*,*")
235    (set_attr "op_type"      "VRR,VRX,VRX,VRI,VRI,VRI,VRI,VRI,VRR,*,*,*")])
237 (define_expand "movtf"
238   [(match_operand:TF 0 "nonimmediate_operand" "")
239    (match_operand:TF 1 "general_operand"      "")]
240   ""
241   { EXPAND_MOVTF(movtf); })
243 ; VR -> GPR: there is no instruction for this, so split it into two 64-bit element extracts.
244 (define_split
245   [(set (match_operand:V_128 0 "register_operand" "")
246         (match_operand:V_128 1 "register_operand" ""))]
247   "TARGET_VX && GENERAL_REG_P (operands[0]) && VECTOR_REG_P (operands[1])"
248   [(set (match_dup 2)
249         (unspec:DI [(subreg:V2DI (match_dup 1) 0)
250                     (const_int 0)] UNSPEC_VEC_EXTRACT))
251    (set (match_dup 3)
252         (unspec:DI [(subreg:V2DI (match_dup 1) 0)
253                     (const_int 1)] UNSPEC_VEC_EXTRACT))]
254 {
255   operands[2] = operand_subword (operands[0], 0, 0, <MODE>mode);
256   operands[3] = operand_subword (operands[0], 1, 0, <MODE>mode);
257 })
259 ; Split the 128 bit GPR move into two word mode moves
260 ; s390_split_ok_p decides which part needs to be moved first.
262 (define_split
263   [(set (match_operand:V_128 0 "nonimmediate_operand" "")
264         (match_operand:V_128 1 "general_operand" ""))]
265   "reload_completed
266    && s390_split_ok_p (operands[0], operands[1], <MODE>mode, 0)"
267   [(set (match_dup 2) (match_dup 4))
268    (set (match_dup 3) (match_dup 5))]
269 {
270   operands[2] = operand_subword (operands[0], 0, 0, <MODE>mode);
271   operands[3] = operand_subword (operands[0], 1, 0, <MODE>mode);
272   operands[4] = operand_subword (operands[1], 0, 0, <MODE>mode);
273   operands[5] = operand_subword (operands[1], 1, 0, <MODE>mode);
274 })
276 (define_split
277   [(set (match_operand:V_128 0 "nonimmediate_operand" "")
278         (match_operand:V_128 1 "general_operand" ""))]
279   "reload_completed
280    && s390_split_ok_p (operands[0], operands[1], <MODE>mode, 1)"
281   [(set (match_dup 2) (match_dup 4))
282    (set (match_dup 3) (match_dup 5))]
283 {
284   operands[2] = operand_subword (operands[0], 1, 0, <MODE>mode);
285   operands[3] = operand_subword (operands[0], 0, 0, <MODE>mode);
286   operands[4] = operand_subword (operands[1], 1, 0, <MODE>mode);
287   operands[5] = operand_subword (operands[1], 0, 0, <MODE>mode);
288 })
290 ; This is the vector equivalent to the TImode splitter in s390.md.  It
291 ; is required if both target GPRs occur in the source address operand.
293 ; For non-s_operands at least one of the target GPRs does not conflict
294 ; with the address operand and one of the splitters above will take
295 ; over.
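; Illustrative scenario: a V2DI load from an address like 0(%r4,%r5) into the
; GPR pair %r4/%r5.  The splitter below first loads the full address into the
; second GPR of the target pair; the remaining move is then handled by the
; word mode splitters above.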
296 (define_split
297   [(set (match_operand:V_128 0 "register_operand" "")
298         (match_operand:V_128 1 "memory_operand" ""))]
299   "TARGET_ZARCH && reload_completed
300    && !VECTOR_REG_P (operands[0])
301    && !s_operand (operands[1], VOIDmode)"
302   [(set (match_dup 0) (match_dup 1))]
303 {
304   rtx addr = operand_subword (operands[0], 1, 0, <MODE>mode);
305   addr = gen_lowpart (Pmode, addr);
306   s390_load_address (addr, XEXP (operands[1], 0));
307   operands[1] = replace_equiv_address (operands[1], addr);
308 })
310 ; Moves for smaller vector modes.
312 ; In these patterns only the vlr, vone, and vzero instructions write
313 ; VR bytes outside the mode.  This should be ok since we disallow
314 ; formerly bigger modes being accessed with smaller modes via
315 ; subreg. Note: The vone, vzero instructions could easily be replaced
316 ; with vlei which would only access the bytes belonging to the mode.
317 ; However, this would probably be slower.
319 (define_insn "mov<mode>"
320   [(set (match_operand:V_8 0 "nonimmediate_operand" "=v,v,d,v,R,  v,  v,  v,  v,d,  Q,  S,  Q,  S,  d,  d,d,R,T")
321         (match_operand:V_8 1 "general_operand"      " v,d,v,R,v,j00,jm1,jyy,jxx,d,j00,j00,jm1,jm1,j00,jm1,T,d,d"))]
322   "TARGET_VX"
323   "@
324    vlr\t%v0,%v1
325    vlvgb\t%v0,%1,0
326    vlgvb\t%0,%v1,0
327    vleb\t%v0,%1,0
328    vsteb\t%v1,%0,0
329    vzero\t%v0
330    vone\t%v0
331    vgbm\t%v0,%t1
332    vgm\t%v0,%s1,%e1
333    lr\t%0,%1
334    mvi\t%0,0
335    mviy\t%0,0
336    mvi\t%0,-1
337    mviy\t%0,-1
338    lhi\t%0,0
339    lhi\t%0,-1
340    llc\t%0,%1
341    stc\t%1,%0
342    stcy\t%1,%0"
343   [(set_attr "op_type"      "VRR,VRS,VRS,VRX,VRX,VRI,VRI,VRI,VRI,RR,SI,SIY,SI,SIY,RI,RI,RXY,RX,RXY")])
345 (define_insn "mov<mode>"
346   [(set (match_operand:V_16 0 "nonimmediate_operand" "=v,v,d,v,R,  v,  v,  v,  v,d,  Q,  Q,  d,  d,d,d,d,R,T,b")
347         (match_operand:V_16 1 "general_operand"      " v,d,v,R,v,j00,jm1,jyy,jxx,d,j00,jm1,j00,jm1,R,T,b,d,d,d"))]
348   ""
349   "@
350    vlr\t%v0,%v1
351    vlvgh\t%v0,%1,0
352    vlgvh\t%0,%v1,0
353    vleh\t%v0,%1,0
354    vsteh\t%v1,%0,0
355    vzero\t%v0
356    vone\t%v0
357    vgbm\t%v0,%t1
358    vgm\t%v0,%s1,%e1
359    lr\t%0,%1
360    mvhhi\t%0,0
361    mvhhi\t%0,-1
362    lhi\t%0,0
363    lhi\t%0,-1
364    lh\t%0,%1
365    lhy\t%0,%1
366    lhrl\t%0,%1
367    sth\t%1,%0
368    sthy\t%1,%0
369    sthrl\t%1,%0"
370   [(set_attr "op_type"      "VRR,VRS,VRS,VRX,VRX,VRI,VRI,VRI,VRI,RR,SIL,SIL,RI,RI,RX,RXY,RIL,RX,RXY,RIL")])
372 (define_insn "mov<mode>"
373   [(set (match_operand:V_32 0 "nonimmediate_operand" "=f,f,f,R,T,v,v,d,v,R,  f,  v,  v,  v,  v,  Q,  Q,  d,  d,d,d,d,d,R,T,b")
374         (match_operand:V_32 1 "general_operand"      " f,R,T,f,f,v,d,v,R,v,j00,j00,jm1,jyy,jxx,j00,jm1,j00,jm1,b,d,R,T,d,d,d"))]
375   "TARGET_VX"
376   "@
377    ldr\t%v0,%v1
378    lde\t%0,%1
379    ley\t%0,%1
380    ste\t%1,%0
381    stey\t%1,%0
382    vlr\t%v0,%v1
383    vlvgf\t%v0,%1,0
384    vlgvf\t%0,%v1,0
385    vlef\t%v0,%1,0
386    vstef\t%1,%0,0
387    lzer\t%v0
388    vzero\t%v0
389    vone\t%v0
390    vgbm\t%v0,%t1
391    vgm\t%v0,%s1,%e1
392    mvhi\t%0,0
393    mvhi\t%0,-1
394    lhi\t%0,0
395    lhi\t%0,-1
396    lrl\t%0,%1
397    lr\t%0,%1
398    l\t%0,%1
399    ly\t%0,%1
400    st\t%1,%0
401    sty\t%1,%0
402    strl\t%1,%0"
403   [(set_attr "op_type" "RR,RXE,RXY,RX,RXY,VRR,VRS,VRS,VRX,VRX,RRE,VRI,VRI,VRI,VRI,SIL,SIL,RI,RI,
404                         RIL,RR,RX,RXY,RX,RXY,RIL")])
406 (define_insn "mov<mode>"
407   [(set (match_operand:V_64 0 "nonimmediate_operand"
408          "=f,f,f,R,T,v,v,d,v,R,  f,  v,  v,  v,  v,  Q,  Q,  d,  d,f,d,d,d,d,T,b")
409         (match_operand:V_64 1 "general_operand"
410          " f,R,T,f,f,v,d,v,R,v,j00,j00,jm1,jyy,jxx,j00,jm1,j00,jm1,d,f,b,d,T,d,d"))]
411   "TARGET_ZARCH"
412   "@
413    ldr\t%0,%1
414    ld\t%0,%1
415    ldy\t%0,%1
416    std\t%1,%0
417    stdy\t%1,%0
418    vlr\t%v0,%v1
419    vlvgg\t%v0,%1,0
420    vlgvg\t%0,%v1,0
421    vleg\t%v0,%1,0
422    vsteg\t%v1,%0,0
423    lzdr\t%0
424    vzero\t%v0
425    vone\t%v0
426    vgbm\t%v0,%t1
427    vgm\t%v0,%s1,%e1
428    mvghi\t%0,0
429    mvghi\t%0,-1
430    lghi\t%0,0
431    lghi\t%0,-1
432    ldgr\t%0,%1
433    lgdr\t%0,%1
434    lgrl\t%0,%1
435    lgr\t%0,%1
436    lg\t%0,%1
437    stg\t%1,%0
438    stgrl\t%1,%0"
439   [(set_attr "op_type" "RRE,RX,RXY,RX,RXY,VRR,VRS,VRS,VRX,VRX,RRE,VRI,VRI,VRI,VRI,
440                         SIL,SIL,RI,RI,RRE,RRE,RIL,RR,RXY,RXY,RIL")])
443 ; vec_load_lanes?
445 ; vec_store_lanes?
447 ; vec_set is supposed to *modify* an existing vector so operand 0 is
448 ; duplicated as input operand.
449 (define_expand "vec_set<mode>"
450   [(set (match_operand:V                    0 "register_operand"  "")
451         (unspec:V [(match_operand:<non_vec> 1 "general_operand"   "")
452                    (match_operand:SI        2 "nonmemory_operand" "")
453                    (match_dup 0)]
454                    UNSPEC_VEC_SET))]
455   "TARGET_VX")
457 ; FIXME: Support also vector mode operands for 1
458 ; FIXME: A target memory operand seems to be useful; otherwise we end
459 ; up with vl vlvgg vst.  Shouldn't the middle-end be able to handle
460 ; that itself?
461 ; vlvgb, vlvgh, vlvgf, vlvgg, vleb, vleh, vlef, vleg, vleib, vleih, vleif, vleig
462 (define_insn "*vec_set<mode>"
463   [(set (match_operand:V                    0 "register_operand"  "=v,v,v")
464         (unspec:V [(match_operand:<non_vec> 1 "general_operand"    "d,R,K")
465                    (match_operand:SI        2 "nonmemory_operand" "an,I,I")
466                    (match_operand:V         3 "register_operand"   "0,0,0")]
467                   UNSPEC_VEC_SET))]
468   "TARGET_VX
469    && (!CONST_INT_P (operands[2])
470        || UINTVAL (operands[2]) < GET_MODE_NUNITS (<V:MODE>mode))"
471   "@
472    vlvg<bhfgq>\t%v0,%1,%Y2
473    vle<bhfgq>\t%v0,%1,%2
474    vlei<bhfgq>\t%v0,%1,%2"
475   [(set_attr "op_type" "VRS,VRX,VRI")])
477 ; vlvgb, vlvgh, vlvgf, vlvgg
478 (define_insn "*vec_set<mode>_plus"
479   [(set (match_operand:V                      0 "register_operand" "=v")
480         (unspec:V [(match_operand:<non_vec>   1 "general_operand"   "d")
481                    (plus:SI (match_operand:SI 2 "register_operand"  "a")
482                             (match_operand:SI 4 "const_int_operand" "n"))
483                    (match_operand:V           3 "register_operand"  "0")]
484                   UNSPEC_VEC_SET))]
485   "TARGET_VX"
486   "vlvg<bhfgq>\t%v0,%1,%Y4(%2)"
487   [(set_attr "op_type" "VRS")])
490 ; FIXME: Support also vector mode operands for 0
491 ; FIXME: This should be (vec_select ..) or something, but that only allows constant selectors :(
492 ; This is used via RTL standard name as well as for expanding the builtin
493 (define_expand "vec_extract<mode><non_vec_l>"
494   [(set (match_operand:<non_vec> 0 "nonimmediate_operand" "")
495         (unspec:<non_vec> [(match_operand:V  1 "register_operand" "")
496                            (match_operand:SI 2 "nonmemory_operand" "")]
497                           UNSPEC_VEC_EXTRACT))]
498   "TARGET_VX")
500 ; vlgvb, vlgvh, vlgvf, vlgvg, vsteb, vsteh, vstef, vsteg
501 (define_insn "*vec_extract<mode>"
502   [(set (match_operand:<non_vec> 0 "nonimmediate_operand"          "=d,R")
503         (unspec:<non_vec> [(match_operand:V  1 "register_operand"   "v,v")
504                            (match_operand:SI 2 "nonmemory_operand" "an,I")]
505                           UNSPEC_VEC_EXTRACT))]
506   "TARGET_VX
507    && (!CONST_INT_P (operands[2])
508        || UINTVAL (operands[2]) < GET_MODE_NUNITS (<V:MODE>mode))"
509   "@
510    vlgv<bhfgq>\t%0,%v1,%Y2
511    vste<bhfgq>\t%v1,%0,%2"
512   [(set_attr "op_type" "VRS,VRX")])
514 ; vlgvb, vlgvh, vlgvf, vlgvg
515 (define_insn "*vec_extract<mode>_plus"
516   [(set (match_operand:<non_vec>                      0 "nonimmediate_operand" "=d")
517         (unspec:<non_vec> [(match_operand:V           1 "register_operand"      "v")
518                            (plus:SI (match_operand:SI 2 "nonmemory_operand"     "a")
519                                     (match_operand:SI 3 "const_int_operand"     "n"))]
520                            UNSPEC_VEC_EXTRACT))]
521   "TARGET_VX"
522   "vlgv<bhfgq>\t%0,%v1,%Y3(%2)"
523   [(set_attr "op_type" "VRS")])
525 (define_expand "vec_init<mode><non_vec_l>"
526   [(match_operand:V_128 0 "register_operand" "")
527    (match_operand:V_128 1 "nonmemory_operand" "")]
528   "TARGET_VX"
529 {
530   s390_expand_vec_init (operands[0], operands[1]);
531   DONE;
532 })
534 (define_insn "*vec_vllezlf<mode>"
535   [(set (match_operand:VI_HW_4              0 "register_operand" "=v")
536         (vec_concat:VI_HW_4
537          (vec_concat:<vec_halfnumelts>
538           (match_operand:<non_vec> 1 "memory_operand"    "R")
539           (const_int 0))
540          (vec_concat:<vec_halfnumelts>
541           (const_int 0)
542           (const_int 0))))]
543   "TARGET_VXE"
544   "vllezlf\t%v0,%1"
545   [(set_attr "op_type" "VRX")])
547 ; Replicate from vector element
548 ; vrepb, vreph, vrepf, vrepg
549 (define_insn "*vec_splat<mode>"
550   [(set (match_operand:V_128_NOSINGLE   0 "register_operand" "=v")
551         (vec_duplicate:V_128_NOSINGLE
552          (vec_select:<non_vec>
553           (match_operand:V_128_NOSINGLE 1 "register_operand"  "v")
554           (parallel
555            [(match_operand:QI 2 "const_mask_operand" "C")]))))]
556   "TARGET_VX && UINTVAL (operands[2]) < GET_MODE_NUNITS (<MODE>mode)"
557   "vrep<bhfgq>\t%v0,%v1,%2"
558   [(set_attr "op_type" "VRI")])
560 ; vlrepb, vlreph, vlrepf, vlrepg, vrepib, vrepih, vrepif, vrepig, vrepb, vreph, vrepf, vrepg
561 (define_insn "*vec_splats<mode>"
562   [(set (match_operand:V_128_NOSINGLE                          0 "register_operand" "=v,v,v,v")
563         (vec_duplicate:V_128_NOSINGLE (match_operand:<non_vec> 1 "general_operand"  " R,K,v,d")))]
564   "TARGET_VX"
565   "@
566    vlrep<bhfgq>\t%v0,%1
567    vrepi<bhfgq>\t%v0,%h1
568    vrep<bhfgq>\t%v0,%v1,0
569    #"
570   [(set_attr "op_type" "VRX,VRI,VRI,*")])
572 ; vlbrreph, vlbrrepf, vlbrrepg
573 (define_insn "*vec_splats_bswap_vec<mode>"
574   [(set (match_operand:V_HW_HSD                           0 "register_operand"        "=v")
575         (bswap:V_HW_HSD
576          (vec_duplicate:V_HW_HSD (match_operand:<non_vec> 1 "memory_operand"           "R"))))
577    (use (match_operand:V16QI                              2 "permute_pattern_operand"  "X"))]
578   "TARGET_VXE2"
579   "vlbrrep<bhfgq>\t%v0,%1"
580   [(set_attr "op_type" "VRX")])
582 ; Why do we need both? Shouldn't there be a canonical form?
583 ; vlbrreph, vlbrrepf, vlbrrepg
584 (define_insn "*vec_splats_bswap_elem<mode>"
585   [(set (match_operand:V_HW_HSD                    0 "register_operand" "=v")
586         (vec_duplicate:V_HW_HSD
587          (bswap:<non_vec> (match_operand:<non_vec> 1 "memory_operand"    "R"))))]
588   "TARGET_VXE2"
589   "vlbrrep<bhfgq>\t%v0,%1"
590   [(set_attr "op_type" "VRX")])
592 ; A TFmode operand resides in FPR register pairs while V1TF is in a
593 ; single vector register.
594 (define_insn "*vec_tf_to_v1tf_fpr"
595   [(set (match_operand:V1TF                   0 "nonimmediate_operand" "=v,v,R,v,v")
596         (vec_duplicate:V1TF (match_operand:TF 1 "general_operand"       "f,R,f,G,d")))]
597   "TARGET_VX && !TARGET_VXE"
598   "@
599    vmrhg\t%v0,%1,%N1
600    vl\t%v0,%1%A1
601    vst\t%v1,%0%A0
602    vzero\t%v0
603    vlvgp\t%v0,%1,%N1"
604   [(set_attr "op_type" "VRR,VRX,VRX,VRI,VRR")])
606 ; Both TFmode and V1TFmode operands reside in vector registers.
607 (define_insn "*vec_tf_to_v1tf_vr"
608   [(set (match_operand:V1TF                   0 "nonimmediate_operand" "=v,v,R,v,v")
609         (vec_duplicate:V1TF (match_operand:TF 1 "general_operand"       "v,R,v,G,d")))]
610   "TARGET_VXE"
611   "@
612    vlr\t%v0,%1
613    vl\t%v0,%1%A1
614    vst\t%v1,%0%A0
615    vzero\t%v0
616    vlvgp\t%v0,%1,%N1"
617   [(set_attr "op_type" "VRR,VRX,VRX,VRI,VRR")])
619 (define_insn "*fprx2_to_tf"
620   [(set (match_operand:TF               0 "nonimmediate_operand" "=v")
621         (subreg:TF (match_operand:FPRX2 1 "general_operand"       "f") 0))]
622   "TARGET_VXE"
623   "vmrhg\t%v0,%1,%N1"
624   [(set_attr "op_type" "VRR")])
626 (define_insn "*vec_ti_to_v1ti"
627   [(set (match_operand:V1TI                   0 "nonimmediate_operand" "=v,v,R,  v,  v,v")
628         (vec_duplicate:V1TI (match_operand:TI 1 "general_operand"       "v,R,v,j00,jm1,d")))]
629   "TARGET_VX"
630   "@
631    vlr\t%v0,%v1
632    vl\t%v0,%1%A1
633    vst\t%v1,%0%A0
634    vzero\t%v0
635    vone\t%v0
636    vlvgp\t%v0,%1,%N1"
637   [(set_attr "op_type" "VRR,VRX,VRX,VRI,VRI,VRR")])
639 ; vec_splats is supposed to replicate op1 into all elements of op0.
640 ; This splitter first sets the rightmost element of op0 to op1 and
641 ; then does a vec_splat to replicate that element into all other
642 ; elements.
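; For example (illustrative, register numbers made up): splatting a GPR value
; into a V4SI becomes roughly vlvgf %v0,%r2,3 followed by vrepf %v0,%v0,3.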
643 (define_split
644   [(set (match_operand:V_128_NOSINGLE                          0 "register_operand" "")
645         (vec_duplicate:V_128_NOSINGLE (match_operand:<non_vec> 1 "register_operand" "")))]
646   "TARGET_VX && GENERAL_REG_P (operands[1])"
647   [(set (match_dup 0)
648         (unspec:V_128_NOSINGLE [(match_dup 1) (match_dup 2) (match_dup 0)] UNSPEC_VEC_SET))
649    (set (match_dup 0)
650         (vec_duplicate:V_128_NOSINGLE
651          (vec_select:<non_vec>
652           (match_dup 0) (parallel [(match_dup 2)]))))]
653 {
654   operands[2] = GEN_INT (GET_MODE_NUNITS (<MODE>mode) - 1);
655 })
657 (define_predicate "vcond_comparison_operator"
658   (match_operand 0 "comparison_operator")
659 {
660   if (!HONOR_NANS (GET_MODE (XEXP (op, 0)))
661       && !HONOR_NANS (GET_MODE (XEXP (op, 1))))
662     return true;
663   switch (GET_CODE (op))
664     {
665     case LE:
666     case LT:
667     case GE:
668     case GT:
669     case LTGT:
670       /* Signaling vector comparisons are supported only on z14+.  */
671       return TARGET_VXE || TARGET_NONSIGNALING_VECTOR_COMPARE_OK;
672     default:
673       return true;
674     }
675 })
677 (define_expand "vcond<V_HW:mode><V_HW2:mode>"
678   [(set (match_operand:V_HW 0 "register_operand" "")
679         (if_then_else:V_HW
680          (match_operator 3 "vcond_comparison_operator"
681                          [(match_operand:V_HW2 4 "register_operand" "")
682                           (match_operand:V_HW2 5 "nonmemory_operand" "")])
683          (match_operand:V_HW 1 "nonmemory_operand" "")
684          (match_operand:V_HW 2 "nonmemory_operand" "")))]
685   "TARGET_VX && GET_MODE_NUNITS (<V_HW:MODE>mode) == GET_MODE_NUNITS (<V_HW2:MODE>mode)"
686 {
687   s390_expand_vcond (operands[0], operands[1], operands[2],
688                      GET_CODE (operands[3]), operands[4], operands[5]);
689   DONE;
690 })
692 (define_expand "vcondu<V_HW:mode><V_HW2:mode>"
693   [(set (match_operand:V_HW 0 "register_operand" "")
694         (if_then_else:V_HW
695          (match_operator 3 "comparison_operator"
696                          [(match_operand:V_HW2 4 "register_operand" "")
697                           (match_operand:V_HW2 5 "nonmemory_operand" "")])
698          (match_operand:V_HW 1 "nonmemory_operand" "")
699          (match_operand:V_HW 2 "nonmemory_operand" "")))]
700   "TARGET_VX && GET_MODE_NUNITS (<V_HW:MODE>mode) == GET_MODE_NUNITS (<V_HW2:MODE>mode)"
701 {
702   s390_expand_vcond (operands[0], operands[1], operands[2],
703                      GET_CODE (operands[3]), operands[4], operands[5]);
704   DONE;
705 })
707 (define_expand "vcond_mask_<mode><mode>"
708   [(set (match_operand:V 0 "register_operand" "")
709         (if_then_else:V
710          (eq (match_operand:<TOINTVEC> 3 "register_operand" "")
711              (match_dup 4))
712          (match_operand:V 2 "register_operand" "")
713          (match_operand:V 1 "register_operand" "")))]
714   "TARGET_VX"
715   "operands[4] = CONST0_RTX (<TOINTVEC>mode);")
718 ; We only have HW support for byte vectors.  The middle-end is
719 ; supposed to lower the mode if required.
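; vperm semantics: each result byte i is taken from the 32-byte concatenation
; of operands 1 and 2, indexed by the low five bits of byte i of operand 3.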
720 (define_insn "vec_permv16qi"
721   [(set (match_operand:V16QI 0 "register_operand"               "=v")
722         (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
723                        (match_operand:V16QI 2 "register_operand" "v")
724                        (match_operand:V16QI 3 "register_operand" "v")]
725                       UNSPEC_VEC_PERM))]
726   "TARGET_VX"
727   "vperm\t%v0,%v1,%v2,%v3"
728   [(set_attr "op_type" "VRR")])
730 (define_insn "*vec_perm<mode>"
731   [(set (match_operand:VT_HW                                            0 "register_operand" "=v")
732         (subreg:VT_HW (unspec:V16QI [(subreg:V16QI (match_operand:VT_HW 1 "register_operand"  "v") 0)
733                                      (subreg:V16QI (match_operand:VT_HW 2 "register_operand"  "v") 0)
734                                      (match_operand:V16QI               3 "register_operand"  "v")]
735                                     UNSPEC_VEC_PERM) 0))]
736   "TARGET_VX"
737   "vperm\t%v0,%v1,%v2,%v3"
738   [(set_attr "op_type" "VRR")])
740 (define_insn "*mov_tf_to_fprx2_0"
741   [(set (subreg:DF (match_operand:FPRX2 0 "nonimmediate_operand" "=f") 0)
742         (subreg:DF (match_operand:TF    1 "general_operand"       "v") 0))]
743   "TARGET_VXE"
744   ; M4 == 1 corresponds to %v0[0] = %v1[0]; %v0[1] = %v0[1];
745   "vpdi\t%v0,%v1,%v0,1"
746   [(set_attr "op_type" "VRR")])
748 (define_insn "*mov_tf_to_fprx2_1"
749   [(set (subreg:DF (match_operand:FPRX2 0 "nonimmediate_operand" "=f") 8)
750         (subreg:DF (match_operand:TF    1 "general_operand"       "v") 8))]
751   "TARGET_VXE"
752   ; M4 == 5 corresponds to %V0[0] = %v1[1]; %V0[1] = %V0[1];
753   "vpdi\t%V0,%v1,%V0,5"
754   [(set_attr "op_type" "VRR")])
756 ; vec_perm_const for V2DI using vpdi?
759 ;; Vector integer arithmetic instructions
762 ; vab, vah, vaf, vag, vaq
764 ; We use nonimmediate_operand instead of register_operand since it is
765 ; better to have the reloads into VRs instead of splitting the
766 ; operation into two DImode ADDs.
767 (define_insn "<ti*>add<mode>3"
768   [(set (match_operand:VIT           0 "nonimmediate_operand" "=v")
769         (plus:VIT (match_operand:VIT 1 "nonimmediate_operand"  "v")
770                   (match_operand:VIT 2 "general_operand"       "v")))]
771   "TARGET_VX"
772   "va<bhfgq>\t%v0,%v1,%v2"
773   [(set_attr "op_type" "VRR")])
775 ; vsb, vsh, vsf, vsg, vsq
776 (define_insn "<ti*>sub<mode>3"
777   [(set (match_operand:VIT            0 "nonimmediate_operand" "=v")
778         (minus:VIT (match_operand:VIT 1 "nonimmediate_operand"  "v")
779                    (match_operand:VIT 2 "general_operand"       "v")))]
780   "TARGET_VX"
781   "vs<bhfgq>\t%v0,%v1,%v2"
782   [(set_attr "op_type" "VRR")])
784 ; vmlb, vmlhw, vmlf
785 (define_insn "mul<mode>3"
786   [(set (match_operand:VI_QHS              0 "register_operand" "=v")
787         (mult:VI_QHS (match_operand:VI_QHS 1 "register_operand"  "v")
788                      (match_operand:VI_QHS 2 "register_operand"  "v")))]
789   "TARGET_VX"
790   "vml<bhfgq><w>\t%v0,%v1,%v2"
791   [(set_attr "op_type" "VRR")])
793 ; vlcb, vlch, vlcf, vlcg
794 (define_insn "neg<mode>2"
795   [(set (match_operand:VI         0 "register_operand" "=v")
796         (neg:VI (match_operand:VI 1 "register_operand"  "v")))]
797   "TARGET_VX"
798   "vlc<bhfgq>\t%v0,%v1"
799   [(set_attr "op_type" "VRR")])
801 ; vlpb, vlph, vlpf, vlpg
802 (define_insn "abs<mode>2"
803   [(set (match_operand:VI         0 "register_operand" "=v")
804         (abs:VI (match_operand:VI 1 "register_operand"  "v")))]
805   "TARGET_VX"
806   "vlp<bhfgq>\t%v0,%v1"
807   [(set_attr "op_type" "VRR")])
810 ; Vector sum across
812 ; Sum across DImode parts of the 1st operand and add the rightmost
813 ; element of 2nd operand
814 ; vsumgh, vsumgf
815 (define_insn "*vec_sum2<mode>"
816   [(set (match_operand:V2DI 0 "register_operand" "=v")
817         (unspec:V2DI [(match_operand:VI_HW_HS 1 "register_operand" "v")
818                       (match_operand:VI_HW_HS 2 "register_operand" "v")]
819                      UNSPEC_VEC_VSUMG))]
820   "TARGET_VX"
821   "vsumg<bhfgq>\t%v0,%v1,%v2"
822   [(set_attr "op_type" "VRR")])
824 ; vsumb, vsumh
825 (define_insn "*vec_sum4<mode>"
826   [(set (match_operand:V4SI 0 "register_operand" "=v")
827         (unspec:V4SI [(match_operand:VI_HW_QH 1 "register_operand" "v")
828                       (match_operand:VI_HW_QH 2 "register_operand" "v")]
829                      UNSPEC_VEC_VSUM))]
830   "TARGET_VX"
831   "vsum<bhfgq>\t%v0,%v1,%v2"
832   [(set_attr "op_type" "VRR")])
835 ;; Vector bit instructions (int + fp)
838 ; Vector and
840 (define_insn "and<mode>3"
841   [(set (match_operand:VT         0 "register_operand" "=v")
842         (and:VT (match_operand:VT 1 "register_operand"  "v")
843                 (match_operand:VT 2 "register_operand"  "v")))]
844   "TARGET_VX"
845   "vn\t%v0,%v1,%v2"
846   [(set_attr "op_type" "VRR")])
848 ; Vector not and
850 (define_insn "notand<mode>3"
851   [(set (match_operand:VT                 0 "register_operand" "=v")
852         (ior:VT (not:VT (match_operand:VT 1 "register_operand"  "v"))
853                 (not:VT (match_operand:VT 2 "register_operand"  "v"))))]
854   "TARGET_VXE"
855   "vnn\t%v0,%v1,%v2"
856   [(set_attr "op_type" "VRR")])
858 ; Vector or
860 (define_insn "ior<mode>3"
861   [(set (match_operand:VT         0 "register_operand" "=v")
862         (ior:VT (match_operand:VT 1 "register_operand"  "v")
863                 (match_operand:VT 2 "register_operand"  "v")))]
864   "TARGET_VX"
865   "vo\t%v0,%v1,%v2"
866   [(set_attr "op_type" "VRR")])
868 ; Vector or with complement
870 (define_insn "ior_not<mode>3"
871   [(set (match_operand:VT                 0 "register_operand" "=v")
872         (ior:VT (not:VT (match_operand:VT 2 "register_operand"  "v"))
873                 (match_operand:VT         1 "register_operand"  "v")))]
874   "TARGET_VXE"
875   "voc\t%v0,%v1,%v2"
876   [(set_attr "op_type" "VRR")])
878 ; Vector xor
880 (define_insn "xor<mode>3"
881   [(set (match_operand:VT         0 "register_operand" "=v")
882         (xor:VT (match_operand:VT 1 "register_operand"  "v")
883                 (match_operand:VT 2 "register_operand"  "v")))]
884   "TARGET_VX"
885   "vx\t%v0,%v1,%v2"
886   [(set_attr "op_type" "VRR")])
888 ; Vector not xor
890 (define_insn "notxor<mode>3"
891   [(set (match_operand:VT                 0 "register_operand" "=v")
892         (not:VT (xor:VT (match_operand:VT 1 "register_operand"  "v")
893                         (match_operand:VT 2 "register_operand"  "v"))))]
894   "TARGET_VXE"
895   "vnx\t%v0,%v1,%v2"
896   [(set_attr "op_type" "VRR")])
898 ; Bitwise inversion of a vector
899 (define_insn "one_cmpl<mode>2"
900   [(set (match_operand:VT         0 "register_operand" "=v")
901         (not:VT (match_operand:VT 1 "register_operand"  "v")))]
902   "TARGET_VX"
903   "vnot\t%v0,%v1"
904   [(set_attr "op_type" "VRR")])
906 ; Vector population count
908 (define_expand "popcount<mode>2"
909   [(set (match_operand:VI_HW                0 "register_operand" "=v")
910         (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand"  "v")]
911                       UNSPEC_POPCNT))]
912   "TARGET_VX"
913 {
914   if (TARGET_VXE)
915     emit_insn (gen_popcount<mode>2_vxe (operands[0], operands[1]));
916   else
917     emit_insn (gen_popcount<mode>2_vx (operands[0], operands[1]));
918   DONE;
919 })
921 ; vpopctb, vpopcth, vpopctf, vpopctg
922 (define_insn "popcount<mode>2_vxe"
923   [(set (match_operand:VI_HW                0 "register_operand" "=v")
924         (unspec:VI_HW [(match_operand:VI_HW 1 "register_operand"  "v")]
925                       UNSPEC_POPCNT))]
926   "TARGET_VXE"
927   "vpopct<bhfgq>\t%v0,%v1"
928   [(set_attr "op_type" "VRR")])
930 (define_insn "popcountv16qi2_vx"
931   [(set (match_operand:V16QI                0 "register_operand" "=v")
932         (unspec:V16QI [(match_operand:V16QI 1 "register_operand"  "v")]
933                       UNSPEC_POPCNT))]
934   "TARGET_VX && !TARGET_VXE"
935   "vpopct\t%v0,%v1,0"
936   [(set_attr "op_type" "VRR")])
938 ; vpopct only counts bits in byte elements.  Bigger element sizes need
939 ; to be emulated.  Word and doubleword elements can use the sum across
940 ; instructions.  For halfword sized elements we add a one-byte-shifted
941 ; copy of the result to the result itself and then zero the even-indexed
942 ; bytes, leaving each halfword element with the bit count of its two bytes.
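; Illustrative halfword example: for the element 0xff01, vpopctb yields the
; bytes {8, 1}; adding the copy shifted right by one byte turns the low byte
; into 8 + 1 = 9; masking off the even-indexed bytes leaves the halfword
; value 9, the population count of 0xff01.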
944 (define_expand "popcountv8hi2_vx"
945   [(set (match_dup 2)
946         (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")]
947                       UNSPEC_POPCNT))
948    ; Make a copy of the result
949    (set (match_dup 3) (match_dup 2))
950    ; Generate the shift count operand in a VR (8->byte 7)
951    (set (match_dup 4) (match_dup 5))
952    (set (match_dup 4) (unspec:V16QI [(const_int 8)
953                                      (const_int 7)
954                                      (match_dup 4)] UNSPEC_VEC_SET))
955    ; Vector shift right logical by one byte
956    (set (match_dup 3)
957         (unspec:V16QI [(match_dup 3) (match_dup 4)] UNSPEC_VEC_SRLB))
958    ; Add the shifted and the original result
959    (set (match_dup 2)
960         (plus:V16QI (match_dup 2) (match_dup 3)))
961    ; Generate mask for the odd numbered byte elements
962    (set (match_dup 3)
963         (const_vector:V16QI [(const_int 0) (const_int 255)
964                              (const_int 0) (const_int 255)
965                              (const_int 0) (const_int 255)
966                              (const_int 0) (const_int 255)
967                              (const_int 0) (const_int 255)
968                              (const_int 0) (const_int 255)
969                              (const_int 0) (const_int 255)
970                              (const_int 0) (const_int 255)]))
971    ; Zero out the even indexed bytes
972    (set (match_operand:V8HI 0 "register_operand" "=v")
973         (and:V8HI (subreg:V8HI (match_dup 2) 0)
974                   (subreg:V8HI (match_dup 3) 0)))
975   ]
976   "TARGET_VX && !TARGET_VXE"
977 {
978   operands[1] = simplify_gen_subreg (V16QImode, operands[1],
979                                      V8HImode, 0);
980   operands[2] = gen_reg_rtx (V16QImode);
981   operands[3] = gen_reg_rtx (V16QImode);
982   operands[4] = gen_reg_rtx (V16QImode);
983   operands[5] = CONST0_RTX (V16QImode);
984 })
986 (define_expand "popcountv4si2_vx"
987   [(set (match_dup 2)
988         (unspec:V16QI [(match_operand:V4SI 1 "register_operand" "v")]
989                       UNSPEC_POPCNT))
990    (set (match_operand:V4SI 0 "register_operand" "=v")
991         (unspec:V4SI [(match_dup 2) (match_dup 3)]
992                      UNSPEC_VEC_VSUM))]
993   "TARGET_VX && !TARGET_VXE"
994 {
995   operands[1] = simplify_gen_subreg (V16QImode, operands[1], V4SImode, 0);
996   operands[2] = gen_reg_rtx (V16QImode);
997   operands[3] = force_reg (V16QImode, CONST0_RTX (V16QImode));
998 })
1000 (define_expand "popcountv2di2_vx"
1001   [(set (match_dup 2)
1002         (unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")]
1003                       UNSPEC_POPCNT))
1004    (set (match_dup 3)
1005         (unspec:V4SI [(match_dup 2) (match_dup 4)]
1006                      UNSPEC_VEC_VSUM))
1007    (set (match_operand:V2DI 0 "register_operand" "=v")
1008         (unspec:V2DI [(match_dup 3) (match_dup 5)]
1009                      UNSPEC_VEC_VSUMG))]
1010   "TARGET_VX && !TARGET_VXE"
1011 {
1012   operands[1] = simplify_gen_subreg (V16QImode, operands[1], V2DImode, 0);
1013   operands[2] = gen_reg_rtx (V16QImode);
1014   operands[3] = gen_reg_rtx (V4SImode);
1015   operands[4] = force_reg (V16QImode, CONST0_RTX (V16QImode));
1016   operands[5] = force_reg (V4SImode, CONST0_RTX (V4SImode));
1017 })
1019 ; Count leading zeros
1020 ; vclzb, vclzh, vclzf, vclzg
1021 (define_insn "clz<mode>2"
1022   [(set (match_operand:V        0 "register_operand" "=v")
1023         (clz:V (match_operand:V 1 "register_operand"  "v")))]
1024   "TARGET_VX"
1025   "vclz<bhfgq>\t%v0,%v1"
1026   [(set_attr "op_type" "VRR")])
1028 ; Count trailing zeros
1029 ; vctzb, vctzh, vctzf, vctzg
1030 (define_insn "ctz<mode>2"
1031   [(set (match_operand:V        0 "register_operand" "=v")
1032         (ctz:V (match_operand:V 1 "register_operand"  "v")))]
1033   "TARGET_VX"
1034   "vctz<bhfgq>\t%v0,%v1"
1035   [(set_attr "op_type" "VRR")])
1039 ; Each vector element rotated by the corresponding vector element
1040 ; verllvb, verllvh, verllvf, verllvg
1041 (define_insn "vrotl<mode>3"
1042   [(set (match_operand:VI            0 "register_operand" "=v")
1043         (rotate:VI (match_operand:VI 1 "register_operand"  "v")
1044                    (match_operand:VI 2 "register_operand"  "v")))]
1045   "TARGET_VX"
1046   "verllv<bhfgq>\t%v0,%v1,%v2"
1047   [(set_attr "op_type" "VRR")])
1050 ; Vector rotate and shift by scalar instructions
1052 (define_code_iterator VEC_SHIFTS [ashift ashiftrt lshiftrt rotate])
1053 (define_code_attr vec_shifts_name [(ashift "ashl")    (ashiftrt "ashr")
1054                                    (lshiftrt "lshr")  (rotate "rotl")])
1055 (define_code_attr vec_shifts_mnem [(ashift "vesl")    (ashiftrt "vesra")
1056                                    (lshiftrt "vesrl") (rotate "verll")])
1058 ; Each vector element rotated by a scalar
1059 (define_expand "<vec_shifts_name><mode>3"
1060   [(set (match_operand:VI 0 "register_operand" "")
1061         (VEC_SHIFTS:VI (match_operand:VI 1 "register_operand" "")
1062                        (match_operand:QI 2 "shift_count_operand" "")))]
1063   "TARGET_VX")
1065 ; verllb, verllh, verllf, verllg
1066 ; veslb,  veslh,  veslf,  veslg
1067 ; vesrab, vesrah, vesraf, vesrag
1068 ; vesrlb, vesrlh, vesrlf, vesrlg
1069 (define_insn "*<vec_shifts_name><mode>3"
1070   [(set (match_operand:VI                0 "register_operand"  "=v")
1071         (VEC_SHIFTS:VI (match_operand:VI 1 "register_operand"   "v")
1072                        (match_operand:QI 2 "shift_count_operand_vec" "jsc")))]
1073   "TARGET_VX
1074   && s390_valid_shift_count (operands[2],
1075     GET_MODE_BITSIZE (GET_MODE_INNER (<MODE>mode)) - 1)
1076   "
1077   "<vec_shifts_mnem><bhfgq>\t%v0,%v1,%Y2"
1078   [(set_attr "op_type" "VRS")])
1081 ; Shift each element by corresponding vector element
1083 ; veslvb, veslvh, veslvf, veslvg
1084 (define_insn "vashl<mode>3"
1085   [(set (match_operand:VI            0 "register_operand" "=v")
1086         (ashift:VI (match_operand:VI 1 "register_operand"  "v")
1087                    (match_operand:VI 2 "register_operand"  "v")))]
1088   "TARGET_VX"
1089   "veslv<bhfgq>\t%v0,%v1,%v2"
1090   [(set_attr "op_type" "VRR")])
1092 ; vesravb, vesravh, vesravf, vesravg
1093 (define_insn "vashr<mode>3"
1094   [(set (match_operand:VI              0 "register_operand" "=v")
1095         (ashiftrt:VI (match_operand:VI 1 "register_operand"  "v")
1096                      (match_operand:VI 2 "register_operand"  "v")))]
1097   "TARGET_VX"
1098   "vesrav<bhfgq>\t%v0,%v1,%v2"
1099   [(set_attr "op_type" "VRR")])
1101 ; vesrlvb, vesrlvh, vesrlvf, vesrlvg
1102 (define_insn "vlshr<mode>3"
1103   [(set (match_operand:VI              0 "register_operand" "=v")
1104         (lshiftrt:VI (match_operand:VI 1 "register_operand"  "v")
1105                      (match_operand:VI 2 "register_operand"  "v")))]
1106   "TARGET_VX"
1107   "vesrlv<bhfgq>\t%v0,%v1,%v2"
1108   [(set_attr "op_type" "VRR")])
1110 ; Vector shift right logical by byte
1112 ; Pattern used by e.g. popcount
1113 (define_insn "*vec_srb<mode>"
1114   [(set (match_operand:V_128                0 "register_operand" "=v")
1115         (unspec:V_128 [(match_operand:V_128 1 "register_operand"  "v")
1116                        (match_operand:V16QI 2 "register_operand"  "v")]
1117                    UNSPEC_VEC_SRLB))]
1118   "TARGET_VX"
1119   "vsrlb\t%v0,%v1,%v2"
1120   [(set_attr "op_type" "VRR")])
1123 ; Vector shift left by byte
1125 (define_insn "*vec_slb<mode>"
1126   [(set (match_operand:V_128                0 "register_operand" "=v")
1127         (unspec:V_128 [(match_operand:V_128 1 "register_operand"  "v")
1128                     (match_operand:V16QI    2 "register_operand"  "v")]
1129                    UNSPEC_VEC_SLB))]
1130   "TARGET_VX"
1131   "vslb\t%v0,%v1,%v2"
1132   [(set_attr "op_type" "VRR")])
1134 ; vec_shr is defined as shift towards element 0
1135 ; this means it is a left shift on BE targets!
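; E.g. a vec_shr of a V4SI by 32 bits moves element 1 into element 0 and
; zeroes the last element; in this big-endian layout that is a vslb by 4
; bytes, hence the UNSPEC_VEC_SLB below.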
1136 (define_expand "vec_shr_<mode>"
1137   [(set (match_dup 3)
1138         (unspec:V16QI [(match_operand:SI 2 "const_shift_by_byte_operand" "")
1139                    (const_int 7)
1140                    (match_dup 3)]
1141                    UNSPEC_VEC_SET))
1142    (set (match_operand:V_128 0 "register_operand" "")
1143         (unspec:V_128 [(match_operand:V_128 1 "register_operand" "")
1144                     (match_dup 3)]
1145                    UNSPEC_VEC_SLB))]
1146   "TARGET_VX"
1147  {
1148    operands[3] = gen_reg_rtx(V16QImode);
1149  })
1151 ; vmnb, vmnh, vmnf, vmng
1152 (define_insn "smin<mode>3"
1153   [(set (match_operand:VI          0 "register_operand" "=v")
1154         (smin:VI (match_operand:VI 1 "register_operand"  "v")
1155                  (match_operand:VI 2 "register_operand"  "v")))]
1156   "TARGET_VX"
1157   "vmn<bhfgq>\t%v0,%v1,%v2"
1158   [(set_attr "op_type" "VRR")])
1160 ; vmxb, vmxh, vmxf, vmxg
1161 (define_insn "smax<mode>3"
1162   [(set (match_operand:VI          0 "register_operand" "=v")
1163         (smax:VI (match_operand:VI 1 "register_operand"  "v")
1164                  (match_operand:VI 2 "register_operand"  "v")))]
1165   "TARGET_VX"
1166   "vmx<bhfgq>\t%v0,%v1,%v2"
1167   [(set_attr "op_type" "VRR")])
1169 ; vmnlb, vmnlh, vmnlf, vmnlg
1170 (define_insn "umin<mode>3"
1171   [(set (match_operand:VI          0 "register_operand" "=v")
1172         (umin:VI (match_operand:VI 1 "register_operand"  "v")
1173                  (match_operand:VI 2 "register_operand"  "v")))]
1174   "TARGET_VX"
1175   "vmnl<bhfgq>\t%v0,%v1,%v2"
1176   [(set_attr "op_type" "VRR")])
1178 ; vmxlb, vmxlh, vmxlf, vmxlg
1179 (define_insn "umax<mode>3"
1180   [(set (match_operand:VI          0 "register_operand" "=v")
1181         (umax:VI (match_operand:VI 1 "register_operand"  "v")
1182                  (match_operand:VI 2 "register_operand"  "v")))]
1183   "TARGET_VX"
1184   "vmxl<bhfgq>\t%v0,%v1,%v2"
1185   [(set_attr "op_type" "VRR")])
1187 ; vmeb, vmeh, vmef
1188 (define_insn "vec_widen_smult_even_<mode>"
1189   [(set (match_operand:<vec_double>                 0 "register_operand" "=v")
1190         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand"  "v")
1191                               (match_operand:VI_QHS 2 "register_operand"  "v")]
1192                              UNSPEC_VEC_SMULT_EVEN))]
1193   "TARGET_VX"
1194   "vme<bhfgq>\t%v0,%v1,%v2"
1195   [(set_attr "op_type" "VRR")])
1197 ; vmleb, vmleh, vmlef
1198 (define_insn "vec_widen_umult_even_<mode>"
1199   [(set (match_operand:<vec_double>                 0 "register_operand" "=v")
1200         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand"  "v")
1201                               (match_operand:VI_QHS 2 "register_operand"  "v")]
1202                              UNSPEC_VEC_UMULT_EVEN))]
1203   "TARGET_VX"
1204   "vmle<bhfgq>\t%v0,%v1,%v2"
1205   [(set_attr "op_type" "VRR")])
1207 ; vmob, vmoh, vmof
1208 (define_insn "vec_widen_smult_odd_<mode>"
1209   [(set (match_operand:<vec_double>                 0 "register_operand" "=v")
1210         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand"  "v")
1211                               (match_operand:VI_QHS 2 "register_operand"  "v")]
1212                              UNSPEC_VEC_SMULT_ODD))]
1213   "TARGET_VX"
1214   "vmo<bhfgq>\t%v0,%v1,%v2"
1215   [(set_attr "op_type" "VRR")])
1217 ; vmlob, vmloh, vmlof
1218 (define_insn "vec_widen_umult_odd_<mode>"
1219   [(set (match_operand:<vec_double>                 0 "register_operand" "=v")
1220         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand"  "v")
1221                               (match_operand:VI_QHS 2 "register_operand"  "v")]
1222                              UNSPEC_VEC_UMULT_ODD))]
1223   "TARGET_VX"
1224   "vmlo<bhfgq>\t%v0,%v1,%v2"
1225   [(set_attr "op_type" "VRR")])
1228 ; Widening hi/lo multiplications
1230 ; The S/390 instructions vml and vmh return the low or high parts of
1231 ; the double sized result elements in the corresponding elements of
1232 ; the target register.  That's NOT what the vec_widen_umult_lo/hi
1233 ; patterns are expected to do.
1235 ; We emulate the widening lo/hi multiplies with the even/odd versions
1236 ; followed by a vector merge
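; Illustrative V4SI example: for inputs {a0,a1,a2,a3} and {b0,b1,b2,b3} the
; even/odd multiplies produce {a0*b0, a2*b2} and {a1*b1, a3*b3} as V2DI;
; merge high then yields the "hi" result {a0*b0, a1*b1} and merge low the
; "lo" result {a2*b2, a3*b3}.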
1239 (define_expand "vec_widen_umult_lo_<mode>"
1240   [(set (match_dup 3)
1241         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "")
1242                               (match_operand:VI_QHS 2 "register_operand" "")]
1243                              UNSPEC_VEC_UMULT_EVEN))
1244    (set (match_dup 4)
1245         (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1246                              UNSPEC_VEC_UMULT_ODD))
1247    (set (match_operand:<vec_double>                 0 "register_operand" "")
1248         (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1249                              UNSPEC_VEC_MERGEL))]
1250   "TARGET_VX"
1251  {
1252    operands[3] = gen_reg_rtx (<vec_double>mode);
1253    operands[4] = gen_reg_rtx (<vec_double>mode);
1254  })
1256 (define_expand "vec_widen_umult_hi_<mode>"
1257   [(set (match_dup 3)
1258         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "")
1259                               (match_operand:VI_QHS 2 "register_operand" "")]
1260                              UNSPEC_VEC_UMULT_EVEN))
1261    (set (match_dup 4)
1262         (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1263                              UNSPEC_VEC_UMULT_ODD))
1264    (set (match_operand:<vec_double>                 0 "register_operand" "")
1265         (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1266                              UNSPEC_VEC_MERGEH))]
1267   "TARGET_VX"
1268  {
1269    operands[3] = gen_reg_rtx (<vec_double>mode);
1270    operands[4] = gen_reg_rtx (<vec_double>mode);
1271  })
1273 (define_expand "vec_widen_smult_lo_<mode>"
1274   [(set (match_dup 3)
1275         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "")
1276                               (match_operand:VI_QHS 2 "register_operand" "")]
1277                              UNSPEC_VEC_SMULT_EVEN))
1278    (set (match_dup 4)
1279         (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1280                              UNSPEC_VEC_SMULT_ODD))
1281    (set (match_operand:<vec_double>                 0 "register_operand" "")
1282         (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1283                              UNSPEC_VEC_MERGEL))]
1284   "TARGET_VX"
1285  {
1286    operands[3] = gen_reg_rtx (<vec_double>mode);
1287    operands[4] = gen_reg_rtx (<vec_double>mode);
1288  })
1290 (define_expand "vec_widen_smult_hi_<mode>"
1291   [(set (match_dup 3)
1292         (unspec:<vec_double> [(match_operand:VI_QHS 1 "register_operand" "")
1293                               (match_operand:VI_QHS 2 "register_operand" "")]
1294                              UNSPEC_VEC_SMULT_EVEN))
1295    (set (match_dup 4)
1296         (unspec:<vec_double> [(match_dup 1) (match_dup 2)]
1297                              UNSPEC_VEC_SMULT_ODD))
1298    (set (match_operand:<vec_double>                 0 "register_operand" "")
1299         (unspec:<vec_double> [(match_dup 3) (match_dup 4)]
1300                              UNSPEC_VEC_MERGEH))]
1301   "TARGET_VX"
1302  {
1303    operands[3] = gen_reg_rtx (<vec_double>mode);
1304    operands[4] = gen_reg_rtx (<vec_double>mode);
1305  })
1307 ; vec_widen_ushiftl_hi
1308 ; vec_widen_ushiftl_lo
1309 ; vec_widen_sshiftl_hi
1310 ; vec_widen_sshiftl_lo
1313 ;; Vector floating point arithmetic instructions
1316 ; vfasb, vfadb, wfasb, wfadb, wfaxb
1317 (define_insn "add<mode>3<tf_vr>"
1318   [(set (match_operand:VF_HW             0 "register_operand" "=v")
1319         (plus:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1320                     (match_operand:VF_HW 2 "register_operand"  "v")))]
1321   "TARGET_VX"
1322   "<vw>fa<sdx>b\t%v0,%v1,%v2"
1323   [(set_attr "op_type" "VRR")])
1325 (define_expand "addtf3"
1326   [(match_operand:TF 0 "register_operand"     "")
1327    (match_operand:TF 1 "nonimmediate_operand" "")
1328    (match_operand:TF 2 "general_operand"      "")]
1329   "HAVE_TF (addtf3)"
1330   { EXPAND_TF (addtf3, 3); })
1332 ; vfssb, vfsdb, wfssb, wfsdb, wfsxb
1333 (define_insn "sub<mode>3<tf_vr>"
1334   [(set (match_operand:VF_HW              0 "register_operand" "=v")
1335         (minus:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1336                      (match_operand:VF_HW 2 "register_operand"  "v")))]
1337   "TARGET_VX"
1338   "<vw>fs<sdx>b\t%v0,%v1,%v2"
1339   [(set_attr "op_type" "VRR")])
1341 (define_expand "subtf3"
1342   [(match_operand:TF 0 "register_operand" "")
1343    (match_operand:TF 1 "register_operand" "")
1344    (match_operand:TF 2 "general_operand"  "")]
1345   "HAVE_TF (subtf3)"
1346   { EXPAND_TF (subtf3, 3); })
1348 ; vfmsb, vfmdb, wfmsb, wfmdb, wfmxb
1349 (define_insn "mul<mode>3<tf_vr>"
1350   [(set (match_operand:VF_HW             0 "register_operand" "=v")
1351         (mult:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1352                     (match_operand:VF_HW 2 "register_operand"  "v")))]
1353   "TARGET_VX"
1354   "<vw>fm<sdx>b\t%v0,%v1,%v2"
1355   [(set_attr "op_type" "VRR")])
1357 (define_expand "multf3"
1358   [(match_operand:TF 0 "register_operand"     "")
1359    (match_operand:TF 1 "nonimmediate_operand" "")
1360    (match_operand:TF 2 "general_operand"      "")]
1361   "HAVE_TF (multf3)"
1362   { EXPAND_TF (multf3, 3); })
1364 ; vfdsb, vfddb, wfdsb, wfddb, wfdxb
1365 (define_insn "div<mode>3<tf_vr>"
1366   [(set (match_operand:VF_HW            0 "register_operand" "=v")
1367         (div:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1368                    (match_operand:VF_HW 2 "register_operand"  "v")))]
1369   "TARGET_VX"
1370   "<vw>fd<sdx>b\t%v0,%v1,%v2"
1371   [(set_attr "op_type" "VRR")])
1373 (define_expand "divtf3"
1374   [(match_operand:TF 0 "register_operand" "")
1375    (match_operand:TF 1 "register_operand" "")
1376    (match_operand:TF 2 "general_operand"  "")]
1377   "HAVE_TF (divtf3)"
1378   { EXPAND_TF (divtf3, 3); })
1380 ; vfsqsb, vfsqdb, wfsqsb, wfsqdb, wfsqxb
1381 (define_insn "sqrt<mode>2<tf_vr>"
1382   [(set (match_operand:VF_HW             0 "register_operand" "=v")
1383         (sqrt:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")))]
1384   "TARGET_VX"
1385   "<vw>fsq<sdx>b\t%v0,%v1"
1386   [(set_attr "op_type" "VRR")])
1388 (define_expand "sqrttf2"
1389   [(match_operand:TF 0 "register_operand" "")
1390    (match_operand:TF 1 "general_operand"  "")]
1391   "HAVE_TF (sqrttf2)"
1392   { EXPAND_TF (sqrttf2, 2); })
1394 ; vfmasb, vfmadb, wfmasb, wfmadb, wfmaxb
1395 (define_insn "fma<mode>4"
1396   [(set (match_operand:VF_HW            0 "register_operand" "=v")
1397         (fma:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1398                    (match_operand:VF_HW 2 "register_operand"  "v")
1399                    (match_operand:VF_HW 3 "register_operand"  "v")))]
1400   "TARGET_VX && s390_fma_allowed_p (<MODE>mode)"
1401   "<vw>fma<sdx>b\t%v0,%v1,%v2,%v3"
1402   [(set_attr "op_type" "VRR")])
1404 ; vfmssb, vfmsdb, wfmssb, wfmsdb, wfmsxb
1405 (define_insn "fms<mode>4"
1406   [(set (match_operand:VF_HW                     0 "register_operand" "=v")
1407         (fma:VF_HW (match_operand:VF_HW          1 "register_operand"  "v")
1408                    (match_operand:VF_HW          2 "register_operand"  "v")
1409                  (neg:VF_HW (match_operand:VF_HW 3 "register_operand"  "v"))))]
1410   "TARGET_VX && s390_fma_allowed_p (<MODE>mode)"
1411   "<vw>fms<sdx>b\t%v0,%v1,%v2,%v3"
1412   [(set_attr "op_type" "VRR")])
1414 ; vfnmasb, vfnmadb, wfnmasb, wfnmadb, wfnmaxb
1415 (define_insn "neg_fma<mode>4"
1416   [(set (match_operand:VF_HW             0 "register_operand" "=v")
1417         (neg:VF_HW
1418          (fma:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1419                     (match_operand:VF_HW 2 "register_operand"  "v")
1420                     (match_operand:VF_HW 3 "register_operand"  "v"))))]
1421   "TARGET_VXE && s390_fma_allowed_p (<MODE>mode)"
1422   "<vw>fnma<sdx>b\t%v0,%v1,%v2,%v3"
1423   [(set_attr "op_type" "VRR")])
1425 ; vfnmssb, vfnmsdb, wfnmssb, wfnmsdb, wfnmsxb
1426 (define_insn "neg_fms<mode>4"
1427   [(set (match_operand:VF_HW                      0 "register_operand" "=v")
1428         (neg:VF_HW
1429          (fma:VF_HW (match_operand:VF_HW          1 "register_operand"  "v")
1430                     (match_operand:VF_HW          2 "register_operand"  "v")
1431                   (neg:VF_HW (match_operand:VF_HW 3 "register_operand"  "v")))))]
1432   "TARGET_VXE && s390_fma_allowed_p (<MODE>mode)"
1433   "<vw>fnms<sdx>b\t%v0,%v1,%v2,%v3"
1434   [(set_attr "op_type" "VRR")])
1436 ; vflcsb, vflcdb, wflcsb, wflcdb, wflcxb
1437 (define_insn "neg<mode>2<tf_vr>"
1438   [(set (match_operand:VFT          0 "register_operand" "=v")
1439         (neg:VFT (match_operand:VFT 1 "register_operand"  "v")))]
1440   "TARGET_VX"
1441   "<vw>flc<sdx>b\t%v0,%v1"
1442   [(set_attr "op_type" "VRR")])
1444 (define_expand "negtf2"
1445   [(match_operand:TF 0 "register_operand" "")
1446    (match_operand:TF 1 "register_operand" "")]
1447   "HAVE_TF (negtf2)"
1448   { EXPAND_TF (negtf2, 2); })
1450 ; vflpsb, vflpdb, wflpsb, wflpdb, wflpxb
1451 (define_insn "abs<mode>2<tf_vr>"
1452   [(set (match_operand:VFT          0 "register_operand" "=v")
1453         (abs:VFT (match_operand:VFT 1 "register_operand"  "v")))]
1454   "TARGET_VX"
1455   "<vw>flp<sdx>b\t%v0,%v1"
1456   [(set_attr "op_type" "VRR")])
1458 (define_expand "abstf2"
1459   [(match_operand:TF 0 "register_operand" "")
1460    (match_operand:TF 1 "register_operand" "")]
1461   "HAVE_TF (abstf2)"
1462   { EXPAND_TF (abstf2, 2); })
1464 ; vflnsb, vflndb, wflnsb, wflndb, wflnxb
1465 (define_insn "negabs<mode>2"
1466   [(set (match_operand:VFT                   0 "register_operand" "=v")
1467         (neg:VFT (abs:VFT (match_operand:VFT 1 "register_operand"  "v"))))]
1468   "TARGET_VX"
1469   "<vw>fln<sdx>b\t%v0,%v1"
1470   [(set_attr "op_type" "VRR")])
1472 (define_expand "smax<mode>3"
1473   [(set (match_operand:VF_HW             0 "register_operand")
1474         (smax:VF_HW (match_operand:VF_HW 1 "register_operand")
1475                     (match_operand:VF_HW 2 "register_operand")))]
1476   "TARGET_VX")
1478 ; vfmaxsb, vfmaxdb, wfmaxsb, wfmaxdb, wfmaxxb
1479 (define_insn "*smax<mode>3_vxe"
1480   [(set (match_operand:VF_HW             0 "register_operand" "=v")
1481         (smax:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1482                     (match_operand:VF_HW 2 "register_operand"  "v")))]
1483   "TARGET_VXE"
1484   "<vw>fmax<sdx>b\t%v0,%v1,%v2,4"
1485   [(set_attr "op_type" "VRR")])
1487 ; Emulate with compare + select
1488 (define_insn_and_split "*smaxv2df3_vx"
1489   [(set (match_operand:V2DF            0 "register_operand" "=v")
1490         (smax:V2DF (match_operand:V2DF 1 "register_operand"  "v")
1491                    (match_operand:V2DF 2 "register_operand"  "v")))]
1492   "TARGET_VX && !TARGET_VXE"
1493   "#"
1494   "&& 1"
1495   [(set (match_dup 3)
1496         (not:V2DI
1497          (unge:V2DI (match_dup 2) (match_dup 1))))
1498    (set (match_dup 0)
1499         (if_then_else:V2DF
1500          (eq (match_dup 3) (match_dup 4))
1501          (match_dup 2)
1502          (match_dup 1)))]
1503 {
1504   operands[3] = gen_reg_rtx (V2DImode);
1505   operands[4] = CONST0_RTX (V2DImode);
1506 })
1508 (define_expand "smin<mode>3"
1509   [(set (match_operand:VF_HW             0 "register_operand")
1510         (smin:VF_HW (match_operand:VF_HW 1 "register_operand")
1511                     (match_operand:VF_HW 2 "register_operand")))]
1512   "TARGET_VX")
1514 ; vfminsb, vfmindb, wfminsb, wfmindb, wfminxb
1515 (define_insn "*smin<mode>3_vxe"
1516   [(set (match_operand:VF_HW             0 "register_operand" "=v")
1517         (smin:VF_HW (match_operand:VF_HW 1 "register_operand"  "v")
1518                     (match_operand:VF_HW 2 "register_operand"  "v")))]
1519   "TARGET_VXE"
1520   "<vw>fmin<sdx>b\t%v0,%v1,%v2,4"
1521   [(set_attr "op_type" "VRR")])
1523 ; Emulate with compare + select
1524 (define_insn_and_split "*sminv2df3_vx"
1525   [(set (match_operand:V2DF            0 "register_operand" "=v")
1526         (smin:V2DF (match_operand:V2DF 1 "register_operand"  "v")
1527                    (match_operand:V2DF 2 "register_operand"  "v")))]
1528   "TARGET_VX && !TARGET_VXE"
1529   "#"
1530   "&& 1"
1531   [(set (match_dup 3)
1532         (not:V2DI
1533          (unge:V2DI (match_dup 2) (match_dup 1))))
1534    (set (match_dup 0)
1535         (if_then_else:V2DF
1536          (eq (match_dup 3) (match_dup 4))
1537          (match_dup 1)
1538          (match_dup 2)))]
1539 {
1540   operands[3] = gen_reg_rtx (V2DImode);
1541   operands[4] = CONST0_RTX (V2DImode);
1542 })
1544 ; Vector copysign, implemented using vector select
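; Per element this computes (sketch; m is the sign-bit mask built via
; s390_build_signbit_mask):
;   op0 = (op2 & m) | (op1 & ~m)
; i.e. the magnitude of op1 combined with the sign of op2.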
1545 (define_expand "copysign<mode>3"
1546   [(set (match_operand:VFT            0 "register_operand" "")
1547         (ior:VFT
1548          (and:VFT (match_operand:VFT  2 "register_operand" "")
1549                   (match_dup 3))
1550          (and:VFT (not:VFT (match_dup 3))
1551                   (match_operand:VFT  1 "register_operand" ""))))]
1552   "TARGET_VX"
1553 {
1554   rtx mask = s390_build_signbit_mask (<MODE>mode);
1555   operands[3] = force_reg (<MODE>mode, mask);
1556 })
1559 ;; Compares
1562 (define_expand "vec_cmp<mode><tointvec>"
1563   [(set (match_operand:<TOINTVEC>  0 "register_operand" "")
1564         (match_operator:<TOINTVEC> 1 "vcond_comparison_operator"
1565           [(match_operand:V_HW     2 "register_operand" "")
1566            (match_operand:V_HW     3 "register_operand" "")]))]
1567   "TARGET_VX"
1568 {
1569   s390_expand_vec_compare (operands[0], GET_CODE (operands[1]), operands[2], operands[3]);
1570   DONE;
1571 })
1573 (define_expand "vec_cmpu<VI_HW:mode><VI_HW:mode>"
1574   [(set (match_operand:VI_HW    0 "register_operand" "")
1575         (match_operator:VI_HW   1 ""
1576           [(match_operand:VI_HW 2 "register_operand" "")
1577            (match_operand:VI_HW 3 "register_operand" "")]))]
1578   "TARGET_VX"
1579 {
1580   s390_expand_vec_compare (operands[0], GET_CODE (operands[1]), operands[2], operands[3]);
1581   DONE;
1582 })
1584 (define_insn "*vec_cmp<VICMP_HW_OP:code><VI:mode><VI:mode>_nocc"
1585   [(set (match_operand:VI                 2 "register_operand" "=v")
1586         (VICMP_HW_OP:VI (match_operand:VI 0 "register_operand"  "v")
1587                         (match_operand:VI 1 "register_operand"  "v")))]
1588   "TARGET_VX"
1589   "vc<VICMP_HW_OP:insn_cmp_op><VI:bhfgq>\t%v2,%v0,%v1"
1590   [(set_attr "op_type" "VRR")])
1594 ;; Floating point compares
1597 ; vfcesb, vfcedb, wfcexb: non-signaling "==" comparison (a == b)
1598 (define_insn "*vec_cmpeq<mode>_quiet_nocc"
1599   [(set (match_operand:<TOINTVEC>         0 "register_operand" "=v")
1600         (eq:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1601                        (match_operand:VFT 2 "register_operand" "v")))]
1602   "TARGET_VX"
1603   "<vw>fce<sdx>b\t%v0,%v1,%v2"
1604   [(set_attr "op_type" "VRR")])
1606 ; vfchsb, vfchdb, wfchxb: non-signaling > comparison (!(b u>= a))
1607 (define_insn "vec_cmpgt<mode>_quiet_nocc"
1608   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1609         (not:<TOINTVEC>
1610          (unge:<TOINTVEC> (match_operand:VFT 2 "register_operand" "v")
1611                           (match_operand:VFT 1 "register_operand" "v"))))]
1612   "TARGET_VX"
1613   "<vw>fch<sdx>b\t%v0,%v1,%v2"
1614   [(set_attr "op_type" "VRR")])
1616 (define_expand "vec_cmplt<mode>_quiet_nocc"
1617   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1618         (not:<TOINTVEC>
1619          (unge:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1620                           (match_operand:VFT 2 "register_operand" "v"))))]
1621   "TARGET_VX")
1623 ; vfchesb, vfchedb, wfchexb: non-signaling >= comparison (!(a u< b))
1624 (define_insn "vec_cmpge<mode>_quiet_nocc"
1625   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1626         (not:<TOINTVEC>
1627          (unlt:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1628                           (match_operand:VFT 2 "register_operand" "v"))))]
1629   "TARGET_VX"
1630   "<vw>fche<sdx>b\t%v0,%v1,%v2"
1631   [(set_attr "op_type" "VRR")])
1633 (define_expand "vec_cmple<mode>_quiet_nocc"
1634   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1635         (not:<TOINTVEC>
1636          (unlt:<TOINTVEC> (match_operand:VFT 2 "register_operand" "v")
1637                           (match_operand:VFT 1 "register_operand" "v"))))]
1638   "TARGET_VX")
1640 ; vfkesb, vfkedb, wfkexb: signaling == comparison ((a >= b) & (b >= a))
1641 (define_insn "*vec_cmpeq<mode>_signaling_nocc"
1642   [(set (match_operand:<TOINTVEC>          0 "register_operand" "=v")
1643         (and:<TOINTVEC>
1644          (ge:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1645                         (match_operand:VFT 2 "register_operand" "v"))
1646          (ge:<TOINTVEC> (match_dup         2)
1647                         (match_dup         1))))]
1648   "TARGET_VXE"
1649   "<vw>fke<sdx>b\t%v0,%v1,%v2"
1650   [(set_attr "op_type" "VRR")])
1652 ; vfkhsb, vfkhdb, wfkhxb: signaling > comparison (a > b)
1653 (define_insn "*vec_cmpgt<mode>_signaling_nocc"
1654   [(set (match_operand:<TOINTVEC>         0 "register_operand" "=v")
1655         (gt:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1656                        (match_operand:VFT 2 "register_operand" "v")))]
1657   "TARGET_VXE"
1658   "<vw>fkh<sdx>b\t%v0,%v1,%v2"
1659   [(set_attr "op_type" "VRR")])
1661 (define_insn "*vec_cmpgt<mode>_signaling_finite_nocc"
1662   [(set (match_operand:<TOINTVEC>         0 "register_operand" "=v")
1663         (gt:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1664                        (match_operand:VFT 2 "register_operand" "v")))]
1665   "TARGET_NONSIGNALING_VECTOR_COMPARE_OK"
1666   "<vw>fch<sdx>b\t%v0,%v1,%v2"
1667   [(set_attr "op_type" "VRR")])
1669 ; vfkhesb, vfkhedb, wfkhexb: signaling >= comparison (a >= b)
1670 (define_insn "*vec_cmpge<mode>_signaling_nocc"
1671   [(set (match_operand:<TOINTVEC>         0 "register_operand" "=v")
1672         (ge:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1673                        (match_operand:VFT 2 "register_operand" "v")))]
1674   "TARGET_VXE"
1675   "<vw>fkhe<sdx>b\t%v0,%v1,%v2"
1676   [(set_attr "op_type" "VRR")])
1678 (define_insn "*vec_cmpge<mode>_signaling_finite_nocc"
1679   [(set (match_operand:<TOINTVEC>         0 "register_operand" "=v")
1680         (ge:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1681                        (match_operand:VFT 2 "register_operand" "v")))]
1682   "TARGET_NONSIGNALING_VECTOR_COMPARE_OK"
1683   "<vw>fche<sdx>b\t%v0,%v1,%v2"
1684   [(set_attr "op_type" "VRR")])
1686 ; Expanders for comparisons that are not directly supported
1687 ; Signaling comparisons must be expressed via signaling rtxes only,
1688 ; and quiet comparisons must be expressed via quiet rtxes only.
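; For example UNGT: only the quiet forms !(x u>= y) and !(x u< y) exist
; as instructions, so "a u> b" is computed as (e.g. for V2DF)
;   tmp = !(b u< a)     ; vfchedb b,a  -- quiet b >= a
;   op0 = ~tmp          ; vector not
; staying within quiet rtxes throughout.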
1690 ; UNGT a u> b -> !!(b u< a)
1691 (define_expand "vec_cmpungt<mode>"
1692   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1693         (not:<TOINTVEC>
1694          (unlt:<TOINTVEC> (match_operand:VFT 2 "register_operand" "v")
1695                           (match_operand:VFT 1 "register_operand" "v"))))
1696    (set (match_dup                           0)
1697         (not:<TOINTVEC> (match_dup           0)))]
1698   "TARGET_VX")
1700 ; UNGE a u>= b -> !!(a u>= b)
1701 (define_expand "vec_cmpunge<mode>"
1702   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1703         (not:<TOINTVEC>
1704          (unge:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1705                           (match_operand:VFT 2 "register_operand" "v"))))
1706    (set (match_dup                           0)
1707         (not:<TOINTVEC> (match_dup           0)))]
1708   "TARGET_VX")
1710 ; UNEQ a u== b -> !(!(a u>= b) | !(b u>= a))
1711 (define_expand "vec_cmpuneq<mode>"
1712   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1713         (not:<TOINTVEC>
1714          (unge:<TOINTVEC> (match_operand:VFT 1 "register_operand"  "v")
1715                           (match_operand:VFT 2 "register_operand"  "v"))))
1716    (set (match_dup                           3)
1717         (not:<TOINTVEC>
1718          (unge:<TOINTVEC> (match_dup         2)
1719                           (match_dup         1))))
1720    (set (match_dup                           0)
1721         (ior:<TOINTVEC> (match_dup           0)
1722                         (match_dup           3)))
1723    (set (match_dup                           0)
1724         (not:<TOINTVEC> (match_dup           0)))]
1725   "TARGET_VX"
1726 {
1727   operands[3] = gen_reg_rtx (<TOINTVEC>mode);
1728 })
1730 ; LTGT a <> b -> a > b | b > a
1731 (define_expand "vec_cmpltgt<mode>"
1732   [(set (match_operand:<TOINTVEC>         0 "register_operand" "=v")
1733         (gt:<TOINTVEC> (match_operand:VFT 1 "register_operand"  "v")
1734                     (match_operand:VFT 2 "register_operand"  "v")))
1735    (set (match_dup 3) (gt:<TOINTVEC> (match_dup 2) (match_dup 1)))
1736    (set (match_dup 0) (ior:<TOINTVEC> (match_dup 0) (match_dup 3)))]
1737   "TARGET_VXE"
1738 {
1739   operands[3] = gen_reg_rtx (<TOINTVEC>mode);
1740 })
1742 ; ORDERED (a, b): !(a u< b) | !(a u>= b)
1743 (define_expand "vec_cmpordered<mode>"
1744   [(set (match_operand:<TOINTVEC>            0 "register_operand" "=v")
1745         (not:<TOINTVEC>
1746          (unlt:<TOINTVEC> (match_operand:VFT 1 "register_operand" "v")
1747                           (match_operand:VFT 2 "register_operand" "v"))))
1748    (set (match_dup                           3)
1749         (not:<TOINTVEC>
1750          (unge:<TOINTVEC> (match_dup         1)
1751                           (match_dup         2))))
1752    (set (match_dup                           0)
1753         (ior:<TOINTVEC> (match_dup           0)
1754                         (match_dup           3)))]
1755   "TARGET_VX"
1756 {
1757   operands[3] = gen_reg_rtx (<TOINTVEC>mode);
1758 })
1760 ; UNORDERED (a, b): !ORDERED (a, b)
1761 (define_expand "vec_cmpunordered<mode>"
1762   [(match_operand:<TOINTVEC> 0 "register_operand" "=v")
1763    (match_operand:VFT        1 "register_operand" "v")
1764    (match_operand:VFT        2 "register_operand" "v")]
1765   "TARGET_VX"
1766 {
1767   emit_insn (gen_vec_cmpordered<mode> (operands[0], operands[1], operands[2]));
1768   emit_insn (gen_rtx_SET (operands[0],
1769              gen_rtx_NOT (<TOINTVEC>mode, operands[0])));
1770   DONE;
1771 })
1773 (define_code_iterator VEC_CMP_EXPAND
1774   [ungt unge uneq ltgt ordered unordered])
1776 (define_expand "vec_cmp<code>"
1777   [(match_operand 0 "register_operand" "")
1778    (VEC_CMP_EXPAND (match_operand 1 "register_operand" "")
1779                    (match_operand 2 "register_operand" ""))]
1780   "TARGET_VX"
1781 {
1782   if (GET_MODE (operands[1]) == V4SFmode)
1783     emit_insn (gen_vec_cmp<code>v4sf (operands[0], operands[1], operands[2]));
1784   else if (GET_MODE (operands[1]) == V2DFmode)
1785     emit_insn (gen_vec_cmp<code>v2df (operands[0], operands[1], operands[2]));
1786   else
1787     gcc_unreachable ();
1789   DONE;
1790 })
1792 (define_insn "*vec_load_pair<mode>"
1793   [(set (match_operand:V_HW_64                       0 "register_operand" "=v,v")
1794         (vec_concat:V_HW_64 (match_operand:<non_vec> 1 "register_operand"  "d,v")
1795                             (match_operand:<non_vec> 2 "register_operand"  "d,v")))]
1796   "TARGET_VX"
1797   "@
1798    vlvgp\t%v0,%1,%2
1799    vmrhg\t%v0,%v1,%v2"
1800   [(set_attr "op_type" "VRR,VRR")])
1802 (define_insn "vllv16qi"
1803   [(set (match_operand:V16QI              0 "register_operand" "=v")
1804         (unspec:V16QI [(match_operand:SI  1 "register_operand"  "d")
1805                        (match_operand:BLK 2 "memory_operand"    "Q")]
1806                       UNSPEC_VEC_LOAD_LEN))]
1807   "TARGET_VX"
1808   "vll\t%v0,%1,%2"
1809   [(set_attr "op_type" "VRS")])
1811 ; vfenebs, vfenehs, vfenefs
1812 ; vfenezbs, vfenezhs, vfenezfs
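; Operand 3 holds the flags immediate of the builtin: VSTRING_FLAG_ZS
; requests the additional zero-byte search (the vfenez* variants), while
; VSTRING_FLAG_CS merely asks for the condition code, which this pattern
; always sets; hence CS is masked out before choosing the mnemonic.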
1813 (define_insn "vec_vfenes<mode>"
1814   [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
1815         (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
1816                            (match_operand:VI_HW_QHS 2 "register_operand" "v")
1817                            (match_operand:QI 3 "const_mask_operand" "C")]
1818                           UNSPEC_VEC_VFENE))
1819    (set (reg:CCRAW CC_REGNUM)
1820         (unspec:CCRAW [(match_dup 1)
1821                        (match_dup 2)
1822                        (match_dup 3)]
1823                       UNSPEC_VEC_VFENECC))]
1824   "TARGET_VX"
1825 {
1826   unsigned HOST_WIDE_INT flags = UINTVAL (operands[3]);
1828   gcc_assert (!(flags & ~(VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
1829   flags &= ~VSTRING_FLAG_CS;
1831   if (flags == VSTRING_FLAG_ZS)
1832     return "vfenez<bhfgq>s\t%v0,%v1,%v2";
1833   return "vfene<bhfgq>s\t%v0,%v1,%v2";
1834 }
1835   [(set_attr "op_type" "VRR")])
1838 ; Vector select
1840 ; The following splitters simplify vec_sel for constant 0 or -1
1841 ; selection sources.  This is required to generate efficient code for
1842 ; vcond.
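; For instance a vcond of the form "a = (b == c) ? -1 : 0" would otherwise
; become a compare followed by a vsel against constant all-ones/zero
; vectors; the splitters reduce such cases to the bare compare (or its
; complement).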
1844 ; a = b == c;
1845 (define_split
1846   [(set (match_operand:V 0 "register_operand" "")
1847         (if_then_else:V
1848          (eq (match_operand:<TOINTVEC> 3 "register_operand" "")
1849              (match_operand:V 4 "const0_operand" ""))
1850          (match_operand:V 1 "const0_operand" "")
1851          (match_operand:V 2 "all_ones_operand" "")))]
1852   "TARGET_VX"
1853   [(set (match_dup 0) (match_dup 3))]
1854 {
1855   PUT_MODE (operands[3], <V:MODE>mode);
1856 })
1858 ; a = ~(b == c)
1859 (define_split
1860   [(set (match_operand:V 0 "register_operand" "")
1861         (if_then_else:V
1862          (eq (match_operand:<TOINTVEC> 3 "register_operand" "")
1863              (match_operand:V 4 "const0_operand" ""))
1864          (match_operand:V 1 "all_ones_operand" "")
1865          (match_operand:V 2 "const0_operand" "")))]
1866   "TARGET_VX"
1867   [(set (match_dup 0) (not:V (match_dup 3)))]
1868 {
1869   PUT_MODE (operands[3], <V:MODE>mode);
1870 })
1872 ; a = b != c
1873 (define_split
1874   [(set (match_operand:V 0 "register_operand" "")
1875         (if_then_else:V
1876          (ne (match_operand:<TOINTVEC> 3 "register_operand" "")
1877              (match_operand:V 4 "const0_operand" ""))
1878          (match_operand:V 1 "all_ones_operand" "")
1879          (match_operand:V 2 "const0_operand" "")))]
1880   "TARGET_VX"
1881   [(set (match_dup 0) (match_dup 3))]
1882 {
1883   PUT_MODE (operands[3], <V:MODE>mode);
1884 })
1886 ; a = ~(b != c)
1887 (define_split
1888   [(set (match_operand:V 0 "register_operand" "")
1889         (if_then_else:V
1890          (ne (match_operand:<TOINTVEC> 3 "register_operand" "")
1891              (match_operand:V 4 "const0_operand" ""))
1892          (match_operand:V 1 "const0_operand" "")
1893          (match_operand:V 2 "all_ones_operand" "")))]
1894   "TARGET_VX"
1895   [(set (match_dup 0) (not:V (match_dup 3)))]
1896 {
1897   PUT_MODE (operands[3], <V:MODE>mode);
1898 })
1900 ; op0 = op3 == 0 ? op1 : op2
1901 (define_insn "*vec_sel0<mode>"
1902   [(set (match_operand:V 0 "register_operand" "=v")
1903         (if_then_else:V
1904          (eq (match_operand:<TOINTVEC> 3 "register_operand" "v")
1905              (match_operand:<TOINTVEC> 4 "const0_operand" ""))
1906          (match_operand:V 1 "register_operand" "v")
1907          (match_operand:V 2 "register_operand" "v")))]
1908   "TARGET_VX"
1909   "vsel\t%v0,%2,%1,%3"
1910   [(set_attr "op_type" "VRR")])
1912 ; op0 = ~op3 == 0 ? op1 : op2
1913 (define_insn "*vec_sel0<mode>"
1914   [(set (match_operand:V 0 "register_operand" "=v")
1915         (if_then_else:V
1916          (eq (not:<TOINTVEC> (match_operand:<TOINTVEC> 3 "register_operand" "v"))
1917              (match_operand:<TOINTVEC> 4 "const0_operand" ""))
1918          (match_operand:V 1 "register_operand" "v")
1919          (match_operand:V 2 "register_operand" "v")))]
1920   "TARGET_VX"
1921   "vsel\t%v0,%1,%2,%3"
1922   [(set_attr "op_type" "VRR")])
1924 ; op0 = op3 == -1 ? op1 : op2
1925 (define_insn "*vec_sel1<mode>"
1926   [(set (match_operand:V 0 "register_operand" "=v")
1927         (if_then_else:V
1928          (eq (match_operand:<TOINTVEC> 3 "register_operand" "v")
1929              (match_operand:<TOINTVEC> 4 "all_ones_operand" ""))
1930          (match_operand:V 1 "register_operand" "v")
1931          (match_operand:V 2 "register_operand" "v")))]
1932   "TARGET_VX"
1933   "vsel\t%v0,%1,%2,%3"
1934   [(set_attr "op_type" "VRR")])
1936 ; op0 = ~op3 == -1 ? op1 : op2
1937 (define_insn "*vec_sel1<mode>"
1938   [(set (match_operand:V 0 "register_operand" "=v")
1939         (if_then_else:V
1940          (eq (not:<TOINTVEC> (match_operand:<TOINTVEC> 3 "register_operand" "v"))
1941              (match_operand:<TOINTVEC> 4 "all_ones_operand" ""))
1942          (match_operand:V 1 "register_operand" "v")
1943          (match_operand:V 2 "register_operand" "v")))]
1944   "TARGET_VX"
1945   "vsel\t%v0,%2,%1,%3"
1946   [(set_attr "op_type" "VRR")])
1948 ; vec_pack_trunc
1950 ; vpkh, vpkf, vpkg
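; E.g. for two V2DI operands the result is a V4SI holding the low 32 bits
; of each doubleword: { lo32 (op1[0]), lo32 (op1[1]), lo32 (op2[0]),
; lo32 (op2[1]) }.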
1951 (define_insn "vec_pack_trunc_<mode>"
1952   [(set (match_operand:<vec_half> 0 "register_operand" "=v")
1953         (vec_concat:<vec_half>
1954          (truncate:<vec_halfhalf>
1955           (match_operand:VI_HW_HSD 1 "register_operand" "v"))
1956          (truncate:<vec_halfhalf>
1957           (match_operand:VI_HW_HSD 2 "register_operand" "v"))))]
1958   "TARGET_VX"
1959   "vpk<bhfgq>\t%0,%1,%2"
1960   [(set_attr "op_type" "VRR")])
1962 ; vpksh, vpksf, vpksg
1963 (define_insn "vec_pack_ssat_<mode>"
1964   [(set (match_operand:<vec_half> 0 "register_operand" "=v")
1965         (vec_concat:<vec_half>
1966          (ss_truncate:<vec_halfhalf>
1967           (match_operand:VI_HW_HSD 1 "register_operand" "v"))
1968          (ss_truncate:<vec_halfhalf>
1969           (match_operand:VI_HW_HSD 2 "register_operand" "v"))))]
1970   "TARGET_VX"
1971   "vpks<bhfgq>\t%0,%1,%2"
1972   [(set_attr "op_type" "VRR")])
1974 ; vpklsh, vpklsf, vpklsg
1975 (define_insn "vec_pack_usat_<mode>"
1976   [(set (match_operand:<vec_half> 0 "register_operand" "=v")
1977         (vec_concat:<vec_half>
1978          (us_truncate:<vec_halfhalf>
1979           (match_operand:VI_HW_HSD 1 "register_operand" "v"))
1980          (us_truncate:<vec_halfhalf>
1981           (match_operand:VI_HW_HSD 2 "register_operand" "v"))))]
1982   "TARGET_VX"
1983   "vpkls<bhfgq>\t%0,%1,%2"
1984   [(set_attr "op_type" "VRR")])
1986 ;; vector unpack v16qi
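; "hi" widens the elements of the most significant half of the source
; (the lowest-numbered elements on this big-endian target), "lo" those
; of the least significant half.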
1988 ; signed
1990 (define_insn "vec_unpacks_hi_v16qi"
1991   [(set (match_operand:V8HI 0 "register_operand" "=v")
1992         (sign_extend:V8HI
1993          (vec_select:V8QI
1994           (match_operand:V16QI 1 "register_operand" "v")
1995           (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)
1996                      (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
1997   "TARGET_VX"
1998   "vuphb\t%0,%1"
1999   [(set_attr "op_type" "VRR")])
2001 (define_insn "vec_unpacks_lo_v16qi"
2002   [(set (match_operand:V8HI 0 "register_operand" "=v")
2003         (sign_extend:V8HI
2004          (vec_select:V8QI
2005           (match_operand:V16QI 1 "register_operand" "v")
2006           (parallel [(const_int 8) (const_int 9) (const_int 10)(const_int 11)
2007                      (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))))]
2008   "TARGET_VX"
2009   "vuplb\t%0,%1"
2010   [(set_attr "op_type" "VRR")])
2012 ; unsigned
2014 (define_insn "vec_unpacku_hi_v16qi"
2015   [(set (match_operand:V8HI 0 "register_operand" "=v")
2016         (zero_extend:V8HI
2017          (vec_select:V8QI
2018           (match_operand:V16QI 1 "register_operand" "v")
2019           (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)
2020                      (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
2021   "TARGET_VX"
2022   "vuplhb\t%0,%1"
2023   [(set_attr "op_type" "VRR")])
2025 (define_insn "vec_unpacku_lo_v16qi"
2026   [(set (match_operand:V8HI 0 "register_operand" "=v")
2027         (zero_extend:V8HI
2028          (vec_select:V8QI
2029           (match_operand:V16QI 1 "register_operand" "v")
2030           (parallel [(const_int 8) (const_int 9) (const_int 10)(const_int 11)
2031                      (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))))]
2032   "TARGET_VX"
2033   "vupllb\t%0,%1"
2034   [(set_attr "op_type" "VRR")])
2036 ;; vector unpack v8hi
2038 ; signed
2040 (define_insn "vec_unpacks_hi_v8hi"
2041   [(set (match_operand:V4SI 0 "register_operand" "=v")
2042         (sign_extend:V4SI
2043          (vec_select:V4HI
2044           (match_operand:V8HI 1 "register_operand" "v")
2045           (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))))]
2046   "TARGET_VX"
2047   "vuphh\t%0,%1"
2048   [(set_attr "op_type" "VRR")])
2050 (define_insn "vec_unpacks_lo_v8hi"
2051   [(set (match_operand:V4SI 0 "register_operand" "=v")
2052         (sign_extend:V4SI
2053          (vec_select:V4HI
2054           (match_operand:V8HI 1 "register_operand" "v")
2055           (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
2056   "TARGET_VX"
2057   "vuplhw\t%0,%1"
2058   [(set_attr "op_type" "VRR")])
2060 ; unsigned
2062 (define_insn "vec_unpacku_hi_v8hi"
2063   [(set (match_operand:V4SI 0 "register_operand" "=v")
2064         (zero_extend:V4SI
2065          (vec_select:V4HI
2066           (match_operand:V8HI 1 "register_operand" "v")
2067           (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))))]
2068   "TARGET_VX"
2069   "vuplhh\t%0,%1"
2070   [(set_attr "op_type" "VRR")])
2072 (define_insn "vec_unpacku_lo_v8hi"
2073   [(set (match_operand:V4SI 0 "register_operand" "=v")
2074         (zero_extend:V4SI
2075          (vec_select:V4HI
2076           (match_operand:V8HI 1 "register_operand" "v")
2077           (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
2078   "TARGET_VX"
2079   "vupllh\t%0,%1"
2080   [(set_attr "op_type" "VRR")])
2082 ;; vector unpack v4si
2084 ; signed
2086 (define_insn "vec_unpacks_hi_v4si"
2087   [(set (match_operand:V2DI 0 "register_operand" "=v")
2088         (sign_extend:V2DI
2089          (vec_select:V2SI
2090           (match_operand:V4SI 1 "register_operand" "v")
2091           (parallel [(const_int 0)(const_int 1)]))))]
2092   "TARGET_VX"
2093   "vuphf\t%0,%1"
2094   [(set_attr "op_type" "VRR")])
2096 (define_insn "vec_unpacks_lo_v4si"
2097   [(set (match_operand:V2DI 0 "register_operand" "=v")
2098         (sign_extend:V2DI
2099          (vec_select:V2SI
2100           (match_operand:V4SI 1 "register_operand" "v")
2101           (parallel [(const_int 2)(const_int 3)]))))]
2102   "TARGET_VX"
2103   "vuplf\t%0,%1"
2104   [(set_attr "op_type" "VRR")])
2106 ; unsigned
2108 (define_insn "vec_unpacku_hi_v4si"
2109   [(set (match_operand:V2DI 0 "register_operand" "=v")
2110         (zero_extend:V2DI
2111          (vec_select:V2SI
2112           (match_operand:V4SI 1 "register_operand" "v")
2113           (parallel [(const_int 0)(const_int 1)]))))]
2114   "TARGET_VX"
2115   "vuplhf\t%0,%1"
2116   [(set_attr "op_type" "VRR")])
2118 (define_insn "vec_unpacku_lo_v4si"
2119   [(set (match_operand:V2DI 0 "register_operand" "=v")
2120         (zero_extend:V2DI
2121          (vec_select:V2SI
2122           (match_operand:V4SI 1 "register_operand" "v")
2123           (parallel [(const_int 2)(const_int 3)]))))]
2124   "TARGET_VX"
2125   "vupllf\t%0,%1"
2126   [(set_attr "op_type" "VRR")])
2128 ;; vector load lengthened
2130 ; vflls float -> double
2131 (define_insn "*vec_extendv4sf"
2132   [(set (match_operand:V2DF 0 "register_operand" "=v")
2133         (float_extend:V2DF
2134          (vec_select:V2SF
2135           (match_operand:V4SF 1 "register_operand" "v")
2136           (parallel [(const_int 0) (const_int 2)]))))]
2137   "TARGET_VX"
2138   "vldeb\t%v0,%v1"
2139   [(set_attr "op_type" "VRR")])
2141 (define_expand "vec_unpacks_lo_v4sf"
2142   [(set (match_dup 2)
2143         (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
2144                       (match_dup 1)]
2145                      UNSPEC_VEC_MERGEL))
2146    (set (match_operand:V2DF               0 "register_operand" "=v")
2147         (float_extend:V2DF
2148          (vec_select:V2SF
2149           (match_dup 2)
2150           (parallel [(const_int 0) (const_int 2)]))))]
2151   "TARGET_VX"
2152 { operands[2] = gen_reg_rtx(V4SFmode); })
2154 (define_expand "vec_unpacks_hi_v4sf"
2155   [(set (match_dup 2)
2156         (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
2157                       (match_dup 1)]
2158                      UNSPEC_VEC_MERGEH))
2159    (set (match_operand:V2DF               0 "register_operand" "=v")
2160         (float_extend:V2DF
2161          (vec_select:V2SF
2162           (match_dup 2)
2163           (parallel [(const_int 0) (const_int 2)]))))]
2164   "TARGET_VX"
2165 { operands[2] = gen_reg_rtx(V4SFmode); })
2168 ; double -> long double
2169 (define_insn "*vec_extendv2df"
2170   [(set (match_operand:V1TF 0 "register_operand" "=v")
2171         (float_extend:V1TF
2172          (vec_select:V1DF
2173           (match_operand:V2DF 1 "register_operand" "v")
2174           (parallel [(const_int 0)]))))]
2175   "TARGET_VXE"
2176   "wflld\t%v0,%v1"
2177   [(set_attr "op_type" "VRR")])
2179 (define_expand "vec_unpacks_lo_v2df"
2180   [(set (match_dup 2)
2181         (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "v")
2182                       (match_dup 1)]
2183                      UNSPEC_VEC_MERGEL))
2184    (set (match_operand:V1TF               0 "register_operand" "=v")
2185         (float_extend:V1TF
2186          (vec_select:V1DF
2187           (match_dup 2)
2188           (parallel [(const_int 0)]))))]
2189   "TARGET_VXE"
2190 { operands[2] = gen_reg_rtx (V2DFmode); })
2192 (define_expand "vec_unpacks_hi_v2df"
2193   [(set (match_dup 2)
2194         (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "v")
2195                       (match_dup 1)]
2196                      UNSPEC_VEC_MERGEH))
2197    (set (match_operand:V1TF               0 "register_operand" "=v")
2198         (float_extend:V1TF
2199          (vec_select:V1DF
2200           (match_dup 2)
2201           (parallel [(const_int 0)]))))]
2202   "TARGET_VXE"
2203 { operands[2] = gen_reg_rtx (V2DFmode); })
2206 ; 2 x v2df -> 1 x v4sf
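; Each V2DF operand is first rounded to V4SF (leaving the two SF results
; in elements 0 and 2); the vperm with the constant selector built below
; then gathers those four live elements into a single V4SF.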
2207 (define_expand "vec_pack_trunc_v2df"
2208   [(set (match_dup 3)
2209         (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "")
2210                       (const_int VEC_INEXACT)
2211                       (const_int VEC_RND_CURRENT)]
2212                      UNSPEC_VEC_VFLR))
2213    (set (match_dup 4)
2214         (unspec:V4SF [(match_operand:V2DF 2 "register_operand" "")
2215                       (const_int VEC_INEXACT)
2216                       (const_int VEC_RND_CURRENT)]
2217                      UNSPEC_VEC_VFLR))
2218    (set (match_dup 6)
2219         (unspec:V16QI [(subreg:V16QI (match_dup 3) 0)
2220                        (subreg:V16QI (match_dup 4) 0)
2221                        (match_dup 5)]
2222                       UNSPEC_VEC_PERM))
2223    (set (match_operand:V4SF 0 "register_operand" "")
2224         (subreg:V4SF (match_dup 6) 0))]
2225   "TARGET_VX"
2226 {
2227   rtx constv, perm[16];
2228   int i;
2230   for (i = 0; i < 4; ++i)
2231     {
2232       perm[i] = GEN_INT (i);
2233       perm[i + 4] = GEN_INT (i + 8);
2234       perm[i + 8] = GEN_INT (i + 16);
2235       perm[i + 12] = GEN_INT (i + 24);
2236     }
2237   constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
2239   operands[3] = gen_reg_rtx (V4SFmode);
2240   operands[4] = gen_reg_rtx (V4SFmode);
2241   operands[5] = force_reg (V16QImode, constv);
2242   operands[6] = gen_reg_rtx (V16QImode);
2243 })
2246 ; BFP <-> integer conversions
2249 ; signed integer to floating point
2251 ; op2: inexact exception not suppressed (IEEE 754 2008)
2252 ; op3: according to current rounding mode
2253 ; vcdgb, vcefb
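; E.g. "vcdgb %v0,%v1,0,0" converts each signed doubleword element to
; double; the two trailing immediates correspond to op2/op3 above
; (0 = do not suppress the inexact exception, 0 = current rounding mode).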
2254 (define_insn "float<VX_VEC_CONV_INT:mode><VX_VEC_CONV_BFP:mode>2"
2255   [(set (match_operand:VX_VEC_CONV_BFP                        0 "register_operand" "=v")
2256         (float:VX_VEC_CONV_BFP (match_operand:VX_VEC_CONV_INT 1 "register_operand"  "v")))]
2257   "TARGET_VX
2258    && GET_MODE_UNIT_SIZE (<VX_VEC_CONV_INT:MODE>mode) == GET_MODE_UNIT_SIZE (<VX_VEC_CONV_BFP:MODE>mode)"
2259   "vc<VX_VEC_CONV_BFP:xde><VX_VEC_CONV_INT:bhfgq>b\t%v0,%v1,0,0"
2260   [(set_attr "op_type" "VRR")])
2262 ; There is no instruction for loading a signed integer into an extended BFP
2263 ; operand in a VR, so we need to load it into an FPR pair first.
2264 (define_expand "float<mode>tf2_vr"
2265   [(set (match_dup 2)
2266         (float:FPRX2 (match_operand:DSI 1 "register_operand" "")))
2267    (set (match_operand:TF               0 "register_operand" "")
2268         (subreg:TF (match_dup 2) 0))]
2269   "TARGET_VXE"
2270 {
2271   operands[2] = gen_reg_rtx (FPRX2mode);
2272 })
2274 (define_expand "float<mode>tf2"
2275   [(match_operand:TF  0 "register_operand" "")
2276    (match_operand:DSI 1 "register_operand" "")]
2277   "HAVE_TF (float<mode>tf2)"
2278   { EXPAND_TF (float<mode>tf2, 2); })
2280 ; unsigned integer to floating point
2282 ; op2: inexact exception not suppressed (IEEE 754 2008)
2283 ; op3: according to current rounding mode
2284 ; vcdlgb, vcelfb
2285 (define_insn "floatuns<VX_VEC_CONV_INT:mode><VX_VEC_CONV_BFP:mode>2"
2286   [(set (match_operand:VX_VEC_CONV_BFP                                 0 "register_operand" "=v")
2287         (unsigned_float:VX_VEC_CONV_BFP (match_operand:VX_VEC_CONV_INT 1 "register_operand"  "v")))]
2288   "TARGET_VX
2289    && GET_MODE_UNIT_SIZE (<VX_VEC_CONV_INT:MODE>mode) == GET_MODE_UNIT_SIZE (<VX_VEC_CONV_BFP:MODE>mode)"
2290   "vc<VX_VEC_CONV_BFP:xde>l<VX_VEC_CONV_INT:bhfgq>b\t%v0,%v1,0,0"
2291   [(set_attr "op_type" "VRR")])
2293 ; There is no instruction for loading an unsigned integer into an extended BFP
2294 ; operand in a VR, so load it into an FPR pair first.
2295 (define_expand "floatuns<mode>tf2_vr"
2296   [(set (match_dup 2)
2297         (unsigned_float:FPRX2 (match_operand:GPR 1 "register_operand" "")))
2298    (set (match_operand:TF                        0 "register_operand" "")
2299         (subreg:TF (match_dup 2) 0))]
2300   "TARGET_VXE"
2301 {
2302   operands[2] = gen_reg_rtx (FPRX2mode);
2303 })
2305 (define_expand "floatuns<mode>tf2"
2306   [(match_operand:TF  0 "register_operand" "")
2307    (match_operand:GPR 1 "register_operand" "")]
2308   "HAVE_TF (floatuns<mode>tf2)"
2309   { EXPAND_TF (floatuns<mode>tf2, 2); })
2311 ; floating point to signed integer
2313 ; op2: inexact exception not suppressed (IEEE 754 2008)
2314 ; op3: rounding mode 5 (round towards 0 C11 6.3.1.4)
2315 ; vcgdb, vcfeb
2316 (define_insn "fix_trunc<VX_VEC_CONV_BFP:mode><VX_VEC_CONV_INT:mode>2"
2317   [(set (match_operand:VX_VEC_CONV_INT                      0 "register_operand" "=v")
2318         (fix:VX_VEC_CONV_INT (match_operand:VX_VEC_CONV_BFP 1 "register_operand"  "v")))]
2319   "TARGET_VX
2320    && GET_MODE_UNIT_SIZE (<VX_VEC_CONV_INT:MODE>mode) == GET_MODE_UNIT_SIZE (<VX_VEC_CONV_BFP:MODE>mode)"
2321   "vc<VX_VEC_CONV_INT:bhfgq><VX_VEC_CONV_BFP:xde>b\t%v0,%v1,0,5"
2322   [(set_attr "op_type" "VRR")])
2324 ; There is no instruction for rounding an extended BFP operand in a VR into
2325 ; a signed integer, so copy it into an FPR pair first.
2326 (define_expand "fix_trunctf<mode>2_vr"
2327   [(set (subreg:DF (match_dup 2) 0)
2328         (subreg:DF (match_operand:TF 1 "register_operand" "") 0))
2329    (set (subreg:DF (match_dup 2) 8) (subreg:DF (match_dup 1) 8))
2330    (parallel [(set (match_operand:GPR 0 "register_operand" "")
2331                    (fix:GPR (match_dup 2)))
2332               (unspec:GPR [(const_int BFP_RND_TOWARD_0)] UNSPEC_ROUND)
2333               (clobber (reg:CC CC_REGNUM))])]
2334   "TARGET_VXE"
2335 {
2336   operands[2] = gen_reg_rtx (FPRX2mode);
2337 })
2339 (define_expand "fix_trunctf<mode>2"
2340   [(match_operand:GPR 0 "register_operand" "")
2341    (match_operand:TF  1 "register_operand" "")]
2342   "HAVE_TF (fix_trunctf<mode>2)"
2343   { EXPAND_TF (fix_trunctf<mode>2, 2); })
2345 ; floating point to unsigned integer
2347 ; op2: inexact exception not suppressed (IEEE 754 2008)
2348 ; op3: rounding mode 5 (round towards 0 C11 6.3.1.4)
2349 ; vclgdb, vclfeb
2350 (define_insn "fixuns_trunc<VX_VEC_CONV_BFP:mode><VX_VEC_CONV_INT:mode>2"
2351   [(set (match_operand:VX_VEC_CONV_INT                               0 "register_operand" "=v")
2352         (unsigned_fix:VX_VEC_CONV_INT (match_operand:VX_VEC_CONV_BFP 1 "register_operand"  "v")))]
2353   "TARGET_VX
2354    && GET_MODE_UNIT_SIZE (<VX_VEC_CONV_INT:MODE>mode) == GET_MODE_UNIT_SIZE (<VX_VEC_CONV_BFP:MODE>mode)"
2355   "vcl<VX_VEC_CONV_INT:bhfgq><VX_VEC_CONV_BFP:xde>b\t%v0,%v1,0,5"
2356   [(set_attr "op_type" "VRR")])
2358 ; There is no instruction for rounding an extended BFP operand in a VR into
2359 ; an unsigned integer, so copy it into an FPR pair first.
2360 (define_expand "fixuns_trunctf<mode>2_vr"
2361   [(set (subreg:DF (match_dup 2) 0)
2362         (subreg:DF (match_operand:TF 1 "register_operand" "") 0))
2363    (set (subreg:DF (match_dup 2) 8) (subreg:DF (match_dup 1) 8))
2364    (parallel [(set (match_operand:GPR 0 "register_operand" "")
2365                    (unsigned_fix:GPR (match_dup 2)))
2366               (unspec:GPR [(const_int BFP_RND_TOWARD_0)] UNSPEC_ROUND)
2367               (clobber (reg:CC CC_REGNUM))])]
2368   "TARGET_VXE"
2369 {
2370   operands[2] = gen_reg_rtx (FPRX2mode);
2371 })
2373 (define_expand "fixuns_trunctf<mode>2"
2374   [(match_operand:GPR 0 "register_operand" "")
2375    (match_operand:TF  1 "register_operand" "")]
2376   "HAVE_TF (fixuns_trunctf<mode>2)"
2377   { EXPAND_TF (fixuns_trunctf<mode>2, 2); })
2379 ; load fp integer
2381 ; vfisb, wfisb, vfidb, wfidb, wfixb; suppress inexact exceptions
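; The immediate "4" in the template below is the XxC (inexact-suppression)
; control of VFI, so the value is rounded to an integral value without
; raising an inexact exception; the rint pattern further down passes 0
; there and thus does raise it.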
2382 (define_insn "<FPINT:fpint_name><VF_HW:mode>2<VF_HW:tf_vr>"
2383   [(set (match_operand:VF_HW                0 "register_operand" "=v")
2384         (unspec:VF_HW [(match_operand:VF_HW 1 "register_operand"  "v")]
2385                       FPINT))]
2386   "TARGET_VX"
2387   "<vw>fi<VF_HW:sdx>b\t%v0,%v1,4,<FPINT:fpint_roundingmode>"
2388   [(set_attr "op_type" "VRR")])
2390 (define_expand "<FPINT:fpint_name>tf2"
2391   [(match_operand:TF 0 "register_operand" "")
2392    (match_operand:TF 1 "register_operand" "")
2393    ; recognize FPINT as an iterator
2394    (unspec:TF [(match_dup 1)] FPINT)]
2395   "HAVE_TF (<FPINT:fpint_name>tf2)"
2396   { EXPAND_TF (<FPINT:fpint_name>tf2, 2); })
2398 ; vfisb, wfisb, vfidb, wfidb, wfixb; raise inexact exceptions
2399 (define_insn "rint<mode>2<tf_vr>"
2400   [(set (match_operand:VF_HW                0 "register_operand" "=v")
2401         (unspec:VF_HW [(match_operand:VF_HW 1 "register_operand"  "v")]
2402                       UNSPEC_FPINT_RINT))]
2403   "TARGET_VX"
2404   "<vw>fi<sdx>b\t%v0,%v1,0,0"
2405   [(set_attr "op_type" "VRR")])
2407 (define_expand "rinttf2"
2408   [(match_operand:TF 0 "register_operand" "")
2409    (match_operand:TF 1 "register_operand" "")]
2410   "HAVE_TF (rinttf2)"
2411   { EXPAND_TF (rinttf2, 2); })
2413 ; load rounded
2415 ; wflrx
2416 (define_insn "*trunctfdf2_vr"
2417   [(set (match_operand:DF                    0 "register_operand" "=f")
2418         (float_truncate:DF (match_operand:TF 1 "register_operand"  "v")))
2419    (unspec:DF [(match_operand                2 "const_int_operand" "")]
2420                UNSPEC_ROUND)]
2421   "TARGET_VXE"
2422   "wflrx\t%v0,%v1,0,%2"
2423   [(set_attr "op_type" "VRR")])
2425 (define_expand "trunctfdf2_vr"
2426   [(parallel [
2427      (set (match_operand:DF                    0 "register_operand" "")
2428           (float_truncate:DF (match_operand:TF 1 "register_operand" "")))
2429      (unspec:DF [(const_int BFP_RND_CURRENT)] UNSPEC_ROUND)])]
2430   "TARGET_VXE")
2432 (define_expand "trunctfdf2"
2433   [(match_operand:DF 0 "register_operand" "")
2434    (match_operand:TF 1 "register_operand" "")]
2435   "HAVE_TF (trunctfdf2)"
2436   { EXPAND_TF (trunctfdf2, 2); })
2438 ; wflrx + (ledbr|wledb)
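; Rounding TF straight to SF could round twice.  The first step therefore
; uses BFP_RND_PREP_FOR_SHORT_PREC, which rounds the intermediate DF value
; such that the final DF -> SF rounding still yields the correctly rounded
; single-precision result.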
2439 (define_expand "trunctfsf2_vr"
2440   [(parallel [
2441      (set (match_dup 2)
2442           (float_truncate:DF (match_operand:TF 1 "register_operand" "")))
2443      (unspec:DF [(const_int BFP_RND_PREP_FOR_SHORT_PREC)] UNSPEC_ROUND)])
2444    (set (match_operand:SF                    0 "register_operand" "")
2445         (float_truncate:SF (match_dup 2)))]
2446   "TARGET_VXE"
2447 {
2448   operands[2] = gen_reg_rtx (DFmode);
2449 })
2451 (define_expand "trunctfsf2"
2452   [(match_operand:SF 0 "register_operand" "")
2453    (match_operand:TF 1 "register_operand" "")]
2454   "HAVE_TF (trunctfsf2)"
2455   { EXPAND_TF (trunctfsf2, 2); })
2457 ; load lengthened
2459 (define_insn "extenddftf2_vr"
2460   [(set (match_operand:TF                  0 "register_operand" "=v")
2461         (float_extend:TF (match_operand:DF 1 "register_operand"  "f")))]
2462   "TARGET_VXE"
2463   "wflld\t%v0,%v1"
2464   [(set_attr "op_type" "VRR")])
2466 (define_expand "extenddftf2"
2467   [(match_operand:TF 0 "register_operand" "")
2468    (match_operand:DF 1 "nonimmediate_operand" "")]
2469   "HAVE_TF (extenddftf2)"
2470   { EXPAND_TF (extenddftf2, 2); })
2472 (define_expand "extendsftf2_vr"
2473   [(set (match_dup 2)
2474         (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "")))
2475    (set (match_operand:TF                  0 "register_operand"     "")
2476         (float_extend:TF (match_dup 2)))]
2477   "TARGET_VXE"
2478 {
2479   operands[2] = gen_reg_rtx (DFmode);
2480 })
2482 (define_expand "extendsftf2"
2483   [(match_operand:TF 0 "register_operand" "")
2484    (match_operand:SF 1 "nonimmediate_operand" "")]
2485   "HAVE_TF (extendsftf2)"
2486   { EXPAND_TF (extendsftf2, 2); })
2488 ; test data class
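; VFTCI tests each element against the class mask in operand 2 and only
; sets the condition code; the CCRAW comparison against 8 below checks for
; CC 0, i.e. all (here: the single V1TF) elements matched the class.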
2490 (define_expand "signbittf2_vr"
2491   [(parallel
2492     [(set (reg:CCRAW CC_REGNUM)
2493           (unspec:CCRAW [(match_operand:TF 1 "register_operand" "")
2494                          (match_dup        2)]
2495                         UNSPEC_VEC_VFTCICC))
2496      (clobber (scratch:V1TI))])
2497    (set (match_operand:SI                  0 "register_operand" "")
2498         (const_int 0))
2499    (set (match_dup                         0)
2500         (if_then_else:SI (eq (reg:CCRAW CC_REGNUM) (const_int 8))
2501                          (const_int 1)
2502                          (match_dup        0)))]
2503   "TARGET_VXE"
2504 {
2505   operands[2] = GEN_INT (S390_TDC_SIGNBIT_SET);
2506 })
2508 (define_expand "signbittf2"
2509   [(match_operand:SI 0 "register_operand" "")
2510    (match_operand:TF 1 "register_operand" "")]
2511   "HAVE_TF (signbittf2)"
2512   { EXPAND_TF (signbittf2, 2); })
2514 (define_expand "isinftf2_vr"
2515   [(parallel
2516     [(set (reg:CCRAW CC_REGNUM)
2517           (unspec:CCRAW [(match_operand:TF 1 "register_operand" "")
2518                          (match_dup        2)]
2519                         UNSPEC_VEC_VFTCICC))
2520      (clobber (scratch:V1TI))])
2521    (set (match_operand:SI                  0 "register_operand" "")
2522         (const_int 0))
2523    (set (match_dup                         0)
2524         (if_then_else:SI (eq (reg:CCRAW CC_REGNUM) (const_int 8))
2525                          (const_int 1)
2526                          (match_dup        0)))]
2527   "TARGET_VXE"
2528 {
2529   operands[2] = GEN_INT (S390_TDC_INFINITY);
2530 })
2532 (define_expand "isinftf2"
2533   [(match_operand:SI 0 "register_operand" "")
2534    (match_operand:TF 1 "register_operand" "")]
2535   "HAVE_TF (isinftf2)"
2536   { EXPAND_TF (isinftf2, 2); })
2539 ; Vector byte swap patterns
2542 ; FIXME: The bswap rtl standard name currently does not appear to be
2543 ; used for vector modes.
2544 (define_expand "bswap<mode>"
2545   [(parallel
2546     [(set (match_operand:VT_HW_HSDT                   0 "nonimmediate_operand" "")
2547           (bswap:VT_HW_HSDT (match_operand:VT_HW_HSDT 1 "nonimmediate_operand" "")))
2548      (use (match_dup 2))])]
2549   "TARGET_VX"
2550 {
2551   static char p[4][16] =
2552     { { 1,  0,  3,  2,  5,  4,  7, 6, 9,  8,  11, 10, 13, 12, 15, 14 },   /* H */
2553       { 3,  2,  1,  0,  7,  6,  5, 4, 11, 10, 9,  8,  15, 14, 13, 12 },   /* S */
2554       { 7,  6,  5,  4,  3,  2,  1, 0, 15, 14, 13, 12, 11, 10, 9,  8  },   /* D */
2555       { 15, 14, 13, 12, 11, 10, 9, 8, 7,  6,  5,  4,  3,  2,  1,  0  } }; /* T */
2556   char *perm;
2557   rtx perm_rtx[16];
2559   switch (GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)))
2560     {
2561     case 2: perm = p[0]; break;
2562     case 4: perm = p[1]; break;
2563     case 8: perm = p[2]; break;
2564     case 16: perm = p[3]; break;
2565     default: gcc_unreachable ();
2566     }
2567   for (int i = 0; i < 16; i++)
2568     perm_rtx[i] = GEN_INT (perm[i]);
2570   operands[2] = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm_rtx));
2572   /* Without vxe2 we do not have byte swap instructions dealing
2573      directly with memory operands.  So instead of waiting until
2574      reload to fix that up, switch over to a vector permute right
2575      now.  */
2576   if (!TARGET_VXE2)
2577     {
2578       rtx in = force_reg (V16QImode, simplify_gen_subreg (V16QImode, operands[1], <MODE>mode, 0));
2579       rtx permute = force_reg (V16QImode, force_const_mem (V16QImode, operands[2]));
2580       rtx out = gen_reg_rtx (V16QImode);
2582       emit_insn (gen_vec_permv16qi (out, in, in, permute));
2583       emit_move_insn (operands[0], simplify_gen_subreg (<MODE>mode, out, V16QImode, 0));
2584       DONE;
2585     }
2586 })
2588 ; Switching late to the reg-reg variant requires the vector permute
2589 ; pattern to be pushed into the literal pool and a vector register to
2590 ; be allocated to load it into.  We rely on both being provided by LRA
2591 ; when fixing up the v constraint for operand 2.
2593 ; permute_pattern_operand: general_operand would reject the permute
2594 ; pattern constants since these are not accepted by
2595 ; s390_legitimate_constant_p
2597 ; ^R: Prevent these alternatives from being chosen if using them would
2598 ; require pushing the operand into memory first
2600 ; vlbrh, vlbrf, vlbrg, vlbrq, vstbrh, vstbrf, vstbrg, vstbrq
2601 (define_insn_and_split "*bswap<mode>"
2602   [(set (match_operand:VT_HW_HSDT                   0 "nonimmediate_operand"    "=v, v,^R")
2603         (bswap:VT_HW_HSDT (match_operand:VT_HW_HSDT 1 "nonimmediate_operand"     "v,^R, v")))
2604    (use (match_operand:V16QI                        2 "permute_pattern_operand"  "v, X, X"))]
2605   "TARGET_VXE2"
2606   "@
2607    #
2608    vlbr<bhfgq>\t%v0,%v1
2609    vstbr<bhfgq>\t%v1,%v0"
2610   "&& reload_completed
2611    && !memory_operand (operands[0], <MODE>mode)
2612    && !memory_operand (operands[1], <MODE>mode)"
2613   [(set (match_dup 0)
2614         (subreg:VT_HW_HSDT
2615          (unspec:V16QI [(subreg:V16QI (match_dup 1) 0)
2616                         (subreg:V16QI (match_dup 1) 0)
2617                         (match_dup 2)]
2618                        UNSPEC_VEC_PERM) 0))]
2619   ""
2620   [(set_attr "op_type"      "*,VRX,VRX")])
2622 ; reduc_smin
2623 ; reduc_smax
2624 ; reduc_umin
2625 ; reduc_umax
2627 ; vec_pack_sfix_trunc: convert + pack ?
2628 ; vec_pack_ufix_trunc
2629 ; vec_unpacks_float_hi
2630 ; vec_unpacks_float_lo
2631 ; vec_unpacku_float_hi
2632 ; vec_unpacku_float_lo