2 ;; Copyright (C) 2002-2018 Free Software Foundation, Inc.
3 ;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published
9 ;; by the Free Software Foundation; either version 3, or (at your
10 ;; option) any later version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT
13 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 ;; License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
;; Enumeration of unspec codes used by the AltiVec patterns below.
;; Each UNSPEC_* constant tags an RTL unspec so patterns with no
;; natural RTL representation stay distinct.
;; NOTE(review): this extract shows only a subset of the enum members;
;; the full lists live between the gaps in the original file.
21 (define_c_enum "unspec"
48 UNSPEC_VPACK_SIGN_SIGN_SAT
49 UNSPEC_VPACK_SIGN_UNS_SAT
50 UNSPEC_VPACK_UNS_UNS_SAT
51 UNSPEC_VPACK_UNS_UNS_MOD
52 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
76 UNSPEC_VUNPACK_HI_SIGN
77 UNSPEC_VUNPACK_LO_SIGN
78 UNSPEC_VUNPACK_HI_SIGN_DIRECT
79 UNSPEC_VUNPACK_LO_SIGN_DIRECT
82 UNSPEC_CONVERT_4F32_8I16
152 UNSPEC_VSUMSWS_DIRECT
;; Volatile unspecs (have side effects the optimizers must not touch,
;; e.g. VRSAVE updates).  Members not visible in this extract.
171 (define_c_enum "unspecv"
179 ;; Like VI, defined in vector.md, but add ISA 2.07 integer vector ops
180 (define_mode_iterator VI2 [V4SI V8HI V16QI V2DI])
181 ;; Short vec int modes
182 (define_mode_iterator VIshort [V8HI V16QI])
183 ;; Longer vec int modes for rotate/mask ops
184 (define_mode_iterator VIlong [V2DI V4SI])
;; Vector float modes handled by AltiVec (single precision only).
186 (define_mode_iterator VF [V4SF])
187 ;; Vec modes, pity mode iterators are not composable
188 (define_mode_iterator V [V4SI V8HI V16QI V4SF])
189 ;; Vec modes for move/logical/permute ops, include vector types for move not
190 ;; otherwise handled by altivec (v2df, v2di, ti)
;; NOTE(review): the middle of the VM mode list is missing from this
;; extract; only the head (V4SI) and the conditional KF/TF tail are shown.
191 (define_mode_iterator VM [V4SI
199 (KF "FLOAT128_VECTOR_P (KFmode)")
200 (TF "FLOAT128_VECTOR_P (TFmode)")])
202 ;; Like VM, except don't do TImode
203 (define_mode_iterator VM2 [V4SI
210 (KF "FLOAT128_VECTOR_P (KFmode)")
211 (TF "FLOAT128_VECTOR_P (TFmode)")])
213 ;; Map the Vector convert single precision to double precision for integer
214 ;; versus floating point
215 (define_mode_attr VS_sxwsp [(V4SI "sxw") (V4SF "sp")])
217 ;; Specific iterator for parity which does not have a byte/half-word form, but
218 ;; does have a quad word form
219 (define_mode_iterator VParity [V4SI
;; Mnemonic suffix letter for each element width (d/w/h/b).
224 (define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
;; Scalar mode corresponding to each vector element mode.
225 (define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
;; Enable condition per mode: V2DI needs Power8 vector support,
;; the narrower modes need only base AltiVec.
226 (define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
227 (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
228 (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
229 (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
230 (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])
232 ;; Vector pack/unpack
233 (define_mode_iterator VP [V2DI V4SI V8HI])
234 (define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
235 (define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
236 (define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])
;; Modes that have a vector negate pattern.
239 (define_mode_iterator VNEG [V4SI V2DI])
241 ;; Vector move instructions.
;; Alternatives: AltiVec store (stvx), load (lvx), reg-reg copy (vor),
;; GPR store/load/copy, zero (vxor), and easy vector constants (W).
;; NOTE(review): cases 3-5 of the asm switch (the GPR alternatives) are
;; missing from this extract; the original emits multi-insn sequences
;; there, hence the 20-byte lengths below.
242 (define_insn "*altivec_mov<mode>"
243 [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
244 (match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
245 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
246 && (register_operand (operands[0], <MODE>mode)
247 || register_operand (operands[1], <MODE>mode))"
249 switch (which_alternative)
251 case 0: return "stvx %1,%y0";
252 case 1: return "lvx %0,%y1";
253 case 2: return "vor %0,%1,%1";
257 case 6: return "vxor %0,%0,%0";
258 case 7: return output_vec_const_move (operands);
260 default: gcc_unreachable ();
263 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
264 (set_attr "length" "4,4,4,20,20,20,4,8,32")])
266 ;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
267 ;; is for unions. However for plain data movement, slightly favor the vector
;; TImode variant of the move pattern above (same alternative layout,
;; minus the extra easy-constant-to-GPR alternative).
269 (define_insn "*altivec_movti"
270 [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
271 (match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
272 "VECTOR_MEM_ALTIVEC_P (TImode)
273 && (register_operand (operands[0], TImode)
274 || register_operand (operands[1], TImode))"
276 switch (which_alternative)
278 case 0: return "stvx %1,%y0";
279 case 1: return "lvx %0,%y1";
280 case 2: return "vor %0,%1,%1";
284 case 6: return "vxor %0,%0,%0";
285 case 7: return output_vec_const_move (operands);
286 default: gcc_unreachable ();
289 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")])
291 ;; Load up a vector with the most significant bit set by loading up -1 and
292 ;; doing a shift left
;; Splitter: synthesize an MSB-only constant as all-ones followed by a
;; vector shift left by the element width minus one (reg shifted by
;; itself: -1 interpreted as an unsigned shift count per element).
294 [(set (match_operand:VM 0 "altivec_register_operand")
295 (match_operand:VM 1 "easy_vector_constant_msb"))]
296 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
299 rtx dest = operands[0];
300 machine_mode mode = GET_MODE (operands[0]);
;; Float vectors are built through their integer-mode view.
304 if (mode == V4SFmode)
307 dest = gen_lowpart (V4SImode, dest);
310 num_elements = GET_MODE_NUNITS (mode);
311 v = rtvec_alloc (num_elements);
312 for (i = 0; i < num_elements; i++)
313 RTVEC_ELT (v, i) = constm1_rtx;
315 emit_insn (gen_vec_initv4sisi (dest, gen_rtx_PARALLEL (mode, v)));
316 emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
;; Splitter: constants expressible as X+X where X fits vspltis* are
;; built as a splat of half the value followed by a self-add.
321 [(set (match_operand:VM 0 "altivec_register_operand")
322 (match_operand:VM 1 "easy_vector_constant_add_self"))]
323 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
324 [(set (match_dup 0) (match_dup 3))
325 (set (match_dup 0) (match_dup 4))]
327 rtx dup = gen_easy_altivec_constant (operands[1]);
329 machine_mode op_mode = <MODE>mode;
331 /* Divide the operand of the resulting VEC_DUPLICATE, and use
332 simplify_rtx to make a CONST_VECTOR. */
333 XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
334 XEXP (dup, 0), const1_rtx);
335 const_vec = simplify_rtx (dup);
337 if (op_mode == V4SFmode)
340 operands[0] = gen_lowpart (op_mode, operands[0]);
342 if (GET_MODE (const_vec) == op_mode)
343 operands[3] = const_vec;
345 operands[3] = gen_lowpart (op_mode, const_vec);
346 operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
;; Splitter: constants reachable via a splat plus a VSLDOI rotate.
;; vspltis_shifted returns the (nonzero) byte rotation needed; its sign
;; selects whether the fill vector is all-ones or all-zeros.
350 [(set (match_operand:VM 0 "altivec_register_operand")
351 (match_operand:VM 1 "easy_vector_constant_vsldoi"))]
352 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
353 [(set (match_dup 2) (match_dup 3))
354 (set (match_dup 4) (match_dup 5))
356 (unspec:VM [(match_dup 2)
361 rtx op1 = operands[1];
362 int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
363 HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
364 rtx rtx_val = GEN_INT (val);
365 int shift = vspltis_shifted (op1);
367 gcc_assert (shift != 0);
368 operands[2] = gen_reg_rtx (<MODE>mode);
369 operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
370 operands[4] = gen_reg_rtx (<MODE>mode);
374 operands[5] = CONSTM1_RTX (<MODE>mode);
375 operands[6] = GEN_INT (-shift);
379 operands[5] = CONST0_RTX (<MODE>mode);
380 operands[6] = GEN_INT (shift);
;; Read the VRSAVE special register (SPR 256) into a GPR.  Two asm
;; spellings are returned depending on assembler support (the selecting
;; condition is not visible in this extract).
384 (define_insn "get_vrsave_internal"
385 [(set (match_operand:SI 0 "register_operand" "=r")
386 (unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
390 return "mfspr %0,256";
392 return "mfvrsave %0";
394 [(set_attr "type" "*")])
;; Write VRSAVE from a GPR.  Volatile unspec: the old VRSAVE value is
;; an input so the update is not deleted as dead.
396 (define_insn "*set_vrsave_internal"
397 [(match_parallel 0 "vrsave_operation"
398 [(set (reg:SI VRSAVE_REGNO)
399 (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
400 (reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
404 return "mtspr 256,%1";
406 return "mtvrsave %1";
408 [(set_attr "type" "*")])
;; Darwin-only out-of-line prologue helper call (clobbers LR).
410 (define_insn "*save_world"
411 [(match_parallel 0 "save_world_operation"
412 [(clobber (reg:SI LR_REGNO))
413 (use (match_operand:SI 1 "call_operand" "s"))])]
414 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
416 [(set_attr "type" "branch")
417 (set_attr "length" "4")])
;; Darwin-only out-of-line epilogue helper call.
419 (define_insn "*restore_world"
420 [(match_parallel 0 "restore_world_operation"
422 (use (match_operand:SI 1 "call_operand" "s"))
423 (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
424 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
427 ;; The save_vregs and restore_vregs patterns don't use memory_operand
428 ;; because (plus (reg) (const_int)) is not a valid vector address.
429 ;; This way is more compact than describing exactly what happens in
430 ;; the out-of-line functions, ie. loading the constant into r11/r12
431 ;; then using indexed addressing, and requires less editing of rtl
432 ;; to describe the operation to dwarf2out_frame_debug_expr.
;; Call the out-of-line vector-register save stub (base register r11).
433 (define_insn "*save_vregs_<mode>_r11"
434 [(match_parallel 0 "any_parallel_operand"
435 [(clobber (reg:P LR_REGNO))
436 (use (match_operand:P 1 "symbol_ref_operand" "s"))
439 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
440 (match_operand:P 3 "short_cint_operand" "I")))
441 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
444 [(set_attr "type" "branch")
445 (set_attr "length" "4")])
;; Same as above but the stub uses r12 as the base register.
447 (define_insn "*save_vregs_<mode>_r12"
448 [(match_parallel 0 "any_parallel_operand"
449 [(clobber (reg:P LR_REGNO))
450 (use (match_operand:P 1 "symbol_ref_operand" "s"))
453 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
454 (match_operand:P 3 "short_cint_operand" "I")))
455 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
458 [(set_attr "type" "branch")
459 (set_attr "length" "4")])
;; Out-of-line vector-register restore stub (base register r11).
461 (define_insn "*restore_vregs_<mode>_r11"
462 [(match_parallel 0 "any_parallel_operand"
463 [(clobber (reg:P LR_REGNO))
464 (use (match_operand:P 1 "symbol_ref_operand" "s"))
467 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
468 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
469 (match_operand:P 4 "short_cint_operand" "I"))))])]
472 [(set_attr "type" "branch")
473 (set_attr "length" "4")])
;; Out-of-line vector-register restore stub (base register r12).
475 (define_insn "*restore_vregs_<mode>_r12"
476 [(match_parallel 0 "any_parallel_operand"
477 [(clobber (reg:P LR_REGNO))
478 (use (match_operand:P 1 "symbol_ref_operand" "s"))
481 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
482 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
483 (match_operand:P 4 "short_cint_operand" "I"))))])]
486 [(set_attr "type" "branch")
487 (set_attr "length" "4")])
489 ;; Simple binary operations.
;; Modulo (wrapping) integer vector add, vaddu{b,h,w,d}m.
492 (define_insn "add<mode>3"
493 [(set (match_operand:VI2 0 "register_operand" "=v")
494 (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
495 (match_operand:VI2 2 "register_operand" "v")))]
497 "vaddu<VI_char>m %0,%1,%2"
498 [(set_attr "type" "vecsimple")])
;; Single-precision vector float add (vaddfp; template line not in
;; this extract).
500 (define_insn "*altivec_addv4sf3"
501 [(set (match_operand:V4SF 0 "register_operand" "=v")
502 (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
503 (match_operand:V4SF 2 "register_operand" "v")))]
504 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
506 [(set_attr "type" "vecfloat")])
;; Vector add, compute carry-outs into each word.
508 (define_insn "altivec_vaddcuw"
509 [(set (match_operand:V4SI 0 "register_operand" "=v")
510 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
511 (match_operand:V4SI 2 "register_operand" "v")]
513 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
515 [(set_attr "type" "vecsimple")])
;; Unsigned saturating add; also sets the VSCR saturation bit.
517 (define_insn "altivec_vaddu<VI_char>s"
518 [(set (match_operand:VI 0 "register_operand" "=v")
519 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
520 (match_operand:VI 2 "register_operand" "v")]
522 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
524 "vaddu<VI_char>s %0,%1,%2"
525 [(set_attr "type" "vecsimple")])
;; Signed saturating add; also sets the VSCR saturation bit.
527 (define_insn "altivec_vadds<VI_char>s"
528 [(set (match_operand:VI 0 "register_operand" "=v")
529 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
530 (match_operand:VI 2 "register_operand" "v")]
532 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
533 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
534 "vadds<VI_char>s %0,%1,%2"
535 [(set_attr "type" "vecsimple")])
;; Modulo (wrapping) integer vector subtract.
538 (define_insn "sub<mode>3"
539 [(set (match_operand:VI2 0 "register_operand" "=v")
540 (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
541 (match_operand:VI2 2 "register_operand" "v")))]
543 "vsubu<VI_char>m %0,%1,%2"
544 [(set_attr "type" "vecsimple")])
;; Single-precision vector float subtract.
546 (define_insn "*altivec_subv4sf3"
547 [(set (match_operand:V4SF 0 "register_operand" "=v")
548 (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
549 (match_operand:V4SF 2 "register_operand" "v")))]
550 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
552 [(set_attr "type" "vecfloat")])
;; Vector subtract, compute borrow indications per word.
554 (define_insn "altivec_vsubcuw"
555 [(set (match_operand:V4SI 0 "register_operand" "=v")
556 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
557 (match_operand:V4SI 2 "register_operand" "v")]
559 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
561 [(set_attr "type" "vecsimple")])
;; Unsigned saturating subtract; sets VSCR on saturation.
563 (define_insn "altivec_vsubu<VI_char>s"
564 [(set (match_operand:VI 0 "register_operand" "=v")
565 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
566 (match_operand:VI 2 "register_operand" "v")]
568 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
569 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
570 "vsubu<VI_char>s %0,%1,%2"
571 [(set_attr "type" "vecsimple")])
;; Signed saturating subtract; sets VSCR on saturation.
573 (define_insn "altivec_vsubs<VI_char>s"
574 [(set (match_operand:VI 0 "register_operand" "=v")
575 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
576 (match_operand:VI 2 "register_operand" "v")]
578 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
579 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
580 "vsubs<VI_char>s %0,%1,%2"
581 [(set_attr "type" "vecsimple")])
;; Unsigned element-wise average.
584 (define_insn "altivec_vavgu<VI_char>"
585 [(set (match_operand:VI 0 "register_operand" "=v")
586 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
587 (match_operand:VI 2 "register_operand" "v")]
590 "vavgu<VI_char> %0,%1,%2"
591 [(set_attr "type" "vecsimple")])
;; Signed element-wise average.
593 (define_insn "altivec_vavgs<VI_char>"
594 [(set (match_operand:VI 0 "register_operand" "=v")
595 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
596 (match_operand:VI 2 "register_operand" "v")]
598 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
599 "vavgs<VI_char> %0,%1,%2"
600 [(set_attr "type" "vecsimple")])
;; Float bounds compare (vcmpbfp): per-element in-bounds test.
602 (define_insn "altivec_vcmpbfp"
603 [(set (match_operand:V4SI 0 "register_operand" "=v")
604 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
605 (match_operand:V4SF 2 "register_operand" "v")]
607 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
609 [(set_attr "type" "veccmp")])
;; Integer vector equality compare (mask result).
611 (define_insn "*altivec_eq<mode>"
612 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
613 (eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
614 (match_operand:VI2 2 "altivec_register_operand" "v")))]
616 "vcmpequ<VI_char> %0,%1,%2"
617 [(set_attr "type" "veccmpfx")])
;; Signed integer greater-than compare.
619 (define_insn "*altivec_gt<mode>"
620 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
621 (gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
622 (match_operand:VI2 2 "altivec_register_operand" "v")))]
624 "vcmpgts<VI_char> %0,%1,%2"
625 [(set_attr "type" "veccmpfx")])
;; Unsigned integer greater-than compare.
627 (define_insn "*altivec_gtu<mode>"
628 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
629 (gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
630 (match_operand:VI2 2 "altivec_register_operand" "v")))]
632 "vcmpgtu<VI_char> %0,%1,%2"
633 [(set_attr "type" "veccmpfx")])
;; Float equality compare (vcmpeqfp; template not in this extract).
635 (define_insn "*altivec_eqv4sf"
636 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
637 (eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
638 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
639 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
641 [(set_attr "type" "veccmp")])
;; Float greater-than compare.
643 (define_insn "*altivec_gtv4sf"
644 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
645 (gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
646 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
647 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
649 [(set_attr "type" "veccmp")])
;; Float greater-or-equal compare.
651 (define_insn "*altivec_gev4sf"
652 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
653 (ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
654 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
655 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
657 [(set_attr "type" "veccmp")])
;; Bitwise select: where the mask (operand 1) is nonzero pick from one
;; source, else the other (signed condition form).
659 (define_insn "*altivec_vsel<mode>"
660 [(set (match_operand:VM 0 "altivec_register_operand" "=v")
662 (ne:CC (match_operand:VM 1 "altivec_register_operand" "v")
663 (match_operand:VM 4 "zero_constant" ""))
664 (match_operand:VM 2 "altivec_register_operand" "v")
665 (match_operand:VM 3 "altivec_register_operand" "v")))]
666 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
668 [(set_attr "type" "vecmove")])
;; Same select with an unsigned condition code mode.
670 (define_insn "*altivec_vsel<mode>_uns"
671 [(set (match_operand:VM 0 "altivec_register_operand" "=v")
673 (ne:CCUNS (match_operand:VM 1 "altivec_register_operand" "v")
674 (match_operand:VM 4 "zero_constant" ""))
675 (match_operand:VM 2 "altivec_register_operand" "v")
676 (match_operand:VM 3 "altivec_register_operand" "v")))]
677 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
679 [(set_attr "type" "vecmove")])
681 ;; Fused multiply add.
;; vmaddfp: operand0 = operand1 * operand2 + operand3, fused.
683 (define_insn "*altivec_fmav4sf4"
684 [(set (match_operand:V4SF 0 "register_operand" "=v")
685 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
686 (match_operand:V4SF 2 "register_operand" "v")
687 (match_operand:V4SF 3 "register_operand" "v")))]
688 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
689 "vmaddfp %0,%1,%2,%3"
690 [(set_attr "type" "vecfloat")])
692 ;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
;; -0.0 (not +0.0) is used so the sign of a zero product is preserved.
694 (define_expand "altivec_mulv4sf3"
695 [(set (match_operand:V4SF 0 "register_operand")
696 (fma:V4SF (match_operand:V4SF 1 "register_operand")
697 (match_operand:V4SF 2 "register_operand")
699 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
703 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
704 neg0 = gen_reg_rtx (V4SImode);
705 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
706 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
708 operands[3] = gen_lowpart (V4SFmode, neg0);
711 ;; 32-bit integer multiplication
712 ;; A_high = Operand_0 & 0xFFFF0000 >> 16
713 ;; A_low = Operand_0 & 0xFFFF
714 ;; B_high = Operand_1 & 0xFFFF0000 >> 16
715 ;; B_low = Operand_1 & 0xFFFF
716 ;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16
718 ;; (define_insn "mulv4si3"
719 ;; [(set (match_operand:V4SI 0 "register_operand" "=v")
720 ;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
721 ;; (match_operand:V4SI 2 "register_operand" "v")))]
;; Power8 direct word multiply (vmuluwm; template not in this extract).
722 (define_insn "mulv4si3_p8"
723 [(set (match_operand:V4SI 0 "register_operand" "=v")
724 (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
725 (match_operand:V4SI 2 "register_operand" "v")))]
728 [(set_attr "type" "veccomplex")])
;; V4SI multiply: single insn on Power8, otherwise synthesized from
;; halfword multiplies per the 16x16 decomposition commented above.
730 (define_expand "mulv4si3"
731 [(use (match_operand:V4SI 0 "register_operand"))
732 (use (match_operand:V4SI 1 "register_operand"))
733 (use (match_operand:V4SI 2 "register_operand"))]
745 if (TARGET_P8_VECTOR)
747 emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
751 zero = gen_reg_rtx (V4SImode);
752 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
754 sixteen = gen_reg_rtx (V4SImode);
755 emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
757 swap = gen_reg_rtx (V4SImode);
758 emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));
760 one = gen_reg_rtx (V8HImode);
761 convert_move (one, operands[1], 0);
763 two = gen_reg_rtx (V8HImode);
764 convert_move (two, operands[2], 0);
766 small_swap = gen_reg_rtx (V8HImode);
767 convert_move (small_swap, swap, 0);
769 low_product = gen_reg_rtx (V4SImode);
770 emit_insn (gen_altivec_vmulouh (low_product, one, two))
;; NOTE(review): the trailing semicolon of the line above and several
;; intervening lines are absent in this extract of the original file.
772 high_product = gen_reg_rtx (V4SImode);
773 emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
775 emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));
777 emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
;; V8HI multiply via multiply-low-add with a zero addend.
782 (define_expand "mulv8hi3"
783 [(use (match_operand:V8HI 0 "register_operand"))
784 (use (match_operand:V8HI 1 "register_operand"))
785 (use (match_operand:V8HI 2 "register_operand"))]
788 rtx zero = gen_reg_rtx (V8HImode);
790 emit_insn (gen_altivec_vspltish (zero, const0_rtx))
;; NOTE(review): trailing semicolon missing in the extract, as above.
791 emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));
797 ;; Fused multiply subtract
;; vnmsubfp: operand0 = -(operand1 * operand2 - operand3).
798 (define_insn "*altivec_vnmsubfp"
799 [(set (match_operand:V4SF 0 "register_operand" "=v")
801 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
802 (match_operand:V4SF 2 "register_operand" "v")
804 (match_operand:V4SF 3 "register_operand" "v")))))]
805 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
806 "vnmsubfp %0,%1,%2,%3"
807 [(set_attr "type" "vecfloat")])
;; Unsigned multiply-sum, modulo: per-word sum of partial products
;; plus the word of operand 3.
809 (define_insn "altivec_vmsumu<VI_char>m"
810 [(set (match_operand:V4SI 0 "register_operand" "=v")
811 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
812 (match_operand:VIshort 2 "register_operand" "v")
813 (match_operand:V4SI 3 "register_operand" "v")]
816 "vmsumu<VI_char>m %0,%1,%2,%3"
817 [(set_attr "type" "veccomplex")])
;; Mixed-sign multiply-sum, modulo.
819 (define_insn "altivec_vmsumm<VI_char>m"
820 [(set (match_operand:V4SI 0 "register_operand" "=v")
821 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
822 (match_operand:VIshort 2 "register_operand" "v")
823 (match_operand:V4SI 3 "register_operand" "v")]
826 "vmsumm<VI_char>m %0,%1,%2,%3"
827 [(set_attr "type" "veccomplex")])
;; Signed halfword multiply-sum, modulo.
829 (define_insn "altivec_vmsumshm"
830 [(set (match_operand:V4SI 0 "register_operand" "=v")
831 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
832 (match_operand:V8HI 2 "register_operand" "v")
833 (match_operand:V4SI 3 "register_operand" "v")]
836 "vmsumshm %0,%1,%2,%3"
837 [(set_attr "type" "veccomplex")])
;; Unsigned halfword multiply-sum, saturating; sets VSCR on saturation.
839 (define_insn "altivec_vmsumuhs"
840 [(set (match_operand:V4SI 0 "register_operand" "=v")
841 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
842 (match_operand:V8HI 2 "register_operand" "v")
843 (match_operand:V4SI 3 "register_operand" "v")]
845 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
847 "vmsumuhs %0,%1,%2,%3"
848 [(set_attr "type" "veccomplex")])
;; Signed halfword multiply-sum, saturating; sets VSCR on saturation.
850 (define_insn "altivec_vmsumshs"
851 [(set (match_operand:V4SI 0 "register_operand" "=v")
852 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
853 (match_operand:V8HI 2 "register_operand" "v")
854 (match_operand:V4SI 3 "register_operand" "v")]
856 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
858 "vmsumshs %0,%1,%2,%3"
859 [(set_attr "type" "veccomplex")])
;; Unsigned element-wise maximum.
863 (define_insn "umax<mode>3"
864 [(set (match_operand:VI2 0 "register_operand" "=v")
865 (umax:VI2 (match_operand:VI2 1 "register_operand" "v")
866 (match_operand:VI2 2 "register_operand" "v")))]
868 "vmaxu<VI_char> %0,%1,%2"
869 [(set_attr "type" "vecsimple")])
;; Signed element-wise maximum.
871 (define_insn "smax<mode>3"
872 [(set (match_operand:VI2 0 "register_operand" "=v")
873 (smax:VI2 (match_operand:VI2 1 "register_operand" "v")
874 (match_operand:VI2 2 "register_operand" "v")))]
876 "vmaxs<VI_char> %0,%1,%2"
877 [(set_attr "type" "vecsimple")])
;; Float maximum (vmaxfp; template not in this extract).
879 (define_insn "*altivec_smaxv4sf3"
880 [(set (match_operand:V4SF 0 "register_operand" "=v")
881 (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
882 (match_operand:V4SF 2 "register_operand" "v")))]
883 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
885 [(set_attr "type" "veccmp")])
;; Unsigned element-wise minimum.
887 (define_insn "umin<mode>3"
888 [(set (match_operand:VI2 0 "register_operand" "=v")
889 (umin:VI2 (match_operand:VI2 1 "register_operand" "v")
890 (match_operand:VI2 2 "register_operand" "v")))]
892 "vminu<VI_char> %0,%1,%2"
893 [(set_attr "type" "vecsimple")])
;; Signed element-wise minimum.
895 (define_insn "smin<mode>3"
896 [(set (match_operand:VI2 0 "register_operand" "=v")
897 (smin:VI2 (match_operand:VI2 1 "register_operand" "v")
898 (match_operand:VI2 2 "register_operand" "v")))]
900 "vmins<VI_char> %0,%1,%2"
901 [(set_attr "type" "vecsimple")])
;; Float minimum (vminfp; template not in this extract).
903 (define_insn "*altivec_sminv4sf3"
904 [(set (match_operand:V4SF 0 "register_operand" "=v")
905 (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
906 (match_operand:V4SF 2 "register_operand" "v")))]
907 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
909 [(set_attr "type" "veccmp")])
;; Multiply-high and add, signed saturating; sets VSCR on saturation.
911 (define_insn "altivec_vmhaddshs"
912 [(set (match_operand:V8HI 0 "register_operand" "=v")
913 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
914 (match_operand:V8HI 2 "register_operand" "v")
915 (match_operand:V8HI 3 "register_operand" "v")]
917 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
919 "vmhaddshs %0,%1,%2,%3"
920 [(set_attr "type" "veccomplex")])
;; Multiply-high, round, and add, signed saturating.
922 (define_insn "altivec_vmhraddshs"
923 [(set (match_operand:V8HI 0 "register_operand" "=v")
924 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
925 (match_operand:V8HI 2 "register_operand" "v")
926 (match_operand:V8HI 3 "register_operand" "v")]
928 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
930 "vmhraddshs %0,%1,%2,%3"
931 [(set_attr "type" "veccomplex")])
;; Halfword multiply-low-add, expressed as genuine RTL (plus of mult)
;; rather than an unspec, so the optimizers can reason about it.
933 (define_insn "fmav8hi4"
934 [(set (match_operand:V8HI 0 "register_operand" "=v")
935 (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
936 (match_operand:V8HI 2 "register_operand" "v"))
937 (match_operand:V8HI 3 "register_operand" "v")))]
939 "vmladduhm %0,%1,%2,%3"
940 [(set_attr "type" "veccomplex")])
;; Merge-high byte: interleave the high halves of two V16QI vectors.
;; The expander builds an explicit VEC_SELECT of a VEC_CONCAT; element
;; numbering flips for little-endian with -maltivec=be semantics.
942 (define_expand "altivec_vmrghb"
943 [(use (match_operand:V16QI 0 "register_operand"))
944 (use (match_operand:V16QI 1 "register_operand"))
945 (use (match_operand:V16QI 2 "register_operand"))]
951 /* Special handling for LE with -maltivec=be. */
952 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
954 v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
955 GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
956 GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
957 GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
958 x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
962 v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
963 GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
964 GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
965 GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
966 x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
969 x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
970 emit_insn (gen_rtx_SET (operands[0], x));
;; Matcher for the expander above; on LE the same select is realized
;; by vmrglb with swapped inputs.
974 (define_insn "*altivec_vmrghb_internal"
975 [(set (match_operand:V16QI 0 "register_operand" "=v")
978 (match_operand:V16QI 1 "register_operand" "v")
979 (match_operand:V16QI 2 "register_operand" "v"))
980 (parallel [(const_int 0) (const_int 16)
981 (const_int 1) (const_int 17)
982 (const_int 2) (const_int 18)
983 (const_int 3) (const_int 19)
984 (const_int 4) (const_int 20)
985 (const_int 5) (const_int 21)
986 (const_int 6) (const_int 22)
987 (const_int 7) (const_int 23)])))]
990 if (BYTES_BIG_ENDIAN)
991 return "vmrghb %0,%1,%2";
993 return "vmrglb %0,%2,%1";
995 [(set_attr "type" "vecperm")])
;; Raw vmrghb with no endian adjustment, for builtin expansion.
997 (define_insn "altivec_vmrghb_direct"
998 [(set (match_operand:V16QI 0 "register_operand" "=v")
999 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1000 (match_operand:V16QI 2 "register_operand" "v")]
1001 UNSPEC_VMRGH_DIRECT))]
1004 [(set_attr "type" "vecperm")])
;; Merge-high halfword: same structure as vmrghb, V8HI elements.
1006 (define_expand "altivec_vmrghh"
1007 [(use (match_operand:V8HI 0 "register_operand"))
1008 (use (match_operand:V8HI 1 "register_operand"))
1009 (use (match_operand:V8HI 2 "register_operand"))]
1015 /* Special handling for LE with -maltivec=be. */
1016 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1018 v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
1019 GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
1020 x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
1024 v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
1025 GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
1026 x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
1029 x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1030 emit_insn (gen_rtx_SET (operands[0], x));
;; Matcher for vmrghh; swaps to vmrglh on LE.
1034 (define_insn "*altivec_vmrghh_internal"
1035 [(set (match_operand:V8HI 0 "register_operand" "=v")
1038 (match_operand:V8HI 1 "register_operand" "v")
1039 (match_operand:V8HI 2 "register_operand" "v"))
1040 (parallel [(const_int 0) (const_int 8)
1041 (const_int 1) (const_int 9)
1042 (const_int 2) (const_int 10)
1043 (const_int 3) (const_int 11)])))]
1046 if (BYTES_BIG_ENDIAN)
1047 return "vmrghh %0,%1,%2";
1049 return "vmrglh %0,%2,%1";
1051 [(set_attr "type" "vecperm")])
;; Raw vmrghh with no endian adjustment, for builtin expansion.
1053 (define_insn "altivec_vmrghh_direct"
1054 [(set (match_operand:V8HI 0 "register_operand" "=v")
1055 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
1056 (match_operand:V8HI 2 "register_operand" "v")]
1057 UNSPEC_VMRGH_DIRECT))]
1060 [(set_attr "type" "vecperm")])
;; Merge-high word: interleave the high halves of two V4SI vectors.
1062 (define_expand "altivec_vmrghw"
1063 [(use (match_operand:V4SI 0 "register_operand"))
1064 (use (match_operand:V4SI 1 "register_operand"))
1065 (use (match_operand:V4SI 2 "register_operand"))]
1066 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1071 /* Special handling for LE with -maltivec=be. */
1072 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1074 v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
1075 x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
1079 v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
1080 x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
1083 x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1084 emit_insn (gen_rtx_SET (operands[0], x));
;; Matcher for vmrghw; swaps to vmrglw on LE.
1088 (define_insn "*altivec_vmrghw_internal"
1089 [(set (match_operand:V4SI 0 "register_operand" "=v")
1092 (match_operand:V4SI 1 "register_operand" "v")
1093 (match_operand:V4SI 2 "register_operand" "v"))
1094 (parallel [(const_int 0) (const_int 4)
1095 (const_int 1) (const_int 5)])))]
1096 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1098 if (BYTES_BIG_ENDIAN)
1099 return "vmrghw %0,%1,%2";
1101 return "vmrglw %0,%2,%1";
1103 [(set_attr "type" "vecperm")])
;; Raw vmrghw; second alternative uses the VSX form xxmrghw so any
;; VSX register can be used.
1105 (define_insn "altivec_vmrghw_direct"
1106 [(set (match_operand:V4SI 0 "register_operand" "=v,wa")
1107 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v,wa")
1108 (match_operand:V4SI 2 "register_operand" "v,wa")]
1109 UNSPEC_VMRGH_DIRECT))]
1113 xxmrghw %x0,%x1,%x2"
1114 [(set_attr "type" "vecperm")])
;; Float variant of the merge-high word matcher (same vmrghw insn).
1116 (define_insn "*altivec_vmrghsf"
1117 [(set (match_operand:V4SF 0 "register_operand" "=v")
1120 (match_operand:V4SF 1 "register_operand" "v")
1121 (match_operand:V4SF 2 "register_operand" "v"))
1122 (parallel [(const_int 0) (const_int 4)
1123 (const_int 1) (const_int 5)])))]
1124 "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
1126 if (BYTES_BIG_ENDIAN)
1127 return "vmrghw %0,%1,%2";
1129 return "vmrglw %0,%2,%1";
1131 [(set_attr "type" "vecperm")])
;; vmrglb (merge low, byte elements), three layers:
;;  - the expander builds a vec_select of a V32QI vec_concat; for LE with
;;    -maltivec=be it mirrors both the selection indices and the operand
;;    order so the RTL matches what the instruction actually computes;
;;  - the "*...internal" insn matches that RTL and emits vmrglb on BE,
;;    or vmrghb with swapped operands on LE;
;;  - the "_direct" insn wraps the raw instruction in
;;    UNSPEC_VMRGL_DIRECT with no endian adjustment.
1133 (define_expand "altivec_vmrglb"
1134 [(use (match_operand:V16QI 0 "register_operand"))
1135 (use (match_operand:V16QI 1 "register_operand"))
1136 (use (match_operand:V16QI 2 "register_operand"))]
1142 /* Special handling for LE with -maltivec=be. */
1143 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1145 v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
1146 GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
1147 GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
1148 GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
1149 x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
1153 v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
1154 GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
1155 GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
1156 GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
1157 x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
1160 x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1161 emit_insn (gen_rtx_SET (operands[0], x));
1165 (define_insn "*altivec_vmrglb_internal"
1166 [(set (match_operand:V16QI 0 "register_operand" "=v")
1169 (match_operand:V16QI 1 "register_operand" "v")
1170 (match_operand:V16QI 2 "register_operand" "v"))
1171 (parallel [(const_int 8) (const_int 24)
1172 (const_int 9) (const_int 25)
1173 (const_int 10) (const_int 26)
1174 (const_int 11) (const_int 27)
1175 (const_int 12) (const_int 28)
1176 (const_int 13) (const_int 29)
1177 (const_int 14) (const_int 30)
1178 (const_int 15) (const_int 31)])))]
1181 if (BYTES_BIG_ENDIAN)
1182 return "vmrglb %0,%1,%2";
1184 return "vmrghb %0,%2,%1";
1186 [(set_attr "type" "vecperm")])
1188 (define_insn "altivec_vmrglb_direct"
1189 [(set (match_operand:V16QI 0 "register_operand" "=v")
1190 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1191 (match_operand:V16QI 2 "register_operand" "v")]
1192 UNSPEC_VMRGL_DIRECT))]
1195 [(set_attr "type" "vecperm")])
;; vmrglh (merge low, halfword elements) — same three-layer scheme as
;; vmrglb above: an endian-aware expander (indices and operand order
;; mirrored for LE with -maltivec=be), an internal insn that emits
;; vmrglh on BE or vmrghh with swapped operands on LE, and a "_direct"
;; UNSPEC_VMRGL_DIRECT variant with no endian adjustment.
1197 (define_expand "altivec_vmrglh"
1198 [(use (match_operand:V8HI 0 "register_operand"))
1199 (use (match_operand:V8HI 1 "register_operand"))
1200 (use (match_operand:V8HI 2 "register_operand"))]
1206 /* Special handling for LE with -maltivec=be. */
1207 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1209 v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
1210 GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
1211 x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
1215 v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
1216 GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
1217 x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
1220 x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1221 emit_insn (gen_rtx_SET (operands[0], x));
1225 (define_insn "*altivec_vmrglh_internal"
1226 [(set (match_operand:V8HI 0 "register_operand" "=v")
1229 (match_operand:V8HI 1 "register_operand" "v")
1230 (match_operand:V8HI 2 "register_operand" "v"))
1231 (parallel [(const_int 4) (const_int 12)
1232 (const_int 5) (const_int 13)
1233 (const_int 6) (const_int 14)
1234 (const_int 7) (const_int 15)])))]
1237 if (BYTES_BIG_ENDIAN)
1238 return "vmrglh %0,%1,%2";
1240 return "vmrghh %0,%2,%1";
1242 [(set_attr "type" "vecperm")])
1244 (define_insn "altivec_vmrglh_direct"
1245 [(set (match_operand:V8HI 0 "register_operand" "=v")
1246 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
1247 (match_operand:V8HI 2 "register_operand" "v")]
1248 UNSPEC_VMRGL_DIRECT))]
1251 [(set_attr "type" "vecperm")])
;; vmrglw (merge low, word elements) — same scheme as vmrglb/vmrglh.
;; Note the "_direct" insn here has a second register alternative
;; ("wa") that emits the VSX form xxmrglw.
1253 (define_expand "altivec_vmrglw"
1254 [(use (match_operand:V4SI 0 "register_operand"))
1255 (use (match_operand:V4SI 1 "register_operand"))
1256 (use (match_operand:V4SI 2 "register_operand"))]
1257 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1262 /* Special handling for LE with -maltivec=be. */
1263 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1265 v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
1266 x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
1270 v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
1271 x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
1274 x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1275 emit_insn (gen_rtx_SET (operands[0], x));
1279 (define_insn "*altivec_vmrglw_internal"
1280 [(set (match_operand:V4SI 0 "register_operand" "=v")
1283 (match_operand:V4SI 1 "register_operand" "v")
1284 (match_operand:V4SI 2 "register_operand" "v"))
1285 (parallel [(const_int 2) (const_int 6)
1286 (const_int 3) (const_int 7)])))]
1287 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1289 if (BYTES_BIG_ENDIAN)
1290 return "vmrglw %0,%1,%2";
1292 return "vmrghw %0,%2,%1";
1294 [(set_attr "type" "vecperm")])
1296 (define_insn "altivec_vmrglw_direct"
1297 [(set (match_operand:V4SI 0 "register_operand" "=v,wa")
1298 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v,wa")
1299 (match_operand:V4SI 2 "register_operand" "v,wa")]
1300 UNSPEC_VMRGL_DIRECT))]
1304 xxmrglw %x0,%x1,%x2"
1305 [(set_attr "type" "vecperm")])
;; Merge the low halves of two V4SF vectors; like *altivec_vmrghsf but
;; selecting elements {2,6,3,7}.  On little-endian this is emitted as
;; vmrghw with the source operands swapped.
1307 (define_insn "*altivec_vmrglsf"
1308 [(set (match_operand:V4SF 0 "register_operand" "=v")
1311 (match_operand:V4SF 1 "register_operand" "v")
1312 (match_operand:V4SF 2 "register_operand" "v"))
1313 (parallel [(const_int 2) (const_int 6)
1314 (const_int 3) (const_int 7)])))]
1315 "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
1317 if (BYTES_BIG_ENDIAN)
1318 return "vmrglw %0,%1,%2";
1320 return "vmrghw %0,%2,%1";
1322 [(set_attr "type" "vecperm")])
;; Power8 even/odd word merges.  For the doubleword modes (VSX_D) the
;; expanders emit generic vec_concat + vec_select RTL; for the word
;; modes (VSX_W) the insns emit vmrgew/vmrgow directly, swapping to the
;; opposite instruction with reversed operands on little-endian.  The
;; "_direct" variants wrap the raw instruction in an unspec with no
;; endian adjustment.
1324 ;; Power8 vector merge two V2DF/V2DI even words to V2DF
1325 (define_expand "p8_vmrgew_<mode>"
1326 [(use (match_operand:VSX_D 0 "vsx_register_operand"))
1327 (use (match_operand:VSX_D 1 "vsx_register_operand"))
1328 (use (match_operand:VSX_D 2 "vsx_register_operand"))]
1329 "VECTOR_MEM_VSX_P (<MODE>mode)"
1334 v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
1335 x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
1337 x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
1338 emit_insn (gen_rtx_SET (operands[0], x));
1342 ;; Power8 vector merge two V4SF/V4SI even words to V4SF
1343 (define_insn "p8_vmrgew_<mode>"
1344 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1346 (vec_concat:<VS_double>
1347 (match_operand:VSX_W 1 "register_operand" "v")
1348 (match_operand:VSX_W 2 "register_operand" "v"))
1349 (parallel [(const_int 0) (const_int 4)
1350 (const_int 2) (const_int 6)])))]
1353 if (BYTES_BIG_ENDIAN)
1354 return "vmrgew %0,%1,%2";
1356 return "vmrgow %0,%2,%1";
1358 [(set_attr "type" "vecperm")])
1360 (define_insn "p8_vmrgow_<mode>"
1361 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1363 (vec_concat:<VS_double>
1364 (match_operand:VSX_W 1 "register_operand" "v")
1365 (match_operand:VSX_W 2 "register_operand" "v"))
1366 (parallel [(const_int 1) (const_int 5)
1367 (const_int 3) (const_int 7)])))]
1370 if (BYTES_BIG_ENDIAN)
1371 return "vmrgow %0,%1,%2";
1373 return "vmrgew %0,%2,%1";
1375 [(set_attr "type" "vecperm")])
1377 (define_expand "p8_vmrgow_<mode>"
1378 [(use (match_operand:VSX_D 0 "vsx_register_operand"))
1379 (use (match_operand:VSX_D 1 "vsx_register_operand"))
1380 (use (match_operand:VSX_D 2 "vsx_register_operand"))]
1381 "VECTOR_MEM_VSX_P (<MODE>mode)"
1386 v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
1387 x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
1389 x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
1390 emit_insn (gen_rtx_SET (operands[0], x));
1394 (define_insn "p8_vmrgew_<mode>_direct"
1395 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1396 (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
1397 (match_operand:VSX_W 2 "register_operand" "v")]
1398 UNSPEC_VMRGEW_DIRECT))]
1401 [(set_attr "type" "vecperm")])
1403 (define_insn "p8_vmrgow_<mode>_direct"
1404 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1405 (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
1406 (match_operand:VSX_W 2 "register_operand" "v")]
1407 UNSPEC_VMRGOW_DIRECT))]
1410 [(set_attr "type" "vecperm")])
;; Widening-multiply even/odd expanders for all element sizes and
;; signednesses.  "Even"/"odd" here refer to big-endian element
;; numbering, so when the effective element order is little-endian
;; (!VECTOR_ELT_ORDER_BIG) each expander dispatches to the OPPOSITE
;; parity instruction: even -> vmulo*, odd -> vmule*.
1412 (define_expand "vec_widen_umult_even_v16qi"
1413 [(use (match_operand:V8HI 0 "register_operand"))
1414 (use (match_operand:V16QI 1 "register_operand"))
1415 (use (match_operand:V16QI 2 "register_operand"))]
1418 if (VECTOR_ELT_ORDER_BIG)
1419 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1421 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1425 (define_expand "vec_widen_smult_even_v16qi"
1426 [(use (match_operand:V8HI 0 "register_operand"))
1427 (use (match_operand:V16QI 1 "register_operand"))
1428 (use (match_operand:V16QI 2 "register_operand"))]
1431 if (VECTOR_ELT_ORDER_BIG)
1432 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1434 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1438 (define_expand "vec_widen_umult_even_v8hi"
1439 [(use (match_operand:V4SI 0 "register_operand"))
1440 (use (match_operand:V8HI 1 "register_operand"))
1441 (use (match_operand:V8HI 2 "register_operand"))]
1444 if (VECTOR_ELT_ORDER_BIG)
1445 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1447 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1451 (define_expand "vec_widen_smult_even_v8hi"
1452 [(use (match_operand:V4SI 0 "register_operand"))
1453 (use (match_operand:V8HI 1 "register_operand"))
1454 (use (match_operand:V8HI 2 "register_operand"))]
1457 if (VECTOR_ELT_ORDER_BIG)
1458 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1460 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1464 (define_expand "vec_widen_umult_even_v4si"
1465 [(use (match_operand:V2DI 0 "register_operand"))
1466 (use (match_operand:V4SI 1 "register_operand"))
1467 (use (match_operand:V4SI 2 "register_operand"))]
1470 if (VECTOR_ELT_ORDER_BIG)
1471 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1473 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1477 (define_expand "vec_widen_smult_even_v4si"
1478 [(use (match_operand:V2DI 0 "register_operand"))
1479 (use (match_operand:V4SI 1 "register_operand"))
1480 (use (match_operand:V4SI 2 "register_operand"))]
1483 if (VECTOR_ELT_ORDER_BIG)
1484 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1486 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1490 (define_expand "vec_widen_umult_odd_v16qi"
1491 [(use (match_operand:V8HI 0 "register_operand"))
1492 (use (match_operand:V16QI 1 "register_operand"))
1493 (use (match_operand:V16QI 2 "register_operand"))]
1496 if (VECTOR_ELT_ORDER_BIG)
1497 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1499 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1503 (define_expand "vec_widen_smult_odd_v16qi"
1504 [(use (match_operand:V8HI 0 "register_operand"))
1505 (use (match_operand:V16QI 1 "register_operand"))
1506 (use (match_operand:V16QI 2 "register_operand"))]
1509 if (VECTOR_ELT_ORDER_BIG)
1510 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1512 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1516 (define_expand "vec_widen_umult_odd_v8hi"
1517 [(use (match_operand:V4SI 0 "register_operand"))
1518 (use (match_operand:V8HI 1 "register_operand"))
1519 (use (match_operand:V8HI 2 "register_operand"))]
1522 if (VECTOR_ELT_ORDER_BIG)
1523 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1525 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1529 (define_expand "vec_widen_smult_odd_v8hi"
1530 [(use (match_operand:V4SI 0 "register_operand"))
1531 (use (match_operand:V8HI 1 "register_operand"))
1532 (use (match_operand:V8HI 2 "register_operand"))]
1535 if (VECTOR_ELT_ORDER_BIG)
1536 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1538 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1542 (define_expand "vec_widen_umult_odd_v4si"
1543 [(use (match_operand:V2DI 0 "register_operand"))
1544 (use (match_operand:V4SI 1 "register_operand"))
1545 (use (match_operand:V4SI 2 "register_operand"))]
1548 if (VECTOR_ELT_ORDER_BIG)
1549 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1551 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1555 (define_expand "vec_widen_smult_odd_v4si"
1556 [(use (match_operand:V2DI 0 "register_operand"))
1557 (use (match_operand:V4SI 1 "register_operand"))
1558 (use (match_operand:V4SI 2 "register_operand"))]
1561 if (VECTOR_ELT_ORDER_BIG)
1562 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1564 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
;; Widening even/odd multiply instructions, one define_insn per
;; signedness and element size (byte -> V8HI, halfword -> V4SI,
;; word -> V2DI).  Each is represented as an unspec over the two
;; narrow-element inputs; the insn name matches the instruction it
;; emits (templates elided in this chunk).
1568 (define_insn "altivec_vmuleub"
1569 [(set (match_operand:V8HI 0 "register_operand" "=v")
1570 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1571 (match_operand:V16QI 2 "register_operand" "v")]
1575 [(set_attr "type" "veccomplex")])
1577 (define_insn "altivec_vmuloub"
1578 [(set (match_operand:V8HI 0 "register_operand" "=v")
1579 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1580 (match_operand:V16QI 2 "register_operand" "v")]
1584 [(set_attr "type" "veccomplex")])
1586 (define_insn "altivec_vmulesb"
1587 [(set (match_operand:V8HI 0 "register_operand" "=v")
1588 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1589 (match_operand:V16QI 2 "register_operand" "v")]
1593 [(set_attr "type" "veccomplex")])
1595 (define_insn "altivec_vmulosb"
1596 [(set (match_operand:V8HI 0 "register_operand" "=v")
1597 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1598 (match_operand:V16QI 2 "register_operand" "v")]
1602 [(set_attr "type" "veccomplex")])
1604 (define_insn "altivec_vmuleuh"
1605 [(set (match_operand:V4SI 0 "register_operand" "=v")
1606 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1607 (match_operand:V8HI 2 "register_operand" "v")]
1611 [(set_attr "type" "veccomplex")])
1613 (define_insn "altivec_vmulouh"
1614 [(set (match_operand:V4SI 0 "register_operand" "=v")
1615 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1616 (match_operand:V8HI 2 "register_operand" "v")]
1620 [(set_attr "type" "veccomplex")])
1622 (define_insn "altivec_vmulesh"
1623 [(set (match_operand:V4SI 0 "register_operand" "=v")
1624 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1625 (match_operand:V8HI 2 "register_operand" "v")]
1629 [(set_attr "type" "veccomplex")])
1631 (define_insn "altivec_vmulosh"
1632 [(set (match_operand:V4SI 0 "register_operand" "=v")
1633 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1634 (match_operand:V8HI 2 "register_operand" "v")]
1638 [(set_attr "type" "veccomplex")])
1640 (define_insn "altivec_vmuleuw"
1641 [(set (match_operand:V2DI 0 "register_operand" "=v")
1642 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1643 (match_operand:V4SI 2 "register_operand" "v")]
1647 [(set_attr "type" "veccomplex")])
1649 (define_insn "altivec_vmulouw"
1650 [(set (match_operand:V2DI 0 "register_operand" "=v")
1651 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1652 (match_operand:V4SI 2 "register_operand" "v")]
1656 [(set_attr "type" "veccomplex")])
1658 (define_insn "altivec_vmulesw"
1659 [(set (match_operand:V2DI 0 "register_operand" "=v")
1660 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1661 (match_operand:V4SI 2 "register_operand" "v")]
1665 [(set_attr "type" "veccomplex")])
1667 (define_insn "altivec_vmulosw"
1668 [(set (match_operand:V2DI 0 "register_operand" "=v")
1669 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1670 (match_operand:V4SI 2 "register_operand" "v")]
1674 [(set_attr "type" "veccomplex")])
;; Vector pack instructions.  All endian-aware variants swap the two
;; source operands when the effective element order is little-endian
;; (VECTOR_ELT_ORDER_BIG false); the "_direct" modulo-pack variant
;; tests raw BYTES_BIG_ENDIAN instead, ignoring -maltivec=be.
;; Suffix key: ss = signed->signed saturate, us with vpks = signed ->
;; unsigned saturate, us with vpku = unsigned->unsigned saturate,
;; um = unsigned modulo (truncating).
1676 ;; Vector pack/unpack
1677 (define_insn "altivec_vpkpx"
1678 [(set (match_operand:V8HI 0 "register_operand" "=v")
1679 (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
1680 (match_operand:V4SI 2 "register_operand" "v")]
1684 if (VECTOR_ELT_ORDER_BIG)
1685 return "vpkpx %0,%1,%2";
1687 return "vpkpx %0,%2,%1";
1689 [(set_attr "type" "vecperm")])
1691 (define_insn "altivec_vpks<VI_char>ss"
1692 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1693 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1694 (match_operand:VP 2 "register_operand" "v")]
1695 UNSPEC_VPACK_SIGN_SIGN_SAT))]
1698 if (VECTOR_ELT_ORDER_BIG)
1699 return "vpks<VI_char>ss %0,%1,%2";
1701 return "vpks<VI_char>ss %0,%2,%1";
1703 [(set_attr "type" "vecperm")])
1705 (define_insn "altivec_vpks<VI_char>us"
1706 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1707 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1708 (match_operand:VP 2 "register_operand" "v")]
1709 UNSPEC_VPACK_SIGN_UNS_SAT))]
1712 if (VECTOR_ELT_ORDER_BIG)
1713 return "vpks<VI_char>us %0,%1,%2";
1715 return "vpks<VI_char>us %0,%2,%1";
1717 [(set_attr "type" "vecperm")])
1719 (define_insn "altivec_vpku<VI_char>us"
1720 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1721 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1722 (match_operand:VP 2 "register_operand" "v")]
1723 UNSPEC_VPACK_UNS_UNS_SAT))]
1726 if (VECTOR_ELT_ORDER_BIG)
1727 return "vpku<VI_char>us %0,%1,%2";
1729 return "vpku<VI_char>us %0,%2,%1";
1731 [(set_attr "type" "vecperm")])
1733 (define_insn "altivec_vpku<VI_char>um"
1734 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1735 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1736 (match_operand:VP 2 "register_operand" "v")]
1737 UNSPEC_VPACK_UNS_UNS_MOD))]
1740 if (VECTOR_ELT_ORDER_BIG)
1741 return "vpku<VI_char>um %0,%1,%2";
1743 return "vpku<VI_char>um %0,%2,%1";
1745 [(set_attr "type" "vecperm")])
1747 (define_insn "altivec_vpku<VI_char>um_direct"
1748 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1749 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1750 (match_operand:VP 2 "register_operand" "v")]
1751 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
1754 if (BYTES_BIG_ENDIAN)
1755 return "vpku<VI_char>um %0,%1,%2";
1757 return "vpku<VI_char>um %0,%2,%1";
1759 [(set_attr "type" "vecperm")])
;; Rotates and whole-vector shifts.
;;  - *altivec_vrl<VI_char>: element rotate-left, expressed with the
;;    generic (rotate ...) RTL code for all VI2 modes.
;;  - altivec_vrl<VI_char>mi: rotate-and-mask-insert; operand 1 is tied
;;    to the output ("0" constraint) because the instruction inserts
;;    into the destination.
;;  - altivec_vrl<VI_char>nm: rotate-and-mask unspec variant.
;;  - altivec_vsl / altivec_vslo: whole-register shift left (by bits /
;;    by octets), kept as unspecs on V4SI.
1761 (define_insn "*altivec_vrl<VI_char>"
1762 [(set (match_operand:VI2 0 "register_operand" "=v")
1763 (rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
1764 (match_operand:VI2 2 "register_operand" "v")))]
1766 "vrl<VI_char> %0,%1,%2"
1767 [(set_attr "type" "vecsimple")])
1769 (define_insn "altivec_vrl<VI_char>mi"
1770 [(set (match_operand:VIlong 0 "register_operand" "=v")
1771 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "0")
1772 (match_operand:VIlong 2 "register_operand" "v")
1773 (match_operand:VIlong 3 "register_operand" "v")]
1776 "vrl<VI_char>mi %0,%2,%3"
1777 [(set_attr "type" "veclogical")])
1779 (define_insn "altivec_vrl<VI_char>nm"
1780 [(set (match_operand:VIlong 0 "register_operand" "=v")
1781 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1782 (match_operand:VIlong 2 "register_operand" "v")]
1785 "vrl<VI_char>nm %0,%1,%2"
1786 [(set_attr "type" "veclogical")])
1788 (define_insn "altivec_vsl"
1789 [(set (match_operand:V4SI 0 "register_operand" "=v")
1790 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1791 (match_operand:V4SI 2 "register_operand" "v")]
1795 [(set_attr "type" "vecperm")])
1797 (define_insn "altivec_vslo"
1798 [(set (match_operand:V4SI 0 "register_operand" "=v")
1799 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1800 (match_operand:V4SI 2 "register_operand" "v")]
1804 [(set_attr "type" "vecperm")])
1807 [(set (match_operand:V16QI 0 "register_operand" "=v")
1808 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1809 (match_operand:V16QI 2 "register_operand" "v")]
1813 [(set_attr "type" "vecsimple")])
1816 [(set (match_operand:V16QI 0 "register_operand" "=v")
1817 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1818 (match_operand:V16QI 2 "register_operand" "v")]
1822 [(set_attr "type" "vecsimple")])
;; Per-element shifts using generic RTL codes — ashift for vsl<x>,
;; lshiftrt for vsr<x>, ashiftrt for vsra<x> — over all VI2 modes,
;; plus the whole-register right shifts vsr (by bits) and vsro (by
;; octets) kept as V4SI unspecs.
1824 (define_insn "*altivec_vsl<VI_char>"
1825 [(set (match_operand:VI2 0 "register_operand" "=v")
1826 (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
1827 (match_operand:VI2 2 "register_operand" "v")))]
1829 "vsl<VI_char> %0,%1,%2"
1830 [(set_attr "type" "vecsimple")])
1832 (define_insn "*altivec_vsr<VI_char>"
1833 [(set (match_operand:VI2 0 "register_operand" "=v")
1834 (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
1835 (match_operand:VI2 2 "register_operand" "v")))]
1837 "vsr<VI_char> %0,%1,%2"
1838 [(set_attr "type" "vecsimple")])
1840 (define_insn "*altivec_vsra<VI_char>"
1841 [(set (match_operand:VI2 0 "register_operand" "=v")
1842 (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
1843 (match_operand:VI2 2 "register_operand" "v")))]
1845 "vsra<VI_char> %0,%1,%2"
1846 [(set_attr "type" "vecsimple")])
1848 (define_insn "altivec_vsr"
1849 [(set (match_operand:V4SI 0 "register_operand" "=v")
1850 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1851 (match_operand:V4SI 2 "register_operand" "v")]
1855 [(set_attr "type" "vecperm")])
1857 (define_insn "altivec_vsro"
1858 [(set (match_operand:V4SI 0 "register_operand" "=v")
1859 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1860 (match_operand:V4SI 2 "register_operand" "v")]
1864 [(set_attr "type" "vecperm")])
;; Saturating vector sums.  All the insns here also set VSCR (the
;; vector status/control register) via UNSPEC_SET_VSCR, modeling the
;; saturation flag side effect.  The vsum2sws/vsumsws expanders handle
;; little-endian by repositioning operand 2 first (vsldoi rotate or
;; word splat), calling the "_direct" insn, then rotating the result
;; into place with vsldoi.
1866 (define_insn "altivec_vsum4ubs"
1867 [(set (match_operand:V4SI 0 "register_operand" "=v")
1868 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
1869 (match_operand:V4SI 2 "register_operand" "v")]
1871 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1874 [(set_attr "type" "veccomplex")])
1876 (define_insn "altivec_vsum4s<VI_char>s"
1877 [(set (match_operand:V4SI 0 "register_operand" "=v")
1878 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
1879 (match_operand:V4SI 2 "register_operand" "v")]
1881 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1883 "vsum4s<VI_char>s %0,%1,%2"
1884 [(set_attr "type" "veccomplex")])
1886 (define_expand "altivec_vsum2sws"
1887 [(use (match_operand:V4SI 0 "register_operand"))
1888 (use (match_operand:V4SI 1 "register_operand"))
1889 (use (match_operand:V4SI 2 "register_operand"))]
1892 if (VECTOR_ELT_ORDER_BIG)
1893 emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
1897 rtx tmp1 = gen_reg_rtx (V4SImode);
1898 rtx tmp2 = gen_reg_rtx (V4SImode);
1899 emit_insn (gen_altivec_vsldoi_v4si (tmp1, operands[2],
1900 operands[2], GEN_INT (12)));
1901 emit_insn (gen_altivec_vsum2sws_direct (tmp2, operands[1], tmp1));
1902 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
1908 ; FIXME: This can probably be expressed without an UNSPEC.
1909 (define_insn "altivec_vsum2sws_direct"
1910 [(set (match_operand:V4SI 0 "register_operand" "=v")
1911 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1912 (match_operand:V4SI 2 "register_operand" "v")]
1914 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1917 [(set_attr "type" "veccomplex")])
1919 (define_expand "altivec_vsumsws"
1920 [(use (match_operand:V4SI 0 "register_operand"))
1921 (use (match_operand:V4SI 1 "register_operand"))
1922 (use (match_operand:V4SI 2 "register_operand"))]
1925 if (VECTOR_ELT_ORDER_BIG)
1926 emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
1930 rtx tmp1 = gen_reg_rtx (V4SImode);
1931 rtx tmp2 = gen_reg_rtx (V4SImode);
1932 emit_insn (gen_altivec_vspltw_direct (tmp1, operands[2], const0_rtx));
1933 emit_insn (gen_altivec_vsumsws_direct (tmp2, operands[1], tmp1));
1934 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
1940 ; FIXME: This can probably be expressed without an UNSPEC.
1941 (define_insn "altivec_vsumsws_direct"
1942 [(set (match_operand:V4SI 0 "register_operand" "=v")
1943 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1944 (match_operand:V4SI 2 "register_operand" "v")]
1945 UNSPEC_VSUMSWS_DIRECT))
1946 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1949 [(set_attr "type" "veccomplex")])
;; Byte splat, three layers: the expander mirrors the splat index
;; (15 - n) for LE with -maltivec=be so the RTL records the element
;; actually selected; the internal insn re-adjusts the index for any
;; little-endian target (the instruction itself is big-endian biased);
;; the "_direct" unspec form passes the index through unchanged.
1951 (define_expand "altivec_vspltb"
1952 [(use (match_operand:V16QI 0 "register_operand"))
1953 (use (match_operand:V16QI 1 "register_operand"))
1954 (use (match_operand:QI 2 "u5bit_cint_operand"))]
1960 /* Special handling for LE with -maltivec=be. We have to reflect
1961 the actual selected index for the splat in the RTL. */
1962 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1963 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
1965 v = gen_rtvec (1, operands[2]);
1966 x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
1967 x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
1968 emit_insn (gen_rtx_SET (operands[0], x));
1972 (define_insn "*altivec_vspltb_internal"
1973 [(set (match_operand:V16QI 0 "register_operand" "=v")
1974 (vec_duplicate:V16QI
1975 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
1977 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
1980 /* For true LE, this adjusts the selected index. For LE with
1981 -maltivec=be, this reverses what was done in the define_expand
1982 because the instruction already has big-endian bias. */
1983 if (!BYTES_BIG_ENDIAN)
1984 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
1986 return "vspltb %0,%1,%2";
1988 [(set_attr "type" "vecperm")])
1990 (define_insn "altivec_vspltb_direct"
1991 [(set (match_operand:V16QI 0 "register_operand" "=v")
1992 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1993 (match_operand:QI 2 "u5bit_cint_operand" "i")]
1994 UNSPEC_VSPLT_DIRECT))]
1997 [(set_attr "type" "vecperm")])
;; Halfword splat — same three-layer scheme as vspltb, with the index
;; mirrored as (7 - n) for the 8-element V8HI mode.
1999 (define_expand "altivec_vsplth"
2000 [(use (match_operand:V8HI 0 "register_operand"))
2001 (use (match_operand:V8HI 1 "register_operand"))
2002 (use (match_operand:QI 2 "u5bit_cint_operand"))]
2008 /* Special handling for LE with -maltivec=be. We have to reflect
2009 the actual selected index for the splat in the RTL. */
2010 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2011 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
2013 v = gen_rtvec (1, operands[2]);
2014 x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2015 x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
2016 emit_insn (gen_rtx_SET (operands[0], x));
2020 (define_insn "*altivec_vsplth_internal"
2021 [(set (match_operand:V8HI 0 "register_operand" "=v")
2023 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
2025 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
2028 /* For true LE, this adjusts the selected index. For LE with
2029 -maltivec=be, this reverses what was done in the define_expand
2030 because the instruction already has big-endian bias. */
2031 if (!BYTES_BIG_ENDIAN)
2032 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
2034 return "vsplth %0,%1,%2";
2036 [(set_attr "type" "vecperm")])
2038 (define_insn "altivec_vsplth_direct"
2039 [(set (match_operand:V8HI 0 "register_operand" "=v")
2040 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
2041 (match_operand:QI 2 "u5bit_cint_operand" "i")]
2042 UNSPEC_VSPLT_DIRECT))]
2045 [(set_attr "type" "vecperm")])
;; Word splat — same three-layer scheme as vspltb/vsplth, with the
;; index mirrored as (3 - n) for the 4-element V4SI mode.
2047 (define_expand "altivec_vspltw"
2048 [(use (match_operand:V4SI 0 "register_operand"))
2049 (use (match_operand:V4SI 1 "register_operand"))
2050 (use (match_operand:QI 2 "u5bit_cint_operand"))]
2056 /* Special handling for LE with -maltivec=be. We have to reflect
2057 the actual selected index for the splat in the RTL. */
2058 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2059 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2061 v = gen_rtvec (1, operands[2]);
2062 x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2063 x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
2064 emit_insn (gen_rtx_SET (operands[0], x));
2068 (define_insn "*altivec_vspltw_internal"
2069 [(set (match_operand:V4SI 0 "register_operand" "=v")
2071 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
2073 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
2076 /* For true LE, this adjusts the selected index. For LE with
2077 -maltivec=be, this reverses what was done in the define_expand
2078 because the instruction already has big-endian bias. */
2079 if (!BYTES_BIG_ENDIAN)
2080 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2082 return "vspltw %0,%1,%2";
2084 [(set_attr "type" "vecperm")])
2086 (define_insn "altivec_vspltw_direct"
2087 [(set (match_operand:V4SI 0 "register_operand" "=v")
2088 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2089 (match_operand:QI 2 "u5bit_cint_operand" "i")]
2090 UNSPEC_VSPLT_DIRECT))]
2093 [(set_attr "type" "vecperm")])
;; Float (V4SF) splat — same endian handling as vspltw; the internal
;; insn reuses the integer vspltw instruction since it is a pure
;; 32-bit-lane permute.  Also here: vspltis<x>, splat of a 5-bit
;; signed immediate (vec_duplicate of an s5bit constant).
2095 (define_expand "altivec_vspltsf"
2096 [(use (match_operand:V4SF 0 "register_operand"))
2097 (use (match_operand:V4SF 1 "register_operand"))
2098 (use (match_operand:QI 2 "u5bit_cint_operand"))]
2104 /* Special handling for LE with -maltivec=be. We have to reflect
2105 the actual selected index for the splat in the RTL. */
2106 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2107 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2109 v = gen_rtvec (1, operands[2]);
2110 x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2111 x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
2112 emit_insn (gen_rtx_SET (operands[0], x));
2116 (define_insn "*altivec_vspltsf_internal"
2117 [(set (match_operand:V4SF 0 "register_operand" "=v")
2119 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
2121 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
2122 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2124 /* For true LE, this adjusts the selected index. For LE with
2125 -maltivec=be, this reverses what was done in the define_expand
2126 because the instruction already has big-endian bias. */
2127 if (!BYTES_BIG_ENDIAN)
2128 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2130 return "vspltw %0,%1,%2";
2132 [(set_attr "type" "vecperm")])
2134 (define_insn "altivec_vspltis<VI_char>"
2135 [(set (match_operand:VI 0 "register_operand" "=v")
2137 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
2139 "vspltis<VI_char> %0,%1"
2140 [(set_attr "type" "vecperm")])
;; vrfiz: round each V4SF element to an integral value using
;; truncation toward zero, modeled with the (fix ...) RTL code.
2142 (define_insn "*altivec_vrfiz"
2143 [(set (match_operand:V4SF 0 "register_operand" "=v")
2144 (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
2145 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2147 [(set_attr "type" "vecfloat")])
;; vperm (vector permute) patterns.  The expanders detour through
;; altivec_expand_vec_perm_le when the effective element order is
;; little-endian; the insns offer a VSX alternative ("?wo", with
;; operand 2 tied to the output) alongside the plain AltiVec "v"
;; alternative — vperm is slightly preferred since its target does not
;; overlap a source.  vpermr is the Power9 reversed-permute variant
;; (xxpermr in its VSX alternative).
2149 (define_expand "altivec_vperm_<mode>"
2150 [(set (match_operand:VM 0 "register_operand")
2151 (unspec:VM [(match_operand:VM 1 "register_operand")
2152 (match_operand:VM 2 "register_operand")
2153 (match_operand:V16QI 3 "register_operand")]
2157 if (!VECTOR_ELT_ORDER_BIG)
2159 altivec_expand_vec_perm_le (operands);
2164 ;; Slightly prefer vperm, since the target does not overlap the source
2165 (define_insn "altivec_vperm_<mode>_direct"
2166 [(set (match_operand:VM 0 "register_operand" "=v,?wo")
2167 (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
2168 (match_operand:VM 2 "register_operand" "v,0")
2169 (match_operand:V16QI 3 "register_operand" "v,wo")]
2175 [(set_attr "type" "vecperm")
2176 (set_attr "length" "4")])
2178 (define_insn "altivec_vperm_v8hiv16qi"
2179 [(set (match_operand:V16QI 0 "register_operand" "=v,?wo")
2180 (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,wo")
2181 (match_operand:V8HI 2 "register_operand" "v,0")
2182 (match_operand:V16QI 3 "register_operand" "v,wo")]
2188 [(set_attr "type" "vecperm")
2189 (set_attr "length" "4")])
2191 (define_expand "altivec_vperm_<mode>_uns"
2192 [(set (match_operand:VM 0 "register_operand")
2193 (unspec:VM [(match_operand:VM 1 "register_operand")
2194 (match_operand:VM 2 "register_operand")
2195 (match_operand:V16QI 3 "register_operand")]
2199 if (!VECTOR_ELT_ORDER_BIG)
2201 altivec_expand_vec_perm_le (operands);
2206 (define_insn "*altivec_vperm_<mode>_uns_internal"
2207 [(set (match_operand:VM 0 "register_operand" "=v,?wo")
2208 (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
2209 (match_operand:VM 2 "register_operand" "v,0")
2210 (match_operand:V16QI 3 "register_operand" "v,wo")]
2216 [(set_attr "type" "vecperm")
2217 (set_attr "length" "4")])
2219 (define_expand "vec_permv16qi"
2220 [(set (match_operand:V16QI 0 "register_operand")
2221 (unspec:V16QI [(match_operand:V16QI 1 "register_operand")
2222 (match_operand:V16QI 2 "register_operand")
2223 (match_operand:V16QI 3 "register_operand")]
2227 if (!BYTES_BIG_ENDIAN) {
2228 altivec_expand_vec_perm_le (operands);
2233 (define_insn "*altivec_vpermr_<mode>_internal"
2234 [(set (match_operand:VM 0 "register_operand" "=v,?wo")
2235 (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
2236 (match_operand:VM 2 "register_operand" "v,0")
2237 (match_operand:V16QI 3 "register_operand" "v,wo")]
2242 xxpermr %x0,%x1,%x3"
2243 [(set_attr "type" "vecperm")
2244 (set_attr "length" "4")])
;; V4SF floating-point operations, all unspecs over one V4SF input
;; (plus a QI scale immediate for the conversions):
;;  - vrfip/vrfin/vrfim: round to integral value (ceil / nearest per
;;    the instruction names / floor);
;;  - vcfux/vcfsx: unsigned/signed fixed-point word -> float, with a
;;    scale immediate;
;;  - vctuxs/vctsxs: float -> unsigned/signed fixed-point word with
;;    saturation — these also set VSCR (saturation side effect);
;;  - vlogefp/vexptefp/vrsqrtefp: estimate instructions (log2, 2**x,
;;    reciprocal square root, per their names).
2246 (define_insn "altivec_vrfip" ; ceil
2247 [(set (match_operand:V4SF 0 "register_operand" "=v")
2248 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2252 [(set_attr "type" "vecfloat")])
2254 (define_insn "altivec_vrfin"
2255 [(set (match_operand:V4SF 0 "register_operand" "=v")
2256 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2260 [(set_attr "type" "vecfloat")])
2262 (define_insn "*altivec_vrfim" ; floor
2263 [(set (match_operand:V4SF 0 "register_operand" "=v")
2264 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2268 [(set_attr "type" "vecfloat")])
2270 (define_insn "altivec_vcfux"
2271 [(set (match_operand:V4SF 0 "register_operand" "=v")
2272 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2273 (match_operand:QI 2 "immediate_operand" "i")]
2277 [(set_attr "type" "vecfloat")])
2279 (define_insn "altivec_vcfsx"
2280 [(set (match_operand:V4SF 0 "register_operand" "=v")
2281 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2282 (match_operand:QI 2 "immediate_operand" "i")]
2286 [(set_attr "type" "vecfloat")])
2288 (define_insn "altivec_vctuxs"
2289 [(set (match_operand:V4SI 0 "register_operand" "=v")
2290 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2291 (match_operand:QI 2 "immediate_operand" "i")]
2293 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2296 [(set_attr "type" "vecfloat")])
2298 (define_insn "altivec_vctsxs"
2299 [(set (match_operand:V4SI 0 "register_operand" "=v")
2300 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2301 (match_operand:QI 2 "immediate_operand" "i")]
2303 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2306 [(set_attr "type" "vecfloat")])
2308 (define_insn "altivec_vlogefp"
2309 [(set (match_operand:V4SF 0 "register_operand" "=v")
2310 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2314 [(set_attr "type" "vecfloat")])
2316 (define_insn "altivec_vexptefp"
2317 [(set (match_operand:V4SF 0 "register_operand" "=v")
2318 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2322 [(set_attr "type" "vecfloat")])
2324 (define_insn "*altivec_vrsqrtefp"
2325 [(set (match_operand:V4SF 0 "register_operand" "=v")
2326 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2328 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2330 [(set_attr "type" "vecfloat")])
2332 (define_insn "altivec_vrefp"
2333 [(set (match_operand:V4SF 0 "register_operand" "=v")
2334 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2336 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2338 [(set_attr "type" "vecfloat")])
;; copysign for V4SF: build a sign-bit mask (0x80000000 in each word),
;; then use a vector select to merge the sign bits of operand 2 into
;; the magnitudes of operand 1.
2340 (define_expand "altivec_copysign_v4sf3"
2341   [(use (match_operand:V4SF 0 "register_operand"))
2342    (use (match_operand:V4SF 1 "register_operand"))
2343    (use (match_operand:V4SF 2 "register_operand"))]
2344   "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2346   rtx mask = gen_reg_rtx (V4SImode);
2347   rtvec v = rtvec_alloc (4);
2348   unsigned HOST_WIDE_INT mask_val = ((unsigned HOST_WIDE_INT)1) << 31;
  ;; Replicate the sign-bit constant into all four vector elements.
2350   RTVEC_ELT (v, 0) = GEN_INT (mask_val);
2351   RTVEC_ELT (v, 1) = GEN_INT (mask_val);
2352   RTVEC_ELT (v, 2) = GEN_INT (mask_val);
2353   RTVEC_ELT (v, 3) = GEN_INT (mask_val);
2355   emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
2356   emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
2357 				     gen_lowpart (V4SFmode, mask)));
;; vsldoi: shift the double-width concatenation of operands 1 and 2
;; left by the immediate byte count in operand 3.
2361 (define_insn "altivec_vsldoi_<mode>"
2362   [(set (match_operand:VM 0 "register_operand" "=v")
2363         (unspec:VM [(match_operand:VM 1 "register_operand" "v")
2364 		    (match_operand:VM 2 "register_operand" "v")
2365 		    (match_operand:QI 3 "immediate_operand" "i")]
2368   "vsldoi %0,%1,%2,%3"
2369   [(set_attr "type" "vecperm")])
;; Signed unpack high/low.  For little-endian element order the "high"
;; pattern emits the "low" hardware instruction and vice versa, because
;; element numbering is reversed; the *_direct variants always emit the
;; named instruction regardless of endianness.
2371 (define_insn "altivec_vupkhs<VU_char>"
2372   [(set (match_operand:VP 0 "register_operand" "=v")
2373 	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2374 		     UNSPEC_VUNPACK_HI_SIGN))]
2377   if (VECTOR_ELT_ORDER_BIG)
2378     return "vupkhs<VU_char> %0,%1";
2380     return "vupkls<VU_char> %0,%1";
2382   [(set_attr "type" "vecperm")])

2384 (define_insn "*altivec_vupkhs<VU_char>_direct"
2385   [(set (match_operand:VP 0 "register_operand" "=v")
2386 	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2387 		     UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
2389   "vupkhs<VU_char> %0,%1"
2390   [(set_attr "type" "vecperm")])

2392 (define_insn "altivec_vupkls<VU_char>"
2393   [(set (match_operand:VP 0 "register_operand" "=v")
2394 	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2395 		     UNSPEC_VUNPACK_LO_SIGN))]
2398   if (VECTOR_ELT_ORDER_BIG)
2399     return "vupkls<VU_char> %0,%1";
2401     return "vupkhs<VU_char> %0,%1";
2403   [(set_attr "type" "vecperm")])

2405 (define_insn "*altivec_vupkls<VU_char>_direct"
2406   [(set (match_operand:VP 0 "register_operand" "=v")
2407 	(unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2408 		     UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
2410   "vupkls<VU_char> %0,%1"
2411   [(set_attr "type" "vecperm")])

;; Pixel unpack (V8HI pixels -> V4SI); same high/low endian swap as the
;; signed unpacks above.
2413 (define_insn "altivec_vupkhpx"
2414   [(set (match_operand:V4SI 0 "register_operand" "=v")
2415 	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2419   if (VECTOR_ELT_ORDER_BIG)
2420     return "vupkhpx %0,%1";
2422     return "vupklpx %0,%1";
2424   [(set_attr "type" "vecperm")])

2426 (define_insn "altivec_vupklpx"
2427   [(set (match_operand:V4SI 0 "register_operand" "=v")
2428 	(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2432   if (VECTOR_ELT_ORDER_BIG)
2433     return "vupklpx %0,%1";
2435     return "vupkhpx %0,%1";
2437   [(set_attr "type" "vecperm")])
2439 ;; Compare vectors producing a vector result and a predicate, setting CR6 to
2440 ;; indicate a combined status
;; Each pattern is a parallel: the "dot form" compare instruction writes
;; the element-wise result vector AND records the all/none summary in
;; CR6 (modeled as the unspec on CR6_REGNO).
2441 (define_insn "*altivec_vcmpequ<VI_char>_p"
2442   [(set (reg:CC CR6_REGNO)
2443 	(unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
2444 			   (match_operand:VI2 2 "register_operand" "v"))]
2446    (set (match_operand:VI2 0 "register_operand" "=v")
2447 	(eq:VI2 (match_dup 1)
2450   "vcmpequ<VI_char>. %0,%1,%2"
2451   [(set_attr "type" "veccmpfx")])

2453 (define_insn "*altivec_vcmpgts<VI_char>_p"
2454   [(set (reg:CC CR6_REGNO)
2455 	(unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
2456 			   (match_operand:VI2 2 "register_operand" "v"))]
2458    (set (match_operand:VI2 0 "register_operand" "=v")
2459 	(gt:VI2 (match_dup 1)
2462   "vcmpgts<VI_char>. %0,%1,%2"
2463   [(set_attr "type" "veccmpfx")])

2465 (define_insn "*altivec_vcmpgtu<VI_char>_p"
2466   [(set (reg:CC CR6_REGNO)
2467 	(unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
2468 			    (match_operand:VI2 2 "register_operand" "v"))]
2470    (set (match_operand:VI2 0 "register_operand" "=v")
2471 	(gtu:VI2 (match_dup 1)
2474   "vcmpgtu<VI_char>. %0,%1,%2"
2475   [(set_attr "type" "veccmpfx")])

;; Floating-point predicate compares (eq/gt/ge), same CR6 scheme.
2477 (define_insn "*altivec_vcmpeqfp_p"
2478   [(set (reg:CC CR6_REGNO)
2479 	(unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
2480 			   (match_operand:V4SF 2 "register_operand" "v"))]
2482    (set (match_operand:V4SF 0 "register_operand" "=v")
2483 	(eq:V4SF (match_dup 1)
2485   "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2486   "vcmpeqfp. %0,%1,%2"
2487   [(set_attr "type" "veccmp")])

2489 (define_insn "*altivec_vcmpgtfp_p"
2490   [(set (reg:CC CR6_REGNO)
2491 	(unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
2492 			   (match_operand:V4SF 2 "register_operand" "v"))]
2494    (set (match_operand:V4SF 0 "register_operand" "=v")
2495 	(gt:V4SF (match_dup 1)
2497   "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2498   "vcmpgtfp. %0,%1,%2"
2499   [(set_attr "type" "veccmp")])

2501 (define_insn "*altivec_vcmpgefp_p"
2502   [(set (reg:CC CR6_REGNO)
2503 	(unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
2504 			   (match_operand:V4SF 2 "register_operand" "v"))]
2506    (set (match_operand:V4SF 0 "register_operand" "=v")
2507 	(ge:V4SF (match_dup 1)
2509   "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2510   "vcmpgefp. %0,%1,%2"
2511   [(set_attr "type" "veccmp")])

;; Bounds compare with predicate (vcmpbfp.); both the vector result and
;; CR6 are produced from the same pair of inputs.
2513 (define_insn "altivec_vcmpbfp_p"
2514   [(set (reg:CC CR6_REGNO)
2515 	(unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
2516 		    (match_operand:V4SF 2 "register_operand" "v")]
2518    (set (match_operand:V4SF 0 "register_operand" "=v")
2519         (unspec:V4SF [(match_dup 1)
2522   "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
2524   [(set_attr "type" "veccmp")])
;; Move to/from the Vector Status and Control Register.  These are
;; unspec_volatile because VSCR access must not be reordered or deleted.
2526 (define_insn "altivec_mtvscr"
2527   [(set (reg:SI VSCR_REGNO)
2529 	 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
2532   [(set_attr "type" "vecsimple")])

2534 (define_insn "altivec_mfvscr"
2535   [(set (match_operand:V8HI 0 "register_operand" "=v")
2536         (unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
2539   [(set_attr "type" "vecsimple")])

;; Data-stream prefetch control: dssall stops all streams, dss stops the
;; stream named by the immediate operand.
2541 (define_insn "altivec_dssall"
2542   [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
2545   [(set_attr "type" "vecsimple")])

2547 (define_insn "altivec_dss"
2548   [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
2552   [(set_attr "type" "vecsimple")])

;; Data-stream touch variants (dst/dstt/dstst/dststt): operand 0 is the
;; base address (must be Pmode), operand 1 the stream control word,
;; operand 2 the stream id immediate.
2554 (define_insn "altivec_dst"
2555   [(unspec [(match_operand 0 "register_operand" "b")
2556 	    (match_operand:SI 1 "register_operand" "r")
2557 	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
2558   "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2560   [(set_attr "type" "vecsimple")])

2562 (define_insn "altivec_dstt"
2563   [(unspec [(match_operand 0 "register_operand" "b")
2564 	    (match_operand:SI 1 "register_operand" "r")
2565 	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
2566   "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2568   [(set_attr "type" "vecsimple")])

2570 (define_insn "altivec_dstst"
2571   [(unspec [(match_operand 0 "register_operand" "b")
2572 	    (match_operand:SI 1 "register_operand" "r")
2573 	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
2574   "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2576   [(set_attr "type" "vecsimple")])

2578 (define_insn "altivec_dststt"
2579   [(unspec [(match_operand 0 "register_operand" "b")
2580 	    (match_operand:SI 1 "register_operand" "r")
2581 	    (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
2582   "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2584   [(set_attr "type" "vecsimple")])
;; lvsl: load vector for shift left.  On big-endian element order the
;; hardware instruction is used directly; on little endian the mask is
;; post-processed with a vperm against the 0..15 byte series so callers
;; see BE semantics.
2586 (define_expand "altivec_lvsl"
2587   [(use (match_operand:V16QI 0 "register_operand"))
2588    (use (match_operand:V16QI 1 "memory_operand"))]
2591   if (VECTOR_ELT_ORDER_BIG)
2592     emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
2595       rtx mask, constv, vperm;
2596       mask = gen_reg_rtx (V16QImode);
2597       emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
      ;; Build the constant vector {0, 1, 2, ..., 15}.
2598       constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2599       constv = force_reg (V16QImode, constv);
2600       vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2602       emit_insn (gen_rtx_SET (operands[0], vperm));

2607 (define_insn "altivec_lvsl_reg"
2608   [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2610 	[(match_operand:DI 1 "gpc_reg_operand" "b")]
2614   [(set_attr "type" "vecload")])

;; Raw lvsl, no endian adjustment.
2616 (define_insn "altivec_lvsl_direct"
2617   [(set (match_operand:V16QI 0 "register_operand" "=v")
2618 	(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2622   [(set_attr "type" "vecload")])

;; lvsr: load vector for shift right; mirrors the lvsl expander above.
2624 (define_expand "altivec_lvsr"
2625   [(use (match_operand:V16QI 0 "altivec_register_operand"))
2626    (use (match_operand:V16QI 1 "memory_operand"))]
2629   if (VECTOR_ELT_ORDER_BIG)
2630     emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
2633       rtx mask, constv, vperm;
2634       mask = gen_reg_rtx (V16QImode);
2635       emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
2636       constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2637       constv = force_reg (V16QImode, constv);
2638       vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2640       emit_insn (gen_rtx_SET (operands[0], vperm));

2645 (define_insn "altivec_lvsr_reg"
2646   [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2648 	[(match_operand:DI 1 "gpc_reg_operand" "b")]
2652   [(set_attr "type" "vecload")])

2654 (define_insn "altivec_lvsr_direct"
2655   [(set (match_operand:V16QI 0 "register_operand" "=v")
2656 	(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2660   [(set_attr "type" "vecload")])

;; Build a permute mask for a realignment load: negate the address and
;; feed it to lvsr, yielding the mask the vectorizer uses to realign
;; misaligned vector loads.
2662 (define_expand "build_vector_mask_for_load"
2663   [(set (match_operand:V16QI 0 "register_operand")
2664 	(unspec:V16QI [(match_operand 1 "memory_operand")] UNSPEC_LVSR))]
2670   gcc_assert (GET_CODE (operands[1]) == MEM);
2672   addr = XEXP (operands[1], 0);
2673   temp = gen_reg_rtx (GET_MODE (addr));
2674   emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
2675   emit_insn (gen_altivec_lvsr (operands[0],
2676 			       replace_equiv_address (operands[1], temp)));
2680 ;; Parallel some of the LVE* and STV*'s with unspecs because some have
2681 ;; identical rtl but different instructions-- and gcc gets confused.

;; lve*x element loads; for LE-with-BE-element-order the expander calls
;; altivec_expand_lvx_be to insert the needed element swap.
2683 (define_expand "altivec_lve<VI_char>x"
2685      [(set (match_operand:VI 0 "register_operand" "=v")
2686 	   (match_operand:VI 1 "memory_operand" "Z"))
2687       (unspec [(const_int 0)] UNSPEC_LVE)])]
2690   if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2692       altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVE);

2697 (define_insn "*altivec_lve<VI_char>x_internal"
2699      [(set (match_operand:VI 0 "register_operand" "=v")
2700 	   (match_operand:VI 1 "memory_operand" "Z"))
2701       (unspec [(const_int 0)] UNSPEC_LVE)])]
2703   "lve<VI_char>x %0,%y1"
2704   [(set_attr "type" "vecload")])

2706 (define_insn "*altivec_lvesfx"
2708      [(set (match_operand:V4SF 0 "register_operand" "=v")
2709 	   (match_operand:V4SF 1 "memory_operand" "Z"))
2710       (unspec [(const_int 0)] UNSPEC_LVE)])]
2713   [(set_attr "type" "vecload")])

;; lvxl (load vector indexed LRU) with the same BE-element fixup.
2715 (define_expand "altivec_lvxl_<mode>"
2717      [(set (match_operand:VM2 0 "register_operand" "=v")
2718 	   (match_operand:VM2 1 "memory_operand" "Z"))
2719       (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
2722   if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2724       altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR);

2729 (define_insn "*altivec_lvxl_<mode>_internal"
2731      [(set (match_operand:VM2 0 "register_operand" "=v")
2732 	   (match_operand:VM2 1 "memory_operand" "Z"))
2733       (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
2736   [(set_attr "type" "vecload")])

2738 ; This version of lvx is used only in cases where we need to force an lvx
2739 ; over any other load, and we don't care about losing CSE opportunities.
2740 ; Its primary use is for prologue register saves.
2741 (define_insn "altivec_lvx_<mode>_internal"
2743      [(set (match_operand:VM2 0 "register_operand" "=v")
2744 	   (match_operand:VM2 1 "memory_operand" "Z"))
2745       (unspec [(const_int 0)] UNSPEC_LVX)])]
2748   [(set_attr "type" "vecload")])

2750 ; The following patterns embody what lvx should usually look like.
;; Split the address into reg+reg (2op) or single-reg (1op) forms, with
;; DI/SI variants chosen by the pointer mode.
2751 (define_expand "altivec_lvx_<VM2:mode>"
2752   [(set (match_operand:VM2 0 "register_operand")
2753 	(match_operand:VM2 1 "altivec_indexed_or_indirect_operand"))]
2756   rtx addr = XEXP (operand1, 0);
2757   if (rs6000_sum_of_two_registers_p (addr))
2759       rtx op1 = XEXP (addr, 0);
2760       rtx op2 = XEXP (addr, 1);
2762 	emit_insn (gen_altivec_lvx_<VM2:mode>_2op_di (operand0, op1, op2));
2764 	emit_insn (gen_altivec_lvx_<VM2:mode>_2op_si (operand0, op1, op2));
2769 	emit_insn (gen_altivec_lvx_<VM2:mode>_1op_di (operand0, addr));
2771 	emit_insn (gen_altivec_lvx_<VM2:mode>_1op_si (operand0, addr));

2776 ; The next two patterns embody what lvx should usually look like.
;; The AND in the address models lvx's implicit 16-byte alignment
;; truncation of the effective address.
2777 (define_insn "altivec_lvx_<VM2:mode>_2op_<P:mptrsize>"
2778   [(set (match_operand:VM2 0 "register_operand" "=v")
2779         (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
2780                                 (match_operand:P 2 "register_operand" "r"))
2784   [(set_attr "type" "vecload")])

2786 (define_insn "altivec_lvx_<VM2:mode>_1op_<P:mptrsize>"
2787   [(set (match_operand:VM2 0 "register_operand" "=v")
2788         (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
2792   [(set_attr "type" "vecload")])
2794 ; This version of stvx is used only in cases where we need to force an stvx
2795 ; over any other store, and we don't care about losing CSE opportunities.
2796 ; Its primary use is for epilogue register restores.
2797 (define_insn "altivec_stvx_<mode>_internal"
2799      [(set (match_operand:VM2 0 "memory_operand" "=Z")
2800 	   (match_operand:VM2 1 "register_operand" "v"))
2801       (unspec [(const_int 0)] UNSPEC_STVX)])]
2804   [(set_attr "type" "vecstore")])

2806 ; The following patterns embody what stvx should usually look like.
;; Mirror of the lvx expander: pick reg+reg (2op) or single-reg (1op)
;; form with the DI/SI pointer-mode variant.
2807 (define_expand "altivec_stvx_<VM2:mode>"
2808   [(set (match_operand:VM2 1 "altivec_indexed_or_indirect_operand")
2809 	(match_operand:VM2 0 "register_operand"))]
2812   rtx addr = XEXP (operand1, 0);
2813   if (rs6000_sum_of_two_registers_p (addr))
2815       rtx op1 = XEXP (addr, 0);
2816       rtx op2 = XEXP (addr, 1);
2818 	emit_insn (gen_altivec_stvx_<VM2:mode>_2op_di (operand0, op1, op2));
2820 	emit_insn (gen_altivec_stvx_<VM2:mode>_2op_si (operand0, op1, op2));
2825 	emit_insn (gen_altivec_stvx_<VM2:mode>_1op_di (operand0, addr));
2827 	emit_insn (gen_altivec_stvx_<VM2:mode>_1op_si (operand0, addr));

2832 ; The next two patterns embody what stvx should usually look like.
;; The AND in the address models stvx's implicit 16-byte alignment
;; truncation of the effective address.
2833 (define_insn "altivec_stvx_<VM2:mode>_2op_<P:mptrsize>"
2834   [(set (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b")
2835                                 (match_operand:P 2 "register_operand" "r"))
2837         (match_operand:VM2 0 "register_operand" "v"))]
2840   [(set_attr "type" "vecstore")])

2842 (define_insn "altivec_stvx_<VM2:mode>_1op_<P:mptrsize>"
2843   [(set (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r")
2845         (match_operand:VM2 0 "register_operand" "v"))]
2848   [(set_attr "type" "vecstore")])

;; stvxl with the BE-element-order store fixup.
2850 (define_expand "altivec_stvxl_<mode>"
2852      [(set (match_operand:VM2 0 "memory_operand" "=Z")
2853 	   (match_operand:VM2 1 "register_operand" "v"))
2854       (unspec [(const_int 0)] UNSPEC_STVXL)])]
2857   if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2859       altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL);

2864 (define_insn "*altivec_stvxl_<mode>_internal"
2866      [(set (match_operand:VM2 0 "memory_operand" "=Z")
2867 	   (match_operand:VM2 1 "register_operand" "v"))
2868       (unspec [(const_int 0)] UNSPEC_STVXL)])]
2871   [(set_attr "type" "vecstore")])

;; stve*x element stores, with BE-element fixup in the expander.
2873 (define_expand "altivec_stve<VI_char>x"
2874   [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
2875 	(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
2878   if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2880       altivec_expand_stvex_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVE);

2885 (define_insn "*altivec_stve<VI_char>x_internal"
2886   [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
2887 	(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
2889   "stve<VI_char>x %1,%y0"
2890   [(set_attr "type" "vecstore")])

2892 (define_insn "*altivec_stvesfx"
2893   [(set (match_operand:SF 0 "memory_operand" "=Z")
2894 	(unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
2897   [(set_attr "type" "vecstore")])
2900 ;; signed int/float to double convert words 0 and 2
;; "doublee" converts the even-numbered words of a V4SI/V4SF to V2DF.
;; On LE the input is first rotated one word with xxsldwi so that the
;; words the convert instruction reads are the desired ones.
2901 (define_expand "doublee<mode>2"
2902   [(set (match_operand:V2DF 0 "register_operand" "=v")
2903 	(match_operand:VSX_W 1 "register_operand" "v"))]
2906   machine_mode op_mode = GET_MODE (operands[1]);
2908   if (VECTOR_ELT_ORDER_BIG)
2910       /* Big endian word numbering for words in operand is 0 1 2 3.
2911 	 Input words 0 and 2 are where they need to be.  */
2912       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
2916       /* Little endian word numbering for operand is 3 2 1 0.
2917 	 take (operand[1] operand[1]) and shift left one word
2918 	 3 2 1 0    3 2 1 0  =>  2 1 0 3
2919 	 Input words 2 and 0 are now where they need to be for the
2922       rtx rtx_val = GEN_INT (1);
2924       rtx_tmp = gen_reg_rtx (op_mode);
2925       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
2926 					 operands[1], rtx_val));
2927       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
2931   [(set_attr "type" "veccomplex")])

2933 ;; Generate unsdoublee
2934 ;; unsigned int to double convert words 0 and 2
;; Unsigned variant of doublee, fixed to V4SI input (xvcvuxwdp).
2935 (define_expand "unsdoubleev4si2"
2936   [(set (match_operand:V2DF 0 "register_operand" "=v")
2937 	(match_operand:V4SI 1 "register_operand" "v"))]
2940   if (VECTOR_ELT_ORDER_BIG)
2942       /* Big endian word numbering for words in operand is 0 1 2 3.
2943 	 Input words 0 and 2 are where they need to be.  */
2944       emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
2948       /* Little endian word numbering for operand is 3 2 1 0.
2949 	 take (operand[1] operand[1]) and shift left one word
2950 	 3 2 1 0   3 2 1 0  =>   2 1 0 3
2951 	 Input words 2 and 0 are now where they need to be for the
2954       rtx rtx_val = GEN_INT (1);
2956       rtx_tmp = gen_reg_rtx (V4SImode);
2957       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
2958 				       operands[1], rtx_val));
2959       emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
2963   [(set_attr "type" "veccomplex")])

2965 ;; Generate doubleov
2966 ;; signed int/float to double convert words 1 and 3
;; "doubleo" converts the odd-numbered words; the rotate happens on BE
;; here (the mirror image of doublee above).
2967 (define_expand "doubleo<mode>2"
2968   [(set (match_operand:V2DF 0 "register_operand" "=v")
2969 	(match_operand:VSX_W 1 "register_operand" "v"))]
2972   machine_mode op_mode = GET_MODE (operands[1]);
2974   if (VECTOR_ELT_ORDER_BIG)
2976       /* Big endian word numbering for words in operand is 0 1 2 3.
2977 	 take (operand[1] operand[1]) and shift left one word
2978 	 0 1 2 3    0 1 2 3  =>  1 2 3 0
2979 	 Input words 1 and 3 are now where they need to be for the
2982       rtx rtx_val = GEN_INT (1);
2984       rtx_tmp = gen_reg_rtx (op_mode);
2985       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
2986 					 operands[1], rtx_val));
2987       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
2991       /* Little endian word numbering for operand is 3 2 1 0.
2992 	 Input words 3 and 1 are where they need to be.  */
2993       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
2997   [(set_attr "type" "veccomplex")])

2999 ;; Generate unsdoubleov
3000 ;; unsigned int to double convert words 1 and 3
3001 (define_expand "unsdoubleov4si2"
3002   [(set (match_operand:V2DF 0 "register_operand" "=v")
3003 	(match_operand:V4SI 1 "register_operand" "v"))]
3006   if (VECTOR_ELT_ORDER_BIG)
3008       /* Big endian word numbering for words in operand is 0 1 2 3.
3009 	 take (operand[1] operand[1]) and shift left one word
3010 	 0 1 2 3   0 1 2 3  =>  1 2 3 0
3011 	 Input words 1 and 3 are now where they need to be for the
3014       rtx rtx_val = GEN_INT (1);
3016       rtx_tmp = gen_reg_rtx (V4SImode);
3017       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3018 				       operands[1], rtx_val));
3019       emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3023       /* Want to convert the words 1 and 3.
3024 	 Little endian word numbering for operand is 3 2 1 0.
3025 	 Input words 3 and 1 are where they need to be.  */
3026       emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
3030   [(set_attr "type" "veccomplex")])
3032 ;; Generate doublehv
3033 ;; signed int/float to double convert words 0 and 1
;; "doubleh"/"doublel" convert the high (0,1) or low (2,3) word pair to
;; V2DF; each endianness uses a two-step xxsldwi shuffle to place the
;; target words where the convert instruction reads them.
3034 (define_expand "doubleh<mode>2"
3035   [(set (match_operand:V2DF 0 "register_operand" "=v")
3036 	(match_operand:VSX_W 1 "register_operand" "v"))]
3042   machine_mode op_mode = GET_MODE (operands[1]);
3043   rtx_tmp = gen_reg_rtx (op_mode);
3045   if (VECTOR_ELT_ORDER_BIG)
3047       /* Big endian word numbering for words in operand is 0 1 2 3.
3048 	 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3049 	 take (rts_tmp operand[1]) and shift left three words
3050 	 1 2 3 0  0 1 2 3 => 0 0 1 2
3051 	 Input words 0 and 1 are now where they need to be for the
3053       rtx_val = GEN_INT (1);
3054       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3055 					 operands[1], rtx_val));
3057       rtx_val = GEN_INT (3);
3058       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3059 					 operands[1], rtx_val));
3060       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3064       /* Little endian word numbering for operand is 3 2 1 0.
3065 	 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3066 	 take (operand[1] rts_tmp) and shift left two words
3067 	 3 2 1 0  0 3 2 1   =>  1 0 0 3
3068 	 Input words 0 and 1 are now where they need to be for the
3070       rtx_val = GEN_INT (3);
3071       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3072 					 operands[1], rtx_val));
3074       rtx_val = GEN_INT (2);
3075       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3077       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3081   [(set_attr "type" "veccomplex")])

3083 ;; Generate unsdoublehv
3084 ;; unsigned int to double convert words 0 and 1
3085 (define_expand "unsdoublehv4si2"
3086   [(set (match_operand:V2DF 0 "register_operand" "=v")
3087 	(match_operand:V4SI 1 "register_operand" "v"))]
3090   rtx rtx_tmp = gen_reg_rtx (V4SImode);
3091   rtx rtx_val = GEN_INT (12);
3093   if (VECTOR_ELT_ORDER_BIG)
3095       /* Big endian word numbering for words in operand is 0 1 2 3.
3096 	 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3097 	 take (rts_tmp operand[1]) and shift left three words
3098 	 1 2 3 0  0 1 2 3 => 0 0 1 2
3099 	 Input words 0 and 1 are now where they need to be for the
3101       rtx_val = GEN_INT (1);
3102       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3103 				       operands[1], rtx_val));
3105       rtx_val = GEN_INT (3);
3106       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3107 				       operands[1], rtx_val));
3108       emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3112       /* Little endian word numbering for operand is 3 2 1 0.
3113 	 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3114 	 take (operand[1] rts_tmp) and shift left two words
3115 	 3 2 1 0   0 3 2 1  =>   1 0 0 3
3116 	 Input words 1 and 0 are now where they need to be for the
3118       rtx_val = GEN_INT (3);
3120       rtx_tmp = gen_reg_rtx (V4SImode);
3121       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3122 				       operands[1], rtx_val));
3124       rtx_val = GEN_INT (2);
3125       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3127       emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3131   [(set_attr "type" "veccomplex")])

3133 ;; Generate doublelv
3134 ;; signed int/float to double convert words 2 and 3
3135 (define_expand "doublel<mode>2"
3136   [(set (match_operand:V2DF 0 "register_operand" "=v")
3137 	(match_operand:VSX_W 1 "register_operand" "v"))]
3141   rtx rtx_val = GEN_INT (3);
3143   machine_mode op_mode = GET_MODE (operands[1]);
3144   rtx_tmp = gen_reg_rtx (op_mode);
3146   if (VECTOR_ELT_ORDER_BIG)
3148       /* Big endian word numbering for operand is 0 1 2 3.
3149 	 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3150 	 take (operand[1] rtx_tmp) and shift left two words
3151 	 0 1 2 3   3 0 1 2  =>  2 3 3 0
3152 	 now use convert instruction to convert word 2 and 3 in the
3154       rtx_val = GEN_INT (3);
3155       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3156 					 operands[1], rtx_val));
3158       rtx_val = GEN_INT (2);
3159       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3161       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3165       /* Little endian word numbering for operand is 3 2 1 0.
3166 	 Shift operand left one word, rtx_tmp word order is now  2 1 0 3.
3167 	 take (rtx_tmp operand[1]) and shift left three words
3168 	 2 1 0 3  3 2 1 0  =>  3 3 2 1
3169 	 now use convert instruction to convert word 3 and 2 in the
3171       rtx_val = GEN_INT (1);
3172       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3173 					 operands[1], rtx_val));
3175       rtx_val = GEN_INT (3);
3176       emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3177 					 operands[1], rtx_val));
3178       emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3182   [(set_attr "type" "veccomplex")])

3184 ;; Generate unsdoublelv
3185 ;; unsigned int to double convert convert 2 and 3
3186 (define_expand "unsdoublelv4si2"
3187   [(set (match_operand:V2DF 0 "register_operand" "=v")
3188 	(match_operand:V4SI 1 "register_operand" "v"))]
3191   rtx rtx_tmp = gen_reg_rtx (V4SImode);
3192   rtx rtx_val = GEN_INT (12);
3194   if (VECTOR_ELT_ORDER_BIG)
3196       /* Big endian word numbering for operand is 0 1 2 3.
3197 	 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3198 	 take (operand[1] rtx_tmp) and shift left two words
3199 	 0 1 2 3   3 0 1 2  =>  2 3 3 0
3200 	 now use convert instruction to convert word 2 and 3 in the
3202       rtx_val = GEN_INT (3);
3203       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3204 				       operands[1], rtx_val));
3206       rtx_val = GEN_INT (2);
3207       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3209       emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3213       /* Little endian word numbering for operand is 3 2 1 0.
3214 	 Shift operand left one word, rtx_tmp word order is now  2 1 0 3.
3215 	 take (rtx_tmp operand[1]) and shift left three words
3216 	 2 1 0 3  3 2 1 0  =>   3 3 2 1
3217 	 now use convert instruction to convert word 3 and 2 in the
3219       rtx_val = GEN_INT (1);
3220       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp,
3221 				       operands[1], operands[1], rtx_val));
3223       rtx_val = GEN_INT (3);
3224       emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3225 				       operands[1], rtx_val));
3226       emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3230   [(set_attr "type" "veccomplex")])
3232 ;; Generate two vector F32 converted to packed vector I16 vector
;; Convert each V4SF input to unsigned V4SI with vctuxs (scale 0), then
;; pack both with signed-saturating vpkswss into one V8HI result.
3233 (define_expand "convert_4f32_8i16"
3234   [(set (match_operand:V8HI 0 "register_operand" "=v")
3235 	(unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3236 		      (match_operand:V4SF 2 "register_operand" "v")]
3237 		     UNSPEC_CONVERT_4F32_8I16))]
3240   rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3241   rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3243   emit_insn (gen_altivec_vctuxs (rtx_tmp_hi, operands[1], const0_rtx));
3244   emit_insn (gen_altivec_vctuxs (rtx_tmp_lo, operands[2], const0_rtx));
3245   emit_insn (gen_altivec_vpkswss (operands[0], rtx_tmp_hi, rtx_tmp_lo));
;; Integer abs via: zero - x, then smax(x, -x).
3250 ;; xxlxor/vxor SCRATCH0,SCRATCH0,SCRATCH0
3251 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3252 ;; vmaxs? %0,%1,SCRATCH2"
3253 (define_expand "abs<mode>2"
3254   [(set (match_dup 2) (match_dup 3))
3256 	(minus:VI2 (match_dup 2)
3257 		   (match_operand:VI2 1 "register_operand" "v")))
3258    (set (match_operand:VI2 0 "register_operand" "=v")
3259         (smax:VI2 (match_dup 1) (match_dup 4)))]
3262   operands[2] = gen_reg_rtx (<MODE>mode);
3263   operands[3] = CONST0_RTX (<MODE>mode);
3264   operands[4] = gen_reg_rtx (<MODE>mode);

;; Negated abs: same construction, but smin picks -|x|.
3268 ;; vspltisw SCRATCH1,0
3269 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3270 ;; vmins? %0,%1,SCRATCH2"
3271 (define_expand "nabs<mode>2"
3272   [(set (match_dup 2) (match_dup 3))
3274 	(minus:VI2 (match_dup 2)
3275 		   (match_operand:VI2 1 "register_operand" "v")))
3276    (set (match_operand:VI2 0 "register_operand" "=v")
3277         (smin:VI2 (match_dup 1) (match_dup 4)))]
3280   operands[2] = gen_reg_rtx (<MODE>mode);
3281   operands[3] = CONST0_RTX (<MODE>mode);
3282   operands[4] = gen_reg_rtx (<MODE>mode);

;; Float abs: build the sign-bit mask as (-1 << -1) per element, then
;; clear the sign bits with andc.
3286 ;; vspltisw SCRATCH1,-1
3287 ;; vslw SCRATCH2,SCRATCH1,SCRATCH1
3288 ;; vandc %0,%1,SCRATCH2
3289 (define_expand "altivec_absv4sf2"
3291 	(vec_duplicate:V4SI (const_int -1)))
3293 	(ashift:V4SI (match_dup 2) (match_dup 2)))
3294    (set (match_operand:V4SF 0 "register_operand" "=v")
3295         (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
3296                   (match_operand:V4SF 1 "register_operand" "v")))]
3299   operands[2] = gen_reg_rtx (V4SImode);
3300   operands[3] = gen_reg_rtx (V4SImode);

;; Saturating abs: the saturating subtract also sets VSCR, hence the
;; parallel with the UNSPEC_SET_VSCR side effect.
3304 ;; vspltis? SCRATCH0,0
3305 ;; vsubs?s SCRATCH2,SCRATCH1,%1
3306 ;; vmaxs? %0,%1,SCRATCH2"
3307 (define_expand "altivec_abss_<mode>"
3308   [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
3309    (parallel [(set (match_dup 3)
3310 		   (unspec:VI [(match_dup 2)
3311 			       (match_operand:VI 1 "register_operand" "v")]
3313 	      (set (reg:SI VSCR_REGNO)
3314 		   (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
3315    (set (match_operand:VI 0 "register_operand" "=v")
3316         (smax:VI (match_dup 1) (match_dup 3)))]
3319   operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
3320   operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
;; Horizontal add reduction for V8HI/V16QI: partial sums with
;; vsum4s?s into V4SI, final fold with vsumsws, then extract the scalar
;; from the endian-dependent element position.
3323 (define_expand "reduc_plus_scal_<mode>"
3324   [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
3325         (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
3326 			UNSPEC_REDUC_PLUS))]
3329   rtx vzero = gen_reg_rtx (V4SImode);
3330   rtx vtmp1 = gen_reg_rtx (V4SImode);
3331   rtx vtmp2 = gen_reg_rtx (<MODE>mode);
3332   rtx dest = gen_lowpart (V4SImode, vtmp2);
3333   int elt = VECTOR_ELT_ORDER_BIG ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
3335   emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3336   emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
3337   emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
3338   rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));

;; ISA 3.0 single-instruction vector negate.
3342 (define_insn "*p9_neg<mode>2"
3343   [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
3344 	(neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
3346   "vneg<VI_char> %0,%1"
3347   [(set_attr "type" "vecsimple")])

;; Generic vector negate: use the vneg insn when available (P9 and
;; V4SI/V2DI), otherwise expand as 0 - x.
3349 (define_expand "neg<mode>2"
3350   [(set (match_operand:VI2 0 "register_operand")
3351 	(neg:VI2 (match_operand:VI2 1 "register_operand")))]
3354   if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
3358       vzero = gen_reg_rtx (GET_MODE (operands[0]));
3359       emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
3360       emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
;; Unsigned dot product accumulating into V4SI: maps directly onto
;; vmsumu[bh]m (multiply-sum unsigned modulo).
3365 (define_expand "udot_prod<mode>"
3366 [(set (match_operand:V4SI 0 "register_operand" "=v")
3367 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3368 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
3369 (match_operand:VIshort 2 "register_operand" "v")]
3373 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
;; Signed V8HI dot product accumulating into V4SI via vmsumshm.
3377 (define_expand "sdot_prodv8hi"
3378 [(set (match_operand:V4SI 0 "register_operand" "=v")
3379 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3380 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3381 (match_operand:V8HI 2 "register_operand" "v")]
3385 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
;; Unsigned widening sum: multiply-sum against a vector of all ones,
;; which reduces the multiply-accumulate to a plain widening add.
3389 (define_expand "widen_usum<mode>3"
3390 [(set (match_operand:V4SI 0 "register_operand" "=v")
3391 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3392 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
3396 rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
3398 emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
3399 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
;; Signed widening sum, V16QI variant: vmsummbm with a splat-1 vector.
3403 (define_expand "widen_ssumv16qi3"
3404 [(set (match_operand:V4SI 0 "register_operand" "=v")
3405 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3406 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
3410 rtx vones = gen_reg_rtx (V16QImode);
3412 emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
3413 emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
;; Signed widening sum, V8HI variant: vmsumshm with a splat-1 vector.
3417 (define_expand "widen_ssumv8hi3"
3418 [(set (match_operand:V4SI 0 "register_operand" "=v")
3419 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3420 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3424 rtx vones = gen_reg_rtx (V8HImode);
3426 emit_insn (gen_altivec_vspltish (vones, const1_rtx));
3427 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
;; Signed high/low unpack expanders; the _DIRECT unspecs bypass the
;; endian-correcting forms and match the raw vupkhs*/vupkls* insns.
3431 (define_expand "vec_unpacks_hi_<VP_small_lc>"
3432 [(set (match_operand:VP 0 "register_operand" "=v")
3433 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3434 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
3438 (define_expand "vec_unpacks_lo_<VP_small_lc>"
3439 [(set (match_operand:VP 0 "register_operand" "=v")
3440 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3441 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
;; Typed vperm helpers used by the zero-extending unpack expanders
;; below: permute bytes of (op1, op2) under control of the V16QI mask
;; in op3.  Two alternatives: Altivec "v" registers, and a "?wo" VSX
;; form (output templates elided in this view — TODO confirm).
3445 (define_insn "vperm_v8hiv4si"
3446 [(set (match_operand:V4SI 0 "register_operand" "=v,?wo")
3447 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,wo")
3448 (match_operand:V4SI 2 "register_operand" "v,0")
3449 (match_operand:V16QI 3 "register_operand" "v,wo")]
3455 [(set_attr "type" "vecperm")
3456 (set_attr "length" "4")])
3458 (define_insn "vperm_v16qiv8hi"
3459 [(set (match_operand:V8HI 0 "register_operand" "=v,?wo")
3460 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,wo")
3461 (match_operand:V8HI 2 "register_operand" "v,0")
3462 (match_operand:V16QI 3 "register_operand" "v,wo")]
3468 [(set_attr "type" "vecperm")
3469 (set_attr "length" "4")])
;; Zero-extend the high half of a V16QI to V8HI.  There is no unsigned
;; unpack instruction, so build a vperm mask that interleaves bytes of
;; operand 1 with bytes from a zero vector: indices >= 16 select from
;; the second vperm input (vzero), supplying the zero-extension bytes.
;; The BE/LE index pairs pick the appropriate half for each endianness.
3472 (define_expand "vec_unpacku_hi_v16qi"
3473 [(set (match_operand:V8HI 0 "register_operand" "=v")
3474 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
3478 rtx vzero = gen_reg_rtx (V8HImode);
3479 rtx mask = gen_reg_rtx (V16QImode);
3480 rtvec v = rtvec_alloc (16);
3481 bool be = BYTES_BIG_ENDIAN;
3483 emit_insn (gen_altivec_vspltish (vzero, const0_rtx));
3485 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 7);
3486 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 0 : 16);
3487 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 16 : 6);
3488 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 1 : 16);
3489 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 5);
3490 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 2 : 16);
3491 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 16 : 4);
3492 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 3 : 16);
3493 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 3);
3494 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 4 : 16);
3495 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 : 2);
3496 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 5 : 16);
3497 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 1);
3498 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 6 : 16);
3499 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 : 0);
3500 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 7 : 16);
3502 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3503 emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
;; Zero-extend the high half of a V8HI to V4SI.  Same vperm-with-zero
;; technique as vec_unpacku_hi_v16qi, but the mask moves 2-byte
;; halfwords, so indices 16/17 both address zero bytes from vzero.
3507 (define_expand "vec_unpacku_hi_v8hi"
3508 [(set (match_operand:V4SI 0 "register_operand" "=v")
3509 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3513 rtx vzero = gen_reg_rtx (V4SImode);
3514 rtx mask = gen_reg_rtx (V16QImode);
3515 rtvec v = rtvec_alloc (16);
3516 bool be = BYTES_BIG_ENDIAN;
3518 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3520 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 7);
3521 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 17 : 6);
3522 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 0 : 17);
3523 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 1 : 16);
3524 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 5);
3525 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 17 : 4);
3526 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 2 : 17);
3527 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 3 : 16);
3528 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 3);
3529 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 17 : 2);
3530 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 4 : 17);
3531 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 5 : 16);
3532 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 1);
3533 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 : 0);
3534 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 6 : 17);
3535 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 7 : 16);
3537 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3538 emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
;; Zero-extend the low half of a V16QI to V8HI — mirror of the _hi_
;; variant, with byte indices 8..15 selected from operand 1 and the
;; zero-extension bytes (index 16) taken from vzero.
3542 (define_expand "vec_unpacku_lo_v16qi"
3543 [(set (match_operand:V8HI 0 "register_operand" "=v")
3544 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
3548 rtx vzero = gen_reg_rtx (V8HImode);
3549 rtx mask = gen_reg_rtx (V16QImode);
3550 rtvec v = rtvec_alloc (16);
3551 bool be = BYTES_BIG_ENDIAN;
3553 emit_insn (gen_altivec_vspltish (vzero, const0_rtx));
3555 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
3556 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 8 : 16);
3557 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 16 : 14);
3558 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 9 : 16);
3559 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
3560 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 10 : 16);
3561 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 16 : 12);
3562 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
3563 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
3564 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 12 : 16);
3565 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 : 10);
3566 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
3567 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 9);
3568 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 14 : 16);
3569 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 : 8);
3570 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);
3572 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3573 emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
;; Zero-extend the low half of a V8HI to V4SI — halfword mirror of
;; vec_unpacku_lo_v16qi; indices 16/17 fetch zero bytes from vzero.
3577 (define_expand "vec_unpacku_lo_v8hi"
3578 [(set (match_operand:V4SI 0 "register_operand" "=v")
3579 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3583 rtx vzero = gen_reg_rtx (V4SImode);
3584 rtx mask = gen_reg_rtx (V16QImode);
3585 rtvec v = rtvec_alloc (16);
3586 bool be = BYTES_BIG_ENDIAN;
3588 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3590 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
3591 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 17 : 14);
3592 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 8 : 17);
3593 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 9 : 16);
3594 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
3595 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 17 : 12);
3596 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 10 : 17);
3597 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
3598 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
3599 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 17 : 10);
3600 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 12 : 17);
3601 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
3602 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 9);
3603 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 : 8);
3604 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 14 : 17);
3605 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);
3607 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3608 emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
;; Widening byte multiplies: compute even-index and odd-index products
;; (vmule*/vmulo*), then merge high or low halves.  On little-endian
;; the even/odd instructions and merge operand order are swapped so the
;; result matches the element order callers expect.
3612 (define_expand "vec_widen_umult_hi_v16qi"
3613 [(set (match_operand:V8HI 0 "register_operand" "=v")
3614 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3615 (match_operand:V16QI 2 "register_operand" "v")]
3619 rtx ve = gen_reg_rtx (V8HImode);
3620 rtx vo = gen_reg_rtx (V8HImode);
3622 if (BYTES_BIG_ENDIAN)
3624 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3625 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3626 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3630 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3631 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3632 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
;; Unsigned low half: same products, merged with vmrglh.
3637 (define_expand "vec_widen_umult_lo_v16qi"
3638 [(set (match_operand:V8HI 0 "register_operand" "=v")
3639 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3640 (match_operand:V16QI 2 "register_operand" "v")]
3644 rtx ve = gen_reg_rtx (V8HImode);
3645 rtx vo = gen_reg_rtx (V8HImode);
3647 if (BYTES_BIG_ENDIAN)
3649 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3650 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3651 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3655 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3656 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3657 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
;; Signed high half: vmulesb/vmulosb + vmrghh.
3662 (define_expand "vec_widen_smult_hi_v16qi"
3663 [(set (match_operand:V8HI 0 "register_operand" "=v")
3664 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3665 (match_operand:V16QI 2 "register_operand" "v")]
3669 rtx ve = gen_reg_rtx (V8HImode);
3670 rtx vo = gen_reg_rtx (V8HImode);
3672 if (BYTES_BIG_ENDIAN)
3674 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3675 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3676 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3680 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3681 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3682 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
;; Signed low half: vmulesb/vmulosb + vmrglh.
3687 (define_expand "vec_widen_smult_lo_v16qi"
3688 [(set (match_operand:V8HI 0 "register_operand" "=v")
3689 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3690 (match_operand:V16QI 2 "register_operand" "v")]
3694 rtx ve = gen_reg_rtx (V8HImode);
3695 rtx vo = gen_reg_rtx (V8HImode);
3697 if (BYTES_BIG_ENDIAN)
3699 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3700 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3701 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3705 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3706 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3707 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
;; Widening halfword multiplies: identical even/odd + merge scheme as
;; the byte variants above, using vmule[u]h/vmulo[u]h and word merges
;; (vmrghw/vmrglw), with the same LE operand swap.
3712 (define_expand "vec_widen_umult_hi_v8hi"
3713 [(set (match_operand:V4SI 0 "register_operand" "=v")
3714 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3715 (match_operand:V8HI 2 "register_operand" "v")]
3719 rtx ve = gen_reg_rtx (V4SImode);
3720 rtx vo = gen_reg_rtx (V4SImode);
3722 if (BYTES_BIG_ENDIAN)
3724 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3725 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3726 emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
3730 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3731 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3732 emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
;; Unsigned low half.
3737 (define_expand "vec_widen_umult_lo_v8hi"
3738 [(set (match_operand:V4SI 0 "register_operand" "=v")
3739 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3740 (match_operand:V8HI 2 "register_operand" "v")]
3744 rtx ve = gen_reg_rtx (V4SImode);
3745 rtx vo = gen_reg_rtx (V4SImode);
3747 if (BYTES_BIG_ENDIAN)
3749 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3750 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3751 emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
3755 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3756 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3757 emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
;; Signed high half.
3762 (define_expand "vec_widen_smult_hi_v8hi"
3763 [(set (match_operand:V4SI 0 "register_operand" "=v")
3764 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3765 (match_operand:V8HI 2 "register_operand" "v")]
3769 rtx ve = gen_reg_rtx (V4SImode);
3770 rtx vo = gen_reg_rtx (V4SImode);
3772 if (BYTES_BIG_ENDIAN)
3774 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3775 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3776 emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
3780 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3781 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3782 emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
;; Signed low half.
3787 (define_expand "vec_widen_smult_lo_v8hi"
3788 [(set (match_operand:V4SI 0 "register_operand" "=v")
3789 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3790 (match_operand:V8HI 2 "register_operand" "v")]
3794 rtx ve = gen_reg_rtx (V4SImode);
3795 rtx vo = gen_reg_rtx (V4SImode);
3797 if (BYTES_BIG_ENDIAN)
3799 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3800 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3801 emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
3805 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3806 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3807 emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
;; Truncating pack of two wider vectors into one narrower vector
;; (modulo semantics, no saturation).
3812 (define_expand "vec_pack_trunc_<mode>"
3813 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
3814 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
3815 (match_operand:VP 2 "register_operand" "v")]
3816 UNSPEC_VPACK_UNS_UNS_MOD))]
;; Byte multiply: no vmul?b instruction exists, so compute even/odd
;; 16-bit products and use vperm to gather the low byte of each product
;; back into a V16QI.  The mask indices select the odd (low) byte of
;; each halfword on BE, the mirrored positions on LE.
3820 (define_expand "mulv16qi3"
3821 [(set (match_operand:V16QI 0 "register_operand" "=v")
3822 (mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
3823 (match_operand:V16QI 2 "register_operand" "v")))]
3826 rtx even = gen_reg_rtx (V8HImode);
3827 rtx odd = gen_reg_rtx (V8HImode);
3828 rtx mask = gen_reg_rtx (V16QImode);
3829 rtvec v = rtvec_alloc (16);
3832 for (i = 0; i < 8; ++i) {
3833 RTVEC_ELT (v, 2 * i)
3834 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
3835 RTVEC_ELT (v, 2 * i + 1)
3836 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
3839 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3840 emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
3841 emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
3842 emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
;; vpermxor wrapper.  The hardware instruction indexes bytes with
;; big-endian numbering, so on little-endian targets the selector
;; (operand 3) is converted with a one's-complement before use.
3846 (define_expand "altivec_vpermxor"
3847 [(use (match_operand:V16QI 0 "register_operand"))
3848 (use (match_operand:V16QI 1 "register_operand"))
3849 (use (match_operand:V16QI 2 "register_operand"))
3850 (use (match_operand:V16QI 3 "register_operand"))]
3853 if (!BYTES_BIG_ENDIAN)
3855 /* vpermxor indexes the bytes using Big Endian numbering.  If LE,
3856 change indexing in operand[3] to BE index.  */
3857 rtx be_index = gen_reg_rtx (V16QImode);
3859 emit_insn (gen_one_cmplv16qi2 (be_index, operands[3]));
3860 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
3861 operands[2], be_index));
;; BE path: pass the selector through unchanged.
3864 emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1],
3865 operands[2], operands[3]));
;; V4SF negate: build a vector of -0.0 (splat -1, shift left by 31 via
;; self-shift) and XOR it into the input to flip every sign bit.
3869 (define_expand "altivec_negv4sf2"
3870 [(use (match_operand:V4SF 0 "register_operand"))
3871 (use (match_operand:V4SF 1 "register_operand"))]
3876 /* Generate [-0.0, -0.0, -0.0, -0.0].  */
3877 neg0 = gen_reg_rtx (V4SImode);
3878 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
3879 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
3882 emit_insn (gen_xorv4sf3 (operands[0],
3883 gen_lowpart (V4SFmode, neg0), operands[1]));
3888 ;; Vector reverse elements
;; Reverse the order of the elements (not bytes) of a vector: construct
;; a byte-permute mask that keeps each element's bytes in order but
;; places element j at position (num_elements - 1 - j), then vperm the
;; input against itself.
3889 (define_expand "altivec_vreve<mode>2"
3890 [(set (match_operand:VEC_A 0 "register_operand" "=v")
3891 (unspec:VEC_A [(match_operand:VEC_A 1 "register_operand" "v")]
3895 int i, j, size, num_elements;
3896 rtvec v = rtvec_alloc (16);
3897 rtx mask = gen_reg_rtx (V16QImode);
3899 size = GET_MODE_UNIT_SIZE (<MODE>mode);
3900 num_elements = GET_MODE_NUNITS (<MODE>mode);
3902 for (j = 0; j < num_elements; j++)
3903 for (i = 0; i < size; i++)
3904 RTVEC_ELT (v, i + j * size)
3905 = GEN_INT (i + (num_elements - 1 - j) * size);
3907 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3908 emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
3909 operands[1], mask));
3913 ;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
3914 ;; STVLX, STVLXL, STVVRX, STVRXL are available only on Cell.
;; All eight patterns below are gated on rs6000_cpu == PROCESSOR_CELL.
;; The BLK memory operand models the partial (left/right, possibly
;; unaligned) access.  Output asm templates are elided in this view.
3915 (define_insn "altivec_lvlx"
3916 [(set (match_operand:V16QI 0 "register_operand" "=v")
3917 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3919 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3921 [(set_attr "type" "vecload")])
3923 (define_insn "altivec_lvlxl"
3924 [(set (match_operand:V16QI 0 "register_operand" "=v")
3925 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3927 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3929 [(set_attr "type" "vecload")])
3931 (define_insn "altivec_lvrx"
3932 [(set (match_operand:V16QI 0 "register_operand" "=v")
3933 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3935 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3937 [(set_attr "type" "vecload")])
3939 (define_insn "altivec_lvrxl"
3940 [(set (match_operand:V16QI 0 "register_operand" "=v")
3941 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3943 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3945 [(set_attr "type" "vecload")])
;; Store-left/right counterparts; the extra const_int unspec in the
;; parallel distinguishes the four store variants.
3947 (define_insn "altivec_stvlx"
3949 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3950 (match_operand:V16QI 1 "register_operand" "v"))
3951 (unspec [(const_int 0)] UNSPEC_STVLX)])]
3952 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3954 [(set_attr "type" "vecstore")])
3956 (define_insn "altivec_stvlxl"
3958 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3959 (match_operand:V16QI 1 "register_operand" "v"))
3960 (unspec [(const_int 0)] UNSPEC_STVLXL)])]
3961 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3963 [(set_attr "type" "vecstore")])
3965 (define_insn "altivec_stvrx"
3967 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3968 (match_operand:V16QI 1 "register_operand" "v"))
3969 (unspec [(const_int 0)] UNSPEC_STVRX)])]
3970 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3972 [(set_attr "type" "vecstore")])
3974 (define_insn "altivec_stvrxl"
3976 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3977 (match_operand:V16QI 1 "register_operand" "v"))
3978 (unspec [(const_int 0)] UNSPEC_STVRXL)])]
3979 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3981 [(set_attr "type" "vecstore")])
;; Unpack halfwords to words and convert to float in one expander:
;; signed variants use the signed unpack + vcfsx; unsigned variants use
;; the zero-extending unpack + vcfux.  const0_rtx is the scale operand
;; of the convert instruction (no scaling).
3983 (define_expand "vec_unpacks_float_hi_v8hi"
3984 [(set (match_operand:V4SF 0 "register_operand")
3985 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
3986 UNSPEC_VUPKHS_V4SF))]
3989 rtx tmp = gen_reg_rtx (V4SImode);
3991 emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
3992 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
3996 (define_expand "vec_unpacks_float_lo_v8hi"
3997 [(set (match_operand:V4SF 0 "register_operand")
3998 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
3999 UNSPEC_VUPKLS_V4SF))]
4002 rtx tmp = gen_reg_rtx (V4SImode);
4004 emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
4005 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
4009 (define_expand "vec_unpacku_float_hi_v8hi"
4010 [(set (match_operand:V4SF 0 "register_operand")
4011 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4012 UNSPEC_VUPKHU_V4SF))]
4015 rtx tmp = gen_reg_rtx (V4SImode);
4017 emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
4018 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4022 (define_expand "vec_unpacku_float_lo_v8hi"
4023 [(set (match_operand:V4SF 0 "register_operand")
4024 (unspec:V4SF [(match_operand:V8HI 1 "register_operand")]
4025 UNSPEC_VUPKLU_V4SF))]
4028 rtx tmp = gen_reg_rtx (V4SImode);
4030 emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
4031 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4036 ;; Power8/power9 vector instructions encoded as Altivec instructions
4038 ;; Vector count leading zeros
;; Per-element clz (vclz[bhwd]; asm template and condition elided here).
4039 (define_insn "*p8v_clz<mode>2"
4040 [(set (match_operand:VI2 0 "register_operand" "=v")
4041 (clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4044 [(set_attr "length" "4")
4045 (set_attr "type" "vecsimple")])
4047 ;; Vector absolute difference unsigned
;; Expander that funnels into the p9_vadu insn below (condition elided).
4048 (define_expand "vadu<mode>3"
4049 [(set (match_operand:VI 0 "register_operand")
4050 (unspec:VI [(match_operand:VI 1 "register_operand")
4051 (match_operand:VI 2 "register_operand")]
4055 ;; Vector absolute difference unsigned
4056 (define_insn "p9_vadu<mode>3"
4057 [(set (match_operand:VI 0 "register_operand" "=v")
4058 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
4059 (match_operand:VI 2 "register_operand" "v")]
4062 "vabsdu<wd> %0,%1,%2"
4063 [(set_attr "type" "vecsimple")])
4065 ;; Vector count trailing zeros
;; Per-element ctz (ISA 3.0 vctz[bhwd]; template/condition elided here).
4066 (define_insn "*p9v_ctz<mode>2"
4067 [(set (match_operand:VI2 0 "register_operand" "=v")
4068 (ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4071 [(set_attr "length" "4")
4072 (set_attr "type" "vecsimple")])
4074 ;; Vector population count
4075 (define_insn "*p8v_popcount<mode>2"
4076 [(set (match_operand:VI2 0 "register_operand" "=v")
4077 (popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4080 [(set_attr "length" "4")
4081 (set_attr "type" "vecsimple")])
;; Vector parity (ISA 3.0).
4084 (define_insn "*p9v_parity<mode>2"
4085 [(set (match_operand:VParity 0 "register_operand" "=v")
4086 (parity:VParity (match_operand:VParity 1 "register_operand" "v")))]
4089 [(set_attr "length" "4")
4090 (set_attr "type" "vecsimple")])
4092 ;; Vector Gather Bits by Bytes by Doubleword
4093 (define_insn "p8v_vgbbd"
4094 [(set (match_operand:V16QI 0 "register_operand" "=v")
4095 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
4099 [(set_attr "length" "4")
4100 (set_attr "type" "vecsimple")])
4103 ;; 128-bit binary integer arithmetic
4104 ;; We have a special container type (V1TImode) to allow operations using the
4105 ;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
4106 ;; having to worry about the register allocator deciding GPRs are better.
;; 128-bit modular add (vadduqm; asm template line elided in this view).
4108 (define_insn "altivec_vadduqm"
4109 [(set (match_operand:V1TI 0 "register_operand" "=v")
4110 (plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4111 (match_operand:V1TI 2 "register_operand" "v")))]
4114 [(set_attr "length" "4")
4115 (set_attr "type" "vecsimple")])
;; Carry-out of a 128-bit add.
4117 (define_insn "altivec_vaddcuq"
4118 [(set (match_operand:V1TI 0 "register_operand" "=v")
4119 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4120 (match_operand:V1TI 2 "register_operand" "v")]
4124 [(set_attr "length" "4")
4125 (set_attr "type" "vecsimple")])
;; 128-bit modular subtract.
4127 (define_insn "altivec_vsubuqm"
4128 [(set (match_operand:V1TI 0 "register_operand" "=v")
4129 (minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4130 (match_operand:V1TI 2 "register_operand" "v")))]
4133 [(set_attr "length" "4")
4134 (set_attr "type" "vecsimple")])
;; Carry-out (borrow indicator) of a 128-bit subtract.
4136 (define_insn "altivec_vsubcuq"
4137 [(set (match_operand:V1TI 0 "register_operand" "=v")
4138 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4139 (match_operand:V1TI 2 "register_operand" "v")]
4143 [(set_attr "length" "4")
4144 (set_attr "type" "vecsimple")])
;; Extended (carry-in, operand 3) forms of the four operations above.
4146 (define_insn "altivec_vaddeuqm"
4147 [(set (match_operand:V1TI 0 "register_operand" "=v")
4148 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4149 (match_operand:V1TI 2 "register_operand" "v")
4150 (match_operand:V1TI 3 "register_operand" "v")]
4153 "vaddeuqm %0,%1,%2,%3"
4154 [(set_attr "length" "4")
4155 (set_attr "type" "vecsimple")])
4157 (define_insn "altivec_vaddecuq"
4158 [(set (match_operand:V1TI 0 "register_operand" "=v")
4159 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4160 (match_operand:V1TI 2 "register_operand" "v")
4161 (match_operand:V1TI 3 "register_operand" "v")]
4164 "vaddecuq %0,%1,%2,%3"
4165 [(set_attr "length" "4")
4166 (set_attr "type" "vecsimple")])
4168 (define_insn "altivec_vsubeuqm"
4169 [(set (match_operand:V1TI 0 "register_operand" "=v")
4170 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4171 (match_operand:V1TI 2 "register_operand" "v")
4172 (match_operand:V1TI 3 "register_operand" "v")]
4175 "vsubeuqm %0,%1,%2,%3"
4176 [(set_attr "length" "4")
4177 (set_attr "type" "vecsimple")])
4179 (define_insn "altivec_vsubecuq"
4180 [(set (match_operand:V1TI 0 "register_operand" "=v")
4181 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4182 (match_operand:V1TI 2 "register_operand" "v")
4183 (match_operand:V1TI 3 "register_operand" "v")]
4186 "vsubecuq %0,%1,%2,%3"
4187 [(set_attr "length" "4")
4188 (set_attr "type" "vecsimple")])
4190 ;; We use V2DI as the output type to simplify converting the permute
4191 ;; bits into an integer
;; Vector bit permute quadword (asm template/condition lines elided).
4192 (define_insn "altivec_vbpermq"
4193 [(set (match_operand:V2DI 0 "register_operand" "=v")
4194 (unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
4195 (match_operand:V16QI 2 "register_operand" "v")]
4199 [(set_attr "type" "vecperm")])
4201 ; One of the vector API interfaces requires returning vector unsigned char.
4202 (define_insn "altivec_vbpermq2"
4203 [(set (match_operand:V16QI 0 "register_operand" "=v")
4204 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
4205 (match_operand:V16QI 2 "register_operand" "v")]
4209 [(set_attr "type" "vecperm")])
;; Doubleword variant.
4211 (define_insn "altivec_vbpermd"
4212 [(set (match_operand:V2DI 0 "register_operand" "=v")
4213 (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
4214 (match_operand:V16QI 2 "register_operand" "v")]
4218 [(set_attr "type" "vecsimple")])
4220 ;; Support for SAD (sum of absolute differences).
4222 ;; Due to saturating semantics, we can't combine the sum-across
4223 ;; with the vector accumulate in vsum4ubs.  A vadduwm is needed.
;; usad: |op1 - op2| per byte (vabsdub), partial sums into words
;; (vsum4ubs against zero), then a modular add folds in the V4SI
;; accumulator (operand 3).
4224 (define_expand "usadv16qi"
4225 [(use (match_operand:V4SI 0 "register_operand"))
4226 (use (match_operand:V16QI 1 "register_operand"))
4227 (use (match_operand:V16QI 2 "register_operand"))
4228 (use (match_operand:V4SI 3 "register_operand"))]
4231 rtx absd = gen_reg_rtx (V16QImode);
4232 rtx zero = gen_reg_rtx (V4SImode);
4233 rtx psum = gen_reg_rtx (V4SImode);
4235 emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
4236 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4237 emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
4238 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4242 ;; Since vsum4shs is saturating and further performs signed
4243 ;; arithmetic, we can't combine the sum-across with the vector
4244 ;; accumulate in vsum4shs.  A vadduwm is needed.
;; Halfword variant of the same scheme (vabsduh + vsum4shs + add).
4245 (define_expand "usadv8hi"
4246 [(use (match_operand:V4SI 0 "register_operand"))
4247 (use (match_operand:V8HI 1 "register_operand"))
4248 (use (match_operand:V8HI 2 "register_operand"))
4249 (use (match_operand:V4SI 3 "register_operand"))]
4252 rtx absd = gen_reg_rtx (V8HImode);
4253 rtx zero = gen_reg_rtx (V4SImode);
4254 rtx psum = gen_reg_rtx (V4SImode);
4256 emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
4257 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4258 emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
4259 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4263 ;; Decimal Integer operations
;; Iterator/attr pair generating both bcdadd and bcdsub patterns, plus
;; the comparison codes usable against the CR6 result.
4264 (define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])
4266 (define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
4267 (UNSPEC_BCDSUB "sub")])
4269 (define_code_iterator BCD_TEST [eq lt gt unordered])
;; BCD add/subtract.  Operand 3 is the "PS" (preferred sign) immediate,
;; 0 or 1.  The dot-form instruction also sets CR6, modeled here as a
;; CCFP clobber.
4271 (define_insn "bcd<bcd_add_sub>"
4272 [(set (match_operand:V1TI 0 "gpc_reg_operand" "=v")
4273 (unspec:V1TI [(match_operand:V1TI 1 "gpc_reg_operand" "v")
4274 (match_operand:V1TI 2 "gpc_reg_operand" "v")
4275 (match_operand:QI 3 "const_0_to_1_operand" "n")]
4276 UNSPEC_BCD_ADD_SUB))
4277 (clobber (reg:CCFP CR6_REGNO))]
4279 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4280 [(set_attr "length" "4")
4281 (set_attr "type" "vecsimple")])
4283 ;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
4284 ;; can use the unordered test for BCD nans and add/subtracts that overflow.  An
4285 ;; UNORDERED test on an integer type (like V1TImode) is not defined.  The type
4286 ;; probably should be one that can go in the VMX (Altivec) registers, so we
4287 ;; can't use DDmode or DFmode.
;; Variant that only wants the CR6 condition: the V1TI result goes to a
;; scratch.
4288 (define_insn "*bcd<bcd_add_sub>_test"
4289 [(set (reg:CCFP CR6_REGNO)
4291 (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "v")
4292 (match_operand:V1TI 2 "register_operand" "v")
4293 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4295 (match_operand:V2DF 4 "zero_constant" "j")))
4296 (clobber (match_scratch:V1TI 0 "=v"))]
4298 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4299 [(set_attr "length" "4")
4300 (set_attr "type" "vecsimple")])
;; Variant that uses both the arithmetic result and the CR6 condition.
4302 (define_insn "*bcd<bcd_add_sub>_test2"
4303 [(set (match_operand:V1TI 0 "register_operand" "=v")
4304 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4305 (match_operand:V1TI 2 "register_operand" "v")
4306 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4307 UNSPEC_BCD_ADD_SUB))
4308 (set (reg:CCFP CR6_REGNO)
4310 (unspec:V2DF [(match_dup 1)
4314 (match_operand:V2DF 4 "zero_constant" "j")))]
4316 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4317 [(set_attr "length" "4")
4318 (set_attr "type" "vecsimple")])
4320 (define_insn "darn_32"
4321 [(set (match_operand:SI 0 "register_operand" "=r")
4322 (unspec:SI [(const_int 0)] UNSPEC_DARN_32))]
4325 [(set_attr "type" "integer")])
;; Deliver A Random Number.  L=2 returns the raw (unconditioned) 64-bit
;; random number, so this form requires a 64-bit target.
(define_insn "darn_raw"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (unspec:DI [(const_int 0)] UNSPEC_DARN_RAW))]
  "TARGET_P9_MISC && TARGET_64BIT"
  "darn %0,2"
  [(set_attr "type" "integer")])
;; Deliver A Random Number.  L=1 returns a conditioned 64-bit random
;; number; requires a 64-bit target.
(define_insn "darn"
  [(set (match_operand:DI 0 "register_operand" "=r")
        (unspec:DI [(const_int 0)] UNSPEC_DARN))]
  "TARGET_P9_MISC && TARGET_64BIT"
  "darn %0,1"
  [(set_attr "type" "integer")])
4341 ;; Test byte within range.
4343 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4344 ;; represents a byte whose value is ignored in this context and
4345 ;; vv, the least significant byte, holds the byte value that is to
4346 ;; be tested for membership within the range specified by operand 2.
4347 ;; The bytes of operand 2 are organized as xx:xx:hi:lo.
4349 ;; Return in target register operand 0 a value of 1 if lo <= vv and
4350 ;; vv <= hi. Otherwise, set register operand 0 to 0.
4352 ;; Though the instructions to which this expansion maps operate on
4353 ;; 64-bit registers, the current implementation only operates on
4354 ;; SI-mode operands as the high-order bits provide no information
4355 ;; that is not already available in the low-order bits. To avoid the
4356 ;; costs of data widening operations, future enhancements might allow
4357 ;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
;; Expand cmprb into the CR-setting compare plus a setb-style selection:
;; operand 3 (a fresh CC pseudo) receives the cmprb result and the
;; if_then_else chain maps LT/GT/EQ on it to -1/1/0 in operand 0.
(define_expand "cmprb"
  [(set (match_dup 3)
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB))
   (set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_dup 3)
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 3)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC"
{
  operands[3] = gen_reg_rtx (CCmode);
})
4376 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4377 ;; represents a byte whose value is ignored in this context and
4378 ;; vv, the least significant byte, holds the byte value that is to
4379 ;; be tested for membership within the range specified by operand 2.
4380 ;; The bytes of operand 2 are organized as xx:xx:hi:lo.
4382 ;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
4383 ;; lo <= vv and vv <= hi. Otherwise, set the GT bit to 0. The other
4384 ;; 3 bits of the target CR register are all set to 0.
;; Single-range compare: cmprb with L=0 tests only the low hi:lo range
;; of operand 2.
(define_insn "*cmprb_internal"
  [(set (match_operand:CC 0 "cc_reg_operand" "=y")
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB))]
  "TARGET_P9_MISC"
  "cmprb %0,0,%1,%2"
  [(set_attr "type" "logical")])
4394 ;; Set operand 0 register to -1 if the LT bit (0x8) of condition
4395 ;; register operand 1 is on. Otherwise, set operand 0 register to 1
4396 ;; if the GT bit (0x4) of condition register operand 1 is on.
4397 ;; Otherwise, set operand 0 to 0. Note that the result stored into
4398 ;; register operand 0 is non-zero iff either the LT or GT bits are on
4399 ;; within condition register operand 1.
;; setb: map the LT/GT/EQ bits of CR field operand 1 onto -1/1/0
;; in GPR operand 0 (signed CC mode variant).
(define_insn "setb_signed"
  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_operand:CC 1 "cc_reg_operand" "y")
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 1)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC"
  "setb %0,%1"
  [(set_attr "type" "logical")])
;; setb, unsigned-compare (CCUNS) variant: same -1/1/0 selection keyed
;; off the LT/GT bits of CR field operand 1.
(define_insn "setb_unsigned"
  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (ltu (match_operand:CCUNS 1 "cc_reg_operand" "y")
			      (const_int 0))
			 (const_int -1)
			 (if_then_else (gtu (match_dup 1)
					    (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC"
  "setb %0,%1"
  [(set_attr "type" "logical")])
4426 ;; Test byte within two ranges.
4428 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4429 ;; represents a byte whose value is ignored in this context and
4430 ;; vv, the least significant byte, holds the byte value that is to
4431 ;; be tested for membership within the range specified by operand 2.
4432 ;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
4434 ;; Return in target register operand 0 a value of 1 if (lo_1 <= vv and
4435 ;; vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2). Otherwise, set register
4438 ;; Though the instructions to which this expansion maps operate on
4439 ;; 64-bit registers, the current implementation only operates on
4440 ;; SI-mode operands as the high-order bits provide no information
4441 ;; that is not already available in the low-order bits. To avoid the
4442 ;; costs of data widening operations, future enhancements might allow
4443 ;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
;; Expand cmprb2 (two-range byte test) the same way as cmprb: compare
;; into a fresh CC pseudo (operand 3), then select -1/1/0 from it.
(define_expand "cmprb2"
  [(set (match_dup 3)
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB2))
   (set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_dup 3)
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 3)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC"
{
  operands[3] = gen_reg_rtx (CCmode);
})
4462 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4463 ;; represents a byte whose value is ignored in this context and
4464 ;; vv, the least significant byte, holds the byte value that is to
4465 ;; be tested for membership within the ranges specified by operand 2.
4466 ;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
4468 ;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
4469 ;; (lo_1 <= vv and vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2).
4470 ;; Otherwise, set the GT bit to 0. The other 3 bits of the target
4471 ;; CR register are all set to 0.
;; Dual-range compare: cmprb with L=1 tests both hi:lo byte ranges
;; packed in operand 2.
(define_insn "*cmprb2_internal"
  [(set (match_operand:CC 0 "cc_reg_operand" "=y")
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:SI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPRB2))]
  "TARGET_P9_MISC"
  "cmprb %0,1,%1,%2"
  [(set_attr "type" "logical")])
4481 ;; Test byte membership within set of 8 bytes.
4483 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4484 ;; represents a byte whose value is ignored in this context and
4485 ;; vv, the least significant byte, holds the byte value that is to
4486 ;; be tested for membership within the set specified by operand 2.
4487 ;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
4489 ;; Return in target register operand 0 a value of 1 if vv equals one
4490 ;; of the values e0, e1, e2, e3, e4, e5, e6, or e7. Otherwise, set
4491 ;; register operand 0 to 0. Note that the 8 byte values held within
4492 ;; operand 2 need not be unique.
4494 ;; Though the instructions to which this expansion maps operate on
4495 ;; 64-bit registers, the current implementation requires that operands
4496 ;; 0 and 1 have mode SI as the high-order bits provide no information
4497 ;; that is not already available in the low-order bits. To avoid the
4498 ;; costs of data widening operations, future enhancements might allow
4499 ;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
;; Expand cmpeqb (byte-in-set-of-8 test): compare into a fresh CC pseudo
;; (operand 3), then select -1/1/0 from its LT/GT/EQ bits.  Operand 2 is
;; DImode since it packs eight candidate byte values, so this needs a
;; 64-bit target.
(define_expand "cmpeqb"
  [(set (match_dup 3)
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:DI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPEQB))
   (set (match_operand:SI 0 "gpc_reg_operand" "=r")
	(if_then_else:SI (lt (match_dup 3)
			     (const_int 0))
			 (const_int -1)
			 (if_then_else (gt (match_dup 3)
					   (const_int 0))
				       (const_int 1)
				       (const_int 0))))]
  "TARGET_P9_MISC && TARGET_64BIT"
{
  operands[3] = gen_reg_rtx (CCmode);
})
4518 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4519 ;; represents a byte whose value is ignored in this context and
4520 ;; vv, the least significant byte, holds the byte value that is to
4521 ;; be tested for membership within the set specified by operand 2.
4522 ;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
4524 ;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if vv
4525 ;; equals one of the values e0, e1, e2, e3, e4, e5, e6, or e7. Otherwise,
4526 ;; set the GT bit to zero. The other 3 bits of the target CR register
4527 ;; are all set to 0.
;; cmpeqb: set the GT bit of CR field operand 0 when the low byte of
;; operand 1 matches any of the eight bytes of operand 2.
(define_insn "*cmpeqb_internal"
  [(set (match_operand:CC 0 "cc_reg_operand" "=y")
	(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
		    (match_operand:DI 2 "gpc_reg_operand" "r")]
	 UNSPEC_CMPEQB))]
  "TARGET_P9_MISC && TARGET_64BIT"
  "cmpeqb %0,%1,%2"
  [(set_attr "type" "logical")])
;; Expand the bcdadd/bcdsub overflow/invalid tests: do the BCD operation
;; for its CR6 side effect (result discarded into scratch operand 5),
;; then reduce CR6 to 0/1 in operand 0 via the BCD_TEST comparison code.
;; Operand 4 is filled in with the V2DF zero the compare idiom needs.
(define_expand "bcd<bcd_add_sub>_<code>"
  [(parallel [(set (reg:CCFP CR6_REGNO)
		   (compare:CCFP
		    (unspec:V2DF [(match_operand:V1TI 1 "register_operand")
				  (match_operand:V1TI 2 "register_operand")
				  (match_operand:QI 3 "const_0_to_1_operand")]
				 UNSPEC_BCD_ADD_SUB)
		    (match_dup 4)))
	      (clobber (match_scratch:V1TI 5))])
   (set (match_operand:SI 0 "register_operand")
	(BCD_TEST:SI (reg:CCFP CR6_REGNO)
		     (const_int 0)))]
  "TARGET_P8_VECTOR"
{
  operands[4] = CONST0_RTX (V2DFmode);
})
4554 ;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
4555 ;; the bcdadd/bcdsub that tests the value. The combiner won't work since
4556 ;; CR6 is a hard coded register. Unfortunately, all of the Altivec predicate
4557 ;; support is hard coded to use the fixed register CR6 instead of creating
4558 ;; a register class for CR6.
4561 [(parallel [(set (match_operand:V1TI 0 "register_operand")
4562 (unspec:V1TI [(match_operand:V1TI 1 "register_operand")
4563 (match_operand:V1TI 2 "register_operand")
4564 (match_operand:QI 3 "const_0_to_1_operand")]
4565 UNSPEC_BCD_ADD_SUB))
4566 (clobber (reg:CCFP CR6_REGNO))])
4567 (parallel [(set (reg:CCFP CR6_REGNO)
4569 (unspec:V2DF [(match_dup 1)
4573 (match_operand:V2DF 4 "zero_constant")))
4574 (clobber (match_operand:V1TI 5 "register_operand"))])]
4576 [(parallel [(set (match_dup 0)
4577 (unspec:V1TI [(match_dup 1)
4580 UNSPEC_BCD_ADD_SUB))
4581 (set (reg:CCFP CR6_REGNO)
4583 (unspec:V2DF [(match_dup 1)