2 ;; Copyright (C) 2002-2017 Free Software Foundation, Inc.
3 ;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published
9 ;; by the Free Software Foundation; either version 3, or (at your
10 ;; option) any later version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT
13 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 ;; License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
;; Unspec constants used to tag AltiVec operations that have no direct
;; RTL equivalent (saturating packs, sign unpacks, etc.).  The _DIRECT
;; variants name the raw machine instruction irrespective of endian
;; element ordering.  NOTE(review): the enum list is elided here; only a
;; subset of the constants is visible.
21 (define_c_enum "unspec"
48 UNSPEC_VPACK_SIGN_SIGN_SAT
49 UNSPEC_VPACK_SIGN_UNS_SAT
50 UNSPEC_VPACK_UNS_UNS_SAT
51 UNSPEC_VPACK_UNS_UNS_MOD
52 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
76 UNSPEC_VUNPACK_HI_SIGN
77 UNSPEC_VUNPACK_LO_SIGN
78 UNSPEC_VUNPACK_HI_SIGN_DIRECT
79 UNSPEC_VUNPACK_LO_SIGN_DIRECT
82 UNSPEC_CONVERT_4F32_8I16
152 UNSPEC_VSUMSWS_DIRECT
;; Volatile unspecs: operations with side effects the optimizers must
;; not reorder or delete (e.g. VRSAVE updates below).
171 (define_c_enum "unspecv"
179 ;; Like VI, defined in vector.md, but add ISA 2.07 integer vector ops
180 (define_mode_iterator VI2 [V4SI V8HI V16QI V2DI])
181 ;; Short vec int modes
182 (define_mode_iterator VIshort [V8HI V16QI])
183 ;; Longer vec int modes for rotate/mask ops
184 (define_mode_iterator VIlong [V2DI V4SI])
;; Vector float mode (AltiVec has only single-precision FP vectors).
186 (define_mode_iterator VF [V4SF])
187 ;; Vec modes, pity mode iterators are not composable
188 (define_mode_iterator V [V4SI V8HI V16QI V4SF])
189 ;; Vec modes for move/logical/permute ops, include vector types for move not
190 ;; otherwise handled by altivec (v2df, v2di, ti)
;; NOTE(review): interior entries of VM are elided; KF/TF are included
;; only when the 128-bit float type lives in vector registers.
191 (define_mode_iterator VM [V4SI
199 (KF "FLOAT128_VECTOR_P (KFmode)")
200 (TF "FLOAT128_VECTOR_P (TFmode)")])
202 ;; Like VM, except don't do TImode
203 (define_mode_iterator VM2 [V4SI
210 (KF "FLOAT128_VECTOR_P (KFmode)")
211 (TF "FLOAT128_VECTOR_P (TFmode)")])
213 ;; Map the Vector convert single precision to double precision for integer
214 ;; versus floating point
215 (define_mode_attr VS_sxwsp [(V4SI "sxw") (V4SF "sp")])
217 ;; Specific iterator for parity which does not have a byte/half-word form, but
218 ;; does have a quad word form
219 (define_mode_iterator VParity [V4SI
;; Per-mode mnemonic suffix (b/h/w/d), scalar element mode, and the
;; enabling predicate for each vector mode.
224 (define_mode_attr VI_char [(V2DI "d") (V4SI "w") (V8HI "h") (V16QI "b")])
225 (define_mode_attr VI_scalar [(V2DI "DI") (V4SI "SI") (V8HI "HI") (V16QI "QI")])
226 (define_mode_attr VI_unit [(V16QI "VECTOR_UNIT_ALTIVEC_P (V16QImode)")
227 (V8HI "VECTOR_UNIT_ALTIVEC_P (V8HImode)")
228 (V4SI "VECTOR_UNIT_ALTIVEC_P (V4SImode)")
229 (V2DI "VECTOR_UNIT_P8_VECTOR_P (V2DImode)")
230 (V1TI "VECTOR_UNIT_ALTIVEC_P (V1TImode)")])
232 ;; Vector pack/unpack
233 (define_mode_iterator VP [V2DI V4SI V8HI])
234 (define_mode_attr VP_small [(V2DI "V4SI") (V4SI "V8HI") (V8HI "V16QI")])
235 (define_mode_attr VP_small_lc [(V2DI "v4si") (V4SI "v8hi") (V8HI "v16qi")])
236 (define_mode_attr VU_char [(V2DI "w") (V4SI "h") (V8HI "b")])
;; Modes that have a vector negate pattern.
239 (define_mode_iterator VNEG [V4SI V2DI])
241 ;; Vector move instructions.
;; Alternatives: AltiVec store/load (stvx/lvx), vector register copy
;; (vor), GPR memory/register moves, easy zero (vxor), and other easy
;; constants via output_vec_const_move.  NOTE(review): cases 3-5 and the
;; C braces are elided from this view.
242 (define_insn "*altivec_mov<mode>"
243 [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,?Y,?*r,?*r,v,v,?*r")
244 (match_operand:VM2 1 "input_operand" "v,Z,v,*r,Y,*r,j,W,W"))]
245 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)
246 && (register_operand (operands[0], <MODE>mode)
247 || register_operand (operands[1], <MODE>mode))"
249 switch (which_alternative)
251 case 0: return "stvx %1,%y0";
252 case 1: return "lvx %0,%y1";
253 case 2: return "vor %0,%1,%1";
257 case 6: return "vxor %0,%0,%0";
258 case 7: return output_vec_const_move (operands);
260 default: gcc_unreachable ();
263 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
264 (set_attr "length" "4,4,4,20,20,20,4,8,32")])
266 ;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
267 ;; is for unions. However for plain data movement, slightly favor the vector
;; TImode variant of the move above, with one fewer alternative (no
;; GPR constant alternative).
269 (define_insn "*altivec_movti"
270 [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?Y,?r,?r,v,v")
271 (match_operand:TI 1 "input_operand" "v,Z,v,r,Y,r,j,W"))]
272 "VECTOR_MEM_ALTIVEC_P (TImode)
273 && (register_operand (operands[0], TImode)
274 || register_operand (operands[1], TImode))"
276 switch (which_alternative)
278 case 0: return "stvx %1,%y0";
279 case 1: return "lvx %0,%y1";
280 case 2: return "vor %0,%1,%1";
284 case 6: return "vxor %0,%0,%0";
285 case 7: return output_vec_const_move (operands);
286 default: gcc_unreachable ();
289 [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")]) 
291 ;; Load up a vector with the most significant bit set by loading up -1 and
292 ;; doing a shift left
;; Splitter: materialize a MSB-only constant as splat(-1) followed by a
;; vector shift left of the register by itself (each element shifts -1
;; left by its own low bits).  V4SF is handled via its V4SI bit image.
294 [(set (match_operand:VM 0 "altivec_register_operand" "")
295 (match_operand:VM 1 "easy_vector_constant_msb" ""))]
296 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
299 rtx dest = operands[0];
300 machine_mode mode = GET_MODE (operands[0]);
304 if (mode == V4SFmode)
307 dest = gen_lowpart (V4SImode, dest);
310 num_elements = GET_MODE_NUNITS (mode);
311 v = rtvec_alloc (num_elements);
312 for (i = 0; i < num_elements; i++)
313 RTVEC_ELT (v, i) = constm1_rtx;
315 emit_insn (gen_vec_initv4sisi (dest, gen_rtx_PARALLEL (mode, v)))
316 emit_insn (gen_rtx_SET (dest, gen_rtx_ASHIFT (mode, dest, dest)));
;; Splitter: a constant that is twice an "easy" constant is built as
;; splat(c/2) then a self-add (doubling).  The halved constant is
;; computed with a QImode arithmetic right shift of the splat element.
321 [(set (match_operand:VM 0 "altivec_register_operand" "")
322 (match_operand:VM 1 "easy_vector_constant_add_self" ""))]
323 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed"
324 [(set (match_dup 0) (match_dup 3))
325 (set (match_dup 0) (match_dup 4))]
327 rtx dup = gen_easy_altivec_constant (operands[1]);
329 machine_mode op_mode = <MODE>mode;
331 /* Divide the operand of the resulting VEC_DUPLICATE, and use
332 simplify_rtx to make a CONST_VECTOR. */
333 XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
334 XEXP (dup, 0), const1_rtx);
335 const_vec = simplify_rtx (dup);
337 if (op_mode == V4SFmode)
340 operands[0] = gen_lowpart (op_mode, operands[0]);
342 if (GET_MODE (const_vec) == op_mode)
343 operands[3] = const_vec;
345 operands[3] = gen_lowpart (op_mode, const_vec);
346 operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
;; Splitter: constants reachable as a shifted splat are built as
;; splat(val) then a vsldoi-style rotate; a negative vspltis_shifted
;; result selects the all-ones filler, a positive one selects zero.
350 [(set (match_operand:VM 0 "altivec_register_operand" "")
351 (match_operand:VM 1 "easy_vector_constant_vsldoi" ""))]
352 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()"
353 [(set (match_dup 2) (match_dup 3))
354 (set (match_dup 4) (match_dup 5))
356 (unspec:VM [(match_dup 2)
361 rtx op1 = operands[1];
362 int elt = (BYTES_BIG_ENDIAN) ? 0 : GET_MODE_NUNITS (<MODE>mode) - 1;
363 HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
364 rtx rtx_val = GEN_INT (val);
365 int shift = vspltis_shifted (op1);
367 gcc_assert (shift != 0);
368 operands[2] = gen_reg_rtx (<MODE>mode);
369 operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
370 operands[4] = gen_reg_rtx (<MODE>mode);
374 operands[5] = CONSTM1_RTX (<MODE>mode);
375 operands[6] = GEN_INT (-shift);
379 operands[5] = CONST0_RTX (<MODE>mode);
380 operands[6] = GEN_INT (shift);
;; Read the VRSAVE special register into a GPR.  SPR 256 is VRSAVE;
;; the mfvrsave mnemonic is the symbolic form of mfspr 256.
384 (define_insn "get_vrsave_internal"
385 [(set (match_operand:SI 0 "register_operand" "=r")
386 (unspec:SI [(reg:SI VRSAVE_REGNO)] UNSPEC_GET_VRSAVE))]
390 return "mfspr %0,256";
392 return "mfvrsave %0";
394 [(set_attr "type" "*")])
;; Write VRSAVE from a GPR.  Volatile unspec: the old VRSAVE value is
;; an input so the set is never deleted as dead.
396 (define_insn "*set_vrsave_internal"
397 [(match_parallel 0 "vrsave_operation"
398 [(set (reg:SI VRSAVE_REGNO)
399 (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
400 (reg:SI VRSAVE_REGNO)] UNSPECV_SET_VRSAVE))])]
404 return "mtspr 256,%1";
406 return "mtvrsave %1";
408 [(set_attr "type" "*")])
;; Darwin-only out-of-line prologue/epilogue helpers: a single branch
;; to save_world/rest_world that saves or restores all call-saved regs.
410 (define_insn "*save_world"
411 [(match_parallel 0 "save_world_operation"
412 [(clobber (reg:SI LR_REGNO))
413 (use (match_operand:SI 1 "call_operand" "s"))])]
414 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
416 [(set_attr "type" "branch")
417 (set_attr "length" "4")])
419 (define_insn "*restore_world"
420 [(match_parallel 0 "restore_world_operation"
422 (use (reg:SI LR_REGNO))
423 (use (match_operand:SI 1 "call_operand" "s"))
424 (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
425 "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
428 ;; The save_vregs and restore_vregs patterns don't use memory_operand
429 ;; because (plus (reg) (const_int)) is not a valid vector address.
430 ;; This way is more compact than describing exactly what happens in
431 ;; the out-of-line functions, ie. loading the constant into r11/r12
432 ;; then using indexed addressing, and requires less editing of rtl
433 ;; to describe the operation to dwarf2out_frame_debug_expr.
;; Branch to an out-of-line vector save routine using r11 as base.
434 (define_insn "*save_vregs_<mode>_r11"
435 [(match_parallel 0 "any_parallel_operand"
436 [(clobber (reg:P LR_REGNO))
437 (use (match_operand:P 1 "symbol_ref_operand" "s"))
440 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
441 (match_operand:P 3 "short_cint_operand" "I")))
442 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
445 [(set_attr "type" "branch")
446 (set_attr "length" "4")])
;; Same as above but with r12 as the base register.
448 (define_insn "*save_vregs_<mode>_r12"
449 [(match_parallel 0 "any_parallel_operand"
450 [(clobber (reg:P LR_REGNO))
451 (use (match_operand:P 1 "symbol_ref_operand" "s"))
454 (set (mem:V4SI (plus:P (match_operand:P 2 "gpc_reg_operand" "b")
455 (match_operand:P 3 "short_cint_operand" "I")))
456 (match_operand:V4SI 4 "altivec_register_operand" "v"))])]
459 [(set_attr "type" "branch")
460 (set_attr "length" "4")])
;; Out-of-line vector restore, r11 base.
462 (define_insn "*restore_vregs_<mode>_r11"
463 [(match_parallel 0 "any_parallel_operand"
464 [(clobber (reg:P LR_REGNO))
465 (use (match_operand:P 1 "symbol_ref_operand" "s"))
468 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
469 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
470 (match_operand:P 4 "short_cint_operand" "I"))))])]
473 [(set_attr "type" "branch")
474 (set_attr "length" "4")])
;; Out-of-line vector restore, r12 base.
476 (define_insn "*restore_vregs_<mode>_r12"
477 [(match_parallel 0 "any_parallel_operand"
478 [(clobber (reg:P LR_REGNO))
479 (use (match_operand:P 1 "symbol_ref_operand" "s"))
482 (set (match_operand:V4SI 2 "altivec_register_operand" "=v")
483 (mem:V4SI (plus:P (match_operand:P 3 "gpc_reg_operand" "b")
484 (match_operand:P 4 "short_cint_operand" "I"))))])]
487 [(set_attr "type" "branch")
488 (set_attr "length" "4")])
490 ;; Simple binary operations.
;; Modular (wrapping) vector integer add, vaddu{b,h,w,d}m.
493 (define_insn "add<mode>3"
494 [(set (match_operand:VI2 0 "register_operand" "=v")
495 (plus:VI2 (match_operand:VI2 1 "register_operand" "v")
496 (match_operand:VI2 2 "register_operand" "v")))]
498 "vaddu<VI_char>m %0,%1,%2"
499 [(set_attr "type" "vecsimple")])
;; Single-precision vector FP add (vaddfp).
501 (define_insn "*altivec_addv4sf3"
502 [(set (match_operand:V4SF 0 "register_operand" "=v")
503 (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
504 (match_operand:V4SF 2 "register_operand" "v")))]
505 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
507 [(set_attr "type" "vecfloat")])
;; vaddcuw: per-element carry-out of an unsigned word add.
509 (define_insn "altivec_vaddcuw"
510 [(set (match_operand:V4SI 0 "register_operand" "=v")
511 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
512 (match_operand:V4SI 2 "register_operand" "v")]
514 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
516 [(set_attr "type" "vecsimple")])
;; Saturating unsigned add; also sets the VSCR SAT bit (hence the
;; parallel set of VSCR_REGNO).
518 (define_insn "altivec_vaddu<VI_char>s"
519 [(set (match_operand:VI 0 "register_operand" "=v")
520 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
521 (match_operand:VI 2 "register_operand" "v")]
523 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
525 "vaddu<VI_char>s %0,%1,%2"
526 [(set_attr "type" "vecsimple")])
;; Saturating signed add, likewise updating VSCR.
528 (define_insn "altivec_vadds<VI_char>s"
529 [(set (match_operand:VI 0 "register_operand" "=v")
530 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
531 (match_operand:VI 2 "register_operand" "v")]
533 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
534 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
535 "vadds<VI_char>s %0,%1,%2"
536 [(set_attr "type" "vecsimple")])
;; Modular (wrapping) vector integer subtract, vsubu{b,h,w,d}m.
539 (define_insn "sub<mode>3"
540 [(set (match_operand:VI2 0 "register_operand" "=v")
541 (minus:VI2 (match_operand:VI2 1 "register_operand" "v")
542 (match_operand:VI2 2 "register_operand" "v")))]
544 "vsubu<VI_char>m %0,%1,%2"
545 [(set_attr "type" "vecsimple")])
;; Single-precision vector FP subtract (vsubfp).
547 (define_insn "*altivec_subv4sf3"
548 [(set (match_operand:V4SF 0 "register_operand" "=v")
549 (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
550 (match_operand:V4SF 2 "register_operand" "v")))]
551 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
553 [(set_attr "type" "vecfloat")])
;; vsubcuw: per-element borrow (carry) of an unsigned word subtract.
555 (define_insn "altivec_vsubcuw"
556 [(set (match_operand:V4SI 0 "register_operand" "=v")
557 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
558 (match_operand:V4SI 2 "register_operand" "v")]
560 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
562 [(set_attr "type" "vecsimple")])
;; Saturating unsigned subtract, updating VSCR.
564 (define_insn "altivec_vsubu<VI_char>s"
565 [(set (match_operand:VI 0 "register_operand" "=v")
566 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
567 (match_operand:VI 2 "register_operand" "v")]
569 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
570 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
571 "vsubu<VI_char>s %0,%1,%2"
572 [(set_attr "type" "vecsimple")])
;; Saturating signed subtract, updating VSCR.
574 (define_insn "altivec_vsubs<VI_char>s"
575 [(set (match_operand:VI 0 "register_operand" "=v")
576 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
577 (match_operand:VI 2 "register_operand" "v")]
579 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
580 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
581 "vsubs<VI_char>s %0,%1,%2"
582 [(set_attr "type" "vecsimple")])
;; Unsigned and signed vector averages (round-to-nearest).
585 (define_insn "altivec_vavgu<VI_char>"
586 [(set (match_operand:VI 0 "register_operand" "=v")
587 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
588 (match_operand:VI 2 "register_operand" "v")]
591 "vavgu<VI_char> %0,%1,%2"
592 [(set_attr "type" "vecsimple")])
594 (define_insn "altivec_vavgs<VI_char>"
595 [(set (match_operand:VI 0 "register_operand" "=v")
596 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
597 (match_operand:VI 2 "register_operand" "v")]
599 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
600 "vavgs<VI_char> %0,%1,%2"
601 [(set_attr "type" "vecsimple")])
;; vcmpbfp: FP bounds compare, result is a V4SI mask pair per element.
603 (define_insn "altivec_vcmpbfp"
604 [(set (match_operand:V4SI 0 "register_operand" "=v")
605 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
606 (match_operand:V4SF 2 "register_operand" "v")]
608 "VECTOR_UNIT_ALTIVEC_P (V4SImode)"
610 [(set_attr "type" "veccmp")])
;; Integer vector compares producing all-ones/all-zeros element masks.
612 (define_insn "*altivec_eq<mode>"
613 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
614 (eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
615 (match_operand:VI2 2 "altivec_register_operand" "v")))]
617 "vcmpequ<VI_char> %0,%1,%2"
618 [(set_attr "type" "veccmpfx")])
620 (define_insn "*altivec_gt<mode>"
621 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
622 (gt:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
623 (match_operand:VI2 2 "altivec_register_operand" "v")))]
625 "vcmpgts<VI_char> %0,%1,%2"
626 [(set_attr "type" "veccmpfx")])
628 (define_insn "*altivec_gtu<mode>"
629 [(set (match_operand:VI2 0 "altivec_register_operand" "=v")
630 (gtu:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
631 (match_operand:VI2 2 "altivec_register_operand" "v")))]
633 "vcmpgtu<VI_char> %0,%1,%2"
634 [(set_attr "type" "veccmpfx")])
;; Floating-point vector compares (vcmpeqfp/vcmpgtfp/vcmpgefp).
636 (define_insn "*altivec_eqv4sf"
637 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
638 (eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
639 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
640 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
642 [(set_attr "type" "veccmp")])
644 (define_insn "*altivec_gtv4sf"
645 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
646 (gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
647 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
648 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
650 [(set_attr "type" "veccmp")])
652 (define_insn "*altivec_gev4sf"
653 [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
654 (ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v")
655 (match_operand:V4SF 2 "altivec_register_operand" "v")))]
656 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
658 [(set_attr "type" "veccmp")])
;; vsel: per-bit select between operands 2 and 3 under the mask in
;; operand 1 (signed and unsigned compare-mode variants).
660 (define_insn "*altivec_vsel<mode>"
661 [(set (match_operand:VM 0 "altivec_register_operand" "=v")
663 (ne:CC (match_operand:VM 1 "altivec_register_operand" "v")
664 (match_operand:VM 4 "zero_constant" ""))
665 (match_operand:VM 2 "altivec_register_operand" "v")
666 (match_operand:VM 3 "altivec_register_operand" "v")))]
667 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
669 [(set_attr "type" "vecmove")])
671 (define_insn "*altivec_vsel<mode>_uns"
672 [(set (match_operand:VM 0 "altivec_register_operand" "=v")
674 (ne:CCUNS (match_operand:VM 1 "altivec_register_operand" "v")
675 (match_operand:VM 4 "zero_constant" ""))
676 (match_operand:VM 2 "altivec_register_operand" "v")
677 (match_operand:VM 3 "altivec_register_operand" "v")))]
678 "VECTOR_MEM_ALTIVEC_P (<MODE>mode)"
680 [(set_attr "type" "vecmove")])
682 ;; Fused multiply add.
684 (define_insn "*altivec_fmav4sf4"
685 [(set (match_operand:V4SF 0 "register_operand" "=v")
686 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
687 (match_operand:V4SF 2 "register_operand" "v")
688 (match_operand:V4SF 3 "register_operand" "v")))]
689 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
690 "vmaddfp %0,%1,%2,%3"
691 [(set_attr "type" "vecfloat")])
693 ;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
;; -0.0 (not +0.0) so the FMA addend cannot flip the sign of an exact
;; -0.0 product.  The constant is built as splat(-1) << 31 in V4SI.
695 (define_expand "altivec_mulv4sf3"
696 [(set (match_operand:V4SF 0 "register_operand" "")
697 (fma:V4SF (match_operand:V4SF 1 "register_operand" "")
698 (match_operand:V4SF 2 "register_operand" "")
700 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
704 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
705 neg0 = gen_reg_rtx (V4SImode);
706 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
707 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
709 operands[3] = gen_lowpart (V4SFmode, neg0);
712 ;; 32-bit integer multiplication
713 ;; A_high = Operand_0 & 0xFFFF0000 >> 16
714 ;; A_low = Operand_0 & 0xFFFF
715 ;; B_high = Operand_1 & 0xFFFF0000 >> 16
716 ;; B_low = Operand_1 & 0xFFFF
717 ;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16
719 ;; (define_insn "mulv4si3"
720 ;; [(set (match_operand:V4SI 0 "register_operand" "=v")
721 ;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
722 ;; (match_operand:V4SI 2 "register_operand" "v")))]
;; Direct V4SI multiply, available as a single instruction on Power8.
723 (define_insn "mulv4si3_p8"
724 [(set (match_operand:V4SI 0 "register_operand" "=v")
725 (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
726 (match_operand:V4SI 2 "register_operand" "v")))]
729 [(set_attr "type" "veccomplex")])
;; V4SI multiply: use the single Power8 instruction when available,
;; otherwise synthesize from 16-bit partial products per the scheme
;; documented above (vmulouh for the low product, vmsumuhm for the
;; cross terms, shifted left 16 and added).
731 (define_expand "mulv4si3"
732 [(use (match_operand:V4SI 0 "register_operand" ""))
733 (use (match_operand:V4SI 1 "register_operand" ""))
734 (use (match_operand:V4SI 2 "register_operand" ""))]
746 if (TARGET_P8_VECTOR)
748 emit_insn (gen_mulv4si3_p8 (operands[0], operands[1], operands[2]));
752 zero = gen_reg_rtx (V4SImode);
753 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
755 sixteen = gen_reg_rtx (V4SImode);
;; NOTE(review): gen_rtx_CONST_INT with a vector mode is unusual --
;; CONST_INT is normally VOIDmode (GEN_INT (-16)); verify against the
;; full source before changing.
756 emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
758 swap = gen_reg_rtx (V4SImode);
759 emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen));
761 one = gen_reg_rtx (V8HImode);
762 convert_move (one, operands[1], 0);
764 two = gen_reg_rtx (V8HImode);
765 convert_move (two, operands[2], 0);
767 small_swap = gen_reg_rtx (V8HImode);
768 convert_move (small_swap, swap, 0);
770 low_product = gen_reg_rtx (V4SImode);
771 emit_insn (gen_altivec_vmulouh (low_product, one, two));
773 high_product = gen_reg_rtx (V4SImode);
774 emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
776 emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));
778 emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
;; V8HI multiply via multiply-add with a zero addend (vmladduhm).
783 (define_expand "mulv8hi3"
784 [(use (match_operand:V8HI 0 "register_operand" ""))
785 (use (match_operand:V8HI 1 "register_operand" ""))
786 (use (match_operand:V8HI 2 "register_operand" ""))]
789 rtx zero = gen_reg_rtx (V8HImode);
791 emit_insn (gen_altivec_vspltish (zero, const0_rtx));
792 emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero));
798 ;; Fused multiply subtract
;; vnmsubfp: negated multiply-subtract, -((a*b) - c).
799 (define_insn "*altivec_vnmsubfp"
800 [(set (match_operand:V4SF 0 "register_operand" "=v")
802 (fma:V4SF (match_operand:V4SF 1 "register_operand" "v")
803 (match_operand:V4SF 2 "register_operand" "v")
805 (match_operand:V4SF 3 "register_operand" "v")))))]
806 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
807 "vnmsubfp %0,%1,%2,%3"
808 [(set_attr "type" "vecfloat")])
;; Multiply-sum family: widening multiply of sub-word elements,
;; summed into word lanes plus the V4SI accumulator (operand 3).
810 (define_insn "altivec_vmsumu<VI_char>m"
811 [(set (match_operand:V4SI 0 "register_operand" "=v")
812 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
813 (match_operand:VIshort 2 "register_operand" "v")
814 (match_operand:V4SI 3 "register_operand" "v")]
817 "vmsumu<VI_char>m %0,%1,%2,%3"
818 [(set_attr "type" "veccomplex")])
820 (define_insn "altivec_vmsumm<VI_char>m"
821 [(set (match_operand:V4SI 0 "register_operand" "=v")
822 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
823 (match_operand:VIshort 2 "register_operand" "v")
824 (match_operand:V4SI 3 "register_operand" "v")]
827 "vmsumm<VI_char>m %0,%1,%2,%3"
828 [(set_attr "type" "veccomplex")])
830 (define_insn "altivec_vmsumshm"
831 [(set (match_operand:V4SI 0 "register_operand" "=v")
832 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
833 (match_operand:V8HI 2 "register_operand" "v")
834 (match_operand:V4SI 3 "register_operand" "v")]
837 "vmsumshm %0,%1,%2,%3"
838 [(set_attr "type" "veccomplex")])
;; Saturating multiply-sum variants; these also set the VSCR SAT bit.
840 (define_insn "altivec_vmsumuhs"
841 [(set (match_operand:V4SI 0 "register_operand" "=v")
842 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
843 (match_operand:V8HI 2 "register_operand" "v")
844 (match_operand:V4SI 3 "register_operand" "v")]
846 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
848 "vmsumuhs %0,%1,%2,%3"
849 [(set_attr "type" "veccomplex")])
851 (define_insn "altivec_vmsumshs"
852 [(set (match_operand:V4SI 0 "register_operand" "=v")
853 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
854 (match_operand:V8HI 2 "register_operand" "v")
855 (match_operand:V4SI 3 "register_operand" "v")]
857 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
859 "vmsumshs %0,%1,%2,%3"
860 [(set_attr "type" "veccomplex")])
;; Element-wise unsigned/signed max and min, integer and V4SF forms.
864 (define_insn "umax<mode>3"
865 [(set (match_operand:VI2 0 "register_operand" "=v")
866 (umax:VI2 (match_operand:VI2 1 "register_operand" "v")
867 (match_operand:VI2 2 "register_operand" "v")))]
869 "vmaxu<VI_char> %0,%1,%2"
870 [(set_attr "type" "vecsimple")])
872 (define_insn "smax<mode>3"
873 [(set (match_operand:VI2 0 "register_operand" "=v")
874 (smax:VI2 (match_operand:VI2 1 "register_operand" "v")
875 (match_operand:VI2 2 "register_operand" "v")))]
877 "vmaxs<VI_char> %0,%1,%2"
878 [(set_attr "type" "vecsimple")])
880 (define_insn "*altivec_smaxv4sf3"
881 [(set (match_operand:V4SF 0 "register_operand" "=v")
882 (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
883 (match_operand:V4SF 2 "register_operand" "v")))]
884 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
886 [(set_attr "type" "veccmp")])
888 (define_insn "umin<mode>3"
889 [(set (match_operand:VI2 0 "register_operand" "=v")
890 (umin:VI2 (match_operand:VI2 1 "register_operand" "v")
891 (match_operand:VI2 2 "register_operand" "v")))]
893 "vminu<VI_char> %0,%1,%2"
894 [(set_attr "type" "vecsimple")])
896 (define_insn "smin<mode>3"
897 [(set (match_operand:VI2 0 "register_operand" "=v")
898 (smin:VI2 (match_operand:VI2 1 "register_operand" "v")
899 (match_operand:VI2 2 "register_operand" "v")))]
901 "vmins<VI_char> %0,%1,%2"
902 [(set_attr "type" "vecsimple")])
904 (define_insn "*altivec_sminv4sf3"
905 [(set (match_operand:V4SF 0 "register_operand" "=v")
906 (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
907 (match_operand:V4SF 2 "register_operand" "v")))]
908 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
910 [(set_attr "type" "veccmp")])
;; vmhaddshs/vmhraddshs: saturating high-half multiply-add (the latter
;; with rounding); both update the VSCR SAT bit.
912 (define_insn "altivec_vmhaddshs"
913 [(set (match_operand:V8HI 0 "register_operand" "=v")
914 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
915 (match_operand:V8HI 2 "register_operand" "v")
916 (match_operand:V8HI 3 "register_operand" "v")]
918 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
920 "vmhaddshs %0,%1,%2,%3"
921 [(set_attr "type" "veccomplex")])
923 (define_insn "altivec_vmhraddshs"
924 [(set (match_operand:V8HI 0 "register_operand" "=v")
925 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
926 (match_operand:V8HI 2 "register_operand" "v")
927 (match_operand:V8HI 3 "register_operand" "v")]
929 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
931 "vmhraddshs %0,%1,%2,%3"
932 [(set_attr "type" "veccomplex")])
;; vmladduhm: modular halfword multiply-add, expressed as real RTL
;; (plus of mult) so the optimizers can reason about it.
934 (define_insn "fmav8hi4"
935 [(set (match_operand:V8HI 0 "register_operand" "=v")
936 (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v")
937 (match_operand:V8HI 2 "register_operand" "v"))
938 (match_operand:V8HI 3 "register_operand" "v")))]
940 "vmladduhm %0,%1,%2,%3"
941 [(set_attr "type" "veccomplex")])
;; Merge-high byte expander.  Emits a VEC_SELECT over a double-wide
;; concat; for LE with -maltivec=be the operands and element indices
;; are swapped so the BE element numbering is honored.
943 (define_expand "altivec_vmrghb"
944 [(use (match_operand:V16QI 0 "register_operand" ""))
945 (use (match_operand:V16QI 1 "register_operand" ""))
946 (use (match_operand:V16QI 2 "register_operand" ""))]
952 /* Special handling for LE with -maltivec=be. */
953 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
955 v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
956 GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
957 GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
958 GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
959 x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
963 v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
964 GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
965 GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
966 GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
967 x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
970 x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
971 emit_insn (gen_rtx_SET (operands[0], x));
;; Matcher for the RTL above; on little-endian the same semantics are
;; implemented by vmrglb with the source operands swapped.
975 (define_insn "*altivec_vmrghb_internal"
976 [(set (match_operand:V16QI 0 "register_operand" "=v")
979 (match_operand:V16QI 1 "register_operand" "v")
980 (match_operand:V16QI 2 "register_operand" "v"))
981 (parallel [(const_int 0) (const_int 16)
982 (const_int 1) (const_int 17)
983 (const_int 2) (const_int 18)
984 (const_int 3) (const_int 19)
985 (const_int 4) (const_int 20)
986 (const_int 5) (const_int 21)
987 (const_int 6) (const_int 22)
988 (const_int 7) (const_int 23)])))]
991 if (BYTES_BIG_ENDIAN)
992 return "vmrghb %0,%1,%2";
994 return "vmrglb %0,%2,%1";
996 [(set_attr "type" "vecperm")])
;; Raw vmrghb, endian-agnostic, for built-in expansion.
998 (define_insn "altivec_vmrghb_direct"
999 [(set (match_operand:V16QI 0 "register_operand" "=v")
1000 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1001 (match_operand:V16QI 2 "register_operand" "v")]
1002 UNSPEC_VMRGH_DIRECT))]
1005 [(set_attr "type" "vecperm")])
;; Merge-high halfword: same structure as the byte version.
1007 (define_expand "altivec_vmrghh"
1008 [(use (match_operand:V8HI 0 "register_operand" ""))
1009 (use (match_operand:V8HI 1 "register_operand" ""))
1010 (use (match_operand:V8HI 2 "register_operand" ""))]
1016 /* Special handling for LE with -maltivec=be. */
1017 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1019 v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
1020 GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
1021 x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
1025 v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
1026 GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
1027 x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
1030 x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1031 emit_insn (gen_rtx_SET (operands[0], x));
1035 (define_insn "*altivec_vmrghh_internal"
1036 [(set (match_operand:V8HI 0 "register_operand" "=v")
1039 (match_operand:V8HI 1 "register_operand" "v")
1040 (match_operand:V8HI 2 "register_operand" "v"))
1041 (parallel [(const_int 0) (const_int 8)
1042 (const_int 1) (const_int 9)
1043 (const_int 2) (const_int 10)
1044 (const_int 3) (const_int 11)])))]
1047 if (BYTES_BIG_ENDIAN)
1048 return "vmrghh %0,%1,%2";
1050 return "vmrglh %0,%2,%1";
1052 [(set_attr "type" "vecperm")])
1054 (define_insn "altivec_vmrghh_direct"
1055 [(set (match_operand:V8HI 0 "register_operand" "=v")
1056 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
1057 (match_operand:V8HI 2 "register_operand" "v")]
1058 UNSPEC_VMRGH_DIRECT))]
1061 [(set_attr "type" "vecperm")])
;; Merge-high word: same structure as the byte/halfword versions.
1063 (define_expand "altivec_vmrghw"
1064 [(use (match_operand:V4SI 0 "register_operand" ""))
1065 (use (match_operand:V4SI 1 "register_operand" ""))
1066 (use (match_operand:V4SI 2 "register_operand" ""))]
1067 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1072 /* Special handling for LE with -maltivec=be. */
1073 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1075 v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
1076 x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
1080 v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
1081 x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
1084 x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1085 emit_insn (gen_rtx_SET (operands[0], x));
1089 (define_insn "*altivec_vmrghw_internal"
1090 [(set (match_operand:V4SI 0 "register_operand" "=v")
1093 (match_operand:V4SI 1 "register_operand" "v")
1094 (match_operand:V4SI 2 "register_operand" "v"))
1095 (parallel [(const_int 0) (const_int 4)
1096 (const_int 1) (const_int 5)])))]
1097 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1099 if (BYTES_BIG_ENDIAN)
1100 return "vmrghw %0,%1,%2";
1102 return "vmrglw %0,%2,%1";
1104 [(set_attr "type" "vecperm")])
1106 (define_insn "altivec_vmrghw_direct"
1107 [(set (match_operand:V4SI 0 "register_operand" "=v")
1108 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1109 (match_operand:V4SI 2 "register_operand" "v")]
1110 UNSPEC_VMRGH_DIRECT))]
1113 [(set_attr "type" "vecperm")])
;; V4SF merge-high shares the vmrghw/vmrglw instructions.
1115 (define_insn "*altivec_vmrghsf"
1116 [(set (match_operand:V4SF 0 "register_operand" "=v")
1119 (match_operand:V4SF 1 "register_operand" "v")
1120 (match_operand:V4SF 2 "register_operand" "v"))
1121 (parallel [(const_int 0) (const_int 4)
1122 (const_int 1) (const_int 5)])))]
1123 "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
1125 if (BYTES_BIG_ENDIAN)
1126 return "vmrghw %0,%1,%2";
1128 return "vmrglw %0,%2,%1";
1130 [(set_attr "type" "vecperm")])
;; vmrglb (merge-low byte) family: expander, canonical-RTL insn, and a
;; "direct" UNSPEC insn.  The expander builds the vec_select/vec_concat
;; form; for LE with -maltivec=be it selects the merge-HIGH element
;; indices (0,16,1,17,...) with the operands swapped, so the RTL reflects
;; the big-endian element order the user asked for.
1132 (define_expand "altivec_vmrglb"
1133 [(use (match_operand:V16QI 0 "register_operand" ""))
1134 (use (match_operand:V16QI 1 "register_operand" ""))
1135 (use (match_operand:V16QI 2 "register_operand" ""))]
1141 /* Special handling for LE with -maltivec=be. */
1142 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1144 v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
1145 GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
1146 GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
1147 GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
1148 x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
1152 v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
1153 GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
1154 GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
1155 GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
1156 x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
1159 x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1160 emit_insn (gen_rtx_SET (operands[0], x));
;; Canonical merge-low byte pattern; on true LE hardware the same RTL is
;; implemented with vmrghb and swapped inputs.
1164 (define_insn "*altivec_vmrglb_internal"
1165 [(set (match_operand:V16QI 0 "register_operand" "=v")
1168 (match_operand:V16QI 1 "register_operand" "v")
1169 (match_operand:V16QI 2 "register_operand" "v"))
1170 (parallel [(const_int 8) (const_int 24)
1171 (const_int 9) (const_int 25)
1172 (const_int 10) (const_int 26)
1173 (const_int 11) (const_int 27)
1174 (const_int 12) (const_int 28)
1175 (const_int 13) (const_int 29)
1176 (const_int 14) (const_int 30)
1177 (const_int 15) (const_int 31)])))]
1180 if (BYTES_BIG_ENDIAN)
1181 return "vmrglb %0,%1,%2";
1183 return "vmrghb %0,%2,%1";
1185 [(set_attr "type" "vecperm")])
;; Raw vmrglb via UNSPEC — no endian adjustment (see vmrghw_direct above
;; for the rationale of the "_direct" forms).
1187 (define_insn "altivec_vmrglb_direct"
1188 [(set (match_operand:V16QI 0 "register_operand" "=v")
1189 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1190 (match_operand:V16QI 2 "register_operand" "v")]
1191 UNSPEC_VMRGL_DIRECT))]
1194 [(set_attr "type" "vecperm")])
;; vmrglh (merge-low halfword) family — same three-pattern structure as
;; vmrglb above, with 8 halfword elements instead of 16 bytes.
1196 (define_expand "altivec_vmrglh"
1197 [(use (match_operand:V8HI 0 "register_operand" ""))
1198 (use (match_operand:V8HI 1 "register_operand" ""))
1199 (use (match_operand:V8HI 2 "register_operand" ""))]
1205 /* Special handling for LE with -maltivec=be. */
1206 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1208 v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
1209 GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
1210 x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
1214 v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
1215 GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
1216 x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
1219 x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1220 emit_insn (gen_rtx_SET (operands[0], x));
;; Canonical merge-low halfword; LE uses vmrghh with swapped inputs.
1224 (define_insn "*altivec_vmrglh_internal"
1225 [(set (match_operand:V8HI 0 "register_operand" "=v")
1228 (match_operand:V8HI 1 "register_operand" "v")
1229 (match_operand:V8HI 2 "register_operand" "v"))
1230 (parallel [(const_int 4) (const_int 12)
1231 (const_int 5) (const_int 13)
1232 (const_int 6) (const_int 14)
1233 (const_int 7) (const_int 15)])))]
1236 if (BYTES_BIG_ENDIAN)
1237 return "vmrglh %0,%1,%2";
1239 return "vmrghh %0,%2,%1";
1241 [(set_attr "type" "vecperm")])
;; Raw vmrglh via UNSPEC — no endian adjustment.
1243 (define_insn "altivec_vmrglh_direct"
1244 [(set (match_operand:V8HI 0 "register_operand" "=v")
1245 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
1246 (match_operand:V8HI 2 "register_operand" "v")]
1247 UNSPEC_VMRGL_DIRECT))]
1250 [(set_attr "type" "vecperm")])
;; vmrglw (merge-low word) family — same structure as vmrglb/vmrglh, with
;; 4 word elements.
1252 (define_expand "altivec_vmrglw"
1253 [(use (match_operand:V4SI 0 "register_operand" ""))
1254 (use (match_operand:V4SI 1 "register_operand" ""))
1255 (use (match_operand:V4SI 2 "register_operand" ""))]
1256 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1261 /* Special handling for LE with -maltivec=be. */
1262 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1264 v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
1265 x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
1269 v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
1270 x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
1273 x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
1274 emit_insn (gen_rtx_SET (operands[0], x));
;; Canonical merge-low word; LE uses vmrghw with swapped inputs.
1278 (define_insn "*altivec_vmrglw_internal"
1279 [(set (match_operand:V4SI 0 "register_operand" "=v")
1282 (match_operand:V4SI 1 "register_operand" "v")
1283 (match_operand:V4SI 2 "register_operand" "v"))
1284 (parallel [(const_int 2) (const_int 6)
1285 (const_int 3) (const_int 7)])))]
1286 "VECTOR_MEM_ALTIVEC_P (V4SImode)"
1288 if (BYTES_BIG_ENDIAN)
1289 return "vmrglw %0,%1,%2";
1291 return "vmrghw %0,%2,%1";
1293 [(set_attr "type" "vecperm")])
;; Raw vmrglw via UNSPEC — no endian adjustment.
1295 (define_insn "altivec_vmrglw_direct"
1296 [(set (match_operand:V4SI 0 "register_operand" "=v")
1297 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1298 (match_operand:V4SI 2 "register_operand" "v")]
1299 UNSPEC_VMRGL_DIRECT))]
1302 [(set_attr "type" "vecperm")])
;; Merge-low of V4SF — float counterpart of *altivec_vmrglw_internal;
;; vmrglw/vmrghw operate on 32-bit lanes regardless of int/float type.
1304 (define_insn "*altivec_vmrglsf"
1305 [(set (match_operand:V4SF 0 "register_operand" "=v")
1308 (match_operand:V4SF 1 "register_operand" "v")
1309 (match_operand:V4SF 2 "register_operand" "v"))
1310 (parallel [(const_int 2) (const_int 6)
1311 (const_int 3) (const_int 7)])))]
1312 "VECTOR_MEM_ALTIVEC_P (V4SFmode)"
1314 if (BYTES_BIG_ENDIAN)
1315 return "vmrglw %0,%1,%2";
1317 return "vmrghw %0,%2,%1";
1319 [(set_attr "type" "vecperm")])
1321 ;; Power8 vector merge two V4SF/V4SI even words to V4SF
1322 (define_insn "p8_vmrgew_<mode>"
1323 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1325 (vec_concat:<VS_double>
1326 (match_operand:VSX_W 1 "register_operand" "v")
1327 (match_operand:VSX_W 2 "register_operand" "v"))
1328 (parallel [(const_int 0) (const_int 4)
1329 (const_int 2) (const_int 6)])))]
1332 if (BYTES_BIG_ENDIAN)
1333 return "vmrgew %0,%1,%2";
1335 return "vmrgow %0,%2,%1";
1337 [(set_attr "type" "vecperm")])
;; Power8 merge odd words; on LE the even/odd roles of the hardware
;; instruction flip, so vmrgew with swapped inputs is used.
1339 (define_insn "p8_vmrgow"
1340 [(set (match_operand:V4SI 0 "register_operand" "=v")
1343 (match_operand:V4SI 1 "register_operand" "v")
1344 (match_operand:V4SI 2 "register_operand" "v"))
1345 (parallel [(const_int 1) (const_int 5)
1346 (const_int 3) (const_int 7)])))]
1349 if (BYTES_BIG_ENDIAN)
1350 return "vmrgow %0,%1,%2";
1352 return "vmrgew %0,%2,%1";
1354 [(set_attr "type" "vecperm")])
;; Raw vmrgew via UNSPEC — no endian adjustment.
1356 (define_insn "p8_vmrgew_<mode>_direct"
1357 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1358 (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
1359 (match_operand:VSX_W 2 "register_operand" "v")]
1360 UNSPEC_VMRGEW_DIRECT))]
1363 [(set_attr "type" "vecperm")])
;; Raw vmrgow via UNSPEC — no endian adjustment.
1365 (define_insn "p8_vmrgow_<mode>_direct"
1366 [(set (match_operand:VSX_W 0 "register_operand" "=v")
1367 (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
1368 (match_operand:VSX_W 2 "register_operand" "v")]
1369 UNSPEC_VMRGOW_DIRECT))]
1372 [(set_attr "type" "vecperm")])
;; Widening multiply even/odd expanders.  "Even"/"odd" refer to the
;; natural (big-endian) element numbering: on LE targets the hardware
;; even-multiply instruction actually multiplies the odd-numbered
;; elements of the RTL vector and vice versa, so each expander dispatches
;; to the opposite vmule*/vmulo* insn when VECTOR_ELT_ORDER_BIG is false.
1374 (define_expand "vec_widen_umult_even_v16qi"
1375 [(use (match_operand:V8HI 0 "register_operand" ""))
1376 (use (match_operand:V16QI 1 "register_operand" ""))
1377 (use (match_operand:V16QI 2 "register_operand" ""))]
1380 if (VECTOR_ELT_ORDER_BIG)
1381 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1383 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1387 (define_expand "vec_widen_smult_even_v16qi"
1388 [(use (match_operand:V8HI 0 "register_operand" ""))
1389 (use (match_operand:V16QI 1 "register_operand" ""))
1390 (use (match_operand:V16QI 2 "register_operand" ""))]
1393 if (VECTOR_ELT_ORDER_BIG)
1394 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1396 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1400 (define_expand "vec_widen_umult_even_v8hi"
1401 [(use (match_operand:V4SI 0 "register_operand" ""))
1402 (use (match_operand:V8HI 1 "register_operand" ""))
1403 (use (match_operand:V8HI 2 "register_operand" ""))]
1406 if (VECTOR_ELT_ORDER_BIG)
1407 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1409 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1413 (define_expand "vec_widen_smult_even_v8hi"
1414 [(use (match_operand:V4SI 0 "register_operand" ""))
1415 (use (match_operand:V8HI 1 "register_operand" ""))
1416 (use (match_operand:V8HI 2 "register_operand" ""))]
1419 if (VECTOR_ELT_ORDER_BIG)
1420 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1422 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
;; V4SI -> V2DI widening multiplies (vmuleuw etc. are ISA 2.07/Power8).
1426 (define_expand "vec_widen_umult_even_v4si"
1427 [(use (match_operand:V2DI 0 "register_operand"))
1428 (use (match_operand:V4SI 1 "register_operand"))
1429 (use (match_operand:V4SI 2 "register_operand"))]
1432 if (VECTOR_ELT_ORDER_BIG)
1433 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1435 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1439 (define_expand "vec_widen_smult_even_v4si"
1440 [(use (match_operand:V2DI 0 "register_operand"))
1441 (use (match_operand:V4SI 1 "register_operand"))
1442 (use (match_operand:V4SI 2 "register_operand"))]
1445 if (VECTOR_ELT_ORDER_BIG)
1446 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
1448 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
;; Odd-element variants: mirror images of the even expanders above.
1452 (define_expand "vec_widen_umult_odd_v16qi"
1453 [(use (match_operand:V8HI 0 "register_operand" ""))
1454 (use (match_operand:V16QI 1 "register_operand" ""))
1455 (use (match_operand:V16QI 2 "register_operand" ""))]
1458 if (VECTOR_ELT_ORDER_BIG)
1459 emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
1461 emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
1465 (define_expand "vec_widen_smult_odd_v16qi"
1466 [(use (match_operand:V8HI 0 "register_operand" ""))
1467 (use (match_operand:V16QI 1 "register_operand" ""))
1468 (use (match_operand:V16QI 2 "register_operand" ""))]
1471 if (VECTOR_ELT_ORDER_BIG)
1472 emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
1474 emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
1478 (define_expand "vec_widen_umult_odd_v8hi"
1479 [(use (match_operand:V4SI 0 "register_operand" ""))
1480 (use (match_operand:V8HI 1 "register_operand" ""))
1481 (use (match_operand:V8HI 2 "register_operand" ""))]
1484 if (VECTOR_ELT_ORDER_BIG)
1485 emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
1487 emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
1491 (define_expand "vec_widen_smult_odd_v8hi"
1492 [(use (match_operand:V4SI 0 "register_operand" ""))
1493 (use (match_operand:V8HI 1 "register_operand" ""))
1494 (use (match_operand:V8HI 2 "register_operand" ""))]
1497 if (VECTOR_ELT_ORDER_BIG)
1498 emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
1500 emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
1504 (define_expand "vec_widen_umult_odd_v4si"
1505 [(use (match_operand:V2DI 0 "register_operand"))
1506 (use (match_operand:V4SI 1 "register_operand"))
1507 (use (match_operand:V4SI 2 "register_operand"))]
1510 if (VECTOR_ELT_ORDER_BIG)
1511 emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
1513 emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
1517 (define_expand "vec_widen_smult_odd_v4si"
1518 [(use (match_operand:V2DI 0 "register_operand"))
1519 (use (match_operand:V4SI 1 "register_operand"))
1520 (use (match_operand:V4SI 2 "register_operand"))]
1523 if (VECTOR_ELT_ORDER_BIG)
1524 emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
1526 emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
;; Raw widening-multiply insns (vmule*/vmulo*), one per element width and
;; signedness.  Each takes two narrow vectors and produces the
;; double-width products of the even (vmule*) or odd (vmulo*) elements in
;; hardware numbering; the vec_widen_* expanders above pick the right one
;; per endianness.  Hidden behind UNSPECs, so no endian correction here.
1530 (define_insn "altivec_vmuleub"
1531 [(set (match_operand:V8HI 0 "register_operand" "=v")
1532 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1533 (match_operand:V16QI 2 "register_operand" "v")]
1537 [(set_attr "type" "veccomplex")])
1539 (define_insn "altivec_vmuloub"
1540 [(set (match_operand:V8HI 0 "register_operand" "=v")
1541 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1542 (match_operand:V16QI 2 "register_operand" "v")]
1546 [(set_attr "type" "veccomplex")])
1548 (define_insn "altivec_vmulesb"
1549 [(set (match_operand:V8HI 0 "register_operand" "=v")
1550 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1551 (match_operand:V16QI 2 "register_operand" "v")]
1555 [(set_attr "type" "veccomplex")])
1557 (define_insn "altivec_vmulosb"
1558 [(set (match_operand:V8HI 0 "register_operand" "=v")
1559 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
1560 (match_operand:V16QI 2 "register_operand" "v")]
1564 [(set_attr "type" "veccomplex")])
;; Halfword -> word variants.
1566 (define_insn "altivec_vmuleuh"
1567 [(set (match_operand:V4SI 0 "register_operand" "=v")
1568 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1569 (match_operand:V8HI 2 "register_operand" "v")]
1573 [(set_attr "type" "veccomplex")])
1575 (define_insn "altivec_vmulouh"
1576 [(set (match_operand:V4SI 0 "register_operand" "=v")
1577 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1578 (match_operand:V8HI 2 "register_operand" "v")]
1582 [(set_attr "type" "veccomplex")])
1584 (define_insn "altivec_vmulesh"
1585 [(set (match_operand:V4SI 0 "register_operand" "=v")
1586 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1587 (match_operand:V8HI 2 "register_operand" "v")]
1591 [(set_attr "type" "veccomplex")])
1593 (define_insn "altivec_vmulosh"
1594 [(set (match_operand:V4SI 0 "register_operand" "=v")
1595 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
1596 (match_operand:V8HI 2 "register_operand" "v")]
1600 [(set_attr "type" "veccomplex")])
;; Word -> doubleword variants (ISA 2.07/Power8).
1602 (define_insn "altivec_vmuleuw"
1603 [(set (match_operand:V2DI 0 "register_operand" "=v")
1604 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1605 (match_operand:V4SI 2 "register_operand" "v")]
1609 [(set_attr "type" "veccomplex")])
1611 (define_insn "altivec_vmulouw"
1612 [(set (match_operand:V2DI 0 "register_operand" "=v")
1613 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1614 (match_operand:V4SI 2 "register_operand" "v")]
1618 [(set_attr "type" "veccomplex")])
1620 (define_insn "altivec_vmulesw"
1621 [(set (match_operand:V2DI 0 "register_operand" "=v")
1622 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1623 (match_operand:V4SI 2 "register_operand" "v")]
1627 [(set_attr "type" "veccomplex")])
1629 (define_insn "altivec_vmulosw"
1630 [(set (match_operand:V2DI 0 "register_operand" "=v")
1631 (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "v")
1632 (match_operand:V4SI 2 "register_operand" "v")]
1636 [(set_attr "type" "veccomplex")])
1638 ;; Vector pack/unpack
;; Pack insns.  Each combines two wide vectors into one narrow vector;
;; when the vector element order is big-endian the operands go through in
;; source order, otherwise they are swapped so the packed halves land in
;; the elements the (BE-numbered) RTL expects.
1639 (define_insn "altivec_vpkpx"
1640 [(set (match_operand:V8HI 0 "register_operand" "=v")
1641 (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
1642 (match_operand:V4SI 2 "register_operand" "v")]
1647 if (VECTOR_ELT_ORDER_BIG)
1648 return \"vpkpx %0,%1,%2\";
1650 return \"vpkpx %0,%2,%1\";
1652 [(set_attr "type" "vecperm")])
;; Pack signed -> signed with saturation.
1654 (define_insn "altivec_vpks<VI_char>ss"
1655 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1656 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1657 (match_operand:VP 2 "register_operand" "v")]
1658 UNSPEC_VPACK_SIGN_SIGN_SAT))]
1662 if (VECTOR_ELT_ORDER_BIG)
1663 return \"vpks<VI_char>ss %0,%1,%2\";
1665 return \"vpks<VI_char>ss %0,%2,%1\";
1667 [(set_attr "type" "vecperm")])
;; Pack signed -> unsigned with saturation.
1669 (define_insn "altivec_vpks<VI_char>us"
1670 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1671 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1672 (match_operand:VP 2 "register_operand" "v")]
1673 UNSPEC_VPACK_SIGN_UNS_SAT))]
1677 if (VECTOR_ELT_ORDER_BIG)
1678 return \"vpks<VI_char>us %0,%1,%2\";
1680 return \"vpks<VI_char>us %0,%2,%1\";
1682 [(set_attr "type" "vecperm")])
;; Pack unsigned -> unsigned with saturation.
1684 (define_insn "altivec_vpku<VI_char>us"
1685 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1686 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1687 (match_operand:VP 2 "register_operand" "v")]
1688 UNSPEC_VPACK_UNS_UNS_SAT))]
1692 if (VECTOR_ELT_ORDER_BIG)
1693 return \"vpku<VI_char>us %0,%1,%2\";
1695 return \"vpku<VI_char>us %0,%2,%1\";
1697 [(set_attr "type" "vecperm")])
;; Pack unsigned modulo (truncating, no saturation).
1699 (define_insn "altivec_vpku<VI_char>um"
1700 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1701 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1702 (match_operand:VP 2 "register_operand" "v")]
1703 UNSPEC_VPACK_UNS_UNS_MOD))]
1707 if (VECTOR_ELT_ORDER_BIG)
1708 return \"vpku<VI_char>um %0,%1,%2\";
1710 return \"vpku<VI_char>um %0,%2,%1\";
1712 [(set_attr "type" "vecperm")])
;; Direct form: keyed on BYTES_BIG_ENDIAN (true endianness) rather than
;; the -maltivec=be-adjusted VECTOR_ELT_ORDER_BIG used above.
1714 (define_insn "altivec_vpku<VI_char>um_direct"
1715 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
1716 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
1717 (match_operand:VP 2 "register_operand" "v")]
1718 UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))]
1722 if (BYTES_BIG_ENDIAN)
1723 return \"vpku<VI_char>um %0,%1,%2\";
1725 return \"vpku<VI_char>um %0,%2,%1\";
1727 [(set_attr "type" "vecperm")])
;; Element-wise rotate left; per-element rotate counts come from the
;; corresponding element of operand 2.  Rotation is endian-agnostic, so
;; no swap logic is needed.
1729 (define_insn "*altivec_vrl<VI_char>"
1730 [(set (match_operand:VI2 0 "register_operand" "=v")
1731 (rotate:VI2 (match_operand:VI2 1 "register_operand" "v")
1732 (match_operand:VI2 2 "register_operand" "v")))]
1734 "vrl<VI_char> %0,%1,%2"
1735 [(set_attr "type" "vecsimple")])
;; Rotate-left then mask-insert (vrlwmi/vrldmi, ISA 3.0); operand 1 is
;; tied to the output ("0" constraint) because the instruction inserts
;; into the existing destination contents.
1737 (define_insn "altivec_vrl<VI_char>mi"
1738 [(set (match_operand:VIlong 0 "register_operand" "=v")
1739 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "0")
1740 (match_operand:VIlong 2 "register_operand" "v")
1741 (match_operand:VIlong 3 "register_operand" "v")]
1744 "vrl<VI_char>mi %0,%2,%3"
1745 [(set_attr "type" "veclogical")])
;; Rotate-left then AND with mask (vrlwnm/vrldnm, ISA 3.0).
1747 (define_insn "altivec_vrl<VI_char>nm"
1748 [(set (match_operand:VIlong 0 "register_operand" "=v")
1749 (unspec:VIlong [(match_operand:VIlong 1 "register_operand" "v")
1750 (match_operand:VIlong 2 "register_operand" "v")]
1753 "vrl<VI_char>nm %0,%1,%2"
1754 [(set_attr "type" "veclogical")])
;; Whole-register shift left (vsl): shifts the full 128-bit register by
;; the bit count held in the low bits of operand 2.
1756 (define_insn "altivec_vsl"
1757 [(set (match_operand:V4SI 0 "register_operand" "=v")
1758 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1759 (match_operand:V4SI 2 "register_operand" "v")]
1763 [(set_attr "type" "vecperm")])
;; Shift left by octet (vslo): whole-register shift by a byte count.
1765 (define_insn "altivec_vslo"
1766 [(set (match_operand:V4SI 0 "register_operand" "=v")
1767 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1768 (match_operand:V4SI 2 "register_operand" "v")]
1772 [(set_attr "type" "vecperm")])
;; NOTE(review): the define_insn header for this pattern (orig line 1774)
;; is not visible here; presumably the ISA 3.0 variable byte shift
;; (vslv) — confirm against the full file.
1775 [(set (match_operand:V16QI 0 "register_operand" "=v")
1776 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1777 (match_operand:V16QI 2 "register_operand" "v")]
1781 [(set_attr "type" "vecsimple")])
;; NOTE(review): header (orig line 1783) also not visible; presumably the
;; matching vsrv pattern — confirm against the full file.
1784 [(set (match_operand:V16QI 0 "register_operand" "=v")
1785 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1786 (match_operand:V16QI 2 "register_operand" "v")]
1790 [(set_attr "type" "vecsimple")])
;; Per-element shifts using standard RTL codes (ashift/lshiftrt/ashiftrt)
;; so the middle end can recognize and combine them.
1792 (define_insn "*altivec_vsl<VI_char>"
1793 [(set (match_operand:VI2 0 "register_operand" "=v")
1794 (ashift:VI2 (match_operand:VI2 1 "register_operand" "v")
1795 (match_operand:VI2 2 "register_operand" "v")))]
1797 "vsl<VI_char> %0,%1,%2"
1798 [(set_attr "type" "vecsimple")])
;; Logical (zero-filling) right shift per element.
1800 (define_insn "*altivec_vsr<VI_char>"
1801 [(set (match_operand:VI2 0 "register_operand" "=v")
1802 (lshiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
1803 (match_operand:VI2 2 "register_operand" "v")))]
1805 "vsr<VI_char> %0,%1,%2"
1806 [(set_attr "type" "vecsimple")])
;; Arithmetic (sign-extending) right shift per element.
1808 (define_insn "*altivec_vsra<VI_char>"
1809 [(set (match_operand:VI2 0 "register_operand" "=v")
1810 (ashiftrt:VI2 (match_operand:VI2 1 "register_operand" "v")
1811 (match_operand:VI2 2 "register_operand" "v")))]
1813 "vsra<VI_char> %0,%1,%2"
1814 [(set_attr "type" "vecsimple")])
;; Whole-register shift right by bits (vsr).
1816 (define_insn "altivec_vsr"
1817 [(set (match_operand:V4SI 0 "register_operand" "=v")
1818 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1819 (match_operand:V4SI 2 "register_operand" "v")]
1823 [(set_attr "type" "vecperm")])
;; Whole-register shift right by octets (vsro).
1825 (define_insn "altivec_vsro"
1826 [(set (match_operand:V4SI 0 "register_operand" "=v")
1827 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1828 (match_operand:V4SI 2 "register_operand" "v")]
1832 [(set_attr "type" "vecperm")])
;; Saturating partial-sum instructions.  Each also sets VSCR (the SAT
;; bit), modeled by the parallel set of VSCR_REGNO.
1834 (define_insn "altivec_vsum4ubs"
1835 [(set (match_operand:V4SI 0 "register_operand" "=v")
1836 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
1837 (match_operand:V4SI 2 "register_operand" "v")]
1839 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1842 [(set_attr "type" "veccomplex")])
;; Signed sum-across-quarter (vsum4sbs/vsum4shs), byte or halfword input.
1844 (define_insn "altivec_vsum4s<VI_char>s"
1845 [(set (match_operand:V4SI 0 "register_operand" "=v")
1846 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
1847 (match_operand:V4SI 2 "register_operand" "v")]
1849 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1851 "vsum4s<VI_char>s %0,%1,%2"
1852 [(set_attr "type" "veccomplex")])
;; vsum2sws expander: the hardware instruction reads/writes specific word
;; positions, so on LE the addend vector is rotated into place with
;; vsldoi before the direct insn, and the result rotated back after.
1854 (define_expand "altivec_vsum2sws"
1855 [(use (match_operand:V4SI 0 "register_operand"))
1856 (use (match_operand:V4SI 1 "register_operand"))
1857 (use (match_operand:V4SI 2 "register_operand"))]
1860 if (VECTOR_ELT_ORDER_BIG)
1861 emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
1865 rtx tmp1 = gen_reg_rtx (V4SImode);
1866 rtx tmp2 = gen_reg_rtx (V4SImode);
1867 emit_insn (gen_altivec_vsldoi_v4si (tmp1, operands[2],
1868 operands[2], GEN_INT (12)));
1869 emit_insn (gen_altivec_vsum2sws_direct (tmp2, operands[1], tmp1));
1870 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
1876 ; FIXME: This can probably be expressed without an UNSPEC.
1877 (define_insn "altivec_vsum2sws_direct"
1878 [(set (match_operand:V4SI 0 "register_operand" "=v")
1879 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1880 (match_operand:V4SI 2 "register_operand" "v")]
1882 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1885 [(set_attr "type" "veccomplex")])
;; vsumsws expander: same LE fix-up idea, but the single addend word is
;; splatted (vspltw) first and the final sum rotated into position.
1887 (define_expand "altivec_vsumsws"
1888 [(use (match_operand:V4SI 0 "register_operand"))
1889 (use (match_operand:V4SI 1 "register_operand"))
1890 (use (match_operand:V4SI 2 "register_operand"))]
1893 if (VECTOR_ELT_ORDER_BIG)
1894 emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
1898 rtx tmp1 = gen_reg_rtx (V4SImode);
1899 rtx tmp2 = gen_reg_rtx (V4SImode);
1900 emit_insn (gen_altivec_vspltw_direct (tmp1, operands[2], const0_rtx));
1901 emit_insn (gen_altivec_vsumsws_direct (tmp2, operands[1], tmp1));
1902 emit_insn (gen_altivec_vsldoi_v4si (operands[0], tmp2, tmp2,
1908 ; FIXME: This can probably be expressed without an UNSPEC.
1909 (define_insn "altivec_vsumsws_direct"
1910 [(set (match_operand:V4SI 0 "register_operand" "=v")
1911 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
1912 (match_operand:V4SI 2 "register_operand" "v")]
1913 UNSPEC_VSUMSWS_DIRECT))
1914 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
1917 [(set_attr "type" "veccomplex")])
;; Splat byte family.  The expander records the user-visible (BE-order)
;; element index in the RTL; the internal insn converts it to the
;; hardware index on LE (15 - i for 16 byte elements).  For LE with
;; -maltivec=be the expander pre-reverses the index so the internal
;; insn's reversal cancels out.
1919 (define_expand "altivec_vspltb"
1920 [(use (match_operand:V16QI 0 "register_operand" ""))
1921 (use (match_operand:V16QI 1 "register_operand" ""))
1922 (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
1928 /* Special handling for LE with -maltivec=be. We have to reflect
1929 the actual selected index for the splat in the RTL. */
1930 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1931 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
1933 v = gen_rtvec (1, operands[2]);
1934 x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
1935 x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
1936 emit_insn (gen_rtx_SET (operands[0], x));
1940 (define_insn "*altivec_vspltb_internal"
1941 [(set (match_operand:V16QI 0 "register_operand" "=v")
1942 (vec_duplicate:V16QI
1943 (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
1945 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
1948 /* For true LE, this adjusts the selected index. For LE with
1949 -maltivec=be, this reverses what was done in the define_expand
1950 because the instruction already has big-endian bias. */
1951 if (!BYTES_BIG_ENDIAN)
1952 operands[2] = GEN_INT (15 - INTVAL (operands[2]));
1954 return "vspltb %0,%1,%2";
1956 [(set_attr "type" "vecperm")])
;; Raw vspltb via UNSPEC — index used exactly as given.
1958 (define_insn "altivec_vspltb_direct"
1959 [(set (match_operand:V16QI 0 "register_operand" "=v")
1960 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
1961 (match_operand:QI 2 "u5bit_cint_operand" "i")]
1962 UNSPEC_VSPLT_DIRECT))]
1965 [(set_attr "type" "vecperm")])
;; Splat halfword family — same scheme with 8 elements (index 7 - i).
1967 (define_expand "altivec_vsplth"
1968 [(use (match_operand:V8HI 0 "register_operand" ""))
1969 (use (match_operand:V8HI 1 "register_operand" ""))
1970 (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
1976 /* Special handling for LE with -maltivec=be. We have to reflect
1977 the actual selected index for the splat in the RTL. */
1978 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
1979 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
1981 v = gen_rtvec (1, operands[2]);
1982 x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
1983 x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
1984 emit_insn (gen_rtx_SET (operands[0], x));
1988 (define_insn "*altivec_vsplth_internal"
1989 [(set (match_operand:V8HI 0 "register_operand" "=v")
1991 (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
1993 [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
1996 /* For true LE, this adjusts the selected index. For LE with
1997 -maltivec=be, this reverses what was done in the define_expand
1998 because the instruction already has big-endian bias. */
1999 if (!BYTES_BIG_ENDIAN)
2000 operands[2] = GEN_INT (7 - INTVAL (operands[2]));
2002 return "vsplth %0,%1,%2";
2004 [(set_attr "type" "vecperm")])
;; Raw vsplth via UNSPEC — index used exactly as given.
2006 (define_insn "altivec_vsplth_direct"
2007 [(set (match_operand:V8HI 0 "register_operand" "=v")
2008 (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
2009 (match_operand:QI 2 "u5bit_cint_operand" "i")]
2010 UNSPEC_VSPLT_DIRECT))]
2013 [(set_attr "type" "vecperm")])
;; Splat word family — same endian scheme as vspltb/vsplth with 4
;; elements (index 3 - i).
2015 (define_expand "altivec_vspltw"
2016 [(use (match_operand:V4SI 0 "register_operand" ""))
2017 (use (match_operand:V4SI 1 "register_operand" ""))
2018 (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
2024 /* Special handling for LE with -maltivec=be. We have to reflect
2025 the actual selected index for the splat in the RTL. */
2026 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2027 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2029 v = gen_rtvec (1, operands[2]);
2030 x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2031 x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
2032 emit_insn (gen_rtx_SET (operands[0], x));
2036 (define_insn "*altivec_vspltw_internal"
2037 [(set (match_operand:V4SI 0 "register_operand" "=v")
2039 (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
2041 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
2044 /* For true LE, this adjusts the selected index. For LE with
2045 -maltivec=be, this reverses what was done in the define_expand
2046 because the instruction already has big-endian bias. */
2047 if (!BYTES_BIG_ENDIAN)
2048 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2050 return "vspltw %0,%1,%2";
2052 [(set_attr "type" "vecperm")])
;; Raw vspltw via UNSPEC — index used exactly as given.
2054 (define_insn "altivec_vspltw_direct"
2055 [(set (match_operand:V4SI 0 "register_operand" "=v")
2056 (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
2057 (match_operand:QI 2 "u5bit_cint_operand" "i")]
2058 UNSPEC_VSPLT_DIRECT))]
2061 [(set_attr "type" "vecperm")])
;; Splat single-float — reuses vspltw since floats occupy 32-bit lanes.
2063 (define_expand "altivec_vspltsf"
2064 [(use (match_operand:V4SF 0 "register_operand" ""))
2065 (use (match_operand:V4SF 1 "register_operand" ""))
2066 (use (match_operand:QI 2 "u5bit_cint_operand" ""))]
2072 /* Special handling for LE with -maltivec=be. We have to reflect
2073 the actual selected index for the splat in the RTL. */
2074 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2075 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2077 v = gen_rtvec (1, operands[2]);
2078 x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
2079 x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
2080 emit_insn (gen_rtx_SET (operands[0], x));
2084 (define_insn "*altivec_vspltsf_internal"
2085 [(set (match_operand:V4SF 0 "register_operand" "=v")
2087 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
2089 [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
2090 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2092 /* For true LE, this adjusts the selected index. For LE with
2093 -maltivec=be, this reverses what was done in the define_expand
2094 because the instruction already has big-endian bias. */
2095 if (!BYTES_BIG_ENDIAN)
2096 operands[2] = GEN_INT (3 - INTVAL (operands[2]));
2098 return "vspltw %0,%1,%2";
2100 [(set_attr "type" "vecperm")])
;; Splat 5-bit signed immediate into every element (vspltisb/h/w).
2102 (define_insn "altivec_vspltis<VI_char>"
2103 [(set (match_operand:VI 0 "register_operand" "=v")
2105 (match_operand:QI 1 "s5bit_cint_operand" "i")))]
2107 "vspltis<VI_char> %0,%1"
2108 [(set_attr "type" "vecperm")])
;; vrfiz: round each V4SF element toward zero (truncate), expressed with
;; the fix RTL code so the optimizers understand it.
2110 (define_insn "*altivec_vrfiz"
2111 [(set (match_operand:V4SF 0 "register_operand" "=v")
2112 (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
2113 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2115 [(set_attr "type" "vecfloat")])
;; vperm expander: permute bytes of operands 1/2 under the control
;; vector in operand 3.  On LE the control-vector semantics differ, so
;; altivec_expand_vec_perm_le rewrites the operation.
2117 (define_expand "altivec_vperm_<mode>"
2118 [(set (match_operand:VM 0 "register_operand" "")
2119 (unspec:VM [(match_operand:VM 1 "register_operand" "")
2120 (match_operand:VM 2 "register_operand" "")
2121 (match_operand:V16QI 3 "register_operand" "")]
2125 if (!VECTOR_ELT_ORDER_BIG)
2127 altivec_expand_vec_perm_le (operands);
2132 ;; Slightly prefer vperm, since the target does not overlap the source
2133 (define_insn "altivec_vperm_<mode>_direct"
2134 [(set (match_operand:VM 0 "register_operand" "=v,?wo")
2135 (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
2136 (match_operand:VM 2 "register_operand" "v,0")
2137 (match_operand:V16QI 3 "register_operand" "v,wo")]
2143 [(set_attr "type" "vecperm")
2144 (set_attr "length" "4")])
;; vperm producing V16QI from two V8HI inputs (mixed-mode builtin form).
2146 (define_insn "altivec_vperm_v8hiv16qi"
2147 [(set (match_operand:V16QI 0 "register_operand" "=v,?wo")
2148 (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,wo")
2149 (match_operand:V8HI 2 "register_operand" "v,0")
2150 (match_operand:V16QI 3 "register_operand" "v,wo")]
2156 [(set_attr "type" "vecperm")
2157 (set_attr "length" "4")])
;; Unsigned-element variant of the vperm expander (distinct unspec keeps
;; signedness information for later optimization).
2159 (define_expand "altivec_vperm_<mode>_uns"
2160 [(set (match_operand:VM 0 "register_operand" "")
2161 (unspec:VM [(match_operand:VM 1 "register_operand" "")
2162 (match_operand:VM 2 "register_operand" "")
2163 (match_operand:V16QI 3 "register_operand" "")]
2167 if (!VECTOR_ELT_ORDER_BIG)
2169 altivec_expand_vec_perm_le (operands);
2174 (define_insn "*altivec_vperm_<mode>_uns_internal"
2175 [(set (match_operand:VM 0 "register_operand" "=v,?wo")
2176 (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
2177 (match_operand:VM 2 "register_operand" "v,0")
2178 (match_operand:V16QI 3 "register_operand" "v,wo")]
2184 [(set_attr "type" "vecperm")
2185 (set_attr "length" "4")])
;; Standard named pattern for the middle end's generic vector permute.
2187 (define_expand "vec_permv16qi"
2188 [(set (match_operand:V16QI 0 "register_operand" "")
2189 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "")
2190 (match_operand:V16QI 2 "register_operand" "")
2191 (match_operand:V16QI 3 "register_operand" "")]
2195 if (!BYTES_BIG_ENDIAN) {
2196 altivec_expand_vec_perm_le (operands);
;; Constant-selector permute: succeeds only if the target can synthesize
;; the given constant permutation (altivec_expand_vec_perm_const).
2201 (define_expand "vec_perm_constv16qi"
2202 [(match_operand:V16QI 0 "register_operand" "")
2203 (match_operand:V16QI 1 "register_operand" "")
2204 (match_operand:V16QI 2 "register_operand" "")
2205 (match_operand:V16QI 3 "" "")]
2208 if (altivec_expand_vec_perm_const (operands))
;; vpermr/xxpermr (ISA 3.0): permute with reversed selector semantics,
;; avoiding the selector complement needed by vperm on LE.
2214 (define_insn "*altivec_vpermr_<mode>_internal"
2215 [(set (match_operand:VM 0 "register_operand" "=v,?wo")
2216 (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo")
2217 (match_operand:VM 2 "register_operand" "v,0")
2218 (match_operand:V16QI 3 "register_operand" "v,wo")]
2223 xxpermr %x0,%x1,%x3"
2224 [(set_attr "type" "vecperm")
2225 (set_attr "length" "4")])
;; V4SF rounding and int<->float conversion instructions.  Rounds are
;; element-wise; the conversions take a 5-bit scale immediate (the value
;; is multiplied/divided by 2^imm as part of the conversion).
2227 (define_insn "altivec_vrfip" ; ceil
2228 [(set (match_operand:V4SF 0 "register_operand" "=v")
2229 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2233 [(set_attr "type" "vecfloat")])
;; Round to nearest.
2235 (define_insn "altivec_vrfin"
2236 [(set (match_operand:V4SF 0 "register_operand" "=v")
2237 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2241 [(set_attr "type" "vecfloat")])
2243 (define_insn "*altivec_vrfim" ; floor
2244 [(set (match_operand:V4SF 0 "register_operand" "=v")
2245 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2249 [(set_attr "type" "vecfloat")])
;; Unsigned fixed-point word -> float, scaled by 2^-imm.
2251 (define_insn "altivec_vcfux"
2252 [(set (match_operand:V4SF 0 "register_operand" "=v")
2253 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2254 (match_operand:QI 2 "immediate_operand" "i")]
2258 [(set_attr "type" "vecfloat")])
;; Signed fixed-point word -> float, scaled by 2^-imm.
2260 (define_insn "altivec_vcfsx"
2261 [(set (match_operand:V4SF 0 "register_operand" "=v")
2262 (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
2263 (match_operand:QI 2 "immediate_operand" "i")]
2267 [(set_attr "type" "vecfloat")])
;; Float -> unsigned fixed-point word with saturation; sets VSCR SAT.
2269 (define_insn "altivec_vctuxs"
2270 [(set (match_operand:V4SI 0 "register_operand" "=v")
2271 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2272 (match_operand:QI 2 "immediate_operand" "i")]
2274 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2277 [(set_attr "type" "vecfloat")])
;; Float -> signed fixed-point word with saturation; sets VSCR SAT.
2279 (define_insn "altivec_vctsxs"
2280 [(set (match_operand:V4SI 0 "register_operand" "=v")
2281 (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
2282 (match_operand:QI 2 "immediate_operand" "i")]
2284 (set (reg:SI VSCR_REGNO) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
2287 [(set_attr "type" "vecfloat")])
;; Estimate instructions: log2, 2^x, reciprocal square root, reciprocal.
2289 (define_insn "altivec_vlogefp"
2290 [(set (match_operand:V4SF 0 "register_operand" "=v")
2291 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2295 [(set_attr "type" "vecfloat")])
2297 (define_insn "altivec_vexptefp"
2298 [(set (match_operand:V4SF 0 "register_operand" "=v")
2299 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2303 [(set_attr "type" "vecfloat")])
2305 (define_insn "*altivec_vrsqrtefp"
2306 [(set (match_operand:V4SF 0 "register_operand" "=v")
2307 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2309 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2311 [(set_attr "type" "vecfloat")])
2313 (define_insn "altivec_vrefp"
2314 [(set (match_operand:V4SF 0 "register_operand" "=v")
2315 (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
2317 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2319 [(set_attr "type" "vecfloat")])
;; copysign for V4SF: build a vector whose every word is the sign-bit mask
;; (1 << 31), then use a vector select so each result lane takes its
;; magnitude from operand 1 and its sign from operand 2.
;; NOTE(review): this excerpt elides some interior lines of the expand.
2321 (define_expand "altivec_copysign_v4sf3"
2322 [(use (match_operand:V4SF 0 "register_operand" ""))
2323 (use (match_operand:V4SF 1 "register_operand" ""))
2324 (use (match_operand:V4SF 2 "register_operand" ""))]
2325 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2328 rtx mask = gen_reg_rtx (V4SImode);
2329 rtvec v = rtvec_alloc (4);
2330 unsigned HOST_WIDE_INT mask_val = ((unsigned HOST_WIDE_INT)1) << 31;
;; All four words get the same sign-bit constant.
2332 RTVEC_ELT (v, 0) = GEN_INT (mask_val);
2333 RTVEC_ELT (v, 1) = GEN_INT (mask_val);
2334 RTVEC_ELT (v, 2) = GEN_INT (mask_val);
2335 RTVEC_ELT (v, 3) = GEN_INT (mask_val);
2337 emit_insn (gen_vec_initv4sisi (mask, gen_rtx_PARALLEL (V4SImode, v)));
2338 emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2],
2339 gen_lowpart (V4SFmode, mask)));
;; vsldoi: shift the 32-byte concatenation of operands 1 and 2 left by
;; the octet count in operand 3.
2343 (define_insn "altivec_vsldoi_<mode>"
2344 [(set (match_operand:VM 0 "register_operand" "=v")
2345 (unspec:VM [(match_operand:VM 1 "register_operand" "v")
2346 (match_operand:VM 2 "register_operand" "v")
2347 (match_operand:QI 3 "immediate_operand" "i")]
2350 "vsldoi %0,%1,%2,%3"
2351 [(set_attr "type" "vecperm")])
;; Sign-extending unpack patterns.  The endian-aware versions swap
;; high/low mnemonics when the element order is little-endian (visible in
;; the VECTOR_ELT_ORDER_BIG test); the "_direct" versions always emit the
;; named instruction regardless of endianness.
;; NOTE(review): excerpt elides each pattern's condition lines.
2353 (define_insn "altivec_vupkhs<VU_char>"
2354 [(set (match_operand:VP 0 "register_operand" "=v")
2355 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2356 UNSPEC_VUNPACK_HI_SIGN))]
2359 if (VECTOR_ELT_ORDER_BIG)
2360 return "vupkhs<VU_char> %0,%1";
2362 return "vupkls<VU_char> %0,%1";
2364 [(set_attr "type" "vecperm")])
;; Unpack high, no endian adjustment.
2366 (define_insn "*altivec_vupkhs<VU_char>_direct"
2367 [(set (match_operand:VP 0 "register_operand" "=v")
2368 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2369 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
2371 "vupkhs<VU_char> %0,%1"
2372 [(set_attr "type" "vecperm")])
;; Unpack low, endian-aware.
2374 (define_insn "altivec_vupkls<VU_char>"
2375 [(set (match_operand:VP 0 "register_operand" "=v")
2376 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2377 UNSPEC_VUNPACK_LO_SIGN))]
2380 if (VECTOR_ELT_ORDER_BIG)
2381 return "vupkls<VU_char> %0,%1";
2383 return "vupkhs<VU_char> %0,%1";
2385 [(set_attr "type" "vecperm")])
;; Unpack low, no endian adjustment.
2387 (define_insn "*altivec_vupkls<VU_char>_direct"
2388 [(set (match_operand:VP 0 "register_operand" "=v")
2389 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
2390 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
2392 "vupkls<VU_char> %0,%1"
2393 [(set_attr "type" "vecperm")])
;; Pixel unpack (V8HI -> V4SI), endian-aware high/low pair.
2395 (define_insn "altivec_vupkhpx"
2396 [(set (match_operand:V4SI 0 "register_operand" "=v")
2397 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2401 if (VECTOR_ELT_ORDER_BIG)
2402 return "vupkhpx %0,%1";
2404 return "vupklpx %0,%1";
2406 [(set_attr "type" "vecperm")])
2408 (define_insn "altivec_vupklpx"
2409 [(set (match_operand:V4SI 0 "register_operand" "=v")
2410 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
2414 if (VECTOR_ELT_ORDER_BIG)
2415 return "vupklpx %0,%1";
2417 return "vupkhpx %0,%1";
2419 [(set_attr "type" "vecperm")])
2421 ;; Compare vectors producing a vector result and a predicate, setting CR6 to
2422 ;; indicate a combined status
;; Each pattern is a parallel: CR6 gets the combined compare status
;; (via an unspec wrapping the comparison), and operand 0 gets the
;; per-element compare result.  The trailing "." in the mnemonic is the
;; record form that updates CR6.
;; NOTE(review): excerpt elides each pattern's unspec name/condition lines.
2423 (define_insn "*altivec_vcmpequ<VI_char>_p"
2424 [(set (reg:CC CR6_REGNO)
2425 (unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
2426 (match_operand:VI2 2 "register_operand" "v"))]
2428 (set (match_operand:VI2 0 "register_operand" "=v")
2429 (eq:VI2 (match_dup 1)
2432 "vcmpequ<VI_char>. %0,%1,%2"
2433 [(set_attr "type" "veccmpfx")])
;; Signed greater-than predicate.
2435 (define_insn "*altivec_vcmpgts<VI_char>_p"
2436 [(set (reg:CC CR6_REGNO)
2437 (unspec:CC [(gt:CC (match_operand:VI2 1 "register_operand" "v")
2438 (match_operand:VI2 2 "register_operand" "v"))]
2440 (set (match_operand:VI2 0 "register_operand" "=v")
2441 (gt:VI2 (match_dup 1)
2444 "vcmpgts<VI_char>. %0,%1,%2"
2445 [(set_attr "type" "veccmpfx")])
;; Unsigned greater-than predicate.
2447 (define_insn "*altivec_vcmpgtu<VI_char>_p"
2448 [(set (reg:CC CR6_REGNO)
2449 (unspec:CC [(gtu:CC (match_operand:VI2 1 "register_operand" "v")
2450 (match_operand:VI2 2 "register_operand" "v"))]
2452 (set (match_operand:VI2 0 "register_operand" "=v")
2453 (gtu:VI2 (match_dup 1)
2456 "vcmpgtu<VI_char>. %0,%1,%2"
2457 [(set_attr "type" "veccmpfx")])
;; Floating-point equality predicate.
2459 (define_insn "*altivec_vcmpeqfp_p"
2460 [(set (reg:CC CR6_REGNO)
2461 (unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v")
2462 (match_operand:V4SF 2 "register_operand" "v"))]
2464 (set (match_operand:V4SF 0 "register_operand" "=v")
2465 (eq:V4SF (match_dup 1)
2467 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2468 "vcmpeqfp. %0,%1,%2"
2469 [(set_attr "type" "veccmp")])
;; Floating-point greater-than predicate.
2471 (define_insn "*altivec_vcmpgtfp_p"
2472 [(set (reg:CC CR6_REGNO)
2473 (unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v")
2474 (match_operand:V4SF 2 "register_operand" "v"))]
2476 (set (match_operand:V4SF 0 "register_operand" "=v")
2477 (gt:V4SF (match_dup 1)
2479 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2480 "vcmpgtfp. %0,%1,%2"
2481 [(set_attr "type" "veccmp")])
;; Floating-point greater-or-equal predicate.
2483 (define_insn "*altivec_vcmpgefp_p"
2484 [(set (reg:CC CR6_REGNO)
2485 (unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v")
2486 (match_operand:V4SF 2 "register_operand" "v"))]
2488 (set (match_operand:V4SF 0 "register_operand" "=v")
2489 (ge:V4SF (match_dup 1)
2491 "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
2492 "vcmpgefp. %0,%1,%2"
2493 [(set_attr "type" "veccmp")])
;; Bounds-compare predicate (vcmpbfp.); both the CR6 set and the vector
;; result are unspecs since the operation has no RTL comparison code.
2495 (define_insn "altivec_vcmpbfp_p"
2496 [(set (reg:CC CR6_REGNO)
2497 (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
2498 (match_operand:V4SF 2 "register_operand" "v")]
2500 (set (match_operand:V4SF 0 "register_operand" "=v")
2501 (unspec:V4SF [(match_dup 1)
2504 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
2506 [(set_attr "type" "veccmp")])
;; VSCR access and data-stream (software prefetch) instructions.  All use
;; unspec_volatile / unspec so they are never CSE'd or moved.
;; NOTE(review): excerpt elides condition and template lines of several
;; patterns below.
2508 (define_insn "altivec_mtvscr"
2509 [(set (reg:SI VSCR_REGNO)
2511 [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
2514 [(set_attr "type" "vecsimple")])
;; Move from VSCR into a vector register.
2516 (define_insn "altivec_mfvscr"
2517 [(set (match_operand:V8HI 0 "register_operand" "=v")
2518 (unspec_volatile:V8HI [(reg:SI VSCR_REGNO)] UNSPECV_MFVSCR))]
2521 [(set_attr "type" "vecsimple")])
;; Stop all data streams.
2523 (define_insn "altivec_dssall"
2524 [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
2527 [(set_attr "type" "vecsimple")])
;; Stop the data stream selected by the immediate.
2529 (define_insn "altivec_dss"
2530 [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
2534 [(set_attr "type" "vecsimple")])
;; Data-stream touch: operand 0 is the base address (must be Pmode),
;; operand 1 the stream control word, operand 2 the stream id.
2536 (define_insn "altivec_dst"
2537 [(unspec [(match_operand 0 "register_operand" "b")
2538 (match_operand:SI 1 "register_operand" "r")
2539 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
2540 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2542 [(set_attr "type" "vecsimple")])
;; Transient variant of dst.
2544 (define_insn "altivec_dstt"
2545 [(unspec [(match_operand 0 "register_operand" "b")
2546 (match_operand:SI 1 "register_operand" "r")
2547 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
2548 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2550 [(set_attr "type" "vecsimple")])
;; Store-intent variant of dst.
2552 (define_insn "altivec_dstst"
2553 [(unspec [(match_operand 0 "register_operand" "b")
2554 (match_operand:SI 1 "register_operand" "r")
2555 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
2556 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2558 [(set_attr "type" "vecsimple")])
;; Transient store-intent variant of dst.
2560 (define_insn "altivec_dststt"
2561 [(unspec [(match_operand 0 "register_operand" "b")
2562 (match_operand:SI 1 "register_operand" "r")
2563 (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
2564 "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
2566 [(set_attr "type" "vecsimple")])
;; lvsl/lvsr: load the shift-for-alignment permute control vector.  On
;; big-element-order targets emit the instruction directly; otherwise
;; build the equivalent mask by permuting the raw lvsl/lvsr result with a
;; 0,1,...,15 byte series (visible in the gen_const_vec_series call).
;; NOTE(review): excerpt elides some interior lines of these patterns.
2568 (define_expand "altivec_lvsl"
2569 [(use (match_operand:V16QI 0 "register_operand" ""))
2570 (use (match_operand:V16QI 1 "memory_operand" ""))]
2573 if (VECTOR_ELT_ORDER_BIG)
2574 emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
2577 rtx mask, constv, vperm;
2578 mask = gen_reg_rtx (V16QImode);
2579 emit_insn (gen_altivec_lvsl_direct (mask, operands[1]));
2580 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2581 constv = force_reg (V16QImode, constv);
2582 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2584 emit_insn (gen_rtx_SET (operands[0], vperm));
;; lvsl with a GPR (address) operand rather than a memory operand.
2589 (define_insn "altivec_lvsl_reg"
2590 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2592 [(match_operand:DI 1 "gpc_reg_operand" "b")]
2596 [(set_attr "type" "vecload")])
;; Raw lvsl, no endian adjustment.
2598 (define_insn "altivec_lvsl_direct"
2599 [(set (match_operand:V16QI 0 "register_operand" "=v")
2600 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2604 [(set_attr "type" "vecload")])
;; lvsr expand mirrors altivec_lvsl above.
2606 (define_expand "altivec_lvsr"
2607 [(use (match_operand:V16QI 0 "altivec_register_operand"))
2608 (use (match_operand:V16QI 1 "memory_operand"))]
2611 if (VECTOR_ELT_ORDER_BIG)
2612 emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]))
2615 rtx mask, constv, vperm;
2616 mask = gen_reg_rtx (V16QImode);
2617 emit_insn (gen_altivec_lvsr_direct (mask, operands[1]));
2618 constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx);
2619 constv = force_reg (V16QImode, constv);
2620 vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv),
2622 emit_insn (gen_rtx_SET (operands[0], vperm));
;; lvsr with a GPR (address) operand.
2627 (define_insn "altivec_lvsr_reg"
2628 [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
2630 [(match_operand:DI 1 "gpc_reg_operand" "b")]
2634 [(set_attr "type" "vecload")])
;; Raw lvsr, no endian adjustment.
2636 (define_insn "altivec_lvsr_direct"
2637 [(set (match_operand:V16QI 0 "register_operand" "=v")
2638 (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "Z")]
2642 [(set_attr "type" "vecload")])
;; Build the realignment mask for a misaligned vector load: negate the
;; address and feed it to lvsr.
2644 (define_expand "build_vector_mask_for_load"
2645 [(set (match_operand:V16QI 0 "register_operand" "")
2646 (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))]
2653 gcc_assert (GET_CODE (operands[1]) == MEM);
2655 addr = XEXP (operands[1], 0);
2656 temp = gen_reg_rtx (GET_MODE (addr));
2657 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (GET_MODE (addr), addr)));
2658 emit_insn (gen_altivec_lvsr (operands[0],
2659 replace_equiv_address (operands[1], temp)));
2663 ;; Parallel some of the LVE* and STV*'s with unspecs because some have
2664 ;; identical rtl but different instructions-- and gcc gets confused.
;; Element loads (lve*x) and LRU-hinted loads (lvxl).  The expands route
;; through altivec_expand_lvx_be when element order is big on a
;; little-endian target.  NOTE(review): excerpt elides condition and
;; template lines of several patterns below.
2666 (define_expand "altivec_lve<VI_char>x"
2668 [(set (match_operand:VI 0 "register_operand" "=v")
2669 (match_operand:VI 1 "memory_operand" "Z"))
2670 (unspec [(const_int 0)] UNSPEC_LVE)])]
2673 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2675 altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVE);
;; Matching insn for the expand above.
2680 (define_insn "*altivec_lve<VI_char>x_internal"
2682 [(set (match_operand:VI 0 "register_operand" "=v")
2683 (match_operand:VI 1 "memory_operand" "Z"))
2684 (unspec [(const_int 0)] UNSPEC_LVE)])]
2686 "lve<VI_char>x %0,%y1"
2687 [(set_attr "type" "vecload")])
;; Float element load.
2689 (define_insn "*altivec_lvesfx"
2691 [(set (match_operand:V4SF 0 "register_operand" "=v")
2692 (match_operand:V4SF 1 "memory_operand" "Z"))
2693 (unspec [(const_int 0)] UNSPEC_LVE)])]
2696 [(set_attr "type" "vecload")])
;; lvxl expand; the unspec tag distinguishes it from a plain load.
2698 (define_expand "altivec_lvxl_<mode>"
2700 [(set (match_operand:VM2 0 "register_operand" "=v")
2701 (match_operand:VM2 1 "memory_operand" "Z"))
2702 (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
2705 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2707 altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR);
;; Matching insn for the lvxl expand.
2712 (define_insn "*altivec_lvxl_<mode>_internal"
2714 [(set (match_operand:VM2 0 "register_operand" "=v")
2715 (match_operand:VM2 1 "memory_operand" "Z"))
2716 (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
2719 [(set_attr "type" "vecload")])
2721 ; This version of lvx is used only in cases where we need to force an lvx
2722 ; over any other load, and we don't care about losing CSE opportunities.
2723 ; Its primary use is for prologue register saves.
2724 (define_insn "altivec_lvx_<mode>_internal"
2726 [(set (match_operand:VM2 0 "register_operand" "=v")
2727 (match_operand:VM2 1 "memory_operand" "Z"))
2728 (unspec [(const_int 0)] UNSPEC_LVX)])]
2731 [(set_attr "type" "vecload")])
2733 ; The next two patterns embody what lvx should usually look like.
; The address is ANDed with a mask (elided low bits) — lvx ignores the
; low address bits, so the AND models the forced 16-byte alignment.
2734 (define_insn "altivec_lvx_<mode>_2op"
2735 [(set (match_operand:VM2 0 "register_operand" "=v")
2736 (mem:VM2 (and:DI (plus:DI (match_operand:DI 1 "register_operand" "b")
2737 (match_operand:DI 2 "register_operand" "r"))
2739 "TARGET_ALTIVEC && TARGET_64BIT"
2741 [(set_attr "type" "vecload")])
; Single-register-address form.
2743 (define_insn "altivec_lvx_<mode>_1op"
2744 [(set (match_operand:VM2 0 "register_operand" "=v")
2745 (mem:VM2 (and:DI (match_operand:DI 1 "register_operand" "r")
2747 "TARGET_ALTIVEC && TARGET_64BIT"
2749 [(set_attr "type" "vecload")])
2751 ; 32-bit versions of the above.
2752 (define_insn "altivec_lvx_<mode>_2op_si"
2753 [(set (match_operand:VM2 0 "register_operand" "=v")
2754 (mem:VM2 (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b")
2755 (match_operand:SI 2 "register_operand" "r"))
2757 "TARGET_ALTIVEC && TARGET_32BIT"
2759 [(set_attr "type" "vecload")])
2761 (define_insn "altivec_lvx_<mode>_1op_si"
2762 [(set (match_operand:VM2 0 "register_operand" "=v")
2763 (mem:VM2 (and:SI (match_operand:SI 1 "register_operand" "r")
2765 "TARGET_ALTIVEC && TARGET_32BIT"
2767 [(set_attr "type" "vecload")])
2769 ; This version of stvx is used only in cases where we need to force an stvx
2770 ; over any other store, and we don't care about losing CSE opportunities.
2771 ; Its primary use is for epilogue register restores.
2772 (define_insn "altivec_stvx_<mode>_internal"
2774 [(set (match_operand:VM2 0 "memory_operand" "=Z")
2775 (match_operand:VM2 1 "register_operand" "v"))
2776 (unspec [(const_int 0)] UNSPEC_STVX)])]
2779 [(set_attr "type" "vecstore")])
2781 ; The next two patterns embody what stvx should usually look like.
; Mirrors the lvx patterns: AND-masked address models the forced
; alignment that stvx performs in hardware.
2782 (define_insn "altivec_stvx_<mode>_2op"
2783 [(set (mem:VM2 (and:DI (plus:DI (match_operand:DI 1 "register_operand" "b")
2784 (match_operand:DI 2 "register_operand" "r"))
2786 (match_operand:VM2 0 "register_operand" "v"))]
2787 "TARGET_ALTIVEC && TARGET_64BIT"
2789 [(set_attr "type" "vecstore")])
2791 (define_insn "altivec_stvx_<mode>_1op"
2792 [(set (mem:VM2 (and:DI (match_operand:DI 1 "register_operand" "r")
2794 (match_operand:VM2 0 "register_operand" "v"))]
2795 "TARGET_ALTIVEC && TARGET_64BIT"
2797 [(set_attr "type" "vecstore")])
2799 ; 32-bit versions of the above.
2800 (define_insn "altivec_stvx_<mode>_2op_si"
2801 [(set (mem:VM2 (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b")
2802 (match_operand:SI 2 "register_operand" "r"))
2804 (match_operand:VM2 0 "register_operand" "v"))]
2805 "TARGET_ALTIVEC && TARGET_32BIT"
2807 [(set_attr "type" "vecstore")])
2809 (define_insn "altivec_stvx_<mode>_1op_si"
2810 [(set (mem:VM2 (and:SI (match_operand:SI 1 "register_operand" "r")
2812 (match_operand:VM2 0 "register_operand" "v"))]
2813 "TARGET_ALTIVEC && TARGET_32BIT"
2815 [(set_attr "type" "vecstore")])
;; LRU-hinted store (stvxl); expand routes through the BE helper on LE.
2817 (define_expand "altivec_stvxl_<mode>"
2819 [(set (match_operand:VM2 0 "memory_operand" "=Z")
2820 (match_operand:VM2 1 "register_operand" "v"))
2821 (unspec [(const_int 0)] UNSPEC_STVXL)])]
2824 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2826 altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL);
;; Matching insn for the stvxl expand.
2831 (define_insn "*altivec_stvxl_<mode>_internal"
2833 [(set (match_operand:VM2 0 "memory_operand" "=Z")
2834 (match_operand:VM2 1 "register_operand" "v"))
2835 (unspec [(const_int 0)] UNSPEC_STVXL)])]
2838 [(set_attr "type" "vecstore")])
;; Element stores (stve*x); store one element of the vector to memory.
2840 (define_expand "altivec_stve<VI_char>x"
2841 [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
2842 (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
2845 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
2847 altivec_expand_stvex_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVE);
2852 (define_insn "*altivec_stve<VI_char>x_internal"
2853 [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
2854 (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
2856 "stve<VI_char>x %1,%y0"
2857 [(set_attr "type" "vecstore")])
;; Float element store.
2859 (define_insn "*altivec_stvesfx"
2860 [(set (match_operand:SF 0 "memory_operand" "=Z")
2861 (unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))]
2864 [(set_attr "type" "vecstore")])
2867 ;; signed int/float to double convert words 0 and 2
;; "Even"-word conversions to V2DF.  When the input words are not already
;; in the positions the xvcv*dp instruction reads, use xxsldwi to rotate
;; the doubled input first.  NOTE(review): excerpt elides some lines of
;; each expand; comments describe only the visible logic.
2868 (define_expand "doublee<mode>2"
2869 [(set (match_operand:V2DF 0 "register_operand" "=v")
2870 (match_operand:VSX_W 1 "register_operand" "v"))]
2873 machine_mode op_mode = GET_MODE (operands[1]);
2875 if (VECTOR_ELT_ORDER_BIG)
2877 /* Big endian word numbering for words in operand is 0 1 2 3.
2878 Input words 0 and 2 are where they need to be. */
2879 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
2883 /* Little endian word numbering for operand is 3 2 1 0.
2884 take (operand[1] operand[1]) and shift left one word
2885 3 2 1 0 3 2 1 0 => 2 1 0 3
2886 Input words 2 and 0 are now where they need to be for the
2889 rtx rtx_val = GEN_INT (1);
2891 rtx_tmp = gen_reg_rtx (op_mode);
2892 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
2893 operands[1], rtx_val));
2894 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
2898 [(set_attr "type" "veccomplex")])
2900 ;; Generate unsdoublee
2901 ;; unsigned int to double convert words 0 and 2
;; Same structure as doublee, fixed to V4SI with xvcvuxwdp (unsigned).
2902 (define_expand "unsdoubleev4si2"
2903 [(set (match_operand:V2DF 0 "register_operand" "=v")
2904 (match_operand:V4SI 1 "register_operand" "v"))]
2907 if (VECTOR_ELT_ORDER_BIG)
2909 /* Big endian word numbering for words in operand is 0 1 2 3.
2910 Input words 0 and 2 are where they need to be. */
2911 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
2915 /* Little endian word numbering for operand is 3 2 1 0.
2916 take (operand[1] operand[1]) and shift left one word
2917 3 2 1 0 3 2 1 0 => 2 1 0 3
2918 Input words 2 and 0 are now where they need to be for the
2921 rtx rtx_val = GEN_INT (1);
2923 rtx_tmp = gen_reg_rtx (V4SImode);
2924 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
2925 operands[1], rtx_val));
2926 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
2930 [(set_attr "type" "veccomplex")])
2932 ;; Generate doubleov
2933 ;; signed int/float to double convert words 1 and 3
;; "Odd"-word conversions: the BE/LE roles are swapped relative to
;; doublee (the shift happens on the big-endian path here).
2934 (define_expand "doubleo<mode>2"
2935 [(set (match_operand:V2DF 0 "register_operand" "=v")
2936 (match_operand:VSX_W 1 "register_operand" "v"))]
2939 machine_mode op_mode = GET_MODE (operands[1]);
2941 if (VECTOR_ELT_ORDER_BIG)
2943 /* Big endian word numbering for words in operand is 0 1 2 3.
2944 take (operand[1] operand[1]) and shift left one word
2945 0 1 2 3 0 1 2 3 => 1 2 3 0
2946 Input words 1 and 3 are now where they need to be for the
2949 rtx rtx_val = GEN_INT (1);
2951 rtx_tmp = gen_reg_rtx (op_mode);
2952 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
2953 operands[1], rtx_val));
2954 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
2958 /* Little endian word numbering for operand is 3 2 1 0.
2959 Input words 3 and 1 are where they need to be. */
2960 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], operands[1]));
2964 [(set_attr "type" "veccomplex")])
2966 ;; Generate unsdoubleov
2967 ;; unsigned int to double convert words 1 and 3
;; Unsigned odd-word conversion; V4SI-only, uses xvcvuxwdp.
2968 (define_expand "unsdoubleov4si2"
2969 [(set (match_operand:V2DF 0 "register_operand" "=v")
2970 (match_operand:V4SI 1 "register_operand" "v"))]
2973 if (VECTOR_ELT_ORDER_BIG)
2975 /* Big endian word numbering for words in operand is 0 1 2 3.
2976 take (operand[1] operand[1]) and shift left one word
2977 0 1 2 3 0 1 2 3 => 1 2 3 0
2978 Input words 1 and 3 are now where they need to be for the
2981 rtx rtx_val = GEN_INT (1);
2983 rtx_tmp = gen_reg_rtx (V4SImode);
2984 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
2985 operands[1], rtx_val));
2986 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
2990 /* Want to convert the words 1 and 3.
2991 Little endian word numbering for operand is 3 2 1 0.
2992 Input words 3 and 1 are where they need to be. */
2993 emit_insn (gen_vsx_xvcvuxwdp (operands[0], operands[1]));
2997 [(set_attr "type" "veccomplex")])
2999 ;; Generate doublehv
3000 ;; signed int/float to double convert words 0 and 1
;; High-word-pair conversion: two xxsldwi shifts position words 0 and 1,
;; then the convert instruction handles them.  NOTE(review): excerpt
;; elides some lines, including part of the second xxsldwi's operands.
3001 (define_expand "doubleh<mode>2"
3002 [(set (match_operand:V2DF 0 "register_operand" "=v")
3003 (match_operand:VSX_W 1 "register_operand" "v"))]
3009 machine_mode op_mode = GET_MODE (operands[1]);
3010 rtx_tmp = gen_reg_rtx (op_mode);
3012 if (VECTOR_ELT_ORDER_BIG)
3014 /* Big endian word numbering for words in operand is 0 1 2 3.
3015 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3016 take (rts_tmp operand[1]) and shift left three words
3017 1 2 3 0 0 1 2 3 => 0 0 1 2
3018 Input words 0 and 1 are now where they need to be for the
3020 rtx_val = GEN_INT (1);
3021 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3022 operands[1], rtx_val));
3024 rtx_val = GEN_INT (3);
3025 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3026 operands[1], rtx_val));
3027 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3031 /* Little endian word numbering for operand is 3 2 1 0.
3032 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3033 take (operand[1] rts_tmp) and shift left two words
3034 3 2 1 0 0 3 2 1 => 1 0 0 3
3035 Input words 0 and 1 are now where they need to be for the
3037 rtx_val = GEN_INT (3);
3038 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3039 operands[1], rtx_val));
3041 rtx_val = GEN_INT (2);
3042 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3044 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3048 [(set_attr "type" "veccomplex")])
3050 ;; Generate unsdoublehv
3051 ;; unsigned int to double convert words 0 and 1
;; Unsigned variant of doubleh, fixed to V4SI with xvcvuxwdp.
3052 (define_expand "unsdoublehv4si2"
3053 [(set (match_operand:V2DF 0 "register_operand" "=v")
3054 (match_operand:V4SI 1 "register_operand" "v"))]
3057 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3058 rtx rtx_val = GEN_INT (12);
3060 if (VECTOR_ELT_ORDER_BIG)
3062 /* Big endian word numbering for words in operand is 0 1 2 3.
3063 Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
3064 take (rts_tmp operand[1]) and shift left three words
3065 1 2 3 0 0 1 2 3 => 0 0 1 2
3066 Input words 0 and 1 are now where they need to be for the
3068 rtx_val = GEN_INT (1);
3069 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3070 operands[1], rtx_val));
3072 rtx_val = GEN_INT (3);
3073 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3074 operands[1], rtx_val));
3075 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3079 /* Little endian word numbering for operand is 3 2 1 0.
3080 Shift operand left three words, rtx_tmp word order is now 0 3 2 1.
3081 take (operand[1] rts_tmp) and shift left two words
3082 3 2 1 0 0 3 2 1 => 1 0 0 3
3083 Input words 1 and 0 are now where they need to be for the
3085 rtx_val = GEN_INT (3);
3087 rtx_tmp = gen_reg_rtx (V4SImode);
3088 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3089 operands[1], rtx_val));
3091 rtx_val = GEN_INT (2);
3092 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3094 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3098 [(set_attr "type" "veccomplex")])
3100 ;; Generate doublelv
3101 ;; signed int/float to double convert words 2 and 3
;; Low-word-pair conversion; mirrors doubleh with the shift counts
;; arranged to land words 2 and 3 in position.  NOTE(review): excerpt
;; elides some lines, including part of the second xxsldwi's operands.
3102 (define_expand "doublel<mode>2"
3103 [(set (match_operand:V2DF 0 "register_operand" "=v")
3104 (match_operand:VSX_W 1 "register_operand" "v"))]
3108 rtx rtx_val = GEN_INT (3);
3110 machine_mode op_mode = GET_MODE (operands[1]);
3111 rtx_tmp = gen_reg_rtx (op_mode);
3113 if (VECTOR_ELT_ORDER_BIG)
3115 /* Big endian word numbering for operand is 0 1 2 3.
3116 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3117 take (operand[1] rtx_tmp) and shift left two words
3118 0 1 2 3 3 0 1 2 => 2 3 3 0
3119 now use convert instruction to convert word 2 and 3 in the
3121 rtx_val = GEN_INT (3);
3122 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3123 operands[1], rtx_val));
3125 rtx_val = GEN_INT (2);
3126 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3128 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3132 /* Little endian word numbering for operand is 3 2 1 0.
3133 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3134 take (rtx_tmp operand[1]) and shift left three words
3135 2 1 0 3 3 2 1 0 => 3 3 2 1
3136 now use convert instruction to convert word 3 and 2 in the
3138 rtx_val = GEN_INT (1);
3139 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, operands[1],
3140 operands[1], rtx_val));
3142 rtx_val = GEN_INT (3);
3143 emit_insn (gen_vsx_xxsldwi_<mode> (rtx_tmp, rtx_tmp,
3144 operands[1], rtx_val));
3145 emit_insn (gen_vsx_xvcv<VS_sxwsp>dp (operands[0], rtx_tmp));
3149 [(set_attr "type" "veccomplex")])
3151 ;; Generate unsdoublelv
3152 ;; unsigned int to double convert convert 2 and 3
;; Unsigned variant of doublel, fixed to V4SI with xvcvuxwdp.
3153 (define_expand "unsdoublelv4si2"
3154 [(set (match_operand:V2DF 0 "register_operand" "=v")
3155 (match_operand:V4SI 1 "register_operand" "v"))]
3158 rtx rtx_tmp = gen_reg_rtx (V4SImode);
3159 rtx rtx_val = GEN_INT (12);
3161 if (VECTOR_ELT_ORDER_BIG)
3163 /* Big endian word numbering for operand is 0 1 2 3.
3164 Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
3165 take (operand[1] rtx_tmp) and shift left two words
3166 0 1 2 3 3 0 1 2 => 2 3 3 0
3167 now use convert instruction to convert word 2 and 3 in the
3169 rtx_val = GEN_INT (3);
3170 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3171 operands[1], rtx_val));
3173 rtx_val = GEN_INT (2);
3174 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, operands[1],
3176 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3180 /* Little endian word numbering for operand is 3 2 1 0.
3181 Shift operand left one word, rtx_tmp word order is now 2 1 0 3.
3182 take (rtx_tmp operand[1]) and shift left three words
3183 2 1 0 3 3 2 1 0 => 3 3 2 1
3184 now use convert instruction to convert word 3 and 2 in the
3186 rtx_val = GEN_INT (1);
3187 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp,
3188 operands[1], operands[1], rtx_val));
3190 rtx_val = GEN_INT (3);
3191 emit_insn (gen_vsx_xxsldwi_v4si (rtx_tmp, rtx_tmp,
3192 operands[1], rtx_val));
3193 emit_insn (gen_vsx_xvcvuxwdp (operands[0], rtx_tmp));
3197 [(set_attr "type" "veccomplex")])
3199 ;; Generate two vector F32 converted to packed vector I16 vector
;; Convert each V4SF input to V4SI with vctuxs (scale 0), then pack the
;; two word vectors into one V8HI with saturation (vpkswss).
3200 (define_expand "convert_4f32_8i16"
3201 [(set (match_operand:V8HI 0 "register_operand" "=v")
3202 (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "v")
3203 (match_operand:V4SF 2 "register_operand" "v")]
3204 UNSPEC_CONVERT_4F32_8I16))]
3207 rtx rtx_tmp_hi = gen_reg_rtx (V4SImode);
3208 rtx rtx_tmp_lo = gen_reg_rtx (V4SImode);
3210 emit_insn (gen_altivec_vctuxs (rtx_tmp_hi, operands[1], const0_rtx));
3211 emit_insn (gen_altivec_vctuxs (rtx_tmp_lo, operands[2], const0_rtx));
3212 emit_insn (gen_altivec_vpkswss (operands[0], rtx_tmp_hi, rtx_tmp_lo));
3217 ;; xxlxor/vxor SCRATCH0,SCRATCH0,SCRATCH0
3218 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3219 ;; vmaxs? %0,%1,SCRATCH2"
;; abs(x) = max(x, 0 - x); the three template sets mirror the three
;; instructions sketched above.  NOTE(review): excerpt elides some lines.
3220 (define_expand "abs<mode>2"
3221 [(set (match_dup 2) (match_dup 3))
3223 (minus:VI2 (match_dup 2)
3224 (match_operand:VI2 1 "register_operand" "v")))
3225 (set (match_operand:VI2 0 "register_operand" "=v")
3226 (smax:VI2 (match_dup 1) (match_dup 4)))]
3229 operands[2] = gen_reg_rtx (<MODE>mode);
3230 operands[3] = CONST0_RTX (<MODE>mode);
3231 operands[4] = gen_reg_rtx (<MODE>mode);
3235 ;; vspltisw SCRATCH1,0
3236 ;; vsubu?m SCRATCH2,SCRATCH1,%1
3237 ;; vmins? %0,%1,SCRATCH2"
;; nabs(x) = min(x, 0 - x): negative absolute value.
3238 (define_expand "nabs<mode>2"
3239 [(set (match_dup 2) (match_dup 3))
3241 (minus:VI2 (match_dup 2)
3242 (match_operand:VI2 1 "register_operand" "v")))
3243 (set (match_operand:VI2 0 "register_operand" "=v")
3244 (smin:VI2 (match_dup 1) (match_dup 4)))]
3247 operands[2] = gen_reg_rtx (<MODE>mode);
3248 operands[3] = CONST0_RTX (<MODE>mode);
3249 operands[4] = gen_reg_rtx (<MODE>mode);
3253 ;; vspltisw SCRATCH1,-1
3254 ;; vslw SCRATCH2,SCRATCH1,SCRATCH1
3255 ;; vandc %0,%1,SCRATCH2
;; Float abs: build the 0x80000000 sign mask (-1 shifted left by 31 via
;; vslw of all-ones by all-ones) and clear the sign bits with andc.
3256 (define_expand "altivec_absv4sf2"
3258 (vec_duplicate:V4SI (const_int -1)))
3260 (ashift:V4SI (match_dup 2) (match_dup 2)))
3261 (set (match_operand:V4SF 0 "register_operand" "=v")
3262 (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
3263 (match_operand:V4SF 1 "register_operand" "v")))]
3266 operands[2] = gen_reg_rtx (V4SImode);
3267 operands[3] = gen_reg_rtx (V4SImode);
3271 ;; vspltis? SCRATCH0,0
3272 ;; vsubs?s SCRATCH2,SCRATCH1,%1
3273 ;; vmaxs? %0,%1,SCRATCH2"
;; Saturating abs: subtract-with-saturation sets VSCR, hence the
;; parallel with the VSCR set.
3274 (define_expand "altivec_abss_<mode>"
3275 [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
3276 (parallel [(set (match_dup 3)
3277 (unspec:VI [(match_dup 2)
3278 (match_operand:VI 1 "register_operand" "v")]
3280 (set (reg:SI VSCR_REGNO)
3281 (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
3282 (set (match_operand:VI 0 "register_operand" "=v")
3283 (smax:VI (match_dup 1) (match_dup 3)))]
3286 operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
3287 operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
;; Horizontal add of a short-int vector: vsum4s*s partial sums into a
;; V4SI, vsumsws collapses to one word, then extract the scalar from the
;; endian-dependent element.  NOTE(review): excerpt elides some lines of
;; these expands.
3290 (define_expand "reduc_plus_scal_<mode>"
3291 [(set (match_operand:<VI_scalar> 0 "register_operand" "=v")
3292 (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
3293 UNSPEC_REDUC_PLUS))]
3296 rtx vzero = gen_reg_rtx (V4SImode);
3297 rtx vtmp1 = gen_reg_rtx (V4SImode);
3298 rtx vtmp2 = gen_reg_rtx (<MODE>mode);
3299 rtx dest = gen_lowpart (V4SImode, vtmp2);
3300 int elt = VECTOR_ELT_ORDER_BIG ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
3302 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3303 emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
3304 emit_insn (gen_altivec_vsumsws_direct (dest, vtmp1, vzero));
3305 rs6000_expand_vector_extract (operands[0], vtmp2, GEN_INT (elt));
;; ISA 3.0 native vector negate instruction.
3309 (define_insn "*p9_neg<mode>2"
3310 [(set (match_operand:VNEG 0 "altivec_register_operand" "=v")
3311 (neg:VNEG (match_operand:VNEG 1 "altivec_register_operand" "v")))]
3313 "vneg<VI_char> %0,%1"
3314 [(set_attr "type" "vecsimple")])
;; Generic vector negate: use vneg when available (P9, word/dword
;; modes), otherwise synthesize as 0 - x.
3316 (define_expand "neg<mode>2"
3317 [(set (match_operand:VI2 0 "register_operand" "")
3318 (neg:VI2 (match_operand:VI2 1 "register_operand" "")))]
3321 if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode))
3325 vzero = gen_reg_rtx (GET_MODE (operands[0]));
3326 emit_move_insn (vzero, CONST0_RTX (<MODE>mode));
3327 emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
;; Unsigned dot product: op0 = op3 + sum of widened products of op1*op2.
;; A single vmsumu<VI_char>m multiply-sum instruction performs both the
;; widening multiplies and the accumulation into operand 3.
;; NOTE(review): the unspec name, condition and DONE lines are missing from
;; this extract.
3332 (define_expand "udot_prod<mode>"
3333 [(set (match_operand:V4SI 0 "register_operand" "=v")
3334 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3335 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
3336 (match_operand:VIshort 2 "register_operand" "v")]
3341 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
;; Signed dot product for V8HI: op0 = op3 + sum of widened halfword products.
;; vmsumshm does the widening multiplies and modular accumulation in one
;; instruction.
;; NOTE(review): the unspec name, condition and DONE lines are missing from
;; this extract.
3345 (define_expand "sdot_prodv8hi"
3346 [(set (match_operand:V4SI 0 "register_operand" "=v")
3347 (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
3348 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3349 (match_operand:V8HI 2 "register_operand" "v")]
3354 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]))
;; Widening unsigned sum: implemented as a dot product of operand 1 with a
;; vector of all 1s (splatted via vspltis<VI_char>), accumulated into
;; operand 2 by vmsumu<VI_char>m.
;; NOTE(review): the unspec name, condition, braces and DONE lines are
;; missing from this extract.
3358 (define_expand "widen_usum<mode>3"
3359 [(set (match_operand:V4SI 0 "register_operand" "=v")
3360 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3361 (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
3366 rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
3368 emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
3369 emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
;; Widening signed byte sum: dot product of operand 1 with a V16QI vector of
;; 1s (vspltisb), accumulated into operand 2 by vmsummbm.
;; NOTE(review): the unspec name, condition, braces and DONE lines are
;; missing from this extract.
3373 (define_expand "widen_ssumv16qi3"
3374 [(set (match_operand:V4SI 0 "register_operand" "=v")
3375 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3376 (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
3381 rtx vones = gen_reg_rtx (V16QImode);
3383 emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
3384 emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
;; Widening signed halfword sum: dot product of operand 1 with a V8HI vector
;; of 1s (vspltish), accumulated into operand 2 by vmsumshm.
;; NOTE(review): the unspec name, condition, braces and DONE lines are
;; missing from this extract.
3388 (define_expand "widen_ssumv8hi3"
3389 [(set (match_operand:V4SI 0 "register_operand" "=v")
3390 (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
3391 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3396 rtx vones = gen_reg_rtx (V8HImode);
3398 emit_insn (gen_altivec_vspltish (vones, const1_rtx));
3399 emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
3403 (define_expand "vec_unpacks_hi_<VP_small_lc>"
3404 [(set (match_operand:VP 0 "register_operand" "=v")
3405 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3406 UNSPEC_VUNPACK_HI_SIGN_DIRECT))]
3410 (define_expand "vec_unpacks_lo_<VP_small_lc>"
3411 [(set (match_operand:VP 0 "register_operand" "=v")
3412 (unspec:VP [(match_operand:<VP_small> 1 "register_operand" "v")]
3413 UNSPEC_VUNPACK_LO_SIGN_DIRECT))]
3417 (define_insn "vperm_v8hiv4si"
3418 [(set (match_operand:V4SI 0 "register_operand" "=v,?wo")
3419 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,wo")
3420 (match_operand:V4SI 2 "register_operand" "v,0")
3421 (match_operand:V16QI 3 "register_operand" "v,wo")]
3427 [(set_attr "type" "vecperm")
3428 (set_attr "length" "4")])
3430 (define_insn "vperm_v16qiv8hi"
3431 [(set (match_operand:V8HI 0 "register_operand" "=v,?wo")
3432 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,wo")
3433 (match_operand:V8HI 2 "register_operand" "v,0")
3434 (match_operand:V16QI 3 "register_operand" "v,wo")]
3440 [(set_attr "type" "vecperm")
3441 (set_attr "length" "4")])
3444 (define_expand "vec_unpacku_hi_v16qi"
3445 [(set (match_operand:V8HI 0 "register_operand" "=v")
3446 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
3451 rtx vzero = gen_reg_rtx (V8HImode);
3452 rtx mask = gen_reg_rtx (V16QImode);
3453 rtvec v = rtvec_alloc (16);
3454 bool be = BYTES_BIG_ENDIAN;
3456 emit_insn (gen_altivec_vspltish (vzero, const0_rtx));
3458 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 7);
3459 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 0 : 16);
3460 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 16 : 6);
3461 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 1 : 16);
3462 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 5);
3463 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 2 : 16);
3464 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 16 : 4);
3465 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 3 : 16);
3466 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 3);
3467 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 4 : 16);
3468 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 : 2);
3469 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 5 : 16);
3470 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 1);
3471 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 6 : 16);
3472 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 : 0);
3473 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 7 : 16);
3475 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3476 emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
3480 (define_expand "vec_unpacku_hi_v8hi"
3481 [(set (match_operand:V4SI 0 "register_operand" "=v")
3482 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3487 rtx vzero = gen_reg_rtx (V4SImode);
3488 rtx mask = gen_reg_rtx (V16QImode);
3489 rtvec v = rtvec_alloc (16);
3490 bool be = BYTES_BIG_ENDIAN;
3492 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3494 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 7);
3495 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 17 : 6);
3496 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 0 : 17);
3497 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 1 : 16);
3498 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 5);
3499 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 17 : 4);
3500 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 2 : 17);
3501 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 3 : 16);
3502 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 3);
3503 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 17 : 2);
3504 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 4 : 17);
3505 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 5 : 16);
3506 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 1);
3507 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 : 0);
3508 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 6 : 17);
3509 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 7 : 16);
3511 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3512 emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
3516 (define_expand "vec_unpacku_lo_v16qi"
3517 [(set (match_operand:V8HI 0 "register_operand" "=v")
3518 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
3523 rtx vzero = gen_reg_rtx (V8HImode);
3524 rtx mask = gen_reg_rtx (V16QImode);
3525 rtvec v = rtvec_alloc (16);
3526 bool be = BYTES_BIG_ENDIAN;
3528 emit_insn (gen_altivec_vspltish (vzero, const0_rtx));
3530 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
3531 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 8 : 16);
3532 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 16 : 14);
3533 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 9 : 16);
3534 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
3535 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 10 : 16);
3536 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 16 : 12);
3537 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
3538 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
3539 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 12 : 16);
3540 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 16 : 10);
3541 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
3542 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 9);
3543 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 14 : 16);
3544 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 16 : 8);
3545 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);
3547 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3548 emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
3552 (define_expand "vec_unpacku_lo_v8hi"
3553 [(set (match_operand:V4SI 0 "register_operand" "=v")
3554 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
3559 rtx vzero = gen_reg_rtx (V4SImode);
3560 rtx mask = gen_reg_rtx (V16QImode);
3561 rtvec v = rtvec_alloc (16);
3562 bool be = BYTES_BIG_ENDIAN;
3564 emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
3566 RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, be ? 16 : 15);
3567 RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, be ? 17 : 14);
3568 RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, be ? 8 : 17);
3569 RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, be ? 9 : 16);
3570 RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, be ? 16 : 13);
3571 RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, be ? 17 : 12);
3572 RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, be ? 10 : 17);
3573 RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, be ? 11 : 16);
3574 RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, be ? 16 : 11);
3575 RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, be ? 17 : 10);
3576 RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, be ? 12 : 17);
3577 RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, be ? 13 : 16);
3578 RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, be ? 16 : 9);
3579 RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, be ? 17 : 8);
3580 RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, be ? 14 : 17);
3581 RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, be ? 15 : 16);
3583 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3584 emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
3588 (define_expand "vec_widen_umult_hi_v16qi"
3589 [(set (match_operand:V8HI 0 "register_operand" "=v")
3590 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3591 (match_operand:V16QI 2 "register_operand" "v")]
3596 rtx ve = gen_reg_rtx (V8HImode);
3597 rtx vo = gen_reg_rtx (V8HImode);
3599 if (BYTES_BIG_ENDIAN)
3601 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3602 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3603 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3607 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3608 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3609 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
3614 (define_expand "vec_widen_umult_lo_v16qi"
3615 [(set (match_operand:V8HI 0 "register_operand" "=v")
3616 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3617 (match_operand:V16QI 2 "register_operand" "v")]
3622 rtx ve = gen_reg_rtx (V8HImode);
3623 rtx vo = gen_reg_rtx (V8HImode);
3625 if (BYTES_BIG_ENDIAN)
3627 emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
3628 emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
3629 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3633 emit_insn (gen_altivec_vmuloub (ve, operands[1], operands[2]));
3634 emit_insn (gen_altivec_vmuleub (vo, operands[1], operands[2]));
3635 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
3640 (define_expand "vec_widen_smult_hi_v16qi"
3641 [(set (match_operand:V8HI 0 "register_operand" "=v")
3642 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3643 (match_operand:V16QI 2 "register_operand" "v")]
3648 rtx ve = gen_reg_rtx (V8HImode);
3649 rtx vo = gen_reg_rtx (V8HImode);
3651 if (BYTES_BIG_ENDIAN)
3653 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3654 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3655 emit_insn (gen_altivec_vmrghh_direct (operands[0], ve, vo));
3659 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3660 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3661 emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve));
3666 (define_expand "vec_widen_smult_lo_v16qi"
3667 [(set (match_operand:V8HI 0 "register_operand" "=v")
3668 (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
3669 (match_operand:V16QI 2 "register_operand" "v")]
3674 rtx ve = gen_reg_rtx (V8HImode);
3675 rtx vo = gen_reg_rtx (V8HImode);
3677 if (BYTES_BIG_ENDIAN)
3679 emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
3680 emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
3681 emit_insn (gen_altivec_vmrglh_direct (operands[0], ve, vo));
3685 emit_insn (gen_altivec_vmulosb (ve, operands[1], operands[2]));
3686 emit_insn (gen_altivec_vmulesb (vo, operands[1], operands[2]));
3687 emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve));
3692 (define_expand "vec_widen_umult_hi_v8hi"
3693 [(set (match_operand:V4SI 0 "register_operand" "=v")
3694 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3695 (match_operand:V8HI 2 "register_operand" "v")]
3700 rtx ve = gen_reg_rtx (V4SImode);
3701 rtx vo = gen_reg_rtx (V4SImode);
3703 if (BYTES_BIG_ENDIAN)
3705 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3706 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3707 emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
3711 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3712 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3713 emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
3718 (define_expand "vec_widen_umult_lo_v8hi"
3719 [(set (match_operand:V4SI 0 "register_operand" "=v")
3720 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3721 (match_operand:V8HI 2 "register_operand" "v")]
3726 rtx ve = gen_reg_rtx (V4SImode);
3727 rtx vo = gen_reg_rtx (V4SImode);
3729 if (BYTES_BIG_ENDIAN)
3731 emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
3732 emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
3733 emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
3737 emit_insn (gen_altivec_vmulouh (ve, operands[1], operands[2]));
3738 emit_insn (gen_altivec_vmuleuh (vo, operands[1], operands[2]));
3739 emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
3744 (define_expand "vec_widen_smult_hi_v8hi"
3745 [(set (match_operand:V4SI 0 "register_operand" "=v")
3746 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3747 (match_operand:V8HI 2 "register_operand" "v")]
3752 rtx ve = gen_reg_rtx (V4SImode);
3753 rtx vo = gen_reg_rtx (V4SImode);
3755 if (BYTES_BIG_ENDIAN)
3757 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3758 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3759 emit_insn (gen_altivec_vmrghw_direct (operands[0], ve, vo));
3763 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3764 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3765 emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve));
3770 (define_expand "vec_widen_smult_lo_v8hi"
3771 [(set (match_operand:V4SI 0 "register_operand" "=v")
3772 (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
3773 (match_operand:V8HI 2 "register_operand" "v")]
3778 rtx ve = gen_reg_rtx (V4SImode);
3779 rtx vo = gen_reg_rtx (V4SImode);
3781 if (BYTES_BIG_ENDIAN)
3783 emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
3784 emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
3785 emit_insn (gen_altivec_vmrglw_direct (operands[0], ve, vo));
3789 emit_insn (gen_altivec_vmulosh (ve, operands[1], operands[2]));
3790 emit_insn (gen_altivec_vmulesh (vo, operands[1], operands[2]));
3791 emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve));
3796 (define_expand "vec_pack_trunc_<mode>"
3797 [(set (match_operand:<VP_small> 0 "register_operand" "=v")
3798 (unspec:<VP_small> [(match_operand:VP 1 "register_operand" "v")
3799 (match_operand:VP 2 "register_operand" "v")]
3800 UNSPEC_VPACK_UNS_UNS_MOD))]
;; V16QI multiply.  There is no byte-multiply instruction, so compute the
;; even and odd widening products (vmulesb/vmulosb give V8HI results) and
;; then use vperm to pick the low byte of each halfword product, interleaved
;; back into element order.
;; NOTE(review): the condition string, braces, loop-variable declaration and
;; DONE lines are missing from this extract.
3804 (define_expand "mulv16qi3"
3805 [(set (match_operand:V16QI 0 "register_operand" "=v")
3806 (mult:V16QI (match_operand:V16QI 1 "register_operand" "v")
3807 (match_operand:V16QI 2 "register_operand" "v")))]
3811 rtx even = gen_reg_rtx (V8HImode);
3812 rtx odd = gen_reg_rtx (V8HImode);
3813 rtx mask = gen_reg_rtx (V16QImode);
3814 rtvec v = rtvec_alloc (16);
/* Build the permute control: select the low-order byte of each 16-bit
   product, alternating between the even-product vector (bytes 0-15) and
   the odd-product vector (bytes 16-31); the selector values are mirrored
   for little-endian.  */
3817 for (i = 0; i < 8; ++i) {
3818 RTVEC_ELT (v, 2 * i)
3819 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 1 : 31 - 2 * i);
3820 RTVEC_ELT (v, 2 * i + 1)
3821 = gen_rtx_CONST_INT (QImode, BYTES_BIG_ENDIAN ? 2 * i + 17 : 15 - 2 * i);
3824 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3825 emit_insn (gen_altivec_vmulesb (even, operands[1], operands[2]));
3826 emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2]));
3827 emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask));
3831 (define_expand "altivec_negv4sf2"
3832 [(use (match_operand:V4SF 0 "register_operand" ""))
3833 (use (match_operand:V4SF 1 "register_operand" ""))]
3839 /* Generate [-0.0, -0.0, -0.0, -0.0]. */
3840 neg0 = gen_reg_rtx (V4SImode);
3841 emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
3842 emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
3845 emit_insn (gen_xorv4sf3 (operands[0],
3846 gen_lowpart (V4SFmode, neg0), operands[1]));
3851 ;; Vector reverse elements
;; Reverse the elements of a vector: build a byte-level permute control that
;; maps element j to element (num_elements - 1 - j), keeping the bytes
;; within each element in their original order, then issue a single vperm.
;; NOTE(review): the unspec name, condition, braces and DONE lines are
;; missing from this extract.
3852 (define_expand "altivec_vreve<mode>2"
3853 [(set (match_operand:VEC_A 0 "register_operand" "=v")
3854 (unspec:VEC_A [(match_operand:VEC_A 1 "register_operand" "v")]
3858 int i, j, size, num_elements;
3859 rtvec v = rtvec_alloc (16);
3860 rtx mask = gen_reg_rtx (V16QImode);
3862 size = GET_MODE_UNIT_SIZE (<MODE>mode);
3863 num_elements = GET_MODE_NUNITS (<MODE>mode);
/* Byte i of destination element j comes from byte i of source element
   (num_elements - 1 - j).  */
3865 for (j = 0; j < num_elements; j++)
3866 for (i = 0; i < size; i++)
3867 RTVEC_ELT (v, i + j * size)
3868 = GEN_INT (i + (num_elements - 1 - j) * size);
3870 emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v)));
3871 emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1],
3872 operands[1], mask));
3876 ;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
3877 ;; STVLX, STVLXL, STVRX, STVRXL are available only on Cell.
3878 (define_insn "altivec_lvlx"
3879 [(set (match_operand:V16QI 0 "register_operand" "=v")
3880 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3882 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3884 [(set_attr "type" "vecload")])
3886 (define_insn "altivec_lvlxl"
3887 [(set (match_operand:V16QI 0 "register_operand" "=v")
3888 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3890 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3892 [(set_attr "type" "vecload")])
3894 (define_insn "altivec_lvrx"
3895 [(set (match_operand:V16QI 0 "register_operand" "=v")
3896 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3898 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3900 [(set_attr "type" "vecload")])
3902 (define_insn "altivec_lvrxl"
3903 [(set (match_operand:V16QI 0 "register_operand" "=v")
3904 (unspec:V16QI [(match_operand:BLK 1 "memory_operand" "Z")]
3906 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3908 [(set_attr "type" "vecload")])
3910 (define_insn "altivec_stvlx"
3912 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3913 (match_operand:V16QI 1 "register_operand" "v"))
3914 (unspec [(const_int 0)] UNSPEC_STVLX)])]
3915 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3917 [(set_attr "type" "vecstore")])
3919 (define_insn "altivec_stvlxl"
3921 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3922 (match_operand:V16QI 1 "register_operand" "v"))
3923 (unspec [(const_int 0)] UNSPEC_STVLXL)])]
3924 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3926 [(set_attr "type" "vecstore")])
3928 (define_insn "altivec_stvrx"
3930 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3931 (match_operand:V16QI 1 "register_operand" "v"))
3932 (unspec [(const_int 0)] UNSPEC_STVRX)])]
3933 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3935 [(set_attr "type" "vecstore")])
3937 (define_insn "altivec_stvrxl"
3939 [(set (match_operand:V16QI 0 "memory_operand" "=Z")
3940 (match_operand:V16QI 1 "register_operand" "v"))
3941 (unspec [(const_int 0)] UNSPEC_STVRXL)])]
3942 "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
3944 [(set_attr "type" "vecstore")])
3946 (define_expand "vec_unpacks_float_hi_v8hi"
3947 [(set (match_operand:V4SF 0 "register_operand" "")
3948 (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
3949 UNSPEC_VUPKHS_V4SF))]
3953 rtx tmp = gen_reg_rtx (V4SImode);
3955 emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
3956 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
3960 (define_expand "vec_unpacks_float_lo_v8hi"
3961 [(set (match_operand:V4SF 0 "register_operand" "")
3962 (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
3963 UNSPEC_VUPKLS_V4SF))]
3967 rtx tmp = gen_reg_rtx (V4SImode);
3969 emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
3970 emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
3974 (define_expand "vec_unpacku_float_hi_v8hi"
3975 [(set (match_operand:V4SF 0 "register_operand" "")
3976 (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
3977 UNSPEC_VUPKHU_V4SF))]
3981 rtx tmp = gen_reg_rtx (V4SImode);
3983 emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
3984 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
3988 (define_expand "vec_unpacku_float_lo_v8hi"
3989 [(set (match_operand:V4SF 0 "register_operand" "")
3990 (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
3991 UNSPEC_VUPKLU_V4SF))]
3995 rtx tmp = gen_reg_rtx (V4SImode);
3997 emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
3998 emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
4003 ;; Power8/power9 vector instructions encoded as Altivec instructions
4005 ;; Vector count leading zeros
4006 (define_insn "*p8v_clz<mode>2"
4007 [(set (match_operand:VI2 0 "register_operand" "=v")
4008 (clz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4011 [(set_attr "length" "4")
4012 (set_attr "type" "vecsimple")])
4014 ;; Vector absolute difference unsigned
4015 (define_expand "vadu<mode>3"
4016 [(set (match_operand:VI 0 "register_operand")
4017 (unspec:VI [(match_operand:VI 1 "register_operand")
4018 (match_operand:VI 2 "register_operand")]
4022 ;; Vector absolute difference unsigned
4023 (define_insn "p9_vadu<mode>3"
4024 [(set (match_operand:VI 0 "register_operand" "=v")
4025 (unspec:VI [(match_operand:VI 1 "register_operand" "v")
4026 (match_operand:VI 2 "register_operand" "v")]
4029 "vabsdu<wd> %0,%1,%2"
4030 [(set_attr "type" "vecsimple")])
4032 ;; Vector count trailing zeros
4033 (define_insn "*p9v_ctz<mode>2"
4034 [(set (match_operand:VI2 0 "register_operand" "=v")
4035 (ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4038 [(set_attr "length" "4")
4039 (set_attr "type" "vecsimple")])
4041 ;; Vector population count
4042 (define_insn "*p8v_popcount<mode>2"
4043 [(set (match_operand:VI2 0 "register_operand" "=v")
4044 (popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))]
4047 [(set_attr "length" "4")
4048 (set_attr "type" "vecsimple")])
4051 (define_insn "*p9v_parity<mode>2"
4052 [(set (match_operand:VParity 0 "register_operand" "=v")
4053 (parity:VParity (match_operand:VParity 1 "register_operand" "v")))]
4056 [(set_attr "length" "4")
4057 (set_attr "type" "vecsimple")])
4059 ;; Vector Gather Bits by Bytes by Doubleword
4060 (define_insn "p8v_vgbbd"
4061 [(set (match_operand:V16QI 0 "register_operand" "=v")
4062 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
4066 [(set_attr "length" "4")
4067 (set_attr "type" "vecsimple")])
4070 ;; 128-bit binary integer arithmetic
4071 ;; We have a special container type (V1TImode) to allow operations using the
4072 ;; ISA 2.07 128-bit binary support to target the VMX/altivec registers without
4073 ;; having to worry about the register allocator deciding GPRs are better.
4075 (define_insn "altivec_vadduqm"
4076 [(set (match_operand:V1TI 0 "register_operand" "=v")
4077 (plus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4078 (match_operand:V1TI 2 "register_operand" "v")))]
4081 [(set_attr "length" "4")
4082 (set_attr "type" "vecsimple")])
4084 (define_insn "altivec_vaddcuq"
4085 [(set (match_operand:V1TI 0 "register_operand" "=v")
4086 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4087 (match_operand:V1TI 2 "register_operand" "v")]
4091 [(set_attr "length" "4")
4092 (set_attr "type" "vecsimple")])
4094 (define_insn "altivec_vsubuqm"
4095 [(set (match_operand:V1TI 0 "register_operand" "=v")
4096 (minus:V1TI (match_operand:V1TI 1 "register_operand" "v")
4097 (match_operand:V1TI 2 "register_operand" "v")))]
4100 [(set_attr "length" "4")
4101 (set_attr "type" "vecsimple")])
4103 (define_insn "altivec_vsubcuq"
4104 [(set (match_operand:V1TI 0 "register_operand" "=v")
4105 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4106 (match_operand:V1TI 2 "register_operand" "v")]
4110 [(set_attr "length" "4")
4111 (set_attr "type" "vecsimple")])
4113 (define_insn "altivec_vaddeuqm"
4114 [(set (match_operand:V1TI 0 "register_operand" "=v")
4115 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4116 (match_operand:V1TI 2 "register_operand" "v")
4117 (match_operand:V1TI 3 "register_operand" "v")]
4120 "vaddeuqm %0,%1,%2,%3"
4121 [(set_attr "length" "4")
4122 (set_attr "type" "vecsimple")])
4124 (define_insn "altivec_vaddecuq"
4125 [(set (match_operand:V1TI 0 "register_operand" "=v")
4126 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4127 (match_operand:V1TI 2 "register_operand" "v")
4128 (match_operand:V1TI 3 "register_operand" "v")]
4131 "vaddecuq %0,%1,%2,%3"
4132 [(set_attr "length" "4")
4133 (set_attr "type" "vecsimple")])
4135 (define_insn "altivec_vsubeuqm"
4136 [(set (match_operand:V1TI 0 "register_operand" "=v")
4137 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4138 (match_operand:V1TI 2 "register_operand" "v")
4139 (match_operand:V1TI 3 "register_operand" "v")]
4142 "vsubeuqm %0,%1,%2,%3"
4143 [(set_attr "length" "4")
4144 (set_attr "type" "vecsimple")])
4146 (define_insn "altivec_vsubecuq"
4147 [(set (match_operand:V1TI 0 "register_operand" "=v")
4148 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4149 (match_operand:V1TI 2 "register_operand" "v")
4150 (match_operand:V1TI 3 "register_operand" "v")]
4153 "vsubecuq %0,%1,%2,%3"
4154 [(set_attr "length" "4")
4155 (set_attr "type" "vecsimple")])
4157 ;; We use V2DI as the output type to simplify converting the permute
4158 ;; bits into an integer
4159 (define_insn "altivec_vbpermq"
4160 [(set (match_operand:V2DI 0 "register_operand" "=v")
4161 (unspec:V2DI [(match_operand:V16QI 1 "register_operand" "v")
4162 (match_operand:V16QI 2 "register_operand" "v")]
4166 [(set_attr "type" "vecperm")])
4168 ; One of the vector API interfaces requires returning vector unsigned char.
4169 (define_insn "altivec_vbpermq2"
4170 [(set (match_operand:V16QI 0 "register_operand" "=v")
4171 (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
4172 (match_operand:V16QI 2 "register_operand" "v")]
4176 [(set_attr "type" "vecperm")])
4178 (define_insn "altivec_vbpermd"
4179 [(set (match_operand:V2DI 0 "register_operand" "=v")
4180 (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "v")
4181 (match_operand:V16QI 2 "register_operand" "v")]
4185 [(set_attr "type" "vecsimple")])
4187 ;; Support for SAD (sum of absolute differences).
4189 ;; Due to saturating semantics, we can't combine the sum-across
4190 ;; with the vector accumulate in vsum4ubs. A vadduwm is needed.
;; Sum of absolute differences of V16QI, accumulated into a V4SI:
;; vabsdub computes per-byte absolute differences, vsum4ubs forms four
;; unsigned partial sums against zero, and a plain (modular) vadduwm adds
;; the accumulator -- the saturating vsum4ubs cannot also do the accumulate.
;; NOTE(review): the condition string, braces and DONE lines are missing
;; from this extract.
4191 (define_expand "usadv16qi"
4192 [(use (match_operand:V4SI 0 "register_operand"))
4193 (use (match_operand:V16QI 1 "register_operand"))
4194 (use (match_operand:V16QI 2 "register_operand"))
4195 (use (match_operand:V4SI 3 "register_operand"))]
4198 rtx absd = gen_reg_rtx (V16QImode);
4199 rtx zero = gen_reg_rtx (V4SImode);
4200 rtx psum = gen_reg_rtx (V4SImode);
4202 emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2]));
4203 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4204 emit_insn (gen_altivec_vsum4ubs (psum, absd, zero));
4205 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4209 ;; Since vsum4shs is saturating and further performs signed
4210 ;; arithmetic, we can't combine the sum-across with the vector
4211 ;; accumulate in vsum4shs. A vadduwm is needed.
;; Sum of absolute differences of V8HI, accumulated into a V4SI:
;; vabsduh computes per-halfword absolute differences, vsum4shs forms four
;; partial sums against zero, and a separate modular add folds in the
;; accumulator -- vsum4shs is saturating and signed, so it cannot be used
;; for the accumulate itself.
;; NOTE(review): the condition string, braces and DONE lines are missing
;; from this extract.
4212 (define_expand "usadv8hi"
4213 [(use (match_operand:V4SI 0 "register_operand"))
4214 (use (match_operand:V8HI 1 "register_operand"))
4215 (use (match_operand:V8HI 2 "register_operand"))
4216 (use (match_operand:V4SI 3 "register_operand"))]
4219 rtx absd = gen_reg_rtx (V8HImode);
4220 rtx zero = gen_reg_rtx (V4SImode);
4221 rtx psum = gen_reg_rtx (V4SImode);
4223 emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2]));
4224 emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
4225 emit_insn (gen_altivec_vsum4shs (psum, absd, zero));
4226 emit_insn (gen_addv4si3 (operands[0], psum, operands[3]));
4230 ;; Decimal Integer operations
4231 (define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB])
4233 (define_int_attr bcd_add_sub [(UNSPEC_BCDADD "add")
4234 (UNSPEC_BCDSUB "sub")])
4236 (define_code_iterator BCD_TEST [eq lt gt unordered])
4238 (define_insn "bcd<bcd_add_sub>"
4239 [(set (match_operand:V1TI 0 "gpc_reg_operand" "=v")
4240 (unspec:V1TI [(match_operand:V1TI 1 "gpc_reg_operand" "v")
4241 (match_operand:V1TI 2 "gpc_reg_operand" "v")
4242 (match_operand:QI 3 "const_0_to_1_operand" "n")]
4243 UNSPEC_BCD_ADD_SUB))
4244 (clobber (reg:CCFP CR6_REGNO))]
4246 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4247 [(set_attr "length" "4")
4248 (set_attr "type" "vecsimple")])
4250 ;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we
4251 ;; can use the unordered test for BCD nans and add/subtracts that overflow. An
4252 ;; UNORDERED test on an integer type (like V1TImode) is not defined. The type
4253 ;; probably should be one that can go in the VMX (Altivec) registers, so we
4254 ;; can't use DDmode or DFmode.
4255 (define_insn "*bcd<bcd_add_sub>_test"
4256 [(set (reg:CCFP CR6_REGNO)
4258 (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "v")
4259 (match_operand:V1TI 2 "register_operand" "v")
4260 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4262 (match_operand:V2DF 4 "zero_constant" "j")))
4263 (clobber (match_scratch:V1TI 0 "=v"))]
4265 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4266 [(set_attr "length" "4")
4267 (set_attr "type" "vecsimple")])
4269 (define_insn "*bcd<bcd_add_sub>_test2"
4270 [(set (match_operand:V1TI 0 "register_operand" "=v")
4271 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "v")
4272 (match_operand:V1TI 2 "register_operand" "v")
4273 (match_operand:QI 3 "const_0_to_1_operand" "i")]
4274 UNSPEC_BCD_ADD_SUB))
4275 (set (reg:CCFP CR6_REGNO)
4277 (unspec:V2DF [(match_dup 1)
4281 (match_operand:V2DF 4 "zero_constant" "j")))]
4283 "bcd<bcd_add_sub>. %0,%1,%2,%3"
4284 [(set_attr "length" "4")
4285 (set_attr "type" "vecsimple")])
4287 (define_insn "darn_32"
4288 [(set (match_operand:SI 0 "register_operand" "=r")
4289 (unspec:SI [(const_int 0)] UNSPEC_DARN_32))]
4292 [(set_attr "type" "integer")])
4294 (define_insn "darn_raw"
4295 [(set (match_operand:DI 0 "register_operand" "=r")
4296 (unspec:DI [(const_int 0)] UNSPEC_DARN_RAW))]
4297 "TARGET_P9_MISC && TARGET_64BIT"
4299 [(set_attr "type" "integer")])
4302 [(set (match_operand:DI 0 "register_operand" "=r")
4303 (unspec:DI [(const_int 0)] UNSPEC_DARN))]
4304 "TARGET_P9_MISC && TARGET_64BIT"
4306 [(set_attr "type" "integer")])
4308 ;; Test byte within range.
4310 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4311 ;; represents a byte whose value is ignored in this context and
4312 ;; vv, the least significant byte, holds the byte value that is to
4313 ;; be tested for membership within the range specified by operand 2.
4314 ;; The bytes of operand 2 are organized as xx:xx:hi:lo.
4316 ;; Return in target register operand 0 a value of 1 if lo <= vv and
4317 ;; vv <= hi. Otherwise, set register operand 0 to 0.
4319 ;; Though the instructions to which this expansion maps operate on
4320 ;; 64-bit registers, the current implementation only operates on
4321 ;; SI-mode operands as the high-order bits provide no information
4322 ;; that is not already available in the low-order bits. To avoid the
4323 ;; costs of data widening operations, future enhancements might allow
4324 ;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
;; Expander: allocate a fresh CCmode pseudo (operand 3) in the prepare
;; statements, emit the CC-setting range compare of the low byte of
;; operand 1 against the hi:lo bounds in operand 2, then translate the
;; LT/GT bits of that CC into the SImode boolean result in operand 0,
;; exactly as described in the comment block above.
4325 (define_expand "cmprb"
4327 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
4328 (match_operand:SI 2 "gpc_reg_operand" "r")]
4330 (set (match_operand:SI 0 "gpc_reg_operand" "=r")
4331 (if_then_else:SI (lt (match_dup 3)
4334 (if_then_else (gt (match_dup 3)
4340 operands[3] = gen_reg_rtx (CCmode);
4343 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4344 ;; represents a byte whose value is ignored in this context and
4345 ;; vv, the least significant byte, holds the byte value that is to
4346 ;; be tested for membership within the range specified by operand 2.
4347 ;; The bytes of operand 2 are organized as xx:xx:hi:lo.
4349 ;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
4350 ;; lo <= vv and vv <= hi. Otherwise, set the GT bit to 0. The other
4351 ;; 3 bits of the target CR register are all set to 0.
;; CC-setting half of the "cmprb" expansion: compare the low byte of
;; SI operand 1 against the hi:lo range in SI operand 2, leaving the
;; result in the GT bit of CR operand 0 as described above (presumably
;; emitting the ISA 3.0 cmprb instruction — template elided here).
4352 (define_insn "*cmprb_internal"
4353 [(set (match_operand:CC 0 "cc_reg_operand" "=y")
4354 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
4355 (match_operand:SI 2 "gpc_reg_operand" "r")]
4359 [(set_attr "type" "logical")])
4361 ;; Set operand 0 register to -1 if the LT bit (0x8) of condition
4362 ;; register operand 1 is on. Otherwise, set operand 0 register to 1
4363 ;; if the GT bit (0x4) of condition register operand 1 is on.
4364 ;; Otherwise, set operand 0 to 0. Note that the result stored into
4365 ;; register operand 0 is non-zero iff either the LT or GT bits are on
4366 ;; within condition register operand 1.
;; Materialize CC operand 1 into GPR operand 0 as -1/1/0 using signed
;; lt/gt tests (see the description above) — presumably the ISA 3.0
;; setb instruction; the asm template line is elided in this chunk.
4367 (define_insn "setb_signed"
4368 [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
4369 (if_then_else:SI (lt (match_operand:CC 1 "cc_reg_operand" "y")
4372 (if_then_else (gt (match_dup 1)
4378 [(set_attr "type" "logical")])
;; Unsigned twin of setb_signed: same -1/1/0 materialization but with
;; ltu/gtu tests on a CCUNSmode condition register operand.
4380 (define_insn "setb_unsigned"
4381 [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
4382 (if_then_else:SI (ltu (match_operand:CCUNS 1 "cc_reg_operand" "y")
4385 (if_then_else (gtu (match_dup 1)
4391 [(set_attr "type" "logical")])
4393 ;; Test byte within two ranges.
4395 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4396 ;; represents a byte whose value is ignored in this context and
4397 ;; vv, the least significant byte, holds the byte value that is to
4398 ;; be tested for membership within the range specified by operand 2.
4399 ;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
4401 ;; Return in target register operand 0 a value of 1 if (lo_1 <= vv and
4402 ;; vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2). Otherwise, set register
4405 ;; Though the instructions to which this expansion maps operate on
4406 ;; 64-bit registers, the current implementation only operates on
4407 ;; SI-mode operands as the high-order bits provide no information
4408 ;; that is not already available in the low-order bits. To avoid the
4409 ;; costs of data widening operations, future enhancements might allow
4410 ;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
;; Expander: same shape as "cmprb" above, but operand 2 packs two
;; ranges (hi_1:lo_1:hi_2:lo_2).  A fresh CCmode pseudo (operand 3) is
;; allocated, the two-range CC compare is emitted, and the CC bits are
;; mapped to the SImode boolean in operand 0.
4411 (define_expand "cmprb2"
4413 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
4414 (match_operand:SI 2 "gpc_reg_operand" "r")]
4416 (set (match_operand:SI 0 "gpc_reg_operand" "=r")
4417 (if_then_else:SI (lt (match_dup 3)
4420 (if_then_else (gt (match_dup 3)
4426 operands[3] = gen_reg_rtx (CCmode);
4429 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4430 ;; represents a byte whose value is ignored in this context and
4431 ;; vv, the least significant byte, holds the byte value that is to
4432 ;; be tested for membership within the ranges specified by operand 2.
4433 ;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
4435 ;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
4436 ;; (lo_1 <= vv and vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2).
4437 ;; Otherwise, set the GT bit to 0. The other 3 bits of the target
4438 ;; CR register are all set to 0.
;; CC-setting half of the "cmprb2" expansion: test the low byte of SI
;; operand 1 for membership in either of the two ranges packed into SI
;; operand 2, recording the result in the GT bit of CR operand 0 as
;; described above (asm template elided in this chunk).
4439 (define_insn "*cmprb2_internal"
4440 [(set (match_operand:CC 0 "cc_reg_operand" "=y")
4441 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
4442 (match_operand:SI 2 "gpc_reg_operand" "r")]
4446 [(set_attr "type" "logical")])
4448 ;; Test byte membership within set of 8 bytes.
4450 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4451 ;; represents a byte whose value is ignored in this context and
4452 ;; vv, the least significant byte, holds the byte value that is to
4453 ;; be tested for membership within the set specified by operand 2.
4454 ;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
4456 ;; Return in target register operand 0 a value of 1 if vv equals one
4457 ;; of the values e0, e1, e2, e3, e4, e5, e6, or e7. Otherwise, set
4458 ;; register operand 0 to 0. Note that the 8 byte values held within
4459 ;; operand 2 need not be unique.
4461 ;; Though the instructions to which this expansion maps operate on
4462 ;; 64-bit registers, the current implementation requires that operands
4463 ;; 0 and 1 have mode SI as the high-order bits provide no information
4464 ;; that is not already available in the low-order bits. To avoid the
4465 ;; costs of data widening operations, future enhancements might allow
4466 ;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
;; Expander: byte-in-set test (see comment above).  Unlike cmprb/cmprb2,
;; operand 2 is DImode — it holds eight candidate byte values — so the
;; pattern requires TARGET_P9_MISC && TARGET_64BIT.  A fresh CCmode
;; pseudo (operand 3) receives the compare; its LT/GT bits select the
;; SImode boolean stored in operand 0.
4467 (define_expand "cmpeqb"
4469 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
4470 (match_operand:DI 2 "gpc_reg_operand" "r")]
4472 (set (match_operand:SI 0 "gpc_reg_operand" "=r")
4473 (if_then_else:SI (lt (match_dup 3)
4476 (if_then_else (gt (match_dup 3)
4480 "TARGET_P9_MISC && TARGET_64BIT"
4482 operands[3] = gen_reg_rtx (CCmode);
4485 ;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
4486 ;; represents a byte whose value is ignored in this context and
4487 ;; vv, the least significant byte, holds the byte value that is to
4488 ;; be tested for membership within the set specified by operand 2.
4489 ;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
4491 ;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if vv
4492 ;; equals one of the values e0, e1, e2, e3, e4, e5, e6, or e7. Otherwise,
4493 ;; set the GT bit to zero. The other 3 bits of the target CR register
4494 ;; are all set to 0.
;; CC-setting half of the "cmpeqb" expansion: compare the low byte of
;; SI operand 1 against each of the eight bytes of DI operand 2, setting
;; the GT bit of CR operand 0 on a match as described above.  Power9
;; 64-bit only (TARGET_P9_MISC && TARGET_64BIT).
4495 (define_insn "*cmpeqb_internal"
4496 [(set (match_operand:CC 0 "cc_reg_operand" "=y")
4497 (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
4498 (match_operand:DI 2 "gpc_reg_operand" "r")]
4500 "TARGET_P9_MISC && TARGET_64BIT"
4502 [(set_attr "type" "logical")])
;; Expand a BCD add/subtract used for its condition code: emit the
;; CR6-setting bcd operation on V1TI operands 1/2 (operand 3 selects
;; the PS bit, 0 or 1) compared against a V2DF zero — operand 4,
;; created in the prepare statements — with a V1TI scratch (operand 5)
;; absorbing the unused numeric result, then read the BCD_TEST
;; condition out of CR6 into SI operand 0.
4504 (define_expand "bcd<bcd_add_sub>_<code>"
4505 [(parallel [(set (reg:CCFP CR6_REGNO)
4507 (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "")
4508 (match_operand:V1TI 2 "register_operand" "")
4509 (match_operand:QI 3 "const_0_to_1_operand" "")]
4512 (clobber (match_scratch:V1TI 5 ""))])
4513 (set (match_operand:SI 0 "register_operand" "")
4514 (BCD_TEST:SI (reg:CCFP CR6_REGNO)
4518 operands[4] = CONST0_RTX (V2DFmode);
4521 ;; Peephole2 pattern to combine a bcdadd/bcdsub that calculates the value and
4522 ;; the bcdadd/bcdsub that tests the value. The combiner won't work since
4523 ;; CR6 is a hard coded register. Unfortunately, all of the Altivec predicate
4524 ;; support is hard coded to use the fixed register CR6 instead of creating
4525 ;; a register class for CR6.
4528 [(parallel [(set (match_operand:V1TI 0 "register_operand" "")
4529 (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "")
4530 (match_operand:V1TI 2 "register_operand" "")
4531 (match_operand:QI 3 "const_0_to_1_operand" "")]
4532 UNSPEC_BCD_ADD_SUB))
4533 (clobber (reg:CCFP CR6_REGNO))])
4534 (parallel [(set (reg:CCFP CR6_REGNO)
4536 (unspec:V2DF [(match_dup 1)
4540 (match_operand:V2DF 4 "zero_constant" "")))
4541 (clobber (match_operand:V1TI 5 "register_operand" ""))])]
4543 [(parallel [(set (match_dup 0)
4544 (unspec:V1TI [(match_dup 1)
4547 UNSPEC_BCD_ADD_SUB))
4548 (set (reg:CCFP CR6_REGNO)
4550 (unspec:V2DF [(match_dup 1)