Rebase.
[official-gcc.git] / gcc / config / rs6000 / spu2vmx.h
blob1e63bf74904a89573c87264ed830987abdbfe392
/* Cell SPU 2 VMX intrinsics header
   Copyright (C) 2007-2014 Free Software Foundation, Inc.

   This file is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3 of the License, or (at your option)
   any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
23 #ifndef _SPU2VMX_H_
24 #define _SPU2VMX_H_ 1
26 #ifdef __cplusplus
28 #ifndef __SPU__
30 #include <si2vmx.h>
32 /* spu_absd (absolute difference)
33 * ========
35 static __inline vec_uchar16 spu_absd(vec_uchar16 a, vec_uchar16 b)
37 return ((vec_uchar16)(si_absdb((qword)(a), (qword)(b))));
42 /* spu_add
43 * =======
45 static __inline vec_uint4 spu_add(vec_uint4 a, vec_uint4 b)
47 return ((vec_uint4)(si_a((qword)(a), (qword)(b))));
50 static __inline vec_int4 spu_add(vec_int4 a, vec_int4 b)
52 return ((vec_int4)(si_a((qword)(a), (qword)(b))));
55 static __inline vec_ushort8 spu_add(vec_ushort8 a, vec_ushort8 b)
57 return ((vec_ushort8)(si_ah((qword)(a), (qword)(b))));
60 static __inline vec_short8 spu_add(vec_short8 a, vec_short8 b)
62 return ((vec_short8)(si_ah((qword)(a), (qword)(b))));
65 static __inline vec_uint4 spu_add(vec_uint4 a, unsigned int b)
67 return ((vec_uint4)(si_ai((qword)(a), (int)(b))));
70 static __inline vec_int4 spu_add(vec_int4 a, int b)
72 return ((vec_int4)(si_ai((qword)(a), b)));
75 static __inline vec_ushort8 spu_add(vec_ushort8 a, unsigned short b)
77 return ((vec_ushort8)(si_ahi((qword)(a), (short)(b))));
80 static __inline vec_short8 spu_add(vec_short8 a, short b)
82 return ((vec_short8)(si_ahi((qword)(a), b)));
85 static __inline vec_float4 spu_add(vec_float4 a, vec_float4 b)
87 return ((vec_float4)(si_fa((qword)(a), (qword)(b))));
90 static __inline vec_double2 spu_add(vec_double2 a, vec_double2 b)
92 return ((vec_double2)(si_dfa((qword)(a), (qword)(b))));
96 /* spu_addx
97 * ========
99 static __inline vec_uint4 spu_addx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
101 return ((vec_uint4)(si_addx((qword)(a), (qword)(b), (qword)(c))));
104 static __inline vec_int4 spu_addx(vec_int4 a, vec_int4 b, vec_int4 c)
106 return ((vec_int4)(si_addx((qword)(a), (qword)(b), (qword)(c))));
110 /* spu_and
111 * =======
113 static __inline vec_uchar16 spu_and(vec_uchar16 a, vec_uchar16 b)
115 return ((vec_uchar16)(si_and((qword)(a), (qword)(b))));
118 static __inline vec_char16 spu_and(vec_char16 a, vec_char16 b)
120 return ((vec_char16)(si_and((qword)(a), (qword)(b))));
123 static __inline vec_ushort8 spu_and(vec_ushort8 a, vec_ushort8 b)
125 return ((vec_ushort8)(si_and((qword)(a), (qword)(b))));
128 static __inline vec_short8 spu_and(vec_short8 a, vec_short8 b)
130 return ((vec_short8)(si_and((qword)(a), (qword)(b))));
133 static __inline vec_uint4 spu_and(vec_uint4 a, vec_uint4 b)
135 return ((vec_uint4)(si_and((qword)(a), (qword)(b))));
138 static __inline vec_int4 spu_and(vec_int4 a, vec_int4 b)
140 return ((vec_int4)(si_and((qword)(a), (qword)(b))));
143 static __inline vec_float4 spu_and(vec_float4 a, vec_float4 b)
145 return ((vec_float4)(si_and((qword)(a), (qword)(b))));
148 static __inline vec_ullong2 spu_and(vec_ullong2 a, vec_ullong2 b)
150 return ((vec_ullong2)(si_and((qword)(a), (qword)(b))));
153 static __inline vec_llong2 spu_and(vec_llong2 a, vec_llong2 b)
155 return ((vec_llong2)(si_and((qword)(a), (qword)(b))));
158 static __inline vec_double2 spu_and(vec_double2 a, vec_double2 b)
160 return ((vec_double2)(si_and((qword)(a), (qword)(b))));
163 static __inline vec_uchar16 spu_and(vec_uchar16 a, unsigned char b)
165 return ((vec_uchar16)(si_andbi((qword)(a), (signed char)(b))));
169 static __inline vec_char16 spu_and(vec_char16 a, signed char b)
171 return ((vec_char16)(si_andbi((qword)(a), b)));
174 static __inline vec_ushort8 spu_and(vec_ushort8 a, unsigned short b)
176 return ((vec_ushort8)(si_andhi((qword)(a), (signed short)(b))));
179 static __inline vec_short8 spu_and(vec_short8 a, signed short b)
181 return ((vec_short8)(si_andhi((qword)(a), b)));
184 static __inline vec_uint4 spu_and(vec_uint4 a, unsigned int b)
186 return ((vec_uint4)(si_andi((qword)(a), (signed int)(b))));
189 static __inline vec_int4 spu_and(vec_int4 a, signed int b)
191 return ((vec_int4)(si_andi((qword)(a), b)));
/* spu_andc
 * ========
 * AND with complement of second operand; maps directly to VMX vec_andc.
 */
#define spu_andc(_a, _b)	vec_andc(_a, _b)
/* spu_avg
 * =======
 * Rounded average; maps directly to VMX vec_avg.
 */
#define spu_avg(_a, _b)		vec_avg(_a, _b)
/* spu_bisled
 * spu_bisled_d
 * spu_bisled_e
 * ============
 * SPU branch-indirect-and-set-link-if-external-data has no VMX
 * equivalent; these expand to nothing.
 */
#define spu_bisled(_func)	/* not mappable */
#define spu_bisled_d(_func)	/* not mappable */
#define spu_bisled_e(_func)	/* not mappable */
216 /* spu_cmpabseq
217 * ============
219 static __inline vec_uint4 spu_cmpabseq(vec_float4 a, vec_float4 b)
221 return ((vec_uint4)(si_fcmeq((qword)(a), (qword)(b))));
225 static __inline vec_ullong2 spu_cmpabseq(vec_double2 a, vec_double2 b)
227 return ((vec_ullong2)(si_dfcmeq((qword)(a), (qword)(b))));
231 /* spu_cmpabsgt
232 * ============
234 static __inline vec_uint4 spu_cmpabsgt(vec_float4 a, vec_float4 b)
236 return ((vec_uint4)(si_fcmgt((qword)(a), (qword)(b))));
239 static __inline vec_ullong2 spu_cmpabsgt(vec_double2 a, vec_double2 b)
241 return ((vec_ullong2)(si_dfcmgt((qword)(a), (qword)(b))));
245 /* spu_cmpeq
246 * ========
248 static __inline vec_uchar16 spu_cmpeq(vec_uchar16 a, vec_uchar16 b)
250 return ((vec_uchar16)(si_ceqb((qword)(a), (qword)(b))));
253 static __inline vec_uchar16 spu_cmpeq(vec_char16 a, vec_char16 b)
255 return ((vec_uchar16)(si_ceqb((qword)(a), (qword)(b))));
258 static __inline vec_ushort8 spu_cmpeq(vec_ushort8 a, vec_ushort8 b)
260 return ((vec_ushort8)(si_ceqh((qword)(a), (qword)(b))));
263 static __inline vec_ushort8 spu_cmpeq(vec_short8 a, vec_short8 b)
265 return ((vec_ushort8)(si_ceqh((qword)(a), (qword)(b))));
268 static __inline vec_uint4 spu_cmpeq(vec_uint4 a, vec_uint4 b)
270 return ((vec_uint4)(si_ceq((qword)(a), (qword)(b))));
273 static __inline vec_uint4 spu_cmpeq(vec_int4 a, vec_int4 b)
275 return ((vec_uint4)(si_ceq((qword)(a), (qword)(b))));
278 static __inline vec_uint4 spu_cmpeq(vec_float4 a, vec_float4 b)
280 return ((vec_uint4)(si_fceq((qword)(a), (qword)(b))));
283 static __inline vec_uchar16 spu_cmpeq(vec_uchar16 a, unsigned char b)
285 return ((vec_uchar16)(si_ceqbi((qword)(a), (signed char)(b))));
288 static __inline vec_uchar16 spu_cmpeq(vec_char16 a, signed char b)
290 return ((vec_uchar16)(si_ceqbi((qword)(a), b)));
293 static __inline vec_ushort8 spu_cmpeq(vec_ushort8 a, unsigned short b)
295 return ((vec_ushort8)(si_ceqhi((qword)(a), (signed short)(b))));
298 static __inline vec_ushort8 spu_cmpeq(vec_short8 a, signed short b)
300 return ((vec_ushort8)(si_ceqhi((qword)(a), b)));
303 static __inline vec_uint4 spu_cmpeq(vec_uint4 a, unsigned int b)
305 return ((vec_uint4)(si_ceqi((qword)(a), (signed int)(b))));
308 static __inline vec_uint4 spu_cmpeq(vec_int4 a, signed int b)
310 return ((vec_uint4)(si_ceqi((qword)(a), b)));
313 static __inline vec_ullong2 spu_cmpeq(vec_double2 a, vec_double2 b)
315 return ((vec_ullong2)(si_dfceq((qword)(a), (qword)(b))));
319 /* spu_cmpgt
320 * ========
322 static __inline vec_uchar16 spu_cmpgt(vec_uchar16 a, vec_uchar16 b)
324 return ((vec_uchar16)(si_clgtb((qword)(a), (qword)(b))));
327 static __inline vec_uchar16 spu_cmpgt(vec_char16 a, vec_char16 b)
329 return ((vec_uchar16)(si_cgtb((qword)(a), (qword)(b))));
332 static __inline vec_ushort8 spu_cmpgt(vec_ushort8 a, vec_ushort8 b)
334 return ((vec_ushort8)(si_clgth((qword)(a), (qword)(b))));
337 static __inline vec_ushort8 spu_cmpgt(vec_short8 a, vec_short8 b)
339 return ((vec_ushort8)(si_cgth((qword)(a), (qword)(b))));
342 static __inline vec_uint4 spu_cmpgt(vec_uint4 a, vec_uint4 b)
344 return ((vec_uint4)(si_clgt((qword)(a), (qword)(b))));
347 static __inline vec_uint4 spu_cmpgt(vec_int4 a, vec_int4 b)
349 return ((vec_uint4)(si_cgt((qword)(a), (qword)(b))));
352 static __inline vec_uint4 spu_cmpgt(vec_float4 a, vec_float4 b)
354 return ((vec_uint4)(si_fcgt((qword)(a), (qword)(b))));
357 static __inline vec_uchar16 spu_cmpgt(vec_uchar16 a, unsigned char b)
359 return ((vec_uchar16)(si_clgtbi((qword)(a), b)));
362 static __inline vec_uchar16 spu_cmpgt(vec_char16 a, signed char b)
364 return ((vec_uchar16)(si_cgtbi((qword)(a), b)));
367 static __inline vec_ushort8 spu_cmpgt(vec_ushort8 a, unsigned short b)
369 return ((vec_ushort8)(si_clgthi((qword)(a), b)));
372 static __inline vec_ushort8 spu_cmpgt(vec_short8 a, signed short b)
374 return ((vec_ushort8)(si_cgthi((qword)(a), b)));
377 static __inline vec_uint4 spu_cmpgt(vec_uint4 a, unsigned int b)
379 return ((vec_uint4)(si_clgti((qword)(a), b)));
382 static __inline vec_uint4 spu_cmpgt(vec_int4 a, signed int b)
384 return ((vec_uint4)(si_cgti((qword)(a), b)));
387 static __inline vec_ullong2 spu_cmpgt(vec_double2 a, vec_double2 b)
389 return ((vec_ullong2)(si_dfcgt((qword)(a), (qword)(b))));
393 /* spu_cntb
394 * ========
396 static __inline vec_uchar16 spu_cntb(vec_uchar16 a)
398 return ((vec_uchar16)(si_cntb((qword)(a))));
402 static __inline vec_uchar16 spu_cntb(vec_char16 a)
404 return ((vec_uchar16)(si_cntb((qword)(a))));
407 /* spu_cntlz
408 * =========
410 static __inline vec_uint4 spu_cntlz(vec_uint4 a)
412 return ((vec_uint4)(si_clz((qword)(a))));
415 static __inline vec_uint4 spu_cntlz(vec_int4 a)
417 return ((vec_uint4)(si_clz((qword)(a))));
420 static __inline vec_uint4 spu_cntlz(vec_float4 a)
422 return ((vec_uint4)(si_clz((qword)(a))));
425 /* spu_testsv
426 * ==========
428 static __inline vec_ullong2 spu_testsv(vec_double2 a, char b)
430 return ((vec_ullong2)(si_dftsv((qword)(a), b)));
/* spu_convtf
 * ==========
 * Integer-to-float conversion with scale; maps to VMX vec_ctf.
 */
#define spu_convtf(_a, _b)	(vec_ctf(_a, _b))

/* spu_convts
 * ==========
 * Float-to-signed-int conversion with scale; maps to VMX vec_cts.
 */
#define spu_convts(_a, _b)	(vec_cts(_a, _b))

/* spu_convtu
 * ==========
 * Float-to-unsigned-int conversion with scale; maps to VMX vec_ctu.
 */
#define spu_convtu(_a, _b)	(vec_ctu(_a, _b))
/* spu_dsync
 * ========
 * SPU data synchronization has no VMX mapping; expands to nothing.
 */
#define spu_dsync()
454 /* spu_eqv
455 * =======
457 static __inline vec_uchar16 spu_eqv(vec_uchar16 a, vec_uchar16 b)
459 return ((vec_uchar16)(si_eqv((qword)(a), (qword)(b))));
462 static __inline vec_char16 spu_eqv(vec_char16 a, vec_char16 b)
464 return ((vec_char16)(si_eqv((qword)(a), (qword)(b))));
467 static __inline vec_ushort8 spu_eqv(vec_ushort8 a, vec_ushort8 b)
469 return ((vec_ushort8)(si_eqv((qword)(a), (qword)(b))));
472 static __inline vec_short8 spu_eqv(vec_short8 a, vec_short8 b)
474 return ((vec_short8)(si_eqv((qword)(a), (qword)(b))));
477 static __inline vec_uint4 spu_eqv(vec_uint4 a, vec_uint4 b)
479 return ((vec_uint4)(si_eqv((qword)(a), (qword)(b))));
482 static __inline vec_int4 spu_eqv(vec_int4 a, vec_int4 b)
484 return ((vec_int4)(si_eqv((qword)(a), (qword)(b))));
487 static __inline vec_float4 spu_eqv(vec_float4 a, vec_float4 b)
489 return ((vec_float4)(si_eqv((qword)(a), (qword)(b))));
492 static __inline vec_ullong2 spu_eqv(vec_ullong2 a, vec_ullong2 b)
494 return ((vec_ullong2)(si_eqv((qword)(a), (qword)(b))));
497 static __inline vec_llong2 spu_eqv(vec_llong2 a, vec_llong2 b)
499 return ((vec_llong2)(si_eqv((qword)(a), (qword)(b))));
502 static __inline vec_double2 spu_eqv(vec_double2 a, vec_double2 b)
504 return ((vec_double2)(si_eqv((qword)(a), (qword)(b))));
507 /* spu_extend
508 * ========
510 static __inline vec_short8 spu_extend(vec_char16 a)
512 return ((vec_short8)(si_xsbh((qword)(a))));
516 static __inline vec_int4 spu_extend(vec_short8 a)
518 return ((vec_int4)(si_xshw((qword)(a))));
521 static __inline vec_llong2 spu_extend(vec_int4 a)
523 return ((vec_llong2)(si_xswd((qword)(a))));
527 static __inline vec_double2 spu_extend(vec_float4 a)
529 return ((vec_double2)(si_fesd((qword)(a))));
533 /* spu_extract
534 * ========
536 static __inline unsigned char spu_extract(vec_uchar16 a, int element)
538 union {
539 vec_uchar16 v;
540 unsigned char c[16];
541 } in;
543 in.v = a;
544 return (in.c[element & 15]);
547 static __inline signed char spu_extract(vec_char16 a, int element)
549 union {
550 vec_char16 v;
551 signed char c[16];
552 } in;
554 in.v = a;
555 return (in.c[element & 15]);
558 static __inline unsigned short spu_extract(vec_ushort8 a, int element)
560 union {
561 vec_ushort8 v;
562 unsigned short s[8];
563 } in;
565 in.v = a;
566 return (in.s[element & 7]);
569 static __inline signed short spu_extract(vec_short8 a, int element)
571 union {
572 vec_short8 v;
573 signed short s[8];
574 } in;
576 in.v = a;
577 return (in.s[element & 7]);
580 static __inline unsigned int spu_extract(vec_uint4 a, int element)
582 union {
583 vec_uint4 v;
584 unsigned int i[4];
585 } in;
587 in.v = a;
588 return (in.i[element & 3]);
591 static __inline signed int spu_extract(vec_int4 a, int element)
593 union {
594 vec_int4 v;
595 signed int i[4];
596 } in;
598 in.v = a;
599 return (in.i[element & 3]);
602 static __inline float spu_extract(vec_float4 a, int element)
604 union {
605 vec_float4 v;
606 float f[4];
607 } in;
609 in.v = a;
610 return (in.f[element & 3]);
613 static __inline unsigned long long spu_extract(vec_ullong2 a, int element)
615 union {
616 vec_ullong2 v;
617 unsigned long long l[2];
618 } in;
620 in.v = a;
621 return (in.l[element & 1]);
624 static __inline signed long long spu_extract(vec_llong2 a, int element)
626 union {
627 vec_llong2 v;
628 signed long long l[2];
629 } in;
631 in.v = a;
632 return (in.l[element & 1]);
635 static __inline double spu_extract(vec_double2 a, int element)
637 union {
638 vec_double2 v;
639 double d[2];
640 } in;
642 in.v = a;
643 return (in.d[element & 1]);
646 /* spu_gather
647 * ========
649 static __inline vec_uint4 spu_gather(vec_uchar16 a)
651 return ((vec_uint4)(si_gbb((qword)(a))));
655 static __inline vec_uint4 spu_gather(vec_char16 a)
657 return ((vec_uint4)(si_gbb((qword)(a))));
660 static __inline vec_uint4 spu_gather(vec_ushort8 a)
662 return ((vec_uint4)(si_gbh((qword)(a))));
665 static __inline vec_uint4 spu_gather(vec_short8 a)
667 return ((vec_uint4)(si_gbh((qword)(a))));
671 static __inline vec_uint4 spu_gather(vec_uint4 a)
673 return ((vec_uint4)(si_gb((qword)(a))));
676 static __inline vec_uint4 spu_gather(vec_int4 a)
678 return ((vec_uint4)(si_gb((qword)(a))));
681 static __inline vec_uint4 spu_gather(vec_float4 a)
683 return ((vec_uint4)(si_gb((qword)(a))));
686 /* spu_genb
687 * ========
689 static __inline vec_uint4 spu_genb(vec_uint4 a, vec_uint4 b)
691 return ((vec_uint4)(si_bg((qword)(b), (qword)(a))));
694 static __inline vec_int4 spu_genb(vec_int4 a, vec_int4 b)
696 return ((vec_int4)(si_bg((qword)(b), (qword)(a))));
699 /* spu_genbx
700 * =========
702 static __inline vec_uint4 spu_genbx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
704 return ((vec_uint4)(si_bgx((qword)(b), (qword)(a), (qword)(c))));
707 static __inline vec_int4 spu_genbx(vec_int4 a, vec_int4 b, vec_int4 c)
709 return ((vec_int4)(si_bgx((qword)(b), (qword)(a), (qword)(c))));
713 /* spu_genc
714 * ========
716 static __inline vec_uint4 spu_genc(vec_uint4 a, vec_uint4 b)
718 return ((vec_uint4)(si_cg((qword)(a), (qword)(b))));
721 static __inline vec_int4 spu_genc(vec_int4 a, vec_int4 b)
723 return ((vec_int4)(si_cg((qword)(a), (qword)(b))));
726 /* spu_gencx
727 * =========
729 static __inline vec_uint4 spu_gencx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
731 return ((vec_uint4)(si_cgx((qword)(a), (qword)(b), (qword)(c))));
734 static __inline vec_int4 spu_gencx(vec_int4 a, vec_int4 b, vec_int4 c)
736 return ((vec_int4)(si_cgx((qword)(a), (qword)(b), (qword)(c))));
/* spu_hcmpeq
 * ========
 * Halt-on-compare; emulated with a conditional on the user-supplied
 * SPU_HALT_ACTION macro.
 */
#define spu_hcmpeq(_a, _b)	if (_a == _b) { SPU_HALT_ACTION; };

/* spu_hcmpgt
 * ========
 */
#define spu_hcmpgt(_a, _b)	if (_a > _b) { SPU_HALT_ACTION; };
/* spu_idisable
 * ============
 * Interrupt enable/disable are not mappable to VMX; expand to the
 * user-supplied SPU_UNSUPPORTED_ACTION macro.
 */
#define spu_idisable()		SPU_UNSUPPORTED_ACTION

/* spu_ienable
 * ===========
 */
#define spu_ienable()		SPU_UNSUPPORTED_ACTION
764 /* spu_insert
765 * ========
767 static __inline vec_uchar16 spu_insert(unsigned char a, vec_uchar16 b, int element)
769 union {
770 vec_uchar16 v;
771 unsigned char c[16];
772 } in;
774 in.v = b;
775 in.c[element & 15] = a;
776 return (in.v);
779 static __inline vec_char16 spu_insert(signed char a, vec_char16 b, int element)
781 return ((vec_char16)spu_insert((unsigned char)(a), (vec_uchar16)(b), element));
784 static __inline vec_ushort8 spu_insert(unsigned short a, vec_ushort8 b, int element)
786 union {
787 vec_ushort8 v;
788 unsigned short s[8];
789 } in;
791 in.v = b;
792 in.s[element & 7] = a;
793 return (in.v);
796 static __inline vec_short8 spu_insert(signed short a, vec_short8 b, int element)
798 return ((vec_short8)spu_insert((unsigned short)(a), (vec_ushort8)(b), element));
801 static __inline vec_uint4 spu_insert(unsigned int a, vec_uint4 b, int element)
803 union {
804 vec_uint4 v;
805 unsigned int i[4];
806 } in;
808 in.v = b;
809 in.i[element & 3] = a;
810 return (in.v);
813 static __inline vec_int4 spu_insert(signed int a, vec_int4 b, int element)
815 return ((vec_int4)spu_insert((unsigned int)(a), (vec_uint4)(b), element));
818 static __inline vec_float4 spu_insert(float a, vec_float4 b, int element)
820 union {
821 vec_float4 v;
822 float f[4];
823 } in;
825 in.v = b;
826 in.f[element & 3] = a;
827 return (in.v);
830 static __inline vec_ullong2 spu_insert(unsigned long long a, vec_ullong2 b, int element)
832 union {
833 vec_ullong2 v;
834 unsigned long long l[2];
835 } in;
837 in.v = b;
838 in.l[element & 1] = a;
839 return (in.v);
842 static __inline vec_llong2 spu_insert(signed long long a, vec_llong2 b, int element)
844 return ((vec_llong2)spu_insert((unsigned long long)(a), (vec_ullong2)(b), element));
847 static __inline vec_double2 spu_insert(double a, vec_double2 b, int element)
849 union {
850 vec_double2 v;
851 double d[2];
852 } in;
854 in.v = b;
855 in.d[element & 1] = a;
856 return (in.v);
860 /* spu_madd
861 * ========
863 static __inline vec_int4 spu_madd(vec_short8 a, vec_short8 b, vec_int4 c)
865 return ((vec_int4)(si_mpya((qword)(a), (qword)(b), (qword)(c))));
868 static __inline vec_float4 spu_madd(vec_float4 a, vec_float4 b, vec_float4 c)
870 return ((vec_float4)(si_fma((qword)(a), (qword)(b), (qword)(c))));
873 static __inline vec_double2 spu_madd(vec_double2 a, vec_double2 b, vec_double2 c)
875 return ((vec_double2)(si_dfma((qword)(a), (qword)(b), (qword)(c))));
/* spu_maskb
 * ========
 * Form a select mask from the low bits of a scalar, one bit per byte.
 */
#define spu_maskb(_a)	(vec_uchar16)(si_fsmb(si_from_int((int)(_a))))

/* spu_maskh
 * ========
 * As spu_maskb, but one bit per halfword.
 */
#define spu_maskh(_a)	(vec_ushort8)(si_fsmh(si_from_int((int)(_a))))

/* spu_maskw
 * ========
 * As spu_maskb, but one bit per word.
 */
#define spu_maskw(_a)	(vec_uint4)(si_fsm(si_from_int((int)(_a))))
/* spu_mfcdma32
 * ========
 * MFC DMA commands have no VMX mapping; expand to nothing.
 */
#define spu_mfcdma32(_ls, _ea, _size, _tagid, _cmd)

/* spu_mfcdma64
 * ========
 */
#define spu_mfcdma64(_ls, _eahi, _ealow, _size, _tagid, _cmd)

/* spu_mfcstat
 * ========
 * No MFC on VMX; report all tag groups complete.
 */
#define spu_mfcstat(_type)	0xFFFFFFFF
/* spu_mffpscr
 * ===========
 * Read the floating-point status/control register.
 */
#define spu_mffpscr()		(vec_uint4)(si_fscrrd())

/* spu_mfspr
 * ========
 * Read a special-purpose register as an unsigned int.
 */
#define spu_mfspr(_reg)		si_to_uint(si_mfspr(_reg))
928 /* spu_mhhadd
929 * ==========
931 static __inline vec_int4 spu_mhhadd(vec_short8 a, vec_short8 b, vec_int4 c)
933 return ((vec_int4)(si_mpyhha((qword)(a), (qword)(b), (qword)(c))));
937 static __inline vec_uint4 spu_mhhadd(vec_ushort8 a, vec_ushort8 b, vec_uint4 c)
939 return ((vec_uint4)(si_mpyhhau((qword)(a), (qword)(b), (qword)(c))));
943 /* spu_msub
944 * ========
946 static __inline vec_float4 spu_msub(vec_float4 a, vec_float4 b, vec_float4 c)
948 return ((vec_float4)(si_fms((qword)(a), (qword)(b), (qword)(c))));
951 static __inline vec_double2 spu_msub(vec_double2 a, vec_double2 b, vec_double2 c)
953 return ((vec_double2)(si_dfms((qword)(a), (qword)(b), (qword)(c))));
/* spu_mtfpscr
 * ===========
 * Writing the FP status/control register is not mapped; no-op.
 */
#define spu_mtfpscr(_a)

/* spu_mtspr
 * ========
 * Writing special-purpose registers is not mapped; no-op.
 */
#define spu_mtspr(_reg, _a)
969 /* spu_mul
970 * ========
972 static __inline vec_float4 spu_mul(vec_float4 a, vec_float4 b)
974 return ((vec_float4)(si_fm((qword)(a), (qword)(b))));
977 static __inline vec_double2 spu_mul(vec_double2 a, vec_double2 b)
979 return ((vec_double2)(si_dfm((qword)(a), (qword)(b))));
983 /* spu_mulh
984 * ========
986 static __inline vec_int4 spu_mulh(vec_short8 a, vec_short8 b)
988 return ((vec_int4)(si_mpyh((qword)(a), (qword)(b))));
991 /* spu_mule
992 * =========
994 #define spu_mule(_a, _b) vec_mule(_a, _b)
998 /* spu_mulo
999 * ========
1001 static __inline vec_int4 spu_mulo(vec_short8 a, vec_short8 b)
1003 return ((vec_int4)(si_mpy((qword)(a), (qword)(b))));
1007 static __inline vec_uint4 spu_mulo(vec_ushort8 a, vec_ushort8 b)
1009 return ((vec_uint4)(si_mpyu((qword)(a), (qword)(b))));
1013 static __inline vec_int4 spu_mulo(vec_short8 a, short b)
1015 return ((vec_int4)(si_mpyi((qword)(a), b)));
1018 static __inline vec_uint4 spu_mulo(vec_ushort8 a, unsigned short b)
1020 return ((vec_uint4)(si_mpyui((qword)(a), b)));
1024 /* spu_mulsr
1025 * =========
1027 static __inline vec_int4 spu_mulsr(vec_short8 a, vec_short8 b)
1029 return ((vec_int4)(si_mpys((qword)(a), (qword)(b))));
1033 /* spu_nand
1034 * ========
1036 static __inline vec_uchar16 spu_nand(vec_uchar16 a, vec_uchar16 b)
1038 return ((vec_uchar16)(si_nand((qword)(a), (qword)(b))));
1041 static __inline vec_char16 spu_nand(vec_char16 a, vec_char16 b)
1043 return ((vec_char16)(si_nand((qword)(a), (qword)(b))));
1046 static __inline vec_ushort8 spu_nand(vec_ushort8 a, vec_ushort8 b)
1048 return ((vec_ushort8)(si_nand((qword)(a), (qword)(b))));
1051 static __inline vec_short8 spu_nand(vec_short8 a, vec_short8 b)
1053 return ((vec_short8)(si_nand((qword)(a), (qword)(b))));
1056 static __inline vec_uint4 spu_nand(vec_uint4 a, vec_uint4 b)
1058 return ((vec_uint4)(si_nand((qword)(a), (qword)(b))));
1061 static __inline vec_int4 spu_nand(vec_int4 a, vec_int4 b)
1063 return ((vec_int4)(si_nand((qword)(a), (qword)(b))));
1066 static __inline vec_float4 spu_nand(vec_float4 a, vec_float4 b)
1068 return ((vec_float4)(si_nand((qword)(a), (qword)(b))));
1071 static __inline vec_ullong2 spu_nand(vec_ullong2 a, vec_ullong2 b)
1073 return ((vec_ullong2)(si_nand((qword)(a), (qword)(b))));
1076 static __inline vec_llong2 spu_nand(vec_llong2 a, vec_llong2 b)
1078 return ((vec_llong2)(si_nand((qword)(a), (qword)(b))));
1081 static __inline vec_double2 spu_nand(vec_double2 a, vec_double2 b)
1083 return ((vec_double2)(si_nand((qword)(a), (qword)(b))));
1087 /* spu_nmadd
1088 * =========
1090 static __inline vec_double2 spu_nmadd(vec_double2 a, vec_double2 b, vec_double2 c)
1092 return ((vec_double2)(si_dfnma((qword)(a), (qword)(b), (qword)(c))));
1096 /* spu_nmsub
1097 * =========
1099 static __inline vec_float4 spu_nmsub(vec_float4 a, vec_float4 b, vec_float4 c)
1101 return ((vec_float4)(si_fnms((qword)(a), (qword)(b), (qword)(c))));
1104 static __inline vec_double2 spu_nmsub(vec_double2 a, vec_double2 b, vec_double2 c)
1106 return ((vec_double2)(si_dfnms((qword)(a), (qword)(b), (qword)(c))));
/* spu_nor
 * =======
 * Bitwise NOR; maps directly to VMX vec_nor.
 */
#define spu_nor(_a, _b)		vec_nor(_a, _b)
1116 /* spu_or
1117 * ======
1119 static __inline vec_uchar16 spu_or(vec_uchar16 a, vec_uchar16 b)
1121 return ((vec_uchar16)(si_or((qword)(a), (qword)(b))));
1124 static __inline vec_char16 spu_or(vec_char16 a, vec_char16 b)
1126 return ((vec_char16)(si_or((qword)(a), (qword)(b))));
1129 static __inline vec_ushort8 spu_or(vec_ushort8 a, vec_ushort8 b)
1131 return ((vec_ushort8)(si_or((qword)(a), (qword)(b))));
1134 static __inline vec_short8 spu_or(vec_short8 a, vec_short8 b)
1136 return ((vec_short8)(si_or((qword)(a), (qword)(b))));
1139 static __inline vec_uint4 spu_or(vec_uint4 a, vec_uint4 b)
1141 return ((vec_uint4)(si_or((qword)(a), (qword)(b))));
1144 static __inline vec_int4 spu_or(vec_int4 a, vec_int4 b)
1146 return ((vec_int4)(si_or((qword)(a), (qword)(b))));
1149 static __inline vec_float4 spu_or(vec_float4 a, vec_float4 b)
1151 return ((vec_float4)(si_or((qword)(a), (qword)(b))));
1154 static __inline vec_ullong2 spu_or(vec_ullong2 a, vec_ullong2 b)
1156 return ((vec_ullong2)(si_or((qword)(a), (qword)(b))));
1159 static __inline vec_llong2 spu_or(vec_llong2 a, vec_llong2 b)
1161 return ((vec_llong2)(si_or((qword)(a), (qword)(b))));
1164 static __inline vec_double2 spu_or(vec_double2 a, vec_double2 b)
1166 return ((vec_double2)(si_or((qword)(a), (qword)(b))));
1170 static __inline vec_uchar16 spu_or(vec_uchar16 a, unsigned char b)
1172 return ((vec_uchar16)(si_orbi((qword)(a), b)));
1175 static __inline vec_char16 spu_or(vec_char16 a, signed char b)
1177 return ((vec_char16)(si_orbi((qword)(a), (unsigned char)(b))));
1180 static __inline vec_ushort8 spu_or(vec_ushort8 a, unsigned short b)
1182 return ((vec_ushort8)(si_orhi((qword)(a), b)));
1185 static __inline vec_short8 spu_or(vec_short8 a, signed short b)
1187 return ((vec_short8)(si_orhi((qword)(a), (unsigned short)(b))));
1190 static __inline vec_uint4 spu_or(vec_uint4 a, unsigned int b)
1192 return ((vec_uint4)(si_ori((qword)(a), b)));
1195 static __inline vec_int4 spu_or(vec_int4 a, signed int b)
1197 return ((vec_int4)(si_ori((qword)(a), (unsigned int)(b))));
/* spu_orc
 * =======
 * OR with complement: a | ~b, built from vec_or and vec_nor(b, b).
 */
#define spu_orc(_a, _b)		vec_or(_a, vec_nor(_b, _b))
1207 /* spu_orx
1208 * =======
1210 static __inline vec_uint4 spu_orx(vec_uint4 a)
1212 return ((vec_uint4)(si_orx((qword)(a))));
1215 static __inline vec_int4 spu_orx(vec_int4 a)
1217 return ((vec_int4)(si_orx((qword)(a))));
1221 /* spu_promote
1222 * ===========
1224 static __inline vec_uchar16 spu_promote(unsigned char a, int element)
1226 union {
1227 vec_uchar16 v;
1228 unsigned char c[16];
1229 } in;
1231 in.c[element & 15] = a;
1232 return (in.v);
1235 static __inline vec_char16 spu_promote(signed char a, int element)
1237 union {
1238 vec_char16 v;
1239 signed char c[16];
1240 } in;
1242 in.c[element & 15] = a;
1243 return (in.v);
1246 static __inline vec_ushort8 spu_promote(unsigned short a, int element)
1248 union {
1249 vec_ushort8 v;
1250 unsigned short s[8];
1251 } in;
1253 in.s[element & 7] = a;
1254 return (in.v);
1257 static __inline vec_short8 spu_promote(signed short a, int element)
1259 union {
1260 vec_short8 v;
1261 signed short s[8];
1262 } in;
1264 in.s[element & 7] = a;
1265 return (in.v);
1268 static __inline vec_uint4 spu_promote(unsigned int a, int element)
1270 union {
1271 vec_uint4 v;
1272 unsigned int i[4];
1273 } in;
1275 in.i[element & 3] = a;
1276 return (in.v);
1279 static __inline vec_int4 spu_promote(signed int a, int element)
1281 union {
1282 vec_int4 v;
1283 signed int i[4];
1284 } in;
1286 in.i[element & 3] = a;
1287 return (in.v);
1290 static __inline vec_float4 spu_promote(float a, int element)
1292 union {
1293 vec_float4 v;
1294 float f[4];
1295 } in;
1297 in.f[element & 3] = a;
1298 return (in.v);
1301 static __inline vec_ullong2 spu_promote(unsigned long long a, int element)
1303 union {
1304 vec_ullong2 v;
1305 unsigned long long l[2];
1306 } in;
1308 in.l[element & 1] = a;
1309 return (in.v);
1312 static __inline vec_llong2 spu_promote(signed long long a, int element)
1314 union {
1315 vec_llong2 v;
1316 signed long long l[2];
1317 } in;
1319 in.l[element & 1] = a;
1320 return (in.v);
1323 static __inline vec_double2 spu_promote(double a, int element)
1325 union {
1326 vec_double2 v;
1327 double d[2];
1328 } in;
1330 in.d[element & 1] = a;
1331 return (in.v);
/* spu_re
 * ======
 * Reciprocal estimate; maps directly to VMX vec_re.
 */
#define spu_re(_a)		vec_re(_a)

/* spu_readch
 * ==========
 * SPU channel reads have no VMX mapping; return 0.
 */
#define spu_readch(_channel)	0	/* not mappable */

/* spu_readchcnt
 * =============
 */
#define spu_readchcnt(_channel)	0	/* not mappable */

/* spu_readchqw
 * ============
 * Quadword channel read; yields a zero vector via a statement expression.
 */
#define spu_readchqw(_channel) __extension__ ({ vec_uint4 result = { 0, 0, 0, 0 }; result; })
1357 /* spu_rl
1358 * ======
1360 static __inline vec_ushort8 spu_rl(vec_ushort8 a, vec_short8 b)
1362 return ((vec_ushort8)(si_roth((qword)(a), (qword)(b))));
1365 static __inline vec_short8 spu_rl(vec_short8 a, vec_short8 b)
1367 return ((vec_short8)(si_roth((qword)(a), (qword)(b))));
1370 static __inline vec_uint4 spu_rl(vec_uint4 a, vec_int4 b)
1372 return ((vec_uint4)(si_rot((qword)(a), (qword)(b))));
1375 static __inline vec_int4 spu_rl(vec_int4 a, vec_int4 b)
1377 return ((vec_int4)(si_rot((qword)(a), (qword)(b))));
1380 static __inline vec_ushort8 spu_rl(vec_ushort8 a, int b)
1382 return ((vec_ushort8)(si_rothi((qword)(a), b)));
1385 static __inline vec_short8 spu_rl(vec_short8 a, int b)
1387 return ((vec_short8)(si_rothi((qword)(a), b)));
1390 static __inline vec_uint4 spu_rl(vec_uint4 a, int b)
1392 return ((vec_uint4)(si_roti((qword)(a), b)));
1395 static __inline vec_int4 spu_rl(vec_int4 a, int b)
1397 return ((vec_int4)(si_roti((qword)(a), b)));
1401 /* spu_rlmask
1402 * ==========
1404 static __inline vec_ushort8 spu_rlmask(vec_ushort8 a, vec_short8 b)
1406 return ((vec_ushort8)(si_rothm((qword)(a), (qword)(b))));
1409 static __inline vec_short8 spu_rlmask(vec_short8 a, vec_short8 b)
1411 return ((vec_short8)(si_rothm((qword)(a), (qword)(b))));
1414 static __inline vec_uint4 spu_rlmask(vec_uint4 a, vec_int4 b)
1416 return ((vec_uint4)(si_rotm((qword)(a), (qword)(b))));
1419 static __inline vec_int4 spu_rlmask(vec_int4 a, vec_int4 b)
1421 return ((vec_int4)(si_rotm((qword)(a), (qword)(b))));
1424 static __inline vec_ushort8 spu_rlmask(vec_ushort8 a, int b)
1426 return ((vec_ushort8)(si_rothmi((qword)(a), b)));
1429 static __inline vec_short8 spu_rlmask(vec_short8 a, int b)
1431 return ((vec_short8)(si_rothmi((qword)(a), b)));
1435 static __inline vec_uint4 spu_rlmask(vec_uint4 a, int b)
1437 return ((vec_uint4)(si_rotmi((qword)(a), b)));
1440 static __inline vec_int4 spu_rlmask(vec_int4 a, int b)
1442 return ((vec_int4)(si_rotmi((qword)(a), b)));
1445 /* spu_rlmaska
1446 * ===========
1448 static __inline vec_short8 spu_rlmaska(vec_short8 a, vec_short8 b)
1450 return ((vec_short8)(si_rotmah((qword)(a), (qword)(b))));
1453 static __inline vec_ushort8 spu_rlmaska(vec_ushort8 a, vec_short8 b)
1455 return ((vec_ushort8)(si_rotmah((qword)(a), (qword)(b))));
1459 static __inline vec_int4 spu_rlmaska(vec_int4 a, vec_int4 b)
1461 return ((vec_int4)(si_rotma((qword)(a), (qword)(b))));
1464 static __inline vec_uint4 spu_rlmaska(vec_uint4 a, vec_int4 b)
1466 return ((vec_uint4)(si_rotma((qword)(a), (qword)(b))));
1469 static __inline vec_ushort8 spu_rlmaska(vec_ushort8 a, int b)
1471 return ((vec_ushort8)(si_rotmahi((qword)(a), b)));
1474 static __inline vec_short8 spu_rlmaska(vec_short8 a, int b)
1476 return ((vec_short8)(si_rotmahi((qword)(a), b)));
1479 static __inline vec_uint4 spu_rlmaska(vec_uint4 a, int b)
1481 return ((vec_uint4)(si_rotmai((qword)(a), b)));
1484 static __inline vec_int4 spu_rlmaska(vec_int4 a, int b)
1486 return ((vec_int4)(si_rotmai((qword)(a), b)));
1490 /* spu_rlmaskqw
1491 * ============
1493 static __inline vec_uchar16 spu_rlmaskqw(vec_uchar16 a, int count)
1495 return ((vec_uchar16)(si_rotqmbi((qword)(a), si_from_int(count))));
1498 static __inline vec_char16 spu_rlmaskqw(vec_char16 a, int count)
1500 return ((vec_char16)(si_rotqmbi((qword)(a), si_from_int(count))));
1503 static __inline vec_ushort8 spu_rlmaskqw(vec_ushort8 a, int count)
1505 return ((vec_ushort8)(si_rotqmbi((qword)(a), si_from_int(count))));
1508 static __inline vec_short8 spu_rlmaskqw(vec_short8 a, int count)
1510 return ((vec_short8)(si_rotqmbi((qword)(a), si_from_int(count))));
1513 static __inline vec_uint4 spu_rlmaskqw(vec_uint4 a, int count)
1515 return ((vec_uint4)(si_rotqmbi((qword)(a), si_from_int(count))));
1518 static __inline vec_int4 spu_rlmaskqw(vec_int4 a, int count)
1520 return ((vec_int4)(si_rotqmbi((qword)(a), si_from_int(count))));
1523 static __inline vec_float4 spu_rlmaskqw(vec_float4 a, int count)
1525 return ((vec_float4)(si_rotqmbi((qword)(a), si_from_int(count))));
1528 static __inline vec_ullong2 spu_rlmaskqw(vec_ullong2 a, int count)
1530 return ((vec_ullong2)(si_rotqmbi((qword)(a), si_from_int(count))));
1533 static __inline vec_llong2 spu_rlmaskqw(vec_llong2 a, int count)
1535 return ((vec_llong2)(si_rotqmbi((qword)(a), si_from_int(count))));
1538 static __inline vec_double2 spu_rlmaskqw(vec_double2 a, int count)
1540 return ((vec_double2)(si_rotqmbi((qword)(a), si_from_int(count))));
1543 /* spu_rlmaskqwbyte
1544 * ================
1546 static __inline vec_uchar16 spu_rlmaskqwbyte(vec_uchar16 a, int count)
1548 return ((vec_uchar16)(si_rotqmby((qword)(a), si_from_int(count))));
1551 static __inline vec_char16 spu_rlmaskqwbyte(vec_char16 a, int count)
1553 return ((vec_char16)(si_rotqmby((qword)(a), si_from_int(count))));
1556 static __inline vec_ushort8 spu_rlmaskqwbyte(vec_ushort8 a, int count)
1558 return ((vec_ushort8)(si_rotqmby((qword)(a), si_from_int(count))));
1561 static __inline vec_short8 spu_rlmaskqwbyte(vec_short8 a, int count)
1563 return ((vec_short8)(si_rotqmby((qword)(a), si_from_int(count))));
1566 static __inline vec_uint4 spu_rlmaskqwbyte(vec_uint4 a, int count)
1568 return ((vec_uint4)(si_rotqmby((qword)(a), si_from_int(count))));
1571 static __inline vec_int4 spu_rlmaskqwbyte(vec_int4 a, int count)
1573 return ((vec_int4)(si_rotqmby((qword)(a), si_from_int(count))));
1576 static __inline vec_float4 spu_rlmaskqwbyte(vec_float4 a, int count)
1578 return ((vec_float4)(si_rotqmby((qword)(a), si_from_int(count))));
1581 static __inline vec_ullong2 spu_rlmaskqwbyte(vec_ullong2 a, int count)
1583 return ((vec_ullong2)(si_rotqmby((qword)(a), si_from_int(count))));
1586 static __inline vec_llong2 spu_rlmaskqwbyte(vec_llong2 a, int count)
1588 return ((vec_llong2)(si_rotqmby((qword)(a), si_from_int(count))));
1591 static __inline vec_double2 spu_rlmaskqwbyte(vec_double2 a, int count)
1593 return ((vec_double2)(si_rotqmby((qword)(a), si_from_int(count))));
1596 /* spu_rlmaskqwbytebc
1597 * ==================
1599 static __inline vec_uchar16 spu_rlmaskqwbytebc(vec_uchar16 a, int count)
1601 return ((vec_uchar16)(si_rotqmbybi((qword)(a), si_from_int(count))));
1604 static __inline vec_char16 spu_rlmaskqwbytebc(vec_char16 a, int count)
1606 return ((vec_char16)(si_rotqmbybi((qword)(a), si_from_int(count))));
1609 static __inline vec_ushort8 spu_rlmaskqwbytebc(vec_ushort8 a, int count)
1611 return ((vec_ushort8)(si_rotqmbybi((qword)(a), si_from_int(count))));
1614 static __inline vec_short8 spu_rlmaskqwbytebc(vec_short8 a, int count)
1616 return ((vec_short8)(si_rotqmbybi((qword)(a), si_from_int(count))));
1619 static __inline vec_uint4 spu_rlmaskqwbytebc(vec_uint4 a, int count)
1621 return ((vec_uint4)(si_rotqmbybi((qword)(a), si_from_int(count))));
1624 static __inline vec_int4 spu_rlmaskqwbytebc(vec_int4 a, int count)
1626 return ((vec_int4)(si_rotqmbybi((qword)(a), si_from_int(count))));
1629 static __inline vec_float4 spu_rlmaskqwbytebc(vec_float4 a, int count)
1631 return ((vec_float4)(si_rotqmbybi((qword)(a), si_from_int(count))));
1634 static __inline vec_ullong2 spu_rlmaskqwbytebc(vec_ullong2 a, int count)
1636 return ((vec_ullong2)(si_rotqmbybi((qword)(a), si_from_int(count))));
1639 static __inline vec_llong2 spu_rlmaskqwbytebc(vec_llong2 a, int count)
1641 return ((vec_llong2)(si_rotqmbybi((qword)(a), si_from_int(count))));
1644 static __inline vec_double2 spu_rlmaskqwbytebc(vec_double2 a, int count)
1646 return ((vec_double2)(si_rotqmbybi((qword)(a), si_from_int(count))));
1650 /* spu_rlqwbyte
1651 * ============
1653 static __inline vec_uchar16 spu_rlqwbyte(vec_uchar16 a, int count)
1655 return ((vec_uchar16)(si_rotqby((qword)(a), si_from_int(count))));
1658 static __inline vec_char16 spu_rlqwbyte(vec_char16 a, int count)
1660 return ((vec_char16)(si_rotqby((qword)(a), si_from_int(count))));
1663 static __inline vec_ushort8 spu_rlqwbyte(vec_ushort8 a, int count)
1665 return ((vec_ushort8)(si_rotqby((qword)(a), si_from_int(count))));
1668 static __inline vec_short8 spu_rlqwbyte(vec_short8 a, int count)
1670 return ((vec_short8)(si_rotqby((qword)(a), si_from_int(count))));
1673 static __inline vec_uint4 spu_rlqwbyte(vec_uint4 a, int count)
1675 return ((vec_uint4)(si_rotqby((qword)(a), si_from_int(count))));
1678 static __inline vec_int4 spu_rlqwbyte(vec_int4 a, int count)
1680 return ((vec_int4)(si_rotqby((qword)(a), si_from_int(count))));
1683 static __inline vec_float4 spu_rlqwbyte(vec_float4 a, int count)
1685 return ((vec_float4)(si_rotqby((qword)(a), si_from_int(count))));
1688 static __inline vec_ullong2 spu_rlqwbyte(vec_ullong2 a, int count)
1690 return ((vec_ullong2)(si_rotqby((qword)(a), si_from_int(count))));
1693 static __inline vec_llong2 spu_rlqwbyte(vec_llong2 a, int count)
1695 return ((vec_llong2)(si_rotqby((qword)(a), si_from_int(count))));
1698 static __inline vec_double2 spu_rlqwbyte(vec_double2 a, int count)
1700 return ((vec_double2)(si_rotqby((qword)(a), si_from_int(count))));
1704 /* spu_rlqwbytebc
1705 * ==============
1707 static __inline vec_uchar16 spu_rlqwbytebc(vec_uchar16 a, int count)
1709 return ((vec_uchar16)(si_rotqbybi((qword)(a), si_from_int(count))));
1712 static __inline vec_char16 spu_rlqwbytebc(vec_char16 a, int count)
1714 return ((vec_char16)(si_rotqbybi((qword)(a), si_from_int(count))));
1717 static __inline vec_ushort8 spu_rlqwbytebc(vec_ushort8 a, int count)
1719 return ((vec_ushort8)(si_rotqbybi((qword)(a), si_from_int(count))));
1722 static __inline vec_short8 spu_rlqwbytebc(vec_short8 a, int count)
1724 return ((vec_short8)(si_rotqbybi((qword)(a), si_from_int(count))));
1727 static __inline vec_uint4 spu_rlqwbytebc(vec_uint4 a, int count)
1729 return ((vec_uint4)(si_rotqbybi((qword)(a), si_from_int(count))));
1732 static __inline vec_int4 spu_rlqwbytebc(vec_int4 a, int count)
1734 return ((vec_int4)(si_rotqbybi((qword)(a), si_from_int(count))));
1737 static __inline vec_float4 spu_rlqwbytebc(vec_float4 a, int count)
1739 return ((vec_float4)(si_rotqbybi((qword)(a), si_from_int(count))));
1742 static __inline vec_ullong2 spu_rlqwbytebc(vec_ullong2 a, int count)
1744 return ((vec_ullong2)(si_rotqbybi((qword)(a), si_from_int(count))));
1747 static __inline vec_llong2 spu_rlqwbytebc(vec_llong2 a, int count)
1749 return ((vec_llong2)(si_rotqbybi((qword)(a), si_from_int(count))));
1752 static __inline vec_double2 spu_rlqwbytebc(vec_double2 a, int count)
1754 return ((vec_double2)(si_rotqbybi((qword)(a), si_from_int(count))));
1757 /* spu_rlqw
1758 * ========
1760 static __inline vec_uchar16 spu_rlqw(vec_uchar16 a, int count)
1762 return ((vec_uchar16)(si_rotqbi((qword)(a), si_from_int(count))));
1765 static __inline vec_char16 spu_rlqw(vec_char16 a, int count)
1767 return ((vec_char16)(si_rotqbi((qword)(a), si_from_int(count))));
1770 static __inline vec_ushort8 spu_rlqw(vec_ushort8 a, int count)
1772 return ((vec_ushort8)(si_rotqbi((qword)(a), si_from_int(count))));
1775 static __inline vec_short8 spu_rlqw(vec_short8 a, int count)
1777 return ((vec_short8)(si_rotqbi((qword)(a), si_from_int(count))));
1780 static __inline vec_uint4 spu_rlqw(vec_uint4 a, int count)
1782 return ((vec_uint4)(si_rotqbi((qword)(a), si_from_int(count))));
1785 static __inline vec_int4 spu_rlqw(vec_int4 a, int count)
1787 return ((vec_int4)(si_rotqbi((qword)(a), si_from_int(count))));
1790 static __inline vec_float4 spu_rlqw(vec_float4 a, int count)
1792 return ((vec_float4)(si_rotqbi((qword)(a), si_from_int(count))));
1795 static __inline vec_ullong2 spu_rlqw(vec_ullong2 a, int count)
1797 return ((vec_ullong2)(si_rotqbi((qword)(a), si_from_int(count))));
1800 static __inline vec_llong2 spu_rlqw(vec_llong2 a, int count)
1802 return ((vec_llong2)(si_rotqbi((qword)(a), si_from_int(count))));
1805 static __inline vec_double2 spu_rlqw(vec_double2 a, int count)
1807 return ((vec_double2)(si_rotqbi((qword)(a), si_from_int(count))));
1810 /* spu_roundtf
1811 * ===========
1813 static __inline vec_float4 spu_roundtf(vec_double2 a)
1815 return ((vec_float4)(si_frds((qword)(a))));
/* spu_rsqrte
 * ==========
 */
#define spu_rsqrte(_a)		vec_rsqrte(_a)
1825 /* spu_sel
1826 * =======
1828 static __inline vec_uchar16 spu_sel(vec_uchar16 a, vec_uchar16 b, vec_uchar16 pattern)
1830 return ((vec_uchar16)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1833 static __inline vec_char16 spu_sel(vec_char16 a, vec_char16 b, vec_uchar16 pattern)
1835 return ((vec_char16)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1838 static __inline vec_ushort8 spu_sel(vec_ushort8 a, vec_ushort8 b, vec_ushort8 pattern)
1840 return ((vec_ushort8)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1843 static __inline vec_short8 spu_sel(vec_short8 a, vec_short8 b, vec_ushort8 pattern)
1845 return ((vec_short8)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1848 static __inline vec_uint4 spu_sel(vec_uint4 a, vec_uint4 b, vec_uint4 pattern)
1850 return ((vec_uint4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1853 static __inline vec_int4 spu_sel(vec_int4 a, vec_int4 b, vec_uint4 pattern)
1855 return ((vec_int4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1858 static __inline vec_float4 spu_sel(vec_float4 a, vec_float4 b, vec_uint4 pattern)
1860 return ((vec_float4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1863 static __inline vec_ullong2 spu_sel(vec_ullong2 a, vec_ullong2 b, vec_ullong2 pattern)
1865 return ((vec_ullong2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1868 static __inline vec_llong2 spu_sel(vec_llong2 a, vec_llong2 b, vec_ullong2 pattern)
1870 return ((vec_llong2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1873 static __inline vec_double2 spu_sel(vec_double2 a, vec_double2 b, vec_ullong2 pattern)
1875 return ((vec_double2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
1880 /* spu_shuffle
1881 * ===========
1883 static __inline vec_uchar16 spu_shuffle(vec_uchar16 a, vec_uchar16 b, vec_uchar16 pattern)
1885 return ((vec_uchar16)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1888 static __inline vec_char16 spu_shuffle(vec_char16 a, vec_char16 b, vec_uchar16 pattern)
1890 return ((vec_char16)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1893 static __inline vec_ushort8 spu_shuffle(vec_ushort8 a, vec_ushort8 b, vec_uchar16 pattern)
1895 return ((vec_ushort8)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1898 static __inline vec_short8 spu_shuffle(vec_short8 a, vec_short8 b, vec_uchar16 pattern)
1900 return ((vec_short8)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1903 static __inline vec_uint4 spu_shuffle(vec_uint4 a, vec_uint4 b, vec_uchar16 pattern)
1905 return ((vec_uint4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1908 static __inline vec_int4 spu_shuffle(vec_int4 a, vec_int4 b, vec_uchar16 pattern)
1910 return ((vec_int4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1913 static __inline vec_float4 spu_shuffle(vec_float4 a, vec_float4 b, vec_uchar16 pattern)
1915 return ((vec_float4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1918 static __inline vec_ullong2 spu_shuffle(vec_ullong2 a, vec_ullong2 b, vec_uchar16 pattern)
1920 return ((vec_ullong2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1923 static __inline vec_llong2 spu_shuffle(vec_llong2 a, vec_llong2 b, vec_uchar16 pattern)
1925 return ((vec_llong2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1928 static __inline vec_double2 spu_shuffle(vec_double2 a, vec_double2 b, vec_uchar16 pattern)
1930 return ((vec_double2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
1934 /* spu_sl
1935 * ======
1937 static __inline vec_ushort8 spu_sl(vec_ushort8 a, vec_ushort8 b)
1939 return ((vec_ushort8)(si_shlh((qword)(a), (qword)(b))));
1942 static __inline vec_short8 spu_sl(vec_short8 a, vec_ushort8 b)
1944 return ((vec_short8)(si_shlh((qword)(a), (qword)(b))));
1947 static __inline vec_uint4 spu_sl(vec_uint4 a, vec_uint4 b)
1949 return ((vec_uint4)(si_shl((qword)(a), (qword)(b))));
1952 static __inline vec_int4 spu_sl(vec_int4 a, vec_uint4 b)
1954 return ((vec_int4)(si_shl((qword)(a), (qword)(b))));
1957 static __inline vec_ushort8 spu_sl(vec_ushort8 a, unsigned int b)
1959 return ((vec_ushort8)(si_shlhi((qword)(a), b)));
1962 static __inline vec_short8 spu_sl(vec_short8 a, unsigned int b)
1964 return ((vec_short8)(si_shlhi((qword)(a), b)));
1967 static __inline vec_uint4 spu_sl(vec_uint4 a, unsigned int b)
1969 return ((vec_uint4)(si_shli((qword)(a), b)));
1972 static __inline vec_int4 spu_sl(vec_int4 a, unsigned int b)
1974 return ((vec_int4)(si_shli((qword)(a), b)));
1978 /* spu_slqw
1979 * ========
1981 static __inline vec_uchar16 spu_slqw(vec_uchar16 a, unsigned int count)
1983 return ((vec_uchar16)(si_shlqbi((qword)(a), si_from_uint(count))));
1986 static __inline vec_char16 spu_slqw(vec_char16 a, unsigned int count)
1988 return ((vec_char16)(si_shlqbi((qword)(a), si_from_uint(count))));
1991 static __inline vec_ushort8 spu_slqw(vec_ushort8 a, unsigned int count)
1993 return ((vec_ushort8)(si_shlqbi((qword)(a), si_from_uint(count))));
1996 static __inline vec_short8 spu_slqw(vec_short8 a, unsigned int count)
1998 return ((vec_short8)(si_shlqbi((qword)(a), si_from_uint(count))));
2001 static __inline vec_uint4 spu_slqw(vec_uint4 a, unsigned int count)
2003 return ((vec_uint4)(si_shlqbi((qword)(a), si_from_uint(count))));
2006 static __inline vec_int4 spu_slqw(vec_int4 a, unsigned int count)
2008 return ((vec_int4)(si_shlqbi((qword)(a), si_from_uint(count))));
2011 static __inline vec_float4 spu_slqw(vec_float4 a, unsigned int count)
2013 return ((vec_float4)(si_shlqbi((qword)(a), si_from_uint(count))));
2016 static __inline vec_ullong2 spu_slqw(vec_ullong2 a, unsigned int count)
2018 return ((vec_ullong2)(si_shlqbi((qword)(a), si_from_uint(count))));
2021 static __inline vec_llong2 spu_slqw(vec_llong2 a, unsigned int count)
2023 return ((vec_llong2)(si_shlqbi((qword)(a), si_from_uint(count))));
2026 static __inline vec_double2 spu_slqw(vec_double2 a, unsigned int count)
2028 return ((vec_double2)(si_shlqbi((qword)(a), si_from_uint(count))));
2031 /* spu_slqwbyte
2032 * ============
2034 static __inline vec_uchar16 spu_slqwbyte(vec_uchar16 a, unsigned int count)
2036 return ((vec_uchar16)(si_shlqby((qword)(a), si_from_uint(count))));
2039 static __inline vec_char16 spu_slqwbyte(vec_char16 a, unsigned int count)
2041 return ((vec_char16)(si_shlqby((qword)(a), si_from_uint(count))));
2044 static __inline vec_ushort8 spu_slqwbyte(vec_ushort8 a, unsigned int count)
2046 return ((vec_ushort8)(si_shlqby((qword)(a), si_from_uint(count))));
2049 static __inline vec_short8 spu_slqwbyte(vec_short8 a, unsigned int count)
2051 return ((vec_short8)(si_shlqby((qword)(a), si_from_uint(count))));
2054 static __inline vec_uint4 spu_slqwbyte(vec_uint4 a, unsigned int count)
2056 return ((vec_uint4)(si_shlqby((qword)(a), si_from_uint(count))));
2059 static __inline vec_int4 spu_slqwbyte(vec_int4 a, unsigned int count)
2061 return ((vec_int4)(si_shlqby((qword)(a), si_from_uint(count))));
2064 static __inline vec_float4 spu_slqwbyte(vec_float4 a, unsigned int count)
2066 return ((vec_float4)(si_shlqby((qword)(a), si_from_uint(count))));
2069 static __inline vec_ullong2 spu_slqwbyte(vec_ullong2 a, unsigned int count)
2071 return ((vec_ullong2)(si_shlqby((qword)(a), si_from_uint(count))));
2074 static __inline vec_llong2 spu_slqwbyte(vec_llong2 a, unsigned int count)
2076 return ((vec_llong2)(si_shlqby((qword)(a), si_from_uint(count))));
2079 static __inline vec_double2 spu_slqwbyte(vec_double2 a, unsigned int count)
2081 return ((vec_double2)(si_shlqby((qword)(a), si_from_uint(count))));
2084 /* spu_slqwbytebc
2085 * ==============
2087 static __inline vec_uchar16 spu_slqwbytebc(vec_uchar16 a, unsigned int count)
2089 return ((vec_uchar16)(si_shlqbybi((qword)(a), si_from_uint(count))));
2092 static __inline vec_char16 spu_slqwbytebc(vec_char16 a, unsigned int count)
2094 return ((vec_char16)(si_shlqbybi((qword)(a), si_from_uint(count))));
2097 static __inline vec_ushort8 spu_slqwbytebc(vec_ushort8 a, unsigned int count)
2099 return ((vec_ushort8)(si_shlqbybi((qword)(a), si_from_uint(count))));
2102 static __inline vec_short8 spu_slqwbytebc(vec_short8 a, unsigned int count)
2104 return ((vec_short8)(si_shlqbybi((qword)(a), si_from_uint(count))));
2107 static __inline vec_uint4 spu_slqwbytebc(vec_uint4 a, unsigned int count)
2109 return ((vec_uint4)(si_shlqbybi((qword)(a), si_from_uint(count))));
2112 static __inline vec_int4 spu_slqwbytebc(vec_int4 a, unsigned int count)
2114 return ((vec_int4)(si_shlqbybi((qword)(a), si_from_uint(count))));
2117 static __inline vec_float4 spu_slqwbytebc(vec_float4 a, unsigned int count)
2119 return ((vec_float4)(si_shlqbybi((qword)(a), si_from_uint(count))));
2122 static __inline vec_ullong2 spu_slqwbytebc(vec_ullong2 a, unsigned int count)
2124 return ((vec_ullong2)(si_shlqbybi((qword)(a), si_from_uint(count))));
2127 static __inline vec_llong2 spu_slqwbytebc(vec_llong2 a, unsigned int count)
2129 return ((vec_llong2)(si_shlqbybi((qword)(a), si_from_uint(count))));
2132 static __inline vec_double2 spu_slqwbytebc(vec_double2 a, unsigned int count)
2134 return ((vec_double2)(si_shlqbybi((qword)(a), si_from_uint(count))));
2137 /* spu_splats
2138 * ==========
2140 static __inline vec_uchar16 spu_splats(unsigned char a)
2142 union {
2143 vec_uchar16 v;
2144 unsigned char c[16];
2145 } in;
2147 in.c[0] = a;
2148 return (vec_splat(in.v, 0));
2151 static __inline vec_char16 spu_splats(signed char a)
2153 return ((vec_char16)spu_splats((unsigned char)(a)));
2156 static __inline vec_ushort8 spu_splats(unsigned short a)
2158 union {
2159 vec_ushort8 v;
2160 unsigned short s[8];
2161 } in;
2163 in.s[0] = a;
2164 return (vec_splat(in.v, 0));
2167 static __inline vec_short8 spu_splats(signed short a)
2169 return ((vec_short8)spu_splats((unsigned short)(a)));
2172 static __inline vec_uint4 spu_splats(unsigned int a)
2174 union {
2175 vec_uint4 v;
2176 unsigned int i[4];
2177 } in;
2179 in.i[0] = a;
2180 return (vec_splat(in.v, 0));
2183 static __inline vec_int4 spu_splats(signed int a)
2185 return ((vec_int4)spu_splats((unsigned int)(a)));
2188 static __inline vec_float4 spu_splats(float a)
2190 union {
2191 vec_float4 v;
2192 float f[4];
2193 } in;
2195 in.f[0] = a;
2196 return (vec_splat(in.v, 0));
2199 static __inline vec_ullong2 spu_splats(unsigned long long a)
2201 union {
2202 vec_ullong2 v;
2203 unsigned long long l[2];
2204 } in;
2206 in.l[0] = a;
2207 in.l[1] = a;
2208 return (in.v);
2211 static __inline vec_llong2 spu_splats(signed long long a)
2213 return ((vec_llong2)spu_splats((unsigned long long)(a)));
2216 static __inline vec_double2 spu_splats(double a)
2218 union {
2219 vec_double2 v;
2220 double d[2];
2221 } in;
2223 in.d[0] = a;
2224 in.d[1] = a;
2225 return (in.v);
/* spu_stop
 * ========
 */
#define spu_stop(_type)	si_stop(_type)
2235 /* spu_sub
2236 * =======
2238 static __inline vec_ushort8 spu_sub(vec_ushort8 a, vec_ushort8 b)
2240 return ((vec_ushort8)(si_sfh((qword)(b), (qword)(a))));
2243 static __inline vec_short8 spu_sub(vec_short8 a, vec_short8 b)
2245 return ((vec_short8)(si_sfh((qword)(b), (qword)(a))));
2248 static __inline vec_uint4 spu_sub(vec_uint4 a, vec_uint4 b)
2250 return ((vec_uint4)(si_sf((qword)(b), (qword)(a))));
2253 static __inline vec_int4 spu_sub(vec_int4 a, vec_int4 b)
2255 return ((vec_int4)(si_sf((qword)(b), (qword)(a))));
2258 static __inline vec_float4 spu_sub(vec_float4 a, vec_float4 b)
2260 return ((vec_float4)(si_fs((qword)(a), (qword)(b))));
2263 static __inline vec_double2 spu_sub(vec_double2 a, vec_double2 b)
2265 return ((vec_double2)(si_dfs((qword)(a), (qword)(b))));
2268 static __inline vec_uint4 spu_sub(unsigned int a, vec_uint4 b)
2270 return ((vec_uint4)(si_sfi((qword)b, (int)a)));
2273 static __inline vec_int4 spu_sub(signed int a, vec_int4 b)
2275 return ((vec_int4)(si_sfi((qword)b, (int)a)));
2278 static __inline vec_ushort8 spu_sub(unsigned short a, vec_ushort8 b)
2280 return ((vec_ushort8)(si_sfhi((qword)b, (short)a)));
2283 static __inline vec_short8 spu_sub(signed short a, vec_short8 b)
2285 return ((vec_short8)(si_sfhi((qword)b, (short)a)));
2288 /* spu_subx
2289 * ========
2291 static __inline vec_uint4 spu_subx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
2293 return ((vec_uint4)(si_sfx((qword)(b), (qword)(a), (qword)(c))));
2296 static __inline vec_int4 spu_subx(vec_int4 a, vec_int4 b, vec_int4 c)
2298 return ((vec_int4)(si_sfx((qword)(b), (qword)(a), (qword)(c))));
2301 /* spu_sumb
2302 * ========
2304 static __inline vec_ushort8 spu_sumb(vec_uchar16 a, vec_uchar16 b)
2306 return ((vec_ushort8)(si_sumb((qword)(a), (qword)(b))));
/* spu_sync
 * spu_sync_c
 * ========
 */
#define spu_sync()	/* do nothing */

#define spu_sync_c()	/* do nothing */
/* spu_writech
 * ===========
 */
#define spu_writech(_channel, _a)	/* not mappable */

/* spu_writechqw
 * =============
 */
#define spu_writechqw(_channel, _a)	/* not mappable */
2330 /* spu_xor
2331 * =======
2333 static __inline vec_uchar16 spu_xor(vec_uchar16 a, vec_uchar16 b)
2335 return ((vec_uchar16)(si_xor((qword)(a), (qword)(b))));
2338 static __inline vec_char16 spu_xor(vec_char16 a, vec_char16 b)
2340 return ((vec_char16)(si_xor((qword)(a), (qword)(b))));
2343 static __inline vec_ushort8 spu_xor(vec_ushort8 a, vec_ushort8 b)
2345 return ((vec_ushort8)(si_xor((qword)(a), (qword)(b))));
2348 static __inline vec_short8 spu_xor(vec_short8 a, vec_short8 b)
2350 return ((vec_short8)(si_xor((qword)(a), (qword)(b))));
2353 static __inline vec_uint4 spu_xor(vec_uint4 a, vec_uint4 b)
2355 return ((vec_uint4)(si_xor((qword)(a), (qword)(b))));
2358 static __inline vec_int4 spu_xor(vec_int4 a, vec_int4 b)
2360 return ((vec_int4)(si_xor((qword)(a), (qword)(b))));
2363 static __inline vec_float4 spu_xor(vec_float4 a, vec_float4 b)
2365 return ((vec_float4)(si_xor((qword)(a), (qword)(b))));
2368 static __inline vec_ullong2 spu_xor(vec_ullong2 a, vec_ullong2 b)
2370 return ((vec_ullong2)(si_xor((qword)(a), (qword)(b))));
2373 static __inline vec_llong2 spu_xor(vec_llong2 a, vec_llong2 b)
2375 return ((vec_llong2)(si_xor((qword)(a), (qword)(b))));
2378 static __inline vec_double2 spu_xor(vec_double2 a, vec_double2 b)
2380 return ((vec_double2)(si_xor((qword)(a), (qword)(b))));
2383 static __inline vec_uchar16 spu_xor(vec_uchar16 a, unsigned char b)
2385 return ((vec_uchar16)(si_xorbi((qword)(a), b)));
2388 static __inline vec_char16 spu_xor(vec_char16 a, signed char b)
2390 return ((vec_char16)(si_xorbi((qword)(a), (unsigned char)(b))));
2393 static __inline vec_ushort8 spu_xor(vec_ushort8 a, unsigned short b)
2395 return ((vec_ushort8)(si_xorhi((qword)(a), b)));
2398 static __inline vec_short8 spu_xor(vec_short8 a, signed short b)
2400 return ((vec_short8)(si_xorhi((qword)(a), (unsigned short)(b))));
2403 static __inline vec_uint4 spu_xor(vec_uint4 a, unsigned int b)
2405 return ((vec_uint4)(si_xori((qword)(a), b)));
2408 static __inline vec_int4 spu_xor(vec_int4 a, signed int b)
2410 return ((vec_int4)(si_xori((qword)(a), (unsigned int)(b))));
2413 #endif /* !__SPU__ */
2414 #endif /* __cplusplus */
2415 #endif /* !_SPU2VMX_H_ */