* defaults.h (FRAME_GROWS_DOWNWARD): Define to 0 if not defined.
[official-gcc.git] / gcc / config / arm / mmintrin.h
blobbed6204c24a49b690f87fd394e4eb6dc8d9b9566
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */
27 #ifndef _MMINTRIN_H_INCLUDED
28 #define _MMINTRIN_H_INCLUDED
30 /* The data type intended for user use. */
31 typedef unsigned long long __m64, __int64;
33 /* Internal data types for implementing the intrinsics. */
34 typedef int __v2si __attribute__ ((vector_size (8)));
35 typedef short __v4hi __attribute__ ((vector_size (8)));
36 typedef char __v8qi __attribute__ ((vector_size (8)));
38 /* "Convert" __m64 and __int64 into each other. */
39 static __inline __m64
40 _mm_cvtsi64_m64 (__int64 __i)
42 return __i;
45 static __inline __int64
46 _mm_cvtm64_si64 (__m64 __i)
48 return __i;
51 static __inline int
52 _mm_cvtsi64_si32 (__int64 __i)
54 return __i;
57 static __inline __int64
58 _mm_cvtsi32_si64 (int __i)
60 return __i;
63 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
64 the result, and the four 16-bit values from M2 into the upper four 8-bit
65 values of the result, all with signed saturation. */
66 static __inline __m64
67 _mm_packs_pi16 (__m64 __m1, __m64 __m2)
69 return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2);
72 /* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
73 the result, and the two 32-bit values from M2 into the upper two 16-bit
74 values of the result, all with signed saturation. */
75 static __inline __m64
76 _mm_packs_pi32 (__m64 __m1, __m64 __m2)
78 return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2);
81 /* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
82 the 64-bit value from M2 into the upper 32-bits of the result, all with
83 signed saturation for values that do not fit exactly into 32-bits. */
84 static __inline __m64
85 _mm_packs_pi64 (__m64 __m1, __m64 __m2)
87 return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2);
90 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
91 the result, and the four 16-bit values from M2 into the upper four 8-bit
92 values of the result, all with unsigned saturation. */
93 static __inline __m64
94 _mm_packs_pu16 (__m64 __m1, __m64 __m2)
96 return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2);
99 /* Pack the two 32-bit values from M1 into the lower two 16-bit values of
100 the result, and the two 32-bit values from M2 into the upper two 16-bit
101 values of the result, all with unsigned saturation. */
102 static __inline __m64
103 _mm_packs_pu32 (__m64 __m1, __m64 __m2)
105 return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2);
108 /* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
109 the 64-bit value from M2 into the upper 32-bits of the result, all with
110 unsigned saturation for values that do not fit exactly into 32-bits. */
111 static __inline __m64
112 _mm_packs_pu64 (__m64 __m1, __m64 __m2)
114 return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2);
117 /* Interleave the four 8-bit values from the high half of M1 with the four
118 8-bit values from the high half of M2. */
119 static __inline __m64
120 _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
122 return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2);
125 /* Interleave the two 16-bit values from the high half of M1 with the two
126 16-bit values from the high half of M2. */
127 static __inline __m64
128 _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
130 return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2);
133 /* Interleave the 32-bit value from the high half of M1 with the 32-bit
134 value from the high half of M2. */
135 static __inline __m64
136 _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
138 return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2);
141 /* Interleave the four 8-bit values from the low half of M1 with the four
142 8-bit values from the low half of M2. */
143 static __inline __m64
144 _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
146 return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2);
149 /* Interleave the two 16-bit values from the low half of M1 with the two
150 16-bit values from the low half of M2. */
151 static __inline __m64
152 _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
154 return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2);
157 /* Interleave the 32-bit value from the low half of M1 with the 32-bit
158 value from the low half of M2. */
159 static __inline __m64
160 _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
162 return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2);
165 /* Take the four 8-bit values from the low half of M1, sign extend them,
166 and return the result as a vector of four 16-bit quantities. */
167 static __inline __m64
168 _mm_unpackel_pi8 (__m64 __m1)
170 return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1);
173 /* Take the two 16-bit values from the low half of M1, sign extend them,
174 and return the result as a vector of two 32-bit quantities. */
175 static __inline __m64
176 _mm_unpackel_pi16 (__m64 __m1)
178 return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1);
181 /* Take the 32-bit value from the low half of M1, and return it sign extended
182 to 64 bits. */
183 static __inline __m64
184 _mm_unpackel_pi32 (__m64 __m1)
186 return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1);
189 /* Take the four 8-bit values from the high half of M1, sign extend them,
190 and return the result as a vector of four 16-bit quantities. */
191 static __inline __m64
192 _mm_unpackeh_pi8 (__m64 __m1)
194 return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1);
197 /* Take the two 16-bit values from the high half of M1, sign extend them,
198 and return the result as a vector of two 32-bit quantities. */
199 static __inline __m64
200 _mm_unpackeh_pi16 (__m64 __m1)
202 return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1);
205 /* Take the 32-bit value from the high half of M1, and return it sign extended
206 to 64 bits. */
207 static __inline __m64
208 _mm_unpackeh_pi32 (__m64 __m1)
210 return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1);
213 /* Take the four 8-bit values from the low half of M1, zero extend them,
214 and return the result as a vector of four 16-bit quantities. */
215 static __inline __m64
216 _mm_unpackel_pu8 (__m64 __m1)
218 return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1);
221 /* Take the two 16-bit values from the low half of M1, zero extend them,
222 and return the result as a vector of two 32-bit quantities. */
223 static __inline __m64
224 _mm_unpackel_pu16 (__m64 __m1)
226 return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1);
229 /* Take the 32-bit value from the low half of M1, and return it zero extended
230 to 64 bits. */
231 static __inline __m64
232 _mm_unpackel_pu32 (__m64 __m1)
234 return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1);
237 /* Take the four 8-bit values from the high half of M1, zero extend them,
238 and return the result as a vector of four 16-bit quantities. */
239 static __inline __m64
240 _mm_unpackeh_pu8 (__m64 __m1)
242 return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1);
245 /* Take the two 16-bit values from the high half of M1, zero extend them,
246 and return the result as a vector of two 32-bit quantities. */
247 static __inline __m64
248 _mm_unpackeh_pu16 (__m64 __m1)
250 return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1);
253 /* Take the 32-bit value from the high half of M1, and return it zero extended
254 to 64 bits. */
255 static __inline __m64
256 _mm_unpackeh_pu32 (__m64 __m1)
258 return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1);
261 /* Add the 8-bit values in M1 to the 8-bit values in M2. */
262 static __inline __m64
263 _mm_add_pi8 (__m64 __m1, __m64 __m2)
265 return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2);
268 /* Add the 16-bit values in M1 to the 16-bit values in M2. */
269 static __inline __m64
270 _mm_add_pi16 (__m64 __m1, __m64 __m2)
272 return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2);
275 /* Add the 32-bit values in M1 to the 32-bit values in M2. */
276 static __inline __m64
277 _mm_add_pi32 (__m64 __m1, __m64 __m2)
279 return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2);
282 /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
283 saturated arithmetic. */
284 static __inline __m64
285 _mm_adds_pi8 (__m64 __m1, __m64 __m2)
287 return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2);
290 /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
291 saturated arithmetic. */
292 static __inline __m64
293 _mm_adds_pi16 (__m64 __m1, __m64 __m2)
295 return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2);
298 /* Add the 32-bit values in M1 to the 32-bit values in M2 using signed
299 saturated arithmetic. */
300 static __inline __m64
301 _mm_adds_pi32 (__m64 __m1, __m64 __m2)
303 return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2);
306 /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
307 saturated arithmetic. */
308 static __inline __m64
309 _mm_adds_pu8 (__m64 __m1, __m64 __m2)
311 return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2);
314 /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
315 saturated arithmetic. */
316 static __inline __m64
317 _mm_adds_pu16 (__m64 __m1, __m64 __m2)
319 return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2);
322 /* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned
323 saturated arithmetic. */
324 static __inline __m64
325 _mm_adds_pu32 (__m64 __m1, __m64 __m2)
327 return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2);
330 /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
331 static __inline __m64
332 _mm_sub_pi8 (__m64 __m1, __m64 __m2)
334 return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2);
337 /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
338 static __inline __m64
339 _mm_sub_pi16 (__m64 __m1, __m64 __m2)
341 return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2);
344 /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
345 static __inline __m64
346 _mm_sub_pi32 (__m64 __m1, __m64 __m2)
348 return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2);
351 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
352 saturating arithmetic. */
353 static __inline __m64
354 _mm_subs_pi8 (__m64 __m1, __m64 __m2)
356 return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2);
359 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
360 signed saturating arithmetic. */
361 static __inline __m64
362 _mm_subs_pi16 (__m64 __m1, __m64 __m2)
364 return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2);
367 /* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
368 signed saturating arithmetic. */
369 static __inline __m64
370 _mm_subs_pi32 (__m64 __m1, __m64 __m2)
372 return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2);
375 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
376 unsigned saturating arithmetic. */
377 static __inline __m64
378 _mm_subs_pu8 (__m64 __m1, __m64 __m2)
380 return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2);
383 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
384 unsigned saturating arithmetic. */
385 static __inline __m64
386 _mm_subs_pu16 (__m64 __m1, __m64 __m2)
388 return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2);
391 /* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
392 unsigned saturating arithmetic. */
393 static __inline __m64
394 _mm_subs_pu32 (__m64 __m1, __m64 __m2)
396 return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2);
399 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
400 four 32-bit intermediate results, which are then summed by pairs to
401 produce two 32-bit results. */
402 static __inline __m64
403 _mm_madd_pi16 (__m64 __m1, __m64 __m2)
405 return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2);
408 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
409 four 32-bit intermediate results, which are then summed by pairs to
410 produce two 32-bit results. */
411 static __inline __m64
412 _mm_madd_pu16 (__m64 __m1, __m64 __m2)
414 return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2);
417 /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
418 M2 and produce the high 16 bits of the 32-bit results. */
419 static __inline __m64
420 _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
422 return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2);
425 /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
426 M2 and produce the high 16 bits of the 32-bit results. */
427 static __inline __m64
428 _mm_mulhi_pu16 (__m64 __m1, __m64 __m2)
430 return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2);
433 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
434 the low 16 bits of the results. */
435 static __inline __m64
436 _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
438 return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2);
441 /* Shift four 16-bit values in M left by COUNT. */
442 static __inline __m64
443 _mm_sll_pi16 (__m64 __m, __m64 __count)
445 return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count);
448 static __inline __m64
449 _mm_slli_pi16 (__m64 __m, int __count)
451 return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count);
454 /* Shift two 32-bit values in M left by COUNT. */
455 static __inline __m64
456 _mm_sll_pi32 (__m64 __m, __m64 __count)
458 return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count);
461 static __inline __m64
462 _mm_slli_pi32 (__m64 __m, int __count)
464 return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count);
467 /* Shift the 64-bit value in M left by COUNT. */
468 static __inline __m64
469 _mm_sll_si64 (__m64 __m, __m64 __count)
471 return (__m64) __builtin_arm_wslld (__m, __count);
474 static __inline __m64
475 _mm_slli_si64 (__m64 __m, int __count)
477 return (__m64) __builtin_arm_wslldi (__m, __count);
480 /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
481 static __inline __m64
482 _mm_sra_pi16 (__m64 __m, __m64 __count)
484 return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count);
487 static __inline __m64
488 _mm_srai_pi16 (__m64 __m, int __count)
490 return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count);
493 /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
494 static __inline __m64
495 _mm_sra_pi32 (__m64 __m, __m64 __count)
497 return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count);
500 static __inline __m64
501 _mm_srai_pi32 (__m64 __m, int __count)
503 return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count);
506 /* Shift the 64-bit value in M right by COUNT; shift in the sign bit. */
507 static __inline __m64
508 _mm_sra_si64 (__m64 __m, __m64 __count)
510 return (__m64) __builtin_arm_wsrad (__m, __count);
513 static __inline __m64
514 _mm_srai_si64 (__m64 __m, int __count)
516 return (__m64) __builtin_arm_wsradi (__m, __count);
519 /* Shift four 16-bit values in M right by COUNT; shift in zeros. */
520 static __inline __m64
521 _mm_srl_pi16 (__m64 __m, __m64 __count)
523 return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count);
526 static __inline __m64
527 _mm_srli_pi16 (__m64 __m, int __count)
529 return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count);
532 /* Shift two 32-bit values in M right by COUNT; shift in zeros. */
533 static __inline __m64
534 _mm_srl_pi32 (__m64 __m, __m64 __count)
536 return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
539 static __inline __m64
540 _mm_srli_pi32 (__m64 __m, int __count)
542 return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
545 /* Shift the 64-bit value in M left by COUNT; shift in zeros. */
546 static __inline __m64
547 _mm_srl_si64 (__m64 __m, __m64 __count)
549 return (__m64) __builtin_arm_wsrld (__m, __count);
552 static __inline __m64
553 _mm_srli_si64 (__m64 __m, int __count)
555 return (__m64) __builtin_arm_wsrldi (__m, __count);
558 /* Rotate four 16-bit values in M right by COUNT. */
559 static __inline __m64
560 _mm_ror_pi16 (__m64 __m, __m64 __count)
562 return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
565 static __inline __m64
566 _mm_rori_pi16 (__m64 __m, int __count)
568 return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
571 /* Rotate two 32-bit values in M right by COUNT. */
572 static __inline __m64
573 _mm_ror_pi32 (__m64 __m, __m64 __count)
575 return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
578 static __inline __m64
579 _mm_rori_pi32 (__m64 __m, int __count)
581 return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
584 /* Rotate two 64-bit values in M right by COUNT. */
585 static __inline __m64
586 _mm_ror_si64 (__m64 __m, __m64 __count)
588 return (__m64) __builtin_arm_wrord (__m, __count);
591 static __inline __m64
592 _mm_rori_si64 (__m64 __m, int __count)
594 return (__m64) __builtin_arm_wrordi (__m, __count);
597 /* Bit-wise AND the 64-bit values in M1 and M2. */
598 static __inline __m64
599 _mm_and_si64 (__m64 __m1, __m64 __m2)
601 return __builtin_arm_wand (__m1, __m2);
604 /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
605 64-bit value in M2. */
606 static __inline __m64
607 _mm_andnot_si64 (__m64 __m1, __m64 __m2)
609 return __builtin_arm_wandn (__m1, __m2);
612 /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
613 static __inline __m64
614 _mm_or_si64 (__m64 __m1, __m64 __m2)
616 return __builtin_arm_wor (__m1, __m2);
619 /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
620 static __inline __m64
621 _mm_xor_si64 (__m64 __m1, __m64 __m2)
623 return __builtin_arm_wxor (__m1, __m2);
626 /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
627 test is true and zero if false. */
628 static __inline __m64
629 _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
631 return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
634 static __inline __m64
635 _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
637 return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
640 static __inline __m64
641 _mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
643 return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
646 /* Compare four 16-bit values. The result of the comparison is 0xFFFF if
647 the test is true and zero if false. */
648 static __inline __m64
649 _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
651 return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
654 static __inline __m64
655 _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
657 return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
660 static __inline __m64
661 _mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
663 return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
666 /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
667 the test is true and zero if false. */
668 static __inline __m64
669 _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
671 return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
674 static __inline __m64
675 _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
677 return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
680 static __inline __m64
681 _mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
683 return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
686 /* Element-wise multiplication of unsigned 16-bit values __B and __C, followed
687 by accumulate across all elements and __A. */
688 static __inline __m64
689 _mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
691 return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
694 /* Element-wise multiplication of signed 16-bit values __B and __C, followed
695 by accumulate across all elements and __A. */
696 static __inline __m64
697 _mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
699 return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
702 /* Element-wise multiplication of unsigned 16-bit values __B and __C, followed
703 by accumulate across all elements. */
704 static __inline __m64
705 _mm_macz_pu16 (__m64 __A, __m64 __B)
707 return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
710 /* Element-wise multiplication of signed 16-bit values __B and __C, followed
711 by accumulate across all elements. */
712 static __inline __m64
713 _mm_macz_pi16 (__m64 __A, __m64 __B)
715 return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
718 /* Accumulate across all unsigned 8-bit values in __A. */
719 static __inline __m64
720 _mm_acc_pu8 (__m64 __A)
722 return __builtin_arm_waccb ((__v8qi)__A);
725 /* Accumulate across all unsigned 16-bit values in __A. */
726 static __inline __m64
727 _mm_acc_pu16 (__m64 __A)
729 return __builtin_arm_wacch ((__v4hi)__A);
732 /* Accumulate across all unsigned 32-bit values in __A. */
733 static __inline __m64
734 _mm_acc_pu32 (__m64 __A)
736 return __builtin_arm_waccw ((__v2si)__A);
739 static __inline __m64
740 _mm_mia_si64 (__m64 __A, int __B, int __C)
742 return __builtin_arm_tmia (__A, __B, __C);
745 static __inline __m64
746 _mm_miaph_si64 (__m64 __A, int __B, int __C)
748 return __builtin_arm_tmiaph (__A, __B, __C);
751 static __inline __m64
752 _mm_miabb_si64 (__m64 __A, int __B, int __C)
754 return __builtin_arm_tmiabb (__A, __B, __C);
757 static __inline __m64
758 _mm_miabt_si64 (__m64 __A, int __B, int __C)
760 return __builtin_arm_tmiabt (__A, __B, __C);
763 static __inline __m64
764 _mm_miatb_si64 (__m64 __A, int __B, int __C)
766 return __builtin_arm_tmiatb (__A, __B, __C);
769 static __inline __m64
770 _mm_miatt_si64 (__m64 __A, int __B, int __C)
772 return __builtin_arm_tmiatt (__A, __B, __C);
/* Extract one of the elements of A and sign extend.  The selector N must
   be immediate.  */
#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))

/* Extract one of the elements of A and zero extend.  The selector N must
   be immediate.  */
#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))

/* Inserts word D into one of the elements of A.  The selector N must be
   immediate.  */
#define _mm_insert_pi8(A, D, N) \
  ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
#define _mm_insert_pi32(A, D, N) \
  ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))
796 /* Compute the element-wise maximum of signed 8-bit values. */
797 static __inline __m64
798 _mm_max_pi8 (__m64 __A, __m64 __B)
800 return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
803 /* Compute the element-wise maximum of signed 16-bit values. */
804 static __inline __m64
805 _mm_max_pi16 (__m64 __A, __m64 __B)
807 return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
810 /* Compute the element-wise maximum of signed 32-bit values. */
811 static __inline __m64
812 _mm_max_pi32 (__m64 __A, __m64 __B)
814 return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
817 /* Compute the element-wise maximum of unsigned 8-bit values. */
818 static __inline __m64
819 _mm_max_pu8 (__m64 __A, __m64 __B)
821 return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
824 /* Compute the element-wise maximum of unsigned 16-bit values. */
825 static __inline __m64
826 _mm_max_pu16 (__m64 __A, __m64 __B)
828 return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
831 /* Compute the element-wise maximum of unsigned 32-bit values. */
832 static __inline __m64
833 _mm_max_pu32 (__m64 __A, __m64 __B)
835 return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
838 /* Compute the element-wise minimum of signed 16-bit values. */
839 static __inline __m64
840 _mm_min_pi8 (__m64 __A, __m64 __B)
842 return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
845 /* Compute the element-wise minimum of signed 16-bit values. */
846 static __inline __m64
847 _mm_min_pi16 (__m64 __A, __m64 __B)
849 return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
852 /* Compute the element-wise minimum of signed 32-bit values. */
853 static __inline __m64
854 _mm_min_pi32 (__m64 __A, __m64 __B)
856 return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
859 /* Compute the element-wise minimum of unsigned 16-bit values. */
860 static __inline __m64
861 _mm_min_pu8 (__m64 __A, __m64 __B)
863 return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
866 /* Compute the element-wise minimum of unsigned 16-bit values. */
867 static __inline __m64
868 _mm_min_pu16 (__m64 __A, __m64 __B)
870 return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
873 /* Compute the element-wise minimum of unsigned 32-bit values. */
874 static __inline __m64
875 _mm_min_pu32 (__m64 __A, __m64 __B)
877 return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
880 /* Create an 8-bit mask of the signs of 8-bit values. */
881 static __inline int
882 _mm_movemask_pi8 (__m64 __A)
884 return __builtin_arm_tmovmskb ((__v8qi)__A);
887 /* Create an 8-bit mask of the signs of 16-bit values. */
888 static __inline int
889 _mm_movemask_pi16 (__m64 __A)
891 return __builtin_arm_tmovmskh ((__v4hi)__A);
894 /* Create an 8-bit mask of the signs of 32-bit values. */
895 static __inline int
896 _mm_movemask_pi32 (__m64 __A)
898 return __builtin_arm_tmovmskw ((__v2si)__A);
901 /* Return a combination of the four 16-bit values in A. The selector
902 must be an immediate. */
903 #define _mm_shuffle_pi16(A, N) \
904 ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))
907 /* Compute the rounded averages of the unsigned 8-bit values in A and B. */
908 static __inline __m64
909 _mm_avg_pu8 (__m64 __A, __m64 __B)
911 return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
914 /* Compute the rounded averages of the unsigned 16-bit values in A and B. */
915 static __inline __m64
916 _mm_avg_pu16 (__m64 __A, __m64 __B)
918 return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
921 /* Compute the averages of the unsigned 8-bit values in A and B. */
922 static __inline __m64
923 _mm_avg2_pu8 (__m64 __A, __m64 __B)
925 return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
928 /* Compute the averages of the unsigned 16-bit values in A and B. */
929 static __inline __m64
930 _mm_avg2_pu16 (__m64 __A, __m64 __B)
932 return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
935 /* Compute the sum of the absolute differences of the unsigned 8-bit
936 values in A and B. Return the value in the lower 16-bit word; the
937 upper words are cleared. */
938 static __inline __m64
939 _mm_sad_pu8 (__m64 __A, __m64 __B)
941 return (__m64) __builtin_arm_wsadb ((__v8qi)__A, (__v8qi)__B);
944 /* Compute the sum of the absolute differences of the unsigned 16-bit
945 values in A and B. Return the value in the lower 32-bit word; the
946 upper words are cleared. */
947 static __inline __m64
948 _mm_sad_pu16 (__m64 __A, __m64 __B)
950 return (__m64) __builtin_arm_wsadh ((__v4hi)__A, (__v4hi)__B);
953 /* Compute the sum of the absolute differences of the unsigned 8-bit
954 values in A and B. Return the value in the lower 16-bit word; the
955 upper words are cleared. */
956 static __inline __m64
957 _mm_sadz_pu8 (__m64 __A, __m64 __B)
959 return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
962 /* Compute the sum of the absolute differences of the unsigned 16-bit
963 values in A and B. Return the value in the lower 32-bit word; the
964 upper words are cleared. */
965 static __inline __m64
966 _mm_sadz_pu16 (__m64 __A, __m64 __B)
968 return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
971 static __inline __m64
972 _mm_align_si64 (__m64 __A, __m64 __B, int __C)
974 return (__m64) __builtin_arm_walign ((__v8qi)__A, (__v8qi)__B, __C);
977 /* Creates a 64-bit zero. */
978 static __inline __m64
979 _mm_setzero_si64 (void)
981 return __builtin_arm_wzero ();
/* Set and Get arbitrary iWMMXt Control registers.
   Note only registers 0-3 and 8-11 are currently defined,
   the rest are reserved; writes/reads of reserved registers are
   silently ignored (set is a no-op, get returns 0).  */

static __inline void
_mm_setwcx (const int __value, const int __regno)
{
  /* The builtin requires a literal register number, hence the switch.  */
  switch (__regno)
    {
    case 0: __builtin_arm_setwcx (__value, 0); break;
    case 1: __builtin_arm_setwcx (__value, 1); break;
    case 2: __builtin_arm_setwcx (__value, 2); break;
    case 3: __builtin_arm_setwcx (__value, 3); break;
    case 8: __builtin_arm_setwcx (__value, 8); break;
    case 9: __builtin_arm_setwcx (__value, 9); break;
    case 10: __builtin_arm_setwcx (__value, 10); break;
    case 11: __builtin_arm_setwcx (__value, 11); break;
    default: break;
    }
}

static __inline int
_mm_getwcx (const int __regno)
{
  /* The builtin requires a literal register number, hence the switch.  */
  switch (__regno)
    {
    case 0: return __builtin_arm_getwcx (0);
    case 1: return __builtin_arm_getwcx (1);
    case 2: return __builtin_arm_getwcx (2);
    case 3: return __builtin_arm_getwcx (3);
    case 8: return __builtin_arm_getwcx (8);
    case 9: return __builtin_arm_getwcx (9);
    case 10: return __builtin_arm_getwcx (10);
    case 11: return __builtin_arm_getwcx (11);
    default: return 0;
    }
}
1022 /* Creates a vector of two 32-bit values; I0 is least significant. */
1023 static __inline __m64
1024 _mm_set_pi32 (int __i1, int __i0)
1026 union {
1027 __m64 __q;
1028 struct {
1029 unsigned int __i0;
1030 unsigned int __i1;
1031 } __s;
1032 } __u;
1034 __u.__s.__i0 = __i0;
1035 __u.__s.__i1 = __i1;
1037 return __u.__q;
1040 /* Creates a vector of four 16-bit values; W0 is least significant. */
1041 static __inline __m64
1042 _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
1044 unsigned int __i1 = (unsigned short)__w3 << 16 | (unsigned short)__w2;
1045 unsigned int __i0 = (unsigned short)__w1 << 16 | (unsigned short)__w0;
1046 return _mm_set_pi32 (__i1, __i0);
1050 /* Creates a vector of eight 8-bit values; B0 is least significant. */
1051 static __inline __m64
1052 _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
1053 char __b3, char __b2, char __b1, char __b0)
1055 unsigned int __i1, __i0;
1057 __i1 = (unsigned char)__b7;
1058 __i1 = __i1 << 8 | (unsigned char)__b6;
1059 __i1 = __i1 << 8 | (unsigned char)__b5;
1060 __i1 = __i1 << 8 | (unsigned char)__b4;
1062 __i0 = (unsigned char)__b3;
1063 __i0 = __i0 << 8 | (unsigned char)__b2;
1064 __i0 = __i0 << 8 | (unsigned char)__b1;
1065 __i0 = __i0 << 8 | (unsigned char)__b0;
1067 return _mm_set_pi32 (__i1, __i0);
1070 /* Similar, but with the arguments in reverse order. */
1071 static __inline __m64
1072 _mm_setr_pi32 (int __i0, int __i1)
1074 return _mm_set_pi32 (__i1, __i0);
1077 static __inline __m64
1078 _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
1080 return _mm_set_pi16 (__w3, __w2, __w1, __w0);
1083 static __inline __m64
1084 _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
1085 char __b4, char __b5, char __b6, char __b7)
1087 return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
1090 /* Creates a vector of two 32-bit values, both elements containing I. */
1091 static __inline __m64
1092 _mm_set1_pi32 (int __i)
1094 return _mm_set_pi32 (__i, __i);
1097 /* Creates a vector of four 16-bit values, all elements containing W. */
1098 static __inline __m64
1099 _mm_set1_pi16 (short __w)
1101 unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
1102 return _mm_set1_pi32 (__i);
1105 /* Creates a vector of four 16-bit values, all elements containing B. */
1106 static __inline __m64
1107 _mm_set1_pi8 (char __b)
1109 unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
1110 unsigned int __i = __w << 16 | __w;
1111 return _mm_set1_pi32 (__i);
1114 /* Convert an integer to a __m64 object. */
1115 static __inline __m64
1116 _m_from_int (int __a)
1118 return (__m64)__a;
/* Classic MMX-style "_m_*" spellings for the "_mm_*" intrinsics
   above, grouped by operation.  Names without an x86 MMX/SSE
   counterpart (unpackeh/unpackel, mac, mia, ror, avg2, sadz, align)
   follow the same naming pattern.  */

/* Pack with saturation.  */
#define _m_packsswb _mm_packs_pi16
#define _m_packssdw _mm_packs_pi32
#define _m_packuswb _mm_packs_pu16
#define _m_packusdw _mm_packs_pu32
#define _m_packssqd _mm_packs_pi64
#define _m_packusqd _mm_packs_pu64
#define _mm_packs_si64 _mm_packs_pi64
#define _mm_packs_su64 _mm_packs_pu64

/* Interleave (unpack) high/low halves, plus the widening
   unpackeh/unpackel forms.  */
#define _m_punpckhbw _mm_unpackhi_pi8
#define _m_punpckhwd _mm_unpackhi_pi16
#define _m_punpckhdq _mm_unpackhi_pi32
#define _m_punpcklbw _mm_unpacklo_pi8
#define _m_punpcklwd _mm_unpacklo_pi16
#define _m_punpckldq _mm_unpacklo_pi32
#define _m_punpckehsbw _mm_unpackeh_pi8
#define _m_punpckehswd _mm_unpackeh_pi16
#define _m_punpckehsdq _mm_unpackeh_pi32
#define _m_punpckehubw _mm_unpackeh_pu8
#define _m_punpckehuwd _mm_unpackeh_pu16
#define _m_punpckehudq _mm_unpackeh_pu32
#define _m_punpckelsbw _mm_unpackel_pi8
#define _m_punpckelswd _mm_unpackel_pi16
#define _m_punpckelsdq _mm_unpackel_pi32
#define _m_punpckelubw _mm_unpackel_pu8
#define _m_punpckeluwd _mm_unpackel_pu16
#define _m_punpckeludq _mm_unpackel_pu32

/* Addition and subtraction: wrapping, signed-saturating and
   unsigned-saturating variants.  */
#define _m_paddb _mm_add_pi8
#define _m_paddw _mm_add_pi16
#define _m_paddd _mm_add_pi32
#define _m_paddsb _mm_adds_pi8
#define _m_paddsw _mm_adds_pi16
#define _m_paddsd _mm_adds_pi32
#define _m_paddusb _mm_adds_pu8
#define _m_paddusw _mm_adds_pu16
#define _m_paddusd _mm_adds_pu32
#define _m_psubb _mm_sub_pi8
#define _m_psubw _mm_sub_pi16
#define _m_psubd _mm_sub_pi32
#define _m_psubsb _mm_subs_pi8
#define _m_psubsw _mm_subs_pi16
/* NOTE(review): by analogy with _m_paddsd above, this alias would be
   expected to be spelled _m_psubsd.  Confirm before renaming — code
   may already depend on the _m_psubuw spelling.  */
#define _m_psubuw _mm_subs_pi32
#define _m_psubusb _mm_subs_pu8
#define _m_psubusw _mm_subs_pu16
#define _m_psubusd _mm_subs_pu32

/* Multiply and multiply-accumulate.  */
#define _m_pmaddwd _mm_madd_pi16
#define _m_pmadduwd _mm_madd_pu16
#define _m_pmulhw _mm_mulhi_pi16
#define _m_pmulhuw _mm_mulhi_pu16
#define _m_pmullw _mm_mullo_pi16
#define _m_pmacsw _mm_mac_pi16
#define _m_pmacuw _mm_mac_pu16
#define _m_pmacszw _mm_macz_pi16
#define _m_pmacuzw _mm_macz_pu16

/* Across-vector accumulate.  */
#define _m_paccb _mm_acc_pu8
#define _m_paccw _mm_acc_pu16
#define _m_paccd _mm_acc_pu32

/* Internal-accumulator multiply-accumulate forms.  */
#define _m_pmia _mm_mia_si64
#define _m_pmiaph _mm_miaph_si64
#define _m_pmiabb _mm_miabb_si64
#define _m_pmiabt _mm_miabt_si64
#define _m_pmiatb _mm_miatb_si64
#define _m_pmiatt _mm_miatt_si64

/* Shifts: logical left, arithmetic right, logical right; each with a
   register-count form and an immediate-count ("i") form.  */
#define _m_psllw _mm_sll_pi16
#define _m_psllwi _mm_slli_pi16
#define _m_pslld _mm_sll_pi32
#define _m_pslldi _mm_slli_pi32
#define _m_psllq _mm_sll_si64
#define _m_psllqi _mm_slli_si64
#define _m_psraw _mm_sra_pi16
#define _m_psrawi _mm_srai_pi16
#define _m_psrad _mm_sra_pi32
#define _m_psradi _mm_srai_pi32
#define _m_psraq _mm_sra_si64
#define _m_psraqi _mm_srai_si64
#define _m_psrlw _mm_srl_pi16
#define _m_psrlwi _mm_srli_pi16
#define _m_psrld _mm_srl_pi32
#define _m_psrldi _mm_srli_pi32
#define _m_psrlq _mm_srl_si64
#define _m_psrlqi _mm_srli_si64

/* Rotates.  */
#define _m_prorw _mm_ror_pi16
#define _m_prorwi _mm_rori_pi16
#define _m_prord _mm_ror_pi32
#define _m_prordi _mm_rori_pi32
#define _m_prorq _mm_ror_si64
#define _m_prorqi _mm_rori_si64

/* Bitwise logical operations.  */
#define _m_pand _mm_and_si64
#define _m_pandn _mm_andnot_si64
#define _m_por _mm_or_si64
#define _m_pxor _mm_xor_si64

/* Element-wise comparisons.  */
#define _m_pcmpeqb _mm_cmpeq_pi8
#define _m_pcmpeqw _mm_cmpeq_pi16
#define _m_pcmpeqd _mm_cmpeq_pi32
#define _m_pcmpgtb _mm_cmpgt_pi8
#define _m_pcmpgtub _mm_cmpgt_pu8
#define _m_pcmpgtw _mm_cmpgt_pi16
#define _m_pcmpgtuw _mm_cmpgt_pu16
#define _m_pcmpgtd _mm_cmpgt_pi32
#define _m_pcmpgtud _mm_cmpgt_pu32

/* Element extract and insert.  */
#define _m_pextrb _mm_extract_pi8
#define _m_pextrw _mm_extract_pi16
#define _m_pextrd _mm_extract_pi32
#define _m_pextrub _mm_extract_pu8
#define _m_pextruw _mm_extract_pu16
#define _m_pextrud _mm_extract_pu32
#define _m_pinsrb _mm_insert_pi8
#define _m_pinsrw _mm_insert_pi16
#define _m_pinsrd _mm_insert_pi32

/* Element-wise maximum and minimum.  */
#define _m_pmaxsb _mm_max_pi8
#define _m_pmaxsw _mm_max_pi16
#define _m_pmaxsd _mm_max_pi32
#define _m_pmaxub _mm_max_pu8
#define _m_pmaxuw _mm_max_pu16
#define _m_pmaxud _mm_max_pu32
#define _m_pminsb _mm_min_pi8
#define _m_pminsw _mm_min_pi16
#define _m_pminsd _mm_min_pi32
#define _m_pminub _mm_min_pu8
#define _m_pminuw _mm_min_pu16
#define _m_pminud _mm_min_pu32

/* Move element mask to integer.  */
#define _m_pmovmskb _mm_movemask_pi8
#define _m_pmovmskw _mm_movemask_pi16
#define _m_pmovmskd _mm_movemask_pi32

/* Shuffle.  */
#define _m_pshufw _mm_shuffle_pi16

/* Averages.  */
#define _m_pavgb _mm_avg_pu8
#define _m_pavgw _mm_avg_pu16
#define _m_pavg2b _mm_avg2_pu8
#define _m_pavg2w _mm_avg2_pu16

/* Sum of absolute differences, with and without accumulator zeroing.  */
#define _m_psadbw _mm_sad_pu8
#define _m_psadwd _mm_sad_pu16
#define _m_psadzbw _mm_sadz_pu8
#define _m_psadzwd _mm_sadz_pu16

/* Extract an aligned 64-bit value from a register pair.  */
#define _m_paligniq _mm_align_si64

/* 64-bit integer <-> __m64 conversions.  */
#define _m_cvt_si2pi _mm_cvtsi64_m64
#define _m_cvt_pi2si _mm_cvtm64_si64
1257 #endif /* _MMINTRIN_H_INCLUDED */