/* Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED
/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
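
/* Each two-bit field of the selector picks one source element, so
   _MM_SHUFFLE (3, 2, 1, 0) evaluates to 0xE4, the identity selector.
   An illustrative sketch (not part of the original header):
   _mm_shuffle_ps (__a, __b, _MM_SHUFFLE (1, 0, 3, 2)) swaps the halves
   of each source, producing { __a[2], __a[3], __b[0], __b[1] }.  */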

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  /* _MM_HINT_ET is _MM_HINT_T with set 3rd bit.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_ET1 = 6,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
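
/* These all-ones/all-zeros masks can drive a branchless element-wise
   select.  An illustrative sketch (not part of the original header),
   picking the smaller element from each lane of __a and __b:

     __m128 __mask = _mm_cmplt_ps (__a, __b);
     __m128 __res  = _mm_or_ps (_mm_and_ps (__mask, __a),
                                _mm_andnot_ps (__mask, __b));
*/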

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
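
/* The get/set helpers above can switch the rounding mode and flush-to-zero
   behaviour for a region of code and then restore the previous state.
   An illustrative sketch (not part of the original header):

     unsigned int __saved = _mm_getcsr ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
     ... code that relies on the modified MXCSR ...
     _mm_setcsr (__saved);
*/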

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
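
/* For example, a byte-granular conditional store: only the bytes of A whose
   corresponding selector byte has its high bit set reach memory.  An
   illustrative sketch (assumes an __m64 __data and a writable 8-byte
   buffer __buf; not part of the original header):

     __m64 __mask = _mm_cmpgt_pi8 (__data, _mm_setzero_si64 ());
     _mm_maskmove_si64 (__data, __mask, __buf);
*/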

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
#endif
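
/* The hint is split into the builtin's arguments: bit 2 selects a write
   prefetch (the _MM_HINT_ET* values) and the low two bits give the
   temporal-locality level.  An illustrative sketch (not part of the
   original header):

     _mm_prefetch ((const char *) __p + 256, _MM_HINT_T0);
*/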

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)
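
/* For example, transposing a row-major 4x4 matrix held in four row
   vectors.  An illustrative sketch (assumes a 16-byte-aligned
   float __m[16]; not part of the original header):

     __m128 __row0 = _mm_load_ps (__m + 0);
     __m128 __row1 = _mm_load_ps (__m + 4);
     __m128 __row2 = _mm_load_ps (__m + 8);
     __m128 __row3 = _mm_load_ps (__m + 12);
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
*/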

/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

#endif /* _XMMINTRIN_H_INCLUDED */