/* Copyright (C) 2002-2024 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_IT0 = 19,
  _MM_HINT_IT1 = 18,
  /* _MM_HINT_ET is _MM_HINT_T with set 3rd bit.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_ia32_prefetch (__P, (__I & 0x4) >> 2,
                           __I & 0x3, (__I & 0x10) >> 4);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_ia32_prefetch ((P), ((I) & 0x4) >> 2, ((I) & 0x3), ((I) & 0x10) >> 4)
#endif
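
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name below are hypothetical.  As
   decoded by the builtin above, bits 0-1 of the hint select the temporal
   locality level, bit 2 requests a write (ET) prefetch and bit 4 an
   instruction (IT) prefetch.  */
#ifdef __X86_EXAMPLES__
static __inline float __attribute__ ((__unused__))
__x86_example_prefetch_sum (const float *__p, int __n)
{
  float __sum = 0.0f;
  int __i;
  for (__i = 0; __i < __n; __i++)
    {
      /* Prefetch one cache line (16 floats) ahead of the current element;
         prefetching past the end of the array does not fault.  */
      _mm_prefetch (__p + __i + 16, _MM_HINT_T0);
      __sum += __p[__i];
    }
  return __sum;
}
#endif /* __X86_EXAMPLES__ */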
#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Unaligned version of the same type.  */
typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
typedef float __x86_float_u __attribute__ ((__may_alias__, __aligned__ (1)));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Compare predicates for scalar and packed compare intrinsics.  */

/* Equal (ordered, non-signaling)  */
#define _CMP_EQ_OQ     0x00
/* Less-than (ordered, signaling)  */
#define _CMP_LT_OS     0x01
/* Less-than-or-equal (ordered, signaling)  */
#define _CMP_LE_OS     0x02
/* Unordered (non-signaling)  */
#define _CMP_UNORD_Q   0x03
/* Not-equal (unordered, non-signaling)  */
#define _CMP_NEQ_UQ    0x04
/* Not-less-than (unordered, signaling)  */
#define _CMP_NLT_US    0x05
/* Not-less-than-or-equal (unordered, signaling)  */
#define _CMP_NLE_US    0x06
/* Ordered (non-signaling)  */
#define _CMP_ORD_Q     0x07

/* Create an undefined vector.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ps (void)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winit-self"
  __m128 __Y = __Y;
#pragma GCC diagnostic pop
  return __Y;
}

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A + (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A - (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A * (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A / (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}
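
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  Clamping
   each element of X to [LO, HI] is a common use of the packed min/max
   intrinsics defined above.  */
#ifdef __X86_EXAMPLES__
static __inline __m128 __attribute__ ((__unused__))
__x86_example_clamp_ps (__m128 __x, __m128 __lo, __m128 __hi)
{
  /* max (x, lo) raises X to at least LO; min (..., hi) then caps it at HI.  */
  return _mm_min_ps (_mm_max_ps (__x, __lo), __hi);
}
#endif /* __X86_EXAMPLES__ */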
/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmp_ps (__m128 __X, __m128 __Y, const int __P)
{
  return (__m128) __builtin_ia32_cmpps ((__v4sf)__X, (__v4sf)__Y, __P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmp_ss (__m128 __X, __m128 __Y, const int __P)
{
  return (__m128) __builtin_ia32_cmpss ((__v4sf)__X, (__v4sf)__Y, __P);
}
#else
#define _mm_cmp_ps(X, Y, P) \
  ((__m128) __builtin_ia32_cmpps ((__v4sf)(__m128)(X), \
                                  (__v4sf)(__m128)(Y), (int)(P)))

#define _mm_cmp_ss(X, Y, P) \
  ((__m128) __builtin_ia32_cmpss ((__v4sf)(__m128)(X), \
                                  (__v4sf)(__m128)(Y), (int)(P)))
#endif
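
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  The _CMP_*
   predicates defined above select the comparison performed by _mm_cmp_ps;
   the resulting all-ones/all-zeros mask can drive a bitwise select.  */
#ifdef __X86_EXAMPLES__
static __inline __m128 __attribute__ ((__unused__))
__x86_example_select_lt (__m128 __a, __m128 __b)
{
  /* All ones where __a < __b (ordered, signaling), all zeros elsewhere.  */
  __m128 __mask = _mm_cmp_ps (__a, __b, _CMP_LT_OS);
  /* Take __a where the mask is set and __b where it is clear.  */
  return _mm_or_ps (_mm_and_ps (__mask, __a), _mm_andnot_ps (__mask, __b));
}
#endif /* __X86_EXAMPLES__ */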
/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
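
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  _MM_SHUFFLE
   packs four 2-bit source indices, highest result element first, so
   _MM_SHUFFLE (0, 1, 2, 3) == 0x1b and, with both shuffle operands equal,
   reverses the vector (the same selector _mm_loadr_ps uses below).  */
#ifdef __X86_EXAMPLES__
static __inline __m128 __attribute__ ((__unused__))
__x86_example_reverse_ps (__m128 __v)
{
  return _mm_shuffle_ps (__v, __v, _MM_SHUFFLE (0, 1, 2, 3));
}
#endif /* __X86_EXAMPLES__ */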
/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64_u const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64_u const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
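
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  Because
   _mm_movemask_ps collects the four sign bits, a nonzero mask of a compare
   result means at least one element compared true.  */
#ifdef __X86_EXAMPLES__
static __inline int __attribute__ ((__unused__))
__x86_example_any_lt (__m128 __a, __m128 __b)
{
  return _mm_movemask_ps (_mm_cmplt_ps (__a, __b)) != 0;
}
#endif /* __X86_EXAMPLES__ */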
/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
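
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  The helper
   switches MXCSR to round-toward-zero around a conversion and restores the
   caller's control word afterwards.  */
#ifdef __X86_EXAMPLES__
static __inline int __attribute__ ((__unused__))
__x86_example_cvt_toward_zero (__m128 __a)
{
  unsigned int __saved = _mm_getcsr ();
  int __r;
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  /* With this rounding mode the conversion truncates, matching CVTTSS2SI.  */
  __r = _mm_cvtss_si32 (__a);
  _mm_setcsr (__saved);
  return __r;
}
#endif /* __X86_EXAMPLES__ */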
/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return __extension__ (__m128) (__v4sf){ *(__x86_float_u *)__P, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return *(__m128 *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return *(__m128_u *)__P;
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *(__x86_float_u *)__P = ((__v4sf)__A)[0];
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return ((__v4sf)__A)[0];
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__m128 *)__P = __A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  *(__m128_u *)__P = __A;
}
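
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  The
   unaligned load/store intrinsics let a caller process data whose alignment
   is unknown, at some cost relative to _mm_load_ps/_mm_store_ps.  */
#ifdef __X86_EXAMPLES__
static __inline void __attribute__ ((__unused__))
__x86_example_scale4 (float *__p, float __s)
{
  __m128 __v = _mm_loadu_ps (__p);      /* No alignment requirement.  */
  __v = _mm_mul_ps (__v, _mm_set1_ps (__s));
  _mm_storeu_ps (__p, __v);
}
#endif /* __X86_EXAMPLES__ */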
/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_shuffle ((__v4sf)__A, (__v4sf)__B,
                                     __extension__
                                     (__attribute__((__vector_size__ (16))) int)
                                     {4,1,2,3});
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
#ifdef __MMX_WITH_SSE__
  /* Emulate MMX maskmovq with SSE2 maskmovdqu and handle unmapped bits
     64:127 at address __P.  */
  typedef long long __v2di __attribute__ ((__vector_size__ (16)));
  typedef char __v16qi __attribute__ ((__vector_size__ (16)));
  /* Zero-extend __A and __N to 128 bits.  */
  __v2di __A128 = __extension__ (__v2di) { ((__v1di) __A)[0], 0 };
  __v2di __N128 = __extension__ (__v2di) { ((__v1di) __N)[0], 0 };

  /* Check the alignment of __P.  */
  __SIZE_TYPE__ offset = ((__SIZE_TYPE__) __P) & 0xf;
  if (offset)
    {
      /* If the misalignment of __P > 8, subtract __P by 8 bytes.
         Otherwise, subtract __P by the misalignment.  */
      if (offset > 8)
        offset = 8;
      __P = (char *) (((__SIZE_TYPE__) __P) - offset);

      /* Shift __A128 and __N128 to the left by the adjustment.  */
      switch (offset)
        {
        case 1:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 8);
          break;
        case 2:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 2 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 2 * 8);
          break;
        case 3:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 3 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 3 * 8);
          break;
        case 4:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 4 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 4 * 8);
          break;
        case 5:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 5 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 5 * 8);
          break;
        case 6:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 6 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 6 * 8);
          break;
        case 7:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 7 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 7 * 8);
          break;
        case 8:
          __A128 = __builtin_ia32_pslldqi128 (__A128, 8 * 8);
          __N128 = __builtin_ia32_pslldqi128 (__N128, 8 * 8);
          break;
        default:
          break;
        }
    }
  __builtin_ia32_maskmovdqu ((__v16qi)__A128, (__v16qi)__N128, __P);
#else
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
#endif
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
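
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  Streaming
   stores bypass the cache, so a store fence is issued once the buffer is
   written and before it is handed to another agent.  */
#ifdef __X86_EXAMPLES__
static __inline void __attribute__ ((__unused__))
__x86_example_stream_fill (float *__dst, __m128 __v, int __n)
{
  int __i;
  /* __dst must be 16-byte aligned and __n a multiple of 4.  */
  for (__i = 0; __i < __n; __i += 4)
    _mm_stream_ps (__dst + __i, __v);
  _mm_sfence ();
}
#endif /* __X86_EXAMPLES__ */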
/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                       \
do {                                                                    \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);    \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);                   \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);                   \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);                   \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);                   \
  (row0) = __builtin_ia32_movlhps (__t0, __t1);                         \
  (row1) = __builtin_ia32_movhlps (__t1, __t0);                         \
  (row2) = __builtin_ia32_movlhps (__t2, __t3);                         \
  (row3) = __builtin_ia32_movhlps (__t3, __t2);                         \
} while (0)
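
/* Illustrative usage sketch; not part of the original header.  The guard
   macro __X86_EXAMPLES__ and the helper name are hypothetical.  The macro
   above transposes four rows held in registers; a caller typically loads
   the rows, transposes them and stores them back.  */
#ifdef __X86_EXAMPLES__
static __inline void __attribute__ ((__unused__))
__x86_example_transpose4x4 (float *__m)  /* 16 floats, 16-byte aligned.  */
{
  __m128 __row0 = _mm_load_ps (__m + 0);
  __m128 __row1 = _mm_load_ps (__m + 4);
  __m128 __row2 = _mm_load_ps (__m + 8);
  __m128 __row3 = _mm_load_ps (__m + 12);
  _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
  _mm_store_ps (__m + 0, __row0);
  _mm_store_ps (__m + 4, __row1);
  _mm_store_ps (__m + 8, __row2);
  _mm_store_ps (__m + 12, __row3);
}
#endif /* __X86_EXAMPLES__ */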
/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  This is after the pop_options pragma because
   it does not require SSE support in the processor--the encoding is a
   nop on processors that do not support it.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

#endif /* _XMMINTRIN_H_INCLUDED */