/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
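
/* For example, _MM_SHUFFLE (3, 2, 1, 0) evaluates to
   (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4, the identity selector:
   _mm_shuffle_ps (__A, __A, _MM_SHUFFLE (3, 2, 1, 0)) returns __A
   unchanged (see _mm_shuffle_ps below).  */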
/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000
/* Create a vector of zeros.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}
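
/* A minimal usage sketch.  The helper name __example_madd_ps is arbitrary
   and not part of the Intel API; it only shows how the packed operations
   above compose.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
__example_madd_ps (__m128 __A, __m128 __B, __m128 __C)
{
  /* Multiply the four SPFP pairs of A and B, then add the corresponding
     elements of C.  */
  return _mm_add_ps (_mm_mul_ps (__A, __B), __C);
}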
/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}
/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
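
/* A minimal usage sketch.  The helper name __example_select_min_ps is
   arbitrary and not part of the Intel API; it shows the usual pattern of
   building a branchless select from a comparison mask and the logical
   operations above (here, a packed minimum).  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
__example_select_min_ps (__m128 __A, __m128 __B)
{
  /* Each mask element is all ones where A < B, all zeros elsewhere.  */
  __m128 __mask = _mm_cmplt_ps (__A, __B);
  /* Keep A where the mask is set, B where it is clear.  */
  return _mm_or_ps (_mm_and_ps (__mask, __A),
		    _mm_andnot_ps (__mask, __B));
}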
/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}
/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}
/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}
/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
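
/* A minimal usage sketch.  The helper name __example_broadcast0_ps is
   arbitrary and not part of the Intel API; it shows _mm_shuffle_ps
   combined with the _MM_SHUFFLE selector defined near the top of this
   header.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
__example_broadcast0_ps (__m128 __A)
{
  /* Every two-bit field of the selector picks element 0, so element 0
     of A is replicated into all four positions.  */
  return _mm_shuffle_ps (__A, __A, _MM_SHUFFLE (0, 0, 0, 0));
}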
/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
/* Return the contents of the control register.  */
static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void __attribute__((__always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
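
/* A minimal usage sketch.  The helper name __example_set_truncating_mode
   is arbitrary and not part of the Intel API; it shows how the MXCSR
   accessors above combine the bit masks defined near the top of this
   header.  */
static __inline void __attribute__((__always_inline__, __artificial__))
__example_set_truncating_mode (void)
{
  /* Select round-toward-zero and flush denormal results to zero.  */
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
}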
/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

static __inline float __attribute__((__always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

static __inline void __attribute__((__always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}
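
/* A minimal usage sketch.  The helper name __example_scale4 is arbitrary
   and not part of the Intel API; it shows the common load/compute/store
   pattern built from the intrinsics above.  */
static __inline void __attribute__((__always_inline__, __artificial__))
__example_scale4 (float *__dst, float const *__src, float __scale)
{
  /* Read four floats from a possibly unaligned source, scale them, and
     write the result back through an unaligned store.  */
  __m128 __v = _mm_loadu_ps (__src);
  __v = _mm_mul_ps (__v, _mm_set1_ps (__scale));
  _mm_storeu_ps (__dst, __v);
}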
/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif
/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void __attribute__((__always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}
/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
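
/* A minimal usage sketch.  The helper name __example_stream_store_ps is
   arbitrary and not part of the Intel API; it pairs a non-temporal store
   with the store fence above.  */
static __inline void __attribute__((__always_inline__, __artificial__))
__example_stream_store_ps (float *__P, __m128 __A)
{
  /* Write A to the 16-byte aligned address P without polluting the
     caches, then guarantee the store is globally visible before any
     subsequent store.  */
  _mm_stream_ps (__P, __A);
  _mm_sfence ();
}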
/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);			\
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);			\
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);			\
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);			\
  (row0) = __builtin_ia32_movlhps (__t0, __t1);				\
  (row1) = __builtin_ia32_movhlps (__t1, __t0);				\
  (row2) = __builtin_ia32_movlhps (__t2, __t3);				\
  (row3) = __builtin_ia32_movhlps (__t3, __t2);				\
} while (0)
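
/* A minimal usage sketch.  The helper name __example_transpose4x4 is
   arbitrary and not part of the Intel API; it shows the macro above
   applied to a 4x4 matrix held in four __m128 rows.  */
static __inline void __attribute__((__always_inline__, __artificial__))
__example_transpose4x4 (__m128 *__rows)
{
  /* The macro rewrites its arguments in place.  */
  _MM_TRANSPOSE4_PS (__rows[0], __rows[1], __rows[2], __rows[3]);
}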
/* For backward source compatibility.  */
#ifdef __SSE2__
# include <emmintrin.h>
#endif

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */