/* Copyright (C) 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */
/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */
30 #ifndef _SMMINTRIN_H_INCLUDED
31 #define _SMMINTRIN_H_INCLUDED
34 # error "SSE4.1 instruction set not enabled"
37 /* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
39 #include <tmmintrin.h>
/* Rounding mode macros for the ROUNDP[SD]/ROUNDS[SD] immediate.
   Bits [1:0] select the rounding direction, bit 2 selects the current
   MXCSR direction, and bit 3 suppresses precision exceptions.  */
#define _MM_FROUND_TO_NEAREST_INT	0x00
#define _MM_FROUND_TO_NEG_INF		0x01
#define _MM_FROUND_TO_POS_INF		0x02
#define _MM_FROUND_TO_ZERO		0x03
#define _MM_FROUND_CUR_DIRECTION	0x04

#define _MM_FROUND_RAISE_EXC		0x00
#define _MM_FROUND_NO_EXC		0x08

/* Convenience combinations mirroring the C99 rounding functions.  */
#define _MM_FROUND_NINT		\
  (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR	\
  (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL		\
  (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC	\
  (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT		\
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT	\
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

#ifdef __OPTIMIZE__
/* Bit i of the 8-bit immediate __M selects word i of __Y, else of __X.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
					      (__v8hi)__Y,
					      __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M)))
#endif
82 static __inline __m128i
__attribute__((__always_inline__
))
83 _mm_blendv_epi8 (__m128i __X
, __m128i __Y
, __m128i __M
)
85 return (__m128i
) __builtin_ia32_pblendvb128 ((__v16qi
)__X
,
/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
/* Bit i of the 4-bit immediate __M selects element i of __Y, else of __X.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
					  (__v4sf)__Y,
					  __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M)))
#endif
106 static __inline __m128
__attribute__((__always_inline__
))
107 _mm_blendv_ps (__m128 __X
, __m128 __Y
, __m128 __M
)
109 return (__m128
) __builtin_ia32_blendvps ((__v4sf
)__X
,
/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
/* Bit i of the 2-bit immediate __M selects element i of __Y, else of __X.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
					   (__v2df)__Y,
					   __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M)))
#endif
130 static __inline __m128d
__attribute__((__always_inline__
))
131 _mm_blendv_pd (__m128d __X
, __m128d __Y
, __m128d __M
)
133 return (__m128d
) __builtin_ia32_blendvpd ((__v2df
)__X
,
/* Dot product instructions with mask-defined summing and zeroing parts
   of result.  */

#ifdef __OPTIMIZE__
/* High nibble of __M selects which products to sum; low nibble selects
   which result elements receive the sum (others are zeroed).  */
static __inline __m128 __attribute__((__always_inline__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
				       (__v4sf)__Y,
				       __M);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
					(__v2df)__Y,
					__M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M)))
#endif
165 /* Packed integer 64-bit comparison, zeroing or filling with ones
166 corresponding parts of result. */
167 static __inline __m128i
__attribute__((__always_inline__
))
168 _mm_cmpeq_epi64 (__m128i __X
, __m128i __Y
)
170 return (__m128i
) __builtin_ia32_pcmpeqq ((__v2di
)__X
, (__v2di
)__Y
);
173 /* Min/max packed integer instructions. */
175 static __inline __m128i
__attribute__((__always_inline__
))
176 _mm_min_epi8 (__m128i __X
, __m128i __Y
)
178 return (__m128i
) __builtin_ia32_pminsb128 ((__v16qi
)__X
, (__v16qi
)__Y
);
181 static __inline __m128i
__attribute__((__always_inline__
))
182 _mm_max_epi8 (__m128i __X
, __m128i __Y
)
184 return (__m128i
) __builtin_ia32_pmaxsb128 ((__v16qi
)__X
, (__v16qi
)__Y
);
187 static __inline __m128i
__attribute__((__always_inline__
))
188 _mm_min_epu16 (__m128i __X
, __m128i __Y
)
190 return (__m128i
) __builtin_ia32_pminuw128 ((__v8hi
)__X
, (__v8hi
)__Y
);
193 static __inline __m128i
__attribute__((__always_inline__
))
194 _mm_max_epu16 (__m128i __X
, __m128i __Y
)
196 return (__m128i
) __builtin_ia32_pmaxuw128 ((__v8hi
)__X
, (__v8hi
)__Y
);
199 static __inline __m128i
__attribute__((__always_inline__
))
200 _mm_min_epi32 (__m128i __X
, __m128i __Y
)
202 return (__m128i
) __builtin_ia32_pminsd128 ((__v4si
)__X
, (__v4si
)__Y
);
205 static __inline __m128i
__attribute__((__always_inline__
))
206 _mm_max_epi32 (__m128i __X
, __m128i __Y
)
208 return (__m128i
) __builtin_ia32_pmaxsd128 ((__v4si
)__X
, (__v4si
)__Y
);
211 static __inline __m128i
__attribute__((__always_inline__
))
212 _mm_min_epu32 (__m128i __X
, __m128i __Y
)
214 return (__m128i
) __builtin_ia32_pminud128 ((__v4si
)__X
, (__v4si
)__Y
);
217 static __inline __m128i
__attribute__((__always_inline__
))
218 _mm_max_epu32 (__m128i __X
, __m128i __Y
)
220 return (__m128i
) __builtin_ia32_pmaxud128 ((__v4si
)__X
, (__v4si
)__Y
);
223 /* Packed integer 32-bit multiplication with truncation of upper
224 halves of results. */
225 static __inline __m128i
__attribute__((__always_inline__
))
226 _mm_mullo_epi32 (__m128i __X
, __m128i __Y
)
228 return (__m128i
) __builtin_ia32_pmulld128 ((__v4si
)__X
, (__v4si
)__Y
);
231 /* Packed integer 32-bit multiplication of 2 pairs of operands
232 with two 64-bit results. */
233 static __inline __m128i
__attribute__((__always_inline__
))
234 _mm_mul_epi32 (__m128i __X
, __m128i __Y
)
236 return (__m128i
) __builtin_ia32_pmuldq128 ((__v4si
)__X
, (__v4si
)__Y
);
239 /* Packed integer 128-bit bitwise comparison. Return 1 if
241 static __inline
int __attribute__((__always_inline__
))
242 _mm_testz_si128 (__m128i __M
, __m128i __V
)
244 return __builtin_ia32_ptestz128 ((__v2di
)__M
, (__v2di
)__V
);
247 /* Packed integer 128-bit bitwise comparison. Return 1 if
248 (__V & ~__M) == 0. */
249 static __inline
int __attribute__((__always_inline__
))
250 _mm_testc_si128 (__m128i __M
, __m128i __V
)
252 return __builtin_ia32_ptestc128 ((__v2di
)__M
, (__v2di
)__V
);
255 /* Packed integer 128-bit bitwise comparison. Return 1 if
256 (__V & __M) != 0 && (__V & ~__M) != 0. */
257 static __inline
int __attribute__((__always_inline__
))
258 _mm_testnzc_si128 (__m128i __M
, __m128i __V
)
260 return __builtin_ia32_ptestnzc128 ((__v2di
)__M
, (__v2di
)__V
);
/* Macros for packed integer 128-bit comparison intrinsics.  */
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))

/* All-ones test: compare V with itself (always all ones) as the mask.  */
#define _mm_test_all_ones(V) \
  _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))
/* Insert single precision float into packed single precision array
   element selected by index N.  The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D.  */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
					      (__v4sf)__S,
					      __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))
/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__))
_mm_extract_ps (__m128 __X, const int __N)
{
  /* Type-pun through a union to reinterpret the float's bits as an
     int without violating strict aliasing.  */
  union { int i; float f; } __tmp;
  __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__						\
   ({								\
     union { int i; float f; } __tmp;				\
     __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N));	\
     __tmp.i;							\
   }))
#endif
/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }
/* Extract specified single precision float element into the lower
   part of __m128, zeroing the remaining elements (mask 0x0e).  */
#define _MM_PICK_OUT_PS(X, N)				\
  _mm_insert_ps (_mm_setzero_ps (), (X),		\
		 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))
/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
						 __S, __N);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
						__S, __N);
}

/* 64-bit element insert is only available in 64-bit mode.  */
#ifdef __x86_64__
static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
						__S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N)))
#endif
#endif
/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

static __inline int __attribute__((__always_inline__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

/* 64-bit element extract is only available in 64-bit mode.  */
#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
/* Note: macro arguments are fully parenthesized to avoid precedence
   surprises when X is an expression.  */
#define _mm_extract_epi8(X, N) \
  __builtin_ia32_vec_ext_v16qi ((__v16qi)(X), (N))
#define _mm_extract_epi32(X, N) \
  __builtin_ia32_vec_ext_v4si ((__v4si)(X), (N))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N)))
#endif
#endif
400 /* Return horizontal packed word minimum and its index in bits [15:0]
401 and bits [18:16] respectively. */
402 static __inline __m128i
__attribute__((__always_inline__
))
403 _mm_minpos_epu16 (__m128i __X
)
405 return (__m128i
) __builtin_ia32_phminposuw128 ((__v8hi
)__X
);
/* Packed/scalar double precision floating point rounding.  */

#ifdef __OPTIMIZE__
/* __M is one of the _MM_FROUND_* rounding-control values.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

/* Round the low element of __V; the high element is taken from __D.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_round_sd (__m128d __D, __m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
					   (__v2df)__V,
					   __M);
}
#else
#define _mm_round_pd(V, M) \
  ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M)))

#define _mm_round_sd(D, V, M) \
  ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M)))
#endif
/* Packed/scalar single precision floating point rounding.  */

#ifdef __OPTIMIZE__
/* __M is one of the _MM_FROUND_* rounding-control values.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

/* Round the low element of __V; the upper elements are taken from __D.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_round_ss (__m128 __D, __m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
					  (__v4sf)__V,
					  __M);
}
#else
#define _mm_round_ps(V, M) \
  ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M)))

#define _mm_round_ss(D, V, M) \
  ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M)))
#endif
/* Macros for ceil/floor intrinsics.  */
#define _mm_ceil_pd(V)	   _mm_round_pd ((V), _MM_FROUND_CEIL)
#define _mm_ceil_sd(D, V)  _mm_round_sd ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_pd(V)	   _mm_round_pd ((V), _MM_FROUND_FLOOR)
#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)

#define _mm_ceil_ps(V)	   _mm_round_ps ((V), _MM_FROUND_CEIL)
#define _mm_ceil_ss(D, V)  _mm_round_ss ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_ps(V)	   _mm_round_ps ((V), _MM_FROUND_FLOOR)
#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)
469 /* Packed integer sign-extension. */
471 static __inline __m128i
__attribute__((__always_inline__
))
472 _mm_cvtepi8_epi32 (__m128i __X
)
474 return (__m128i
) __builtin_ia32_pmovsxbd128 ((__v16qi
)__X
);
477 static __inline __m128i
__attribute__((__always_inline__
))
478 _mm_cvtepi16_epi32 (__m128i __X
)
480 return (__m128i
) __builtin_ia32_pmovsxwd128 ((__v8hi
)__X
);
483 static __inline __m128i
__attribute__((__always_inline__
))
484 _mm_cvtepi8_epi64 (__m128i __X
)
486 return (__m128i
) __builtin_ia32_pmovsxbq128 ((__v16qi
)__X
);
489 static __inline __m128i
__attribute__((__always_inline__
))
490 _mm_cvtepi32_epi64 (__m128i __X
)
492 return (__m128i
) __builtin_ia32_pmovsxdq128 ((__v4si
)__X
);
495 static __inline __m128i
__attribute__((__always_inline__
))
496 _mm_cvtepi16_epi64 (__m128i __X
)
498 return (__m128i
) __builtin_ia32_pmovsxwq128 ((__v8hi
)__X
);
501 static __inline __m128i
__attribute__((__always_inline__
))
502 _mm_cvtepi8_epi16 (__m128i __X
)
504 return (__m128i
) __builtin_ia32_pmovsxbw128 ((__v16qi
)__X
);
507 /* Packed integer zero-extension. */
509 static __inline __m128i
__attribute__((__always_inline__
))
510 _mm_cvtepu8_epi32 (__m128i __X
)
512 return (__m128i
) __builtin_ia32_pmovzxbd128 ((__v16qi
)__X
);
515 static __inline __m128i
__attribute__((__always_inline__
))
516 _mm_cvtepu16_epi32 (__m128i __X
)
518 return (__m128i
) __builtin_ia32_pmovzxwd128 ((__v8hi
)__X
);
521 static __inline __m128i
__attribute__((__always_inline__
))
522 _mm_cvtepu8_epi64 (__m128i __X
)
524 return (__m128i
) __builtin_ia32_pmovzxbq128 ((__v16qi
)__X
);
527 static __inline __m128i
__attribute__((__always_inline__
))
528 _mm_cvtepu32_epi64 (__m128i __X
)
530 return (__m128i
) __builtin_ia32_pmovzxdq128 ((__v4si
)__X
);
533 static __inline __m128i
__attribute__((__always_inline__
))
534 _mm_cvtepu16_epi64 (__m128i __X
)
536 return (__m128i
) __builtin_ia32_pmovzxwq128 ((__v8hi
)__X
);
539 static __inline __m128i
__attribute__((__always_inline__
))
540 _mm_cvtepu8_epi16 (__m128i __X
)
542 return (__m128i
) __builtin_ia32_pmovzxbw128 ((__v16qi
)__X
);
545 /* Pack 8 double words from 2 operands into 8 words of result with
546 unsigned saturation. */
547 static __inline __m128i
__attribute__((__always_inline__
))
548 _mm_packus_epi32 (__m128i __X
, __m128i __Y
)
550 return (__m128i
) __builtin_ia32_packusdw128 ((__v4si
)__X
, (__v4si
)__Y
);
/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
					      (__v16qi)__Y,
					      __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#endif
569 /* Load double quadword using non-temporal aligned hint. */
570 static __inline __m128i
__attribute__((__always_inline__
))
571 _mm_stream_load_si128 (__m128i
*__X
)
573 return (__m128i
) __builtin_ia32_movntdqa ((__v2di
*) __X
);
576 #endif /* __SSE4_1__ */
578 #endif /* _SMMINTRIN_H_INCLUDED */