/* Portable Git: include builtins as-are.
   [msysgit.git] / mingw / lib / gcc / mingw32 / 4.3.3 / include / smmintrin.h
   blob d6423b48aa515c55d921f9af054c9dd12e7f28aa  */
/* Copyright (C) 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */
30 #ifndef _SMMINTRIN_H_INCLUDED
31 #define _SMMINTRIN_H_INCLUDED
33 #ifndef __SSE4_1__
34 # error "SSE4.1 instruction set not enabled"
35 #else
37 /* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
38 files. */
39 #include <tmmintrin.h>
40 #include <mmintrin-common.h>
42 /* SSE4.1 */
/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
                                              (__v8hi)__Y,
                                              __M);
}
#else
/* Without optimization the builtin needs a literal constant, so use a
   macro form that keeps M a compile-time constant expression.  */
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X), \
                                        (__v8hi)(__m128i)(Y), (int)(M)))
#endif
61 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
62 _mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
64 return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
65 (__v16qi)__Y,
66 (__v16qi)__M);
/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
                                          (__v4sf)__Y,
                                          __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X), \
                                    (__v4sf)(__m128)(Y), (int)(M)))
#endif
86 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
87 _mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
89 return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
90 (__v4sf)__Y,
91 (__v4sf)__M);
/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
                                           (__v2df)__Y,
                                           __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X), \
                                     (__v2df)(__m128d)(Y), (int)(M)))
#endif
111 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
112 _mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
114 return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
115 (__v2df)__Y,
116 (__v2df)__M);
/* Dot product instructions with mask-defined summing and zeroing parts
   of result.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
                                       (__v4sf)__Y,
                                       __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
                                        (__v2df)__Y,
                                        __M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X), \
                                 (__v4sf)(__m128)(Y), (int)(M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X), \
                                  (__v2df)(__m128d)(Y), (int)(M)))
#endif
148 /* Packed integer 64-bit comparison, zeroing or filling with ones
149 corresponding parts of result. */
150 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
151 _mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
153 return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y);
156 /* Min/max packed integer instructions. */
158 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
159 _mm_min_epi8 (__m128i __X, __m128i __Y)
161 return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
164 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
165 _mm_max_epi8 (__m128i __X, __m128i __Y)
167 return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
170 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
171 _mm_min_epu16 (__m128i __X, __m128i __Y)
173 return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
176 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
177 _mm_max_epu16 (__m128i __X, __m128i __Y)
179 return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
182 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
183 _mm_min_epi32 (__m128i __X, __m128i __Y)
185 return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
188 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
189 _mm_max_epi32 (__m128i __X, __m128i __Y)
191 return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
194 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
195 _mm_min_epu32 (__m128i __X, __m128i __Y)
197 return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
200 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
201 _mm_max_epu32 (__m128i __X, __m128i __Y)
203 return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
206 /* Packed integer 32-bit multiplication with truncation of upper
207 halves of results. */
208 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
209 _mm_mullo_epi32 (__m128i __X, __m128i __Y)
211 return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y);
214 /* Packed integer 32-bit multiplication of 2 pairs of operands
215 with two 64-bit results. */
216 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
217 _mm_mul_epi32 (__m128i __X, __m128i __Y)
219 return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
/* Insert single precision float into packed single precision array
   element selected by index N.  The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
                                              (__v4sf)__S,
                                              __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D), \
                                        (__v4sf)(__m128)(S), (int)(N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))
/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_ps (__m128 __X, const int __N)
{
  /* Type-pun through a union to return the float's bit pattern as int.  */
  union { int i; float f; } __tmp;
  __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ \
   ({ \
     union { int i; float f; } __tmp; \
     __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), (int)(N)); \
     __tmp.i; \
   }))
#endif
/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }
/* Extract specified single precision float element into the lower
   part of __m128.  */
#define _MM_PICK_OUT_PS(X, N) \
  _mm_insert_ps (_mm_setzero_ps (), (X), \
                 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))
/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
                                                 __S, __N);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
                                                __S, __N);
}

#ifdef __x86_64__
/* 64-bit element insert is only available in 64-bit mode.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
                                                __S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D), \
                                           (int)(S), (int)(N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D), \
                                          (int)(S), (int)(N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D), \
                                          (long long)(S), (int)(N)))
#endif
#endif
/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

#ifdef __x86_64__
/* 64-bit element extract is only available in 64-bit mode.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  ((int) __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N)))
#define _mm_extract_epi32(X, N) \
  ((int) __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N)))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N)))
#endif
#endif
354 /* Return horizontal packed word minimum and its index in bits [15:0]
355 and bits [18:16] respectively. */
356 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
357 _mm_minpos_epu16 (__m128i __X)
359 return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
362 /* Packed integer sign-extension. */
364 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
365 _mm_cvtepi8_epi32 (__m128i __X)
367 return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
370 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
371 _mm_cvtepi16_epi32 (__m128i __X)
373 return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
376 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
377 _mm_cvtepi8_epi64 (__m128i __X)
379 return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
382 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
383 _mm_cvtepi32_epi64 (__m128i __X)
385 return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
388 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
389 _mm_cvtepi16_epi64 (__m128i __X)
391 return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
394 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
395 _mm_cvtepi8_epi16 (__m128i __X)
397 return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
400 /* Packed integer zero-extension. */
402 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
403 _mm_cvtepu8_epi32 (__m128i __X)
405 return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
408 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
409 _mm_cvtepu16_epi32 (__m128i __X)
411 return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
414 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
415 _mm_cvtepu8_epi64 (__m128i __X)
417 return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
420 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
421 _mm_cvtepu32_epi64 (__m128i __X)
423 return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
426 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
427 _mm_cvtepu16_epi64 (__m128i __X)
429 return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
432 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
433 _mm_cvtepu8_epi16 (__m128i __X)
435 return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
438 /* Pack 8 double words from 2 operands into 8 words of result with
439 unsigned saturation. */
440 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
441 _mm_packus_epi32 (__m128i __X, __m128i __Y)
443 return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
                                              (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X), \
                                        (__v16qi)(__m128i)(Y), (int)(M)))
#endif
463 /* Load double quadword using non-temporal aligned hint. */
464 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
465 _mm_stream_load_si128 (__m128i *__X)
467 return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
470 #ifdef __SSE4_2__
472 /* These macros specify the source data format. */
473 #define _SIDD_UBYTE_OPS 0x00
474 #define _SIDD_UWORD_OPS 0x01
475 #define _SIDD_SBYTE_OPS 0x02
476 #define _SIDD_SWORD_OPS 0x03
478 /* These macros specify the comparison operation. */
479 #define _SIDD_CMP_EQUAL_ANY 0x00
480 #define _SIDD_CMP_RANGES 0x04
481 #define _SIDD_CMP_EQUAL_EACH 0x08
482 #define _SIDD_CMP_EQUAL_ORDERED 0x0c
484 /* These macros specify the the polarity. */
485 #define _SIDD_POSITIVE_POLARITY 0x00
486 #define _SIDD_NEGATIVE_POLARITY 0x10
487 #define _SIDD_MASKED_POSITIVE_POLARITY 0x20
488 #define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
490 /* These macros specify the output selection in _mm_cmpXstri (). */
491 #define _SIDD_LEAST_SIGNIFICANT 0x00
492 #define _SIDD_MOST_SIGNIFICANT 0x40
494 /* These macros specify the output selection in _mm_cmpXstrm (). */
495 #define _SIDD_BIT_MASK 0x00
496 #define _SIDD_UNIT_MASK 0x40
/* Intrinsics for text/string processing.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
                                                (__v16qi)__Y,
                                                __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
                                      (__v16qi)__Y,
                                      __M);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
                                                (__v16qi)__Y, __LY,
                                                __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
                                      (__v16qi)__Y, __LY,
                                      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X), \
                                          (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistri(X, Y, M) \
  ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X), \
                                      (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X), \
                                          (int)(LX), (__v16qi)(__m128i)(Y), \
                                          (int)(LY), (int)(M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                      (__v16qi)(__m128i)(Y), (int)(LY), \
                                      (int)(M)))
#endif
/* Intrinsics for text/string processing and reading values of
   EFlags.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrc(X, Y, M) \
  ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistro(X, Y, M) \
  ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrs(X, Y, M) \
  ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrz(X, Y, M) \
  ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#endif
672 /* Packed integer 64-bit comparison, zeroing or filling with ones
673 corresponding parts of result. */
674 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
675 _mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
677 return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y);
/* Calculate a number of bits set to 1.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_popcnt_u32 (unsigned int __X)
{
  return __builtin_popcount (__X);
}

#ifdef __x86_64__
/* 64-bit population count is only available in 64-bit mode.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_popcnt_u64 (unsigned long long __X)
{
  return __builtin_popcountll (__X);
}
#endif
/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

#ifdef __x86_64__
/* 64-bit CRC32 accumulation is only available in 64-bit mode.  */
extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif
722 #endif /* __SSE4_2__ */
724 #endif /* __SSE4_1__ */
726 #endif /* _SMMINTRIN_H_INCLUDED */