/* Copyright (C) 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */

#ifndef _SMMINTRIN_H_INCLUDED
#define _SMMINTRIN_H_INCLUDED

#ifndef __SSE4_1__
# error "SSE4.1 instruction set not enabled"
#else

/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
   files.  */
#include <tmmintrin.h>
#include <mmintrin-common.h>

/* SSE4.1 */

/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
                                              (__v8hi)__Y,
                                              __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M)))
#endif
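
/* For _mm_blend_epi16, bit I of the mask selects word I of the result:
   it comes from __Y if the bit is set, from __X otherwise.  For example
   (illustrative), a mask of 0xF0 takes words 0-3 from __X and words 4-7
   from __Y.  */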

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
                                               (__v16qi)__Y,
                                               (__v16qi)__M);
}

/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
                                          (__v4sf)__Y,
                                          __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M)))
#endif

static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
                                           (__v4sf)__Y,
                                           (__v4sf)__M);
}

/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
static __inline __m128d __attribute__((__always_inline__, __artificial__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
                                           (__v2df)__Y,
                                           __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M)))
#endif

static __inline __m128d __attribute__((__always_inline__, __artificial__))
_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
                                            (__v2df)__Y,
                                            (__v2df)__M);
}

/* Dot product instructions with mask-defined summing and zeroing parts
   of result.  */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
                                       (__v4sf)__Y,
                                       __M);
}

static __inline __m128d __attribute__((__always_inline__, __artificial__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
                                        (__v2df)__Y,
                                        __M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M)))
#endif
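
/* For _mm_dp_ps, bits [7:4] of the mask select which element pairs are
   multiplied and summed, and bits [3:0] select which result elements
   receive that sum (the others are zeroed).  For example (illustrative),
   _mm_dp_ps (__X, __Y, 0xF1) computes the full 4-element dot product and
   stores it in element 0 only, while a mask of 0xFF broadcasts it to all
   four elements.  */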

/* Packed integer 64-bit comparison, zeroing or filling with ones
   corresponding parts of result.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y);
}

/* Min/max packed integer instructions.  */

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_min_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_max_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_min_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_max_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_min_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_max_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_min_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_max_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication with truncation of upper
   halves of results.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_mullo_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication of 2 pairs of operands
   with two 64-bit results.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_mul_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
}

/* Insert single precision float into packed single precision array
   element selected by index N.  The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D.  */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__, __artificial__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
                                              (__v4sf)__S,
                                              __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))
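
/* For example (illustrative), _MM_MK_INSERTPS_NDX (2, 1, 0x8) evaluates
   to 0x98: copy element 2 of S into element 1 of D and zero element 3 of
   the result (bit I of the zeroing mask clears result element I).  */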

/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_extract_ps (__m128 __X, const int __N)
{
  union { int i; float f; } __tmp;
  __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ \
   ({ \
     union { int i; float f; } __tmp; \
     __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N)); \
     __tmp.i; \
   }) \
   )
#endif

/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }

/* Extract specified single precision float element into the lower
   part of __m128.  */
#define _MM_PICK_OUT_PS(X, N) \
  _mm_insert_ps (_mm_setzero_ps (), (X), \
                 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))
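
/* For example (illustrative), _MM_PICK_OUT_PS (__X, 2) uses the N value
   0x8e: element 2 of __X is inserted into element 0 of a zero vector and
   elements 1-3 of the result are zeroed.  */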

/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
                                                 __S, __N);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
                                                __S, __N);
}

#ifdef __x86_64__
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
                                                __S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N)))
#endif
#endif

/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  __builtin_ia32_vec_ext_v16qi ((__v16qi)(X), (N))
#define _mm_extract_epi32(X, N) \
  __builtin_ia32_vec_ext_v4si ((__v4si)(X), (N))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N)))
#endif
#endif

/* Return horizontal packed word minimum and its index in bits [15:0]
   and bits [18:16] respectively.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_minpos_epu16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
}

/* Packed integer sign-extension.  */

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepi8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepi16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepi8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepi32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepi16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepi8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
}

/* Packed integer zero-extension.  */

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepu8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepu16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepu8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepu32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepu16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cvtepu8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
}

/* Pack 8 double words from 2 operands into 8 words of result with
   unsigned saturation.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_packus_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
}

/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
                                              (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#endif

/* Load double quadword using non-temporal aligned hint.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_stream_load_si128 (__m128i *__X)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
}

#ifdef __SSE4_2__

/* These macros specify the source data format.  */
#define SIDD_UBYTE_OPS			0x00
#define SIDD_UWORD_OPS			0x01
#define SIDD_SBYTE_OPS			0x02
#define SIDD_SWORD_OPS			0x03

/* These macros specify the comparison operation.  */
#define SIDD_CMP_EQUAL_ANY		0x00
#define SIDD_CMP_RANGES			0x04
#define SIDD_CMP_EQUAL_EACH		0x08
#define SIDD_CMP_EQUAL_ORDERED		0x0c

/* These macros specify the polarity.  */
#define SIDD_POSITIVE_POLARITY		0x00
#define SIDD_NEGATIVE_POLARITY		0x10
#define SIDD_MASKED_POSITIVE_POLARITY	0x20
#define SIDD_MASKED_NEGATIVE_POLARITY	0x30

/* These macros specify the output selection in _mm_cmpXstri ().  */
#define SIDD_LEAST_SIGNIFICANT		0x00
#define SIDD_MOST_SIGNIFICANT		0x40

/* These macros specify the output selection in _mm_cmpXstrm ().  */
#define SIDD_BIT_MASK			0x00
#define SIDD_UNIT_MASK			0x40
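
/* A mode argument is built by ORing one macro from each group.  For
   example (illustrative), SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY
   | SIDD_LEAST_SIGNIFICANT describes an unsigned-byte "equal any"
   search that reports the lowest matching index.  */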

/* Intrinsics for text/string processing.  */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
                                                (__v16qi)__Y,
                                                __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
                                      (__v16qi)__Y,
                                      __M);
}

static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
                                                (__v16qi)__Y, __LY,
                                                __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
                                      (__v16qi)__Y, __LY,
                                      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#define _mm_cmpistri(X, Y, M) \
  __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \
                                          (__v16qi)(Y), (int)(LY), (M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \
                               (__v16qi)(Y), (int)(LY), (M))
#endif
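
/* For example (illustrative), with a 16-byte character set in __set and
   a 16-byte string fragment in __str (hypothetical variables),

     int __idx = _mm_cmpistri (__set, __str,
                               SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY
                               | SIDD_LEAST_SIGNIFICANT);

   returns the index of the first byte of __str that matches any byte of
   __set, or 16 if there is no match.  */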

/* Intrinsics for text/string processing and reading values of
   EFlags.  */
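
/* The suffix of each intrinsic below names the flag it reads: 'a'
   returns 1 when both CF and ZF are clear, 'c' returns CF, 'o' returns
   OF (bit 0 of the comparison result), 's' returns SF (a terminating
   element was found in the first operand), and 'z' returns ZF (a
   terminating element was found in the second operand).  */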

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrc(X, Y, M) \
  __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistro(X, Y, M) \
  __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrs(X, Y, M) \
  __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrz(X, Y, M) \
  __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#endif

/* Packed integer 64-bit comparison, zeroing or filling with ones
   corresponding parts of result.  */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y);
}

/* Calculate a number of bits set to 1.  */
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_popcnt_u32 (unsigned int __X)
{
  return __builtin_popcount (__X);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_popcnt_u64 (unsigned long long __X)
{
  return __builtin_popcountll (__X);
}
#endif

/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.  */
static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

static __inline unsigned int __attribute__((__always_inline__, __artificial__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

#ifdef __x86_64__
static __inline unsigned long long __attribute__((__always_inline__, __artificial__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif
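
/* For example (illustrative), a running CRC over a byte buffer __buf of
   length __len (hypothetical names) is accumulated by feeding the
   previous result back in as the first argument:

     unsigned int __crc = 0xffffffff;
     for (unsigned int __i = 0; __i < __len; __i++)
       __crc = _mm_crc32_u8 (__crc, __buf[__i]);

   The initial value and any final inversion depend on the protocol in
   use.  */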

#endif /* __SSE4_2__ */

#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H_INCLUDED */