[official-gcc.git] / gcc / config / i386 / mmintrin.h
/* Copyright (C) 2002-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED

#ifndef __MMX__
#pragma GCC push_options
#pragma GCC target("mmx")
#define __DISABLE_MMX__
#endif /* __MMX__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef int __v2si __attribute__ ((__vector_size__ (8)));
typedef short __v4hi __attribute__ ((__vector_size__ (8)));
typedef char __v8qi __attribute__ ((__vector_size__ (8)));
typedef long long __v1di __attribute__ ((__vector_size__ (8)));
typedef float __v2sf __attribute__ ((__vector_size__ (8)));

/* Empty the multimedia state.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_empty (void)
{
  __builtin_ia32_emms ();
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_empty (void)
{
  _mm_empty ();
}

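/* Usage note: the MMX registers alias the x87 floating-point register
   stack, so code that has used any of these intrinsics should call
   _mm_empty () before executing x87 floating-point code again.  A typical
   (illustrative) pattern:

     __m64 sum = _mm_add_pi16 (a, b);
     ... extract and store the results ...
     _mm_empty ();    clear the FPU tag word before doing float/double math
*/
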
/* Convert I to a __m64 object.  The integer is zero-extended to 64-bits.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_si64 (int __i)
{
  return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_from_int (int __i)
{
  return _mm_cvtsi32_si64 (__i);
}

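/* For example, _mm_cvtsi32_si64 (0x12345678) yields a __m64 whose low
   32 bits are 0x12345678 and whose upper 32 bits are zero.  */
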
#ifdef __x86_64__
/* Convert I to a __m64 object.  */

/* Intel intrinsic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_from_int64 (long long __i)
{
  return (__m64) __i;
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_m64 (long long __i)
{
  return (__m64) __i;
}

/* Microsoft intrinsic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_si64 (long long __i)
{
  return (__m64) __i;
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi64x (long long __i)
{
  return (__m64) __i;
}
#endif

/* Convert the lower 32 bits of the __m64 object into an integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_si32 (__m64 __i)
{
  return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_to_int (__m64 __i)
{
  return _mm_cvtsi64_si32 (__i);
}

#ifdef __x86_64__
/* Convert the __m64 object to a 64bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_to_int64 (__m64 __i)
{
  return (long long)__i;
}

extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtm64_si64 (__m64 __i)
{
  return (long long)__i;
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_si64x (__m64 __i)
{
  return (long long)__i;
}
#endif

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with signed saturation.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_packsswb (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pi16 (__m1, __m2);
}

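/* Illustrative example of the signed saturation performed above: packing
   the 16-bit values {300, -300, 42, 0} produces the 8-bit values
   {127, -128, 42, 0}, since 300 and -300 fall outside the signed 8-bit
   range [-128, 127].  */
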
/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
   the result, and the two 32-bit values from M2 into the upper two 16-bit
   values of the result, all with signed saturation.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_packssdw (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pi32 (__m1, __m2);
}

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with unsigned saturation.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_packuswb (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pu16 (__m1, __m2);
}

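/* Illustrative example of the unsigned saturation performed above: packing
   the 16-bit values {300, -1, 42, 0} produces the 8-bit values
   {255, 0, 42, 0}, since results are clamped to the unsigned 8-bit range
   [0, 255].  */
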
/* Interleave the four 8-bit values from the high half of M1 with the four
   8-bit values from the high half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckhbw (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi8 (__m1, __m2);
}

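/* Illustrative example: numbering bytes from 0 (least significant) to 7,
   _mm_unpackhi_pi8 returns { m1[4], m2[4], m1[5], m2[5], m1[6], m2[6],
   m1[7], m2[7] }, again listed from the least significant byte upward.  */
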
/* Interleave the two 16-bit values from the high half of M1 with the two
   16-bit values from the high half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckhwd (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi16 (__m1, __m2);
}

/* Interleave the 32-bit value from the high half of M1 with the 32-bit
   value from the high half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckhdq (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi32 (__m1, __m2);
}

/* Interleave the four 8-bit values from the low half of M1 with the four
   8-bit values from the low half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpcklbw (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi8 (__m1, __m2);
}

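/* Illustrative example: _mm_unpacklo_pi8 returns { m1[0], m2[0], m1[1],
   m2[1], m1[2], m2[2], m1[3], m2[3] }, listed from the least significant
   byte upward.  */
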
/* Interleave the two 16-bit values from the low half of M1 with the two
   16-bit values from the low half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpcklwd (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi16 (__m1, __m2);
}

/* Interleave the 32-bit value from the low half of M1 with the 32-bit
   value from the low half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckldq (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi32 (__m1, __m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddb (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddw (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi16 (__m1, __m2);
}

/* Add the 32-bit values in M1 to the 32-bit values in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddd (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi32 (__m1, __m2);
}

/* Add the 64-bit values in M1 to the 64-bit values in M2.  */
#ifndef __SSE2__
#pragma GCC push_options
#pragma GCC target("sse2")
#define __DISABLE_SSE2__
#endif /* __SSE2__ */

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_si64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddq ((__v1di)__m1, (__v1di)__m2);
}
#ifdef __DISABLE_SSE2__
#undef __DISABLE_SSE2__
#pragma GCC pop_options
#endif /* __DISABLE_SSE2__ */

/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddsb (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pi8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddsw (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pi16 (__m1, __m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddusb (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pu8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddusw (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pu16 (__m1, __m2);
}

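/* Illustrative examples of the saturating adds above: in _mm_adds_pi8,
   100 + 100 yields 127 (clamped to the signed 8-bit maximum); in
   _mm_adds_pu8, 200 + 100 yields 255 (clamped to the unsigned 8-bit
   maximum) instead of wrapping around.  */
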
/* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubb (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubw (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi16 (__m1, __m2);
}

/* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubd (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi32 (__m1, __m2);
}

/* Subtract the 64-bit values in M2 from the 64-bit values in M1.  */
#ifndef __SSE2__
#pragma GCC push_options
#pragma GCC target("sse2")
#define __DISABLE_SSE2__
#endif /* __SSE2__ */

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_si64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubq ((__v1di)__m1, (__v1di)__m2);
}
#ifdef __DISABLE_SSE2__
#undef __DISABLE_SSE2__
#pragma GCC pop_options
#endif /* __DISABLE_SSE2__ */

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
   saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubsb (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pi8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   signed saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubsw (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pi16 (__m1, __m2);
}

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
   unsigned saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubusb (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pu8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   unsigned saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubusw (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pu16 (__m1, __m2);
}

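/* Illustrative example: in _mm_subs_pu8, 10 - 20 yields 0 rather than
   wrapping to 246, since unsigned saturation clamps at zero.  */
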
/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
   four 32-bit intermediate results, which are then summed by pairs to
   produce two 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_madd_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaddwd (__m64 __m1, __m64 __m2)
{
  return _mm_madd_pi16 (__m1, __m2);
}

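/* In other words, for 16-bit elements m1[0..3] and m2[0..3] (least
   significant first), the two 32-bit results are

     r[0] = m1[0]*m2[0] + m1[1]*m2[1]
     r[1] = m1[2]*m2[2] + m1[3]*m2[3]

   which is the usual building block for dot products.  */
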
/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
   M2 and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhw (__m64 __m1, __m64 __m2)
{
  return _mm_mulhi_pi16 (__m1, __m2);
}

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
   the low 16 bits of the results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmullw (__m64 __m1, __m64 __m2)
{
  return _mm_mullo_pi16 (__m1, __m2);
}

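/* Illustrative example: for element values 0x4000 and 0x0004 the signed
   product is 0x00010000, so _mm_mulhi_pi16 returns 0x0001 (the high
   16 bits) and _mm_mullo_pi16 returns 0x0000 (the low 16 bits) in that
   element.  */
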
/* Shift four 16-bit values in M left by COUNT.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (__v4hi)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllw (__m64 __m, __m64 __count)
{
  return _mm_sll_pi16 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllwi (__m64 __m, int __count)
{
  return _mm_slli_pi16 (__m, __count);
}

/* Shift two 32-bit values in M left by COUNT.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_pslld ((__v2si)__m, (__v2si)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pslld (__m64 __m, __m64 __count)
{
  return _mm_sll_pi32 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pslldi (__m64 __m, int __count)
{
  return _mm_slli_pi32 (__m, __count);
}

/* Shift the 64-bit value in M left by COUNT.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psllq ((__v1di)__m, (__v1di)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllq (__m64 __m, __m64 __count)
{
  return _mm_sll_si64 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psllqi ((__v1di)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllqi (__m64 __m, int __count)
{
  return _mm_slli_si64 (__m, __count);
}

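/* Note on shift counts: the _mm_sll_* forms take the count as a __m64,
   the _mm_slli_* forms take it as an int.  For these logical left shifts,
   a count larger than the element width (15, 31 or 63 respectively)
   produces an all-zero result.  */
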
/* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (__v4hi)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psraw (__m64 __m, __m64 __count)
{
  return _mm_sra_pi16 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srai_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrawi (__m64 __m, int __count)
{
  return _mm_srai_pi16 (__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrad ((__v2si)__m, (__v2si)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrad (__m64 __m, __m64 __count)
{
  return _mm_sra_pi32 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srai_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psradi (__m64 __m, int __count)
{
  return _mm_srai_pi32 (__m, __count);
}

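/* Note: these are arithmetic right shifts, so vacated bits are filled with
   copies of each element's sign bit; a count larger than the element width
   leaves each element either all zeros or all ones, depending on its
   sign.  */
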
/* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (__v4hi)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlw (__m64 __m, __m64 __count)
{
  return _mm_srl_pi16 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlwi (__m64 __m, int __count)
{
  return _mm_srli_pi16 (__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrld ((__v2si)__m, (__v2si)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrld (__m64 __m, __m64 __count)
{
  return _mm_srl_pi32 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrldi (__m64 __m, int __count)
{
  return _mm_srli_pi32 (__m, __count);
}

/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrlq ((__v1di)__m, (__v1di)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlq (__m64 __m, __m64 __count)
{
  return _mm_srl_si64 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrlqi ((__v1di)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlqi (__m64 __m, int __count)
{
  return _mm_srli_si64 (__m, __count);
}

/* Bit-wise AND the 64-bit values in M1 and M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pand (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pand (__m64 __m1, __m64 __m2)
{
  return _mm_and_si64 (__m1, __m2);
}

/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
   64-bit value in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pandn (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pandn (__m64 __m1, __m64 __m2)
{
  return _mm_andnot_si64 (__m1, __m2);
}

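/* Note the operand order: _mm_andnot_si64 (m1, m2) computes (~m1) & m2,
   i.e. the first argument is the one that is complemented.  A typical use
   is mask-based selection, e.g.

     _mm_or_si64 (_mm_and_si64 (mask, a), _mm_andnot_si64 (mask, b))

   which picks bits of a where mask is one and bits of b where it is
   zero.  */
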
/* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_por (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_por (__m64 __m1, __m64 __m2)
{
  return _mm_or_si64 (__m1, __m2);
}

/* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pxor (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pxor (__m64 __m1, __m64 __m2)
{
  return _mm_xor_si64 (__m1, __m2);
}

/* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
   test is true and zero if false.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpeqb (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi8 (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpgtb (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi8 (__m1, __m2);
}

/* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
   the test is true and zero if false.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpeqw (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi16 (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpgtw (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi16 (__m1, __m2);
}

/* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
   the test is true and zero if false.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpeqd (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi32 (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpgtd (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi32 (__m1, __m2);
}

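/* Note: the greater-than comparisons treat elements as signed, and all of
   the comparisons produce per-element masks (all ones for true, all zeros
   for false).  The results are therefore normally combined with
   _mm_and_si64 / _mm_andnot_si64 / _mm_or_si64 to select between values
   rather than used as scalar booleans.  */
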
/* Creates a 64-bit zero.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_si64 (void)
{
  return (__m64)0LL;
}

/* Creates a vector of two 32-bit values; I0 is least significant.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi32 (int __i1, int __i0)
{
  return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
}

/* Creates a vector of four 16-bit values; W0 is least significant.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
{
  return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
}

/* Creates a vector of eight 8-bit values; B0 is least significant.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
             char __b3, char __b2, char __b1, char __b0)
{
  return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3,
                                               __b4, __b5, __b6, __b7);
}

/* Similar, but with the arguments in reverse order.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi32 (int __i0, int __i1)
{
  return _mm_set_pi32 (__i1, __i0);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
{
  return _mm_set_pi16 (__w3, __w2, __w1, __w0);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
              char __b4, char __b5, char __b6, char __b7)
{
  return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}

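/* Illustrative example of the element order: _mm_set_pi16 (3, 2, 1, 0)
   and _mm_setr_pi16 (0, 1, 2, 3) build the same vector, whose least
   significant 16-bit element is 0 and whose most significant element
   is 3.  */
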
/* Creates a vector of two 32-bit values, both elements containing I.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi32 (int __i)
{
  return _mm_set_pi32 (__i, __i);
}

/* Creates a vector of four 16-bit values, all elements containing W.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi16 (short __w)
{
  return _mm_set_pi16 (__w, __w, __w, __w);
}

/* Creates a vector of eight 8-bit values, all elements containing B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi8 (char __b)
{
  return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
}

#ifdef __DISABLE_MMX__
#undef __DISABLE_MMX__
#pragma GCC pop_options
#endif /* __DISABLE_MMX__ */

#endif /* _MMINTRIN_H_INCLUDED */