/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Free Software
   Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED

#ifndef __MMX__
# error "MMX instruction set not enabled"
#else
/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef int __v2si __attribute__ ((__vector_size__ (8)));
typedef short __v4hi __attribute__ ((__vector_size__ (8)));
typedef char __v8qi __attribute__ ((__vector_size__ (8)));

/* Empty the multimedia state.  */
static __inline void __attribute__((__always_inline__, __artificial__))
_mm_empty (void)
{
  __builtin_ia32_emms ();
}

static __inline void __attribute__((__always_inline__, __artificial__))
_m_empty (void)
{
  _mm_empty ();
}
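
/* Illustrative usage (not part of the original header): the MMX registers
   alias the x87 floating-point register stack, so MMX code should end with
   _mm_empty () before any x87 floating-point code runs again.  For some
   __m64 values a and b:

     __m64 sum = _mm_add_pi16 (a, b);   // ... MMX work ...
     _mm_empty ();                      // reset the FP tag word
     double d = 0.5;                    // x87 code is safe again
*/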

/* Convert I to a __m64 object.  The integer is zero-extended to 64 bits.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtsi32_si64 (int __i)
{
  return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_from_int (int __i)
{
  return _mm_cvtsi32_si64 (__i);
}

#ifdef __x86_64__
/* Convert I to a __m64 object.  */

/* Intel intrinsic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_from_int64 (long long __i)
{
  return (__m64) __i;
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtsi64_m64 (long long __i)
{
  return (__m64) __i;
}

/* Microsoft intrinsic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cvtsi64x_si64 (long long __i)
{
  return (__m64) __i;
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set_pi64x (long long __i)
{
  return (__m64) __i;
}
#endif

/* Convert the lower 32 bits of the __m64 object into an integer.  */
static __inline int __attribute__((__always_inline__, __artificial__))
_mm_cvtsi64_si32 (__m64 __i)
{
  return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
}

static __inline int __attribute__((__always_inline__, __artificial__))
_m_to_int (__m64 __i)
{
  return _mm_cvtsi64_si32 (__i);
}
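
/* Illustrative example (not part of the original header): the 32-bit
   conversions round-trip through the low half of the __m64 value:

     __m64 v = _mm_cvtsi32_si64 (0x12345678);
     int   i = _mm_cvtsi64_si32 (v);   // i == 0x12345678
*/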

#ifdef __x86_64__
/* Convert the __m64 object to a 64-bit integer.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__, __artificial__))
_m_to_int64 (__m64 __i)
{
  return (long long)__i;
}

static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_cvtm64_si64 (__m64 __i)
{
  return (long long)__i;
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__, __artificial__))
_mm_cvtsi64_si64x (__m64 __i)
{
  return (long long)__i;
}
#endif

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with signed saturation.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_packs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_packsswb (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pi16 (__m1, __m2);
}
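
/* Illustrative example (not part of the original header): signed
   saturation clamps each word to the -128..127 byte range:

     __m64 r = _mm_packs_pi16 (_mm_set_pi16 (300, -300, 42, -42),
                               _mm_setzero_si64 ());
     // Low four bytes of r (low..high): -42, 42, -128, 127; high four: 0.
*/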

/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
   the result, and the two 32-bit values from M2 into the upper two 16-bit
   values of the result, all with signed saturation.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_packs_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_packssdw (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pi32 (__m1, __m2);
}

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with unsigned saturation.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_packs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_packuswb (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pu16 (__m1, __m2);
}
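
/* Illustrative example (not part of the original header): unsigned
   saturation clamps to the 0..255 range, so negative words become 0:

     __m64 r = _mm_packs_pu16 (_mm_set_pi16 (300, -5, 200, 7),
                               _mm_setzero_si64 ());
     // Low four bytes of r (low..high): 7, 200, 0, 255; high four: 0.
*/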

/* Interleave the four 8-bit values from the high half of M1 with the four
   8-bit values from the high half of M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_punpckhbw (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi8 (__m1, __m2);
}
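
/* Illustrative example (not part of the original header): the bytes of
   the two high halves are interleaved, M1's bytes in the even positions:

     __m64 m1 = _mm_set_pi8 (7, 6, 5, 4, 3, 2, 1, 0);
     __m64 m2 = _mm_set_pi8 (17, 16, 15, 14, 13, 12, 11, 10);
     __m64 r  = _mm_unpackhi_pi8 (m1, m2);
     // r (low..high): 4, 14, 5, 15, 6, 16, 7, 17.
*/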

/* Interleave the two 16-bit values from the high half of M1 with the two
   16-bit values from the high half of M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_punpckhwd (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi16 (__m1, __m2);
}

/* Interleave the 32-bit value from the high half of M1 with the 32-bit
   value from the high half of M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_punpckhdq (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi32 (__m1, __m2);
}

/* Interleave the four 8-bit values from the low half of M1 with the four
   8-bit values from the low half of M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_punpcklbw (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi8 (__m1, __m2);
}

/* Interleave the two 16-bit values from the low half of M1 with the two
   16-bit values from the low half of M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_punpcklwd (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi16 (__m1, __m2);
}

/* Interleave the 32-bit value from the low half of M1 with the 32-bit
   value from the low half of M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_punpckldq (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi32 (__m1, __m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_add_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddb (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_add_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddw (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi16 (__m1, __m2);
}

/* Add the 32-bit values in M1 to the 32-bit values in M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_add_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddd (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi32 (__m1, __m2);
}

/* Add the 64-bit value in M1 to the 64-bit value in M2.  */
#ifdef __SSE2__
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_add_si64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddq ((long long)__m1, (long long)__m2);
}
#endif

/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
   saturated arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_adds_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddsb (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pi8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
   saturated arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_adds_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddsw (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pi16 (__m1, __m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
   saturated arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_adds_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddusb (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pu8 (__m1, __m2);
}
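
/* Illustrative example (not part of the original header): unsigned
   saturating addition clamps at 255 instead of wrapping:

     __m64 r = _mm_adds_pu8 (_mm_set1_pi8 ((char) 200), _mm_set1_pi8 (100));
     // every byte of r is 255 (200 + 100 saturated), not the wrapped 44
*/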

/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
   saturated arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_adds_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_paddusw (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pu16 (__m1, __m2);
}

/* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sub_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubb (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sub_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubw (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi16 (__m1, __m2);
}

/* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sub_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubd (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi32 (__m1, __m2);
}

/* Subtract the 64-bit value in M2 from the 64-bit value in M1.  */
#ifdef __SSE2__
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sub_si64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubq ((long long)__m1, (long long)__m2);
}
#endif

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
   saturating arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_subs_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubsb (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pi8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   signed saturating arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_subs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubsw (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pi16 (__m1, __m2);
}

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
   unsigned saturating arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_subs_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubusb (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pu8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   unsigned saturating arithmetic.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_subs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psubusw (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pu16 (__m1, __m2);
}

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
   four 32-bit intermediate results, which are then summed by pairs to
   produce two 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_madd_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pmaddwd (__m64 __m1, __m64 __m2)
{
  return _mm_madd_pi16 (__m1, __m2);
}
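
/* Illustrative example (not part of the original header):

     __m64 a = _mm_set_pi16 (1, 2, 3, 4);   // elements low..high: 4, 3, 2, 1
     __m64 b = _mm_set_pi16 (5, 6, 7, 8);   // elements low..high: 8, 7, 6, 5
     __m64 r = _mm_madd_pi16 (a, b);
     // r = { 4*8 + 3*7, 2*6 + 1*5 } = { 53, 17 } (low, high 32-bit lanes)
*/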

/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
   M2 and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pmulhw (__m64 __m1, __m64 __m2)
{
  return _mm_mulhi_pi16 (__m1, __m2);
}
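
/* Illustrative example (not part of the original header): only the high
   half of each 32-bit product is kept, so 1000 * 1000 = 0x000F4240 yields
   0x000F in each lane:

     __m64 r = _mm_mulhi_pi16 (_mm_set1_pi16 (1000), _mm_set1_pi16 (1000));
     // every 16-bit lane of r is 0x000F (15)
*/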

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
   the low 16 bits of the results.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pmullw (__m64 __m1, __m64 __m2)
{
  return _mm_mullo_pi16 (__m1, __m2);
}

/* Shift four 16-bit values in M left by COUNT.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sll_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psllw (__m64 __m, __m64 __count)
{
  return _mm_sll_pi16 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_slli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psllwi (__m64 __m, int __count)
{
  return _mm_slli_pi16 (__m, __count);
}

/* Shift two 32-bit values in M left by COUNT.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sll_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pslld (__m64 __m, __m64 __count)
{
  return _mm_sll_pi32 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_slli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pslldi (__m64 __m, int __count)
{
  return _mm_slli_pi32 (__m, __count);
}

/* Shift the 64-bit value in M left by COUNT.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sll_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psllq (__m64 __m, __m64 __count)
{
  return _mm_sll_si64 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_slli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psllqi (__m64 __m, int __count)
{
  return _mm_slli_si64 (__m, __count);
}
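
/* Illustrative note (not part of the original header): the _mm_sll_*
   forms take the shift count in the low bits of a __m64, while the
   _mm_slli_* forms take a plain int, so for some __m64 value m these two
   calls are equivalent:

     __m64 r1 = _mm_sll_pi16 (m, _mm_cvtsi32_si64 (3));
     __m64 r2 = _mm_slli_pi16 (m, 3);   // each 16-bit lane shifted left by 3
*/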

/* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sra_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psraw (__m64 __m, __m64 __count)
{
  return _mm_sra_pi16 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srai_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrawi (__m64 __m, int __count)
{
  return _mm_srai_pi16 (__m, __count);
}
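
/* Illustrative example (not part of the original header): the arithmetic
   right shift replicates the sign bit, so negative lanes stay negative:

     __m64 r = _mm_srai_pi16 (_mm_set1_pi16 (-16), 2);
     // every 16-bit lane of r is -4
*/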

/* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_sra_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrad (__m64 __m, __m64 __count)
{
  return _mm_sra_pi32 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srai_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psradi (__m64 __m, int __count)
{
  return _mm_srai_pi32 (__m, __count);
}

/* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srl_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrlw (__m64 __m, __m64 __count)
{
  return _mm_srl_pi16 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrlwi (__m64 __m, int __count)
{
  return _mm_srli_pi16 (__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srl_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrld (__m64 __m, __m64 __count)
{
  return _mm_srl_pi32 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrldi (__m64 __m, int __count)
{
  return _mm_srli_pi32 (__m, __count);
}

/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srl_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrlq (__m64 __m, __m64 __count)
{
  return _mm_srl_si64 (__m, __count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_srli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_psrlqi (__m64 __m, int __count)
{
  return _mm_srli_si64 (__m, __count);
}

/* Bit-wise AND the 64-bit values in M1 and M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_and_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pand (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pand (__m64 __m1, __m64 __m2)
{
  return _mm_and_si64 (__m1, __m2);
}

/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
   64-bit value in M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_andnot_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pandn (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pandn (__m64 __m1, __m64 __m2)
{
  return _mm_andnot_si64 (__m1, __m2);
}
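
/* Illustrative note (not part of the original header): the complement
   applies to the FIRST operand, matching the PANDN instruction, so for
   some __m64 values mask and value:

     __m64 r = _mm_andnot_si64 (mask, value);   // r = (~mask) & value
*/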

/* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_or_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_por (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_por (__m64 __m1, __m64 __m2)
{
  return _mm_or_si64 (__m1, __m2);
}

/* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_xor_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pxor (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pxor (__m64 __m1, __m64 __m2)
{
  return _mm_xor_si64 (__m1, __m2);
}

/* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
   test is true and zero if false.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pcmpeqb (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi8 (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pcmpgtb (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi8 (__m1, __m2);
}

/* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
   the test is true and zero if false.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pcmpeqw (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi16 (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pcmpgtw (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi16 (__m1, __m2);
}

/* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
   the test is true and zero if false.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pcmpeqd (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi32 (__m1, __m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_m_pcmpgtd (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi32 (__m1, __m2);
}
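
/* Illustrative example (not part of the original header): the all-ones /
   all-zeros comparison masks can drive a branchless per-lane select,
   r = (x > y) ? a : b, for hypothetical __m64 values x, y, a, b:

     __m64 cond = _mm_cmpgt_pi16 (x, y);            // 0xFFFF where x > y
     __m64 r    = _mm_or_si64 (_mm_and_si64 (cond, a),
                               _mm_andnot_si64 (cond, b));
*/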

/* Creates a 64-bit zero.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_setzero_si64 (void)
{
  return (__m64)0LL;
}

/* Creates a vector of two 32-bit values; I0 is least significant.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set_pi32 (int __i1, int __i0)
{
  return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
}

/* Creates a vector of four 16-bit values; W0 is least significant.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
{
  return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
}

/* Creates a vector of eight 8-bit values; B0 is least significant.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
             char __b3, char __b2, char __b1, char __b0)
{
  return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3,
                                               __b4, __b5, __b6, __b7);
}

/* Similar, but with the arguments in reverse order.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_setr_pi32 (int __i0, int __i1)
{
  return _mm_set_pi32 (__i1, __i0);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
{
  return _mm_set_pi16 (__w3, __w2, __w1, __w0);
}

static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
              char __b4, char __b5, char __b6, char __b7)
{
  return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
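
/* Illustrative note (not part of the original header): _mm_set_* takes
   the most significant element first and _mm_setr_* the least significant
   first, so these two calls build the same vector:

     __m64 a = _mm_set_pi16 (3, 2, 1, 0);
     __m64 b = _mm_setr_pi16 (0, 1, 2, 3);
*/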

/* Creates a vector of two 32-bit values, both elements containing I.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set1_pi32 (int __i)
{
  return _mm_set_pi32 (__i, __i);
}

/* Creates a vector of four 16-bit values, all elements containing W.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set1_pi16 (short __w)
{
  return _mm_set_pi16 (__w, __w, __w, __w);
}

/* Creates a vector of eight 8-bit values, all elements containing B.  */
static __inline __m64 __attribute__((__always_inline__, __artificial__))
_mm_set1_pi8 (char __b)
{
  return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
}

#endif /* __MMX__ */
#endif /* _MMINTRIN_H_INCLUDED */