/* Source: official-gcc.git, gomp-20050608-branch, gcc/config/i386/mmintrin.h
   (blob 1a74271f64831067ee3920f9326d2acc341d69d3).  */
/* Copyright (C) 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */
31 #ifndef _MMINTRIN_H_INCLUDED
32 #define _MMINTRIN_H_INCLUDED
34 #ifndef __MMX__
35 # error "MMX instruction set not enabled"
36 #else
37 /* The data type intended for user use. */
38 typedef int __m64 __attribute__ ((__vector_size__ (8)));
40 /* Internal data types for implementing the intrinsics. */
41 typedef int __v2si __attribute__ ((__vector_size__ (8)));
42 typedef short __v4hi __attribute__ ((__vector_size__ (8)));
43 typedef char __v8qi __attribute__ ((__vector_size__ (8)));
/* Empty the multimedia state: issue EMMS so the FPU register stack is
   usable again after MMX code.  */
static __inline void __attribute__((__always_inline__))
_mm_empty (void)
{
  __builtin_ia32_emms ();
}

/* Alternate (Intel) name for _mm_empty.  */
static __inline void __attribute__((__always_inline__))
_m_empty (void)
{
  _mm_empty ();
}
58 /* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */
59 static __inline __m64 __attribute__((__always_inline__))
60 _mm_cvtsi32_si64 (int __i)
62 return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
65 static __inline __m64 __attribute__((__always_inline__))
66 _m_from_int (int __i)
68 return _mm_cvtsi32_si64 (__i);
71 #ifdef __x86_64__
72 /* Convert I to a __m64 object. */
74 /* Intel intrinsic. */
75 static __inline __m64 __attribute__((__always_inline__))
76 _m_from_int64 (long long __i)
78 return (__m64) __i;
81 static __inline __m64 __attribute__((__always_inline__))
82 _mm_cvtsi64_m64 (long long __i)
84 return (__m64) __i;
87 /* Microsoft intrinsic. */
88 static __inline __m64 __attribute__((__always_inline__))
89 _mm_cvtsi64x_si64 (long long __i)
91 return (__m64) __i;
94 static __inline __m64 __attribute__((__always_inline__))
95 _mm_set_pi64x (long long __i)
97 return (__m64) __i;
99 #endif
101 /* Convert the lower 32 bits of the __m64 object into an integer. */
102 static __inline int __attribute__((__always_inline__))
103 _mm_cvtsi64_si32 (__m64 __i)
105 return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
108 static __inline int __attribute__((__always_inline__))
109 _m_to_int (__m64 __i)
111 return _mm_cvtsi64_si32 (__i);
114 #ifdef __x86_64__
115 /* Convert the __m64 object to a 64bit integer. */
117 /* Intel intrinsic. */
118 static __inline long long __attribute__((__always_inline__))
119 _m_to_int64 (__m64 __i)
121 return (long long)__i;
124 static __inline long long __attribute__((__always_inline__))
125 _mm_cvtm64_si64 (__m64 __i)
127 return (long long)__i;
130 /* Microsoft intrinsic. */
131 static __inline long long __attribute__((__always_inline__))
132 _mm_cvtsi64_si64x (__m64 __i)
134 return (long long)__i;
136 #endif
138 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
139 the result, and the four 16-bit values from M2 into the upper four 8-bit
140 values of the result, all with signed saturation. */
141 static __inline __m64 __attribute__((__always_inline__))
142 _mm_packs_pi16 (__m64 __m1, __m64 __m2)
144 return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
147 static __inline __m64 __attribute__((__always_inline__))
148 _m_packsswb (__m64 __m1, __m64 __m2)
150 return _mm_packs_pi16 (__m1, __m2);
153 /* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
154 the result, and the two 32-bit values from M2 into the upper two 16-bit
155 values of the result, all with signed saturation. */
156 static __inline __m64 __attribute__((__always_inline__))
157 _mm_packs_pi32 (__m64 __m1, __m64 __m2)
159 return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
162 static __inline __m64 __attribute__((__always_inline__))
163 _m_packssdw (__m64 __m1, __m64 __m2)
165 return _mm_packs_pi32 (__m1, __m2);
168 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
169 the result, and the four 16-bit values from M2 into the upper four 8-bit
170 values of the result, all with unsigned saturation. */
171 static __inline __m64 __attribute__((__always_inline__))
172 _mm_packs_pu16 (__m64 __m1, __m64 __m2)
174 return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
177 static __inline __m64 __attribute__((__always_inline__))
178 _m_packuswb (__m64 __m1, __m64 __m2)
180 return _mm_packs_pu16 (__m1, __m2);
183 /* Interleave the four 8-bit values from the high half of M1 with the four
184 8-bit values from the high half of M2. */
185 static __inline __m64 __attribute__((__always_inline__))
186 _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
188 return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
191 static __inline __m64 __attribute__((__always_inline__))
192 _m_punpckhbw (__m64 __m1, __m64 __m2)
194 return _mm_unpackhi_pi8 (__m1, __m2);
197 /* Interleave the two 16-bit values from the high half of M1 with the two
198 16-bit values from the high half of M2. */
199 static __inline __m64 __attribute__((__always_inline__))
200 _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
202 return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
205 static __inline __m64 __attribute__((__always_inline__))
206 _m_punpckhwd (__m64 __m1, __m64 __m2)
208 return _mm_unpackhi_pi16 (__m1, __m2);
211 /* Interleave the 32-bit value from the high half of M1 with the 32-bit
212 value from the high half of M2. */
213 static __inline __m64 __attribute__((__always_inline__))
214 _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
216 return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
219 static __inline __m64 __attribute__((__always_inline__))
220 _m_punpckhdq (__m64 __m1, __m64 __m2)
222 return _mm_unpackhi_pi32 (__m1, __m2);
225 /* Interleave the four 8-bit values from the low half of M1 with the four
226 8-bit values from the low half of M2. */
227 static __inline __m64 __attribute__((__always_inline__))
228 _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
230 return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
233 static __inline __m64 __attribute__((__always_inline__))
234 _m_punpcklbw (__m64 __m1, __m64 __m2)
236 return _mm_unpacklo_pi8 (__m1, __m2);
239 /* Interleave the two 16-bit values from the low half of M1 with the two
240 16-bit values from the low half of M2. */
241 static __inline __m64 __attribute__((__always_inline__))
242 _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
244 return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
247 static __inline __m64 __attribute__((__always_inline__))
248 _m_punpcklwd (__m64 __m1, __m64 __m2)
250 return _mm_unpacklo_pi16 (__m1, __m2);
253 /* Interleave the 32-bit value from the low half of M1 with the 32-bit
254 value from the low half of M2. */
255 static __inline __m64 __attribute__((__always_inline__))
256 _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
258 return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
261 static __inline __m64 __attribute__((__always_inline__))
262 _m_punpckldq (__m64 __m1, __m64 __m2)
264 return _mm_unpacklo_pi32 (__m1, __m2);
267 /* Add the 8-bit values in M1 to the 8-bit values in M2. */
268 static __inline __m64 __attribute__((__always_inline__))
269 _mm_add_pi8 (__m64 __m1, __m64 __m2)
271 return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
274 static __inline __m64 __attribute__((__always_inline__))
275 _m_paddb (__m64 __m1, __m64 __m2)
277 return _mm_add_pi8 (__m1, __m2);
280 /* Add the 16-bit values in M1 to the 16-bit values in M2. */
281 static __inline __m64 __attribute__((__always_inline__))
282 _mm_add_pi16 (__m64 __m1, __m64 __m2)
284 return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
287 static __inline __m64 __attribute__((__always_inline__))
288 _m_paddw (__m64 __m1, __m64 __m2)
290 return _mm_add_pi16 (__m1, __m2);
293 /* Add the 32-bit values in M1 to the 32-bit values in M2. */
294 static __inline __m64 __attribute__((__always_inline__))
295 _mm_add_pi32 (__m64 __m1, __m64 __m2)
297 return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
300 static __inline __m64 __attribute__((__always_inline__))
301 _m_paddd (__m64 __m1, __m64 __m2)
303 return _mm_add_pi32 (__m1, __m2);
306 /* Add the 64-bit values in M1 to the 64-bit values in M2. */
307 #ifdef __SSE2__
308 static __inline __m64 __attribute__((__always_inline__))
309 _mm_add_si64 (__m64 __m1, __m64 __m2)
311 return (__m64) __builtin_ia32_paddq ((long long)__m1, (long long)__m2);
313 #endif
315 /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
316 saturated arithmetic. */
317 static __inline __m64 __attribute__((__always_inline__))
318 _mm_adds_pi8 (__m64 __m1, __m64 __m2)
320 return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
323 static __inline __m64 __attribute__((__always_inline__))
324 _m_paddsb (__m64 __m1, __m64 __m2)
326 return _mm_adds_pi8 (__m1, __m2);
329 /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
330 saturated arithmetic. */
331 static __inline __m64 __attribute__((__always_inline__))
332 _mm_adds_pi16 (__m64 __m1, __m64 __m2)
334 return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
337 static __inline __m64 __attribute__((__always_inline__))
338 _m_paddsw (__m64 __m1, __m64 __m2)
340 return _mm_adds_pi16 (__m1, __m2);
343 /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
344 saturated arithmetic. */
345 static __inline __m64 __attribute__((__always_inline__))
346 _mm_adds_pu8 (__m64 __m1, __m64 __m2)
348 return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
351 static __inline __m64 __attribute__((__always_inline__))
352 _m_paddusb (__m64 __m1, __m64 __m2)
354 return _mm_adds_pu8 (__m1, __m2);
357 /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
358 saturated arithmetic. */
359 static __inline __m64 __attribute__((__always_inline__))
360 _mm_adds_pu16 (__m64 __m1, __m64 __m2)
362 return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
365 static __inline __m64 __attribute__((__always_inline__))
366 _m_paddusw (__m64 __m1, __m64 __m2)
368 return _mm_adds_pu16 (__m1, __m2);
371 /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
372 static __inline __m64 __attribute__((__always_inline__))
373 _mm_sub_pi8 (__m64 __m1, __m64 __m2)
375 return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
378 static __inline __m64 __attribute__((__always_inline__))
379 _m_psubb (__m64 __m1, __m64 __m2)
381 return _mm_sub_pi8 (__m1, __m2);
384 /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
385 static __inline __m64 __attribute__((__always_inline__))
386 _mm_sub_pi16 (__m64 __m1, __m64 __m2)
388 return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
391 static __inline __m64 __attribute__((__always_inline__))
392 _m_psubw (__m64 __m1, __m64 __m2)
394 return _mm_sub_pi16 (__m1, __m2);
397 /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
398 static __inline __m64 __attribute__((__always_inline__))
399 _mm_sub_pi32 (__m64 __m1, __m64 __m2)
401 return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
404 static __inline __m64 __attribute__((__always_inline__))
405 _m_psubd (__m64 __m1, __m64 __m2)
407 return _mm_sub_pi32 (__m1, __m2);
410 /* Add the 64-bit values in M1 to the 64-bit values in M2. */
411 #ifdef __SSE2__
412 static __inline __m64 __attribute__((__always_inline__))
413 _mm_sub_si64 (__m64 __m1, __m64 __m2)
415 return (__m64) __builtin_ia32_psubq ((long long)__m1, (long long)__m2);
417 #endif
419 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
420 saturating arithmetic. */
421 static __inline __m64 __attribute__((__always_inline__))
422 _mm_subs_pi8 (__m64 __m1, __m64 __m2)
424 return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
427 static __inline __m64 __attribute__((__always_inline__))
428 _m_psubsb (__m64 __m1, __m64 __m2)
430 return _mm_subs_pi8 (__m1, __m2);
433 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
434 signed saturating arithmetic. */
435 static __inline __m64 __attribute__((__always_inline__))
436 _mm_subs_pi16 (__m64 __m1, __m64 __m2)
438 return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
441 static __inline __m64 __attribute__((__always_inline__))
442 _m_psubsw (__m64 __m1, __m64 __m2)
444 return _mm_subs_pi16 (__m1, __m2);
447 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
448 unsigned saturating arithmetic. */
449 static __inline __m64 __attribute__((__always_inline__))
450 _mm_subs_pu8 (__m64 __m1, __m64 __m2)
452 return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
455 static __inline __m64 __attribute__((__always_inline__))
456 _m_psubusb (__m64 __m1, __m64 __m2)
458 return _mm_subs_pu8 (__m1, __m2);
461 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
462 unsigned saturating arithmetic. */
463 static __inline __m64 __attribute__((__always_inline__))
464 _mm_subs_pu16 (__m64 __m1, __m64 __m2)
466 return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
469 static __inline __m64 __attribute__((__always_inline__))
470 _m_psubusw (__m64 __m1, __m64 __m2)
472 return _mm_subs_pu16 (__m1, __m2);
475 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
476 four 32-bit intermediate results, which are then summed by pairs to
477 produce two 32-bit results. */
478 static __inline __m64 __attribute__((__always_inline__))
479 _mm_madd_pi16 (__m64 __m1, __m64 __m2)
481 return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
484 static __inline __m64 __attribute__((__always_inline__))
485 _m_pmaddwd (__m64 __m1, __m64 __m2)
487 return _mm_madd_pi16 (__m1, __m2);
490 /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
491 M2 and produce the high 16 bits of the 32-bit results. */
492 static __inline __m64 __attribute__((__always_inline__))
493 _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
495 return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
498 static __inline __m64 __attribute__((__always_inline__))
499 _m_pmulhw (__m64 __m1, __m64 __m2)
501 return _mm_mulhi_pi16 (__m1, __m2);
504 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
505 the low 16 bits of the results. */
506 static __inline __m64 __attribute__((__always_inline__))
507 _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
509 return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
512 static __inline __m64 __attribute__((__always_inline__))
513 _m_pmullw (__m64 __m1, __m64 __m2)
515 return _mm_mullo_pi16 (__m1, __m2);
518 /* Shift four 16-bit values in M left by COUNT. */
519 static __inline __m64 __attribute__((__always_inline__))
520 _mm_sll_pi16 (__m64 __m, __m64 __count)
522 return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);
525 static __inline __m64 __attribute__((__always_inline__))
526 _m_psllw (__m64 __m, __m64 __count)
528 return _mm_sll_pi16 (__m, __count);
531 static __inline __m64 __attribute__((__always_inline__))
532 _mm_slli_pi16 (__m64 __m, int __count)
534 return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
537 static __inline __m64 __attribute__((__always_inline__))
538 _m_psllwi (__m64 __m, int __count)
540 return _mm_slli_pi16 (__m, __count);
543 /* Shift two 32-bit values in M left by COUNT. */
544 static __inline __m64 __attribute__((__always_inline__))
545 _mm_sll_pi32 (__m64 __m, __m64 __count)
547 return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);
550 static __inline __m64 __attribute__((__always_inline__))
551 _m_pslld (__m64 __m, __m64 __count)
553 return _mm_sll_pi32 (__m, __count);
556 static __inline __m64 __attribute__((__always_inline__))
557 _mm_slli_pi32 (__m64 __m, int __count)
559 return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
562 static __inline __m64 __attribute__((__always_inline__))
563 _m_pslldi (__m64 __m, int __count)
565 return _mm_slli_pi32 (__m, __count);
568 /* Shift the 64-bit value in M left by COUNT. */
569 static __inline __m64 __attribute__((__always_inline__))
570 _mm_sll_si64 (__m64 __m, __m64 __count)
572 return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
575 static __inline __m64 __attribute__((__always_inline__))
576 _m_psllq (__m64 __m, __m64 __count)
578 return _mm_sll_si64 (__m, __count);
581 static __inline __m64 __attribute__((__always_inline__))
582 _mm_slli_si64 (__m64 __m, int __count)
584 return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
587 static __inline __m64 __attribute__((__always_inline__))
588 _m_psllqi (__m64 __m, int __count)
590 return _mm_slli_si64 (__m, __count);
593 /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
594 static __inline __m64 __attribute__((__always_inline__))
595 _mm_sra_pi16 (__m64 __m, __m64 __count)
597 return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);
600 static __inline __m64 __attribute__((__always_inline__))
601 _m_psraw (__m64 __m, __m64 __count)
603 return _mm_sra_pi16 (__m, __count);
606 static __inline __m64 __attribute__((__always_inline__))
607 _mm_srai_pi16 (__m64 __m, int __count)
609 return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
612 static __inline __m64 __attribute__((__always_inline__))
613 _m_psrawi (__m64 __m, int __count)
615 return _mm_srai_pi16 (__m, __count);
618 /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
619 static __inline __m64 __attribute__((__always_inline__))
620 _mm_sra_pi32 (__m64 __m, __m64 __count)
622 return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);
625 static __inline __m64 __attribute__((__always_inline__))
626 _m_psrad (__m64 __m, __m64 __count)
628 return _mm_sra_pi32 (__m, __count);
631 static __inline __m64 __attribute__((__always_inline__))
632 _mm_srai_pi32 (__m64 __m, int __count)
634 return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
637 static __inline __m64 __attribute__((__always_inline__))
638 _m_psradi (__m64 __m, int __count)
640 return _mm_srai_pi32 (__m, __count);
643 /* Shift four 16-bit values in M right by COUNT; shift in zeros. */
644 static __inline __m64 __attribute__((__always_inline__))
645 _mm_srl_pi16 (__m64 __m, __m64 __count)
647 return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);
650 static __inline __m64 __attribute__((__always_inline__))
651 _m_psrlw (__m64 __m, __m64 __count)
653 return _mm_srl_pi16 (__m, __count);
656 static __inline __m64 __attribute__((__always_inline__))
657 _mm_srli_pi16 (__m64 __m, int __count)
659 return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
662 static __inline __m64 __attribute__((__always_inline__))
663 _m_psrlwi (__m64 __m, int __count)
665 return _mm_srli_pi16 (__m, __count);
668 /* Shift two 32-bit values in M right by COUNT; shift in zeros. */
669 static __inline __m64 __attribute__((__always_inline__))
670 _mm_srl_pi32 (__m64 __m, __m64 __count)
672 return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);
675 static __inline __m64 __attribute__((__always_inline__))
676 _m_psrld (__m64 __m, __m64 __count)
678 return _mm_srl_pi32 (__m, __count);
681 static __inline __m64 __attribute__((__always_inline__))
682 _mm_srli_pi32 (__m64 __m, int __count)
684 return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
687 static __inline __m64 __attribute__((__always_inline__))
688 _m_psrldi (__m64 __m, int __count)
690 return _mm_srli_pi32 (__m, __count);
693 /* Shift the 64-bit value in M left by COUNT; shift in zeros. */
694 static __inline __m64 __attribute__((__always_inline__))
695 _mm_srl_si64 (__m64 __m, __m64 __count)
697 return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
700 static __inline __m64 __attribute__((__always_inline__))
701 _m_psrlq (__m64 __m, __m64 __count)
703 return _mm_srl_si64 (__m, __count);
706 static __inline __m64 __attribute__((__always_inline__))
707 _mm_srli_si64 (__m64 __m, int __count)
709 return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
712 static __inline __m64 __attribute__((__always_inline__))
713 _m_psrlqi (__m64 __m, int __count)
715 return _mm_srli_si64 (__m, __count);
718 /* Bit-wise AND the 64-bit values in M1 and M2. */
719 static __inline __m64 __attribute__((__always_inline__))
720 _mm_and_si64 (__m64 __m1, __m64 __m2)
722 return __builtin_ia32_pand (__m1, __m2);
725 static __inline __m64 __attribute__((__always_inline__))
726 _m_pand (__m64 __m1, __m64 __m2)
728 return _mm_and_si64 (__m1, __m2);
731 /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
732 64-bit value in M2. */
733 static __inline __m64 __attribute__((__always_inline__))
734 _mm_andnot_si64 (__m64 __m1, __m64 __m2)
736 return __builtin_ia32_pandn (__m1, __m2);
739 static __inline __m64 __attribute__((__always_inline__))
740 _m_pandn (__m64 __m1, __m64 __m2)
742 return _mm_andnot_si64 (__m1, __m2);
745 /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
746 static __inline __m64 __attribute__((__always_inline__))
747 _mm_or_si64 (__m64 __m1, __m64 __m2)
749 return __builtin_ia32_por (__m1, __m2);
752 static __inline __m64 __attribute__((__always_inline__))
753 _m_por (__m64 __m1, __m64 __m2)
755 return _mm_or_si64 (__m1, __m2);
758 /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
759 static __inline __m64 __attribute__((__always_inline__))
760 _mm_xor_si64 (__m64 __m1, __m64 __m2)
762 return __builtin_ia32_pxor (__m1, __m2);
765 static __inline __m64 __attribute__((__always_inline__))
766 _m_pxor (__m64 __m1, __m64 __m2)
768 return _mm_xor_si64 (__m1, __m2);
771 /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
772 test is true and zero if false. */
773 static __inline __m64 __attribute__((__always_inline__))
774 _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
776 return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
779 static __inline __m64 __attribute__((__always_inline__))
780 _m_pcmpeqb (__m64 __m1, __m64 __m2)
782 return _mm_cmpeq_pi8 (__m1, __m2);
785 static __inline __m64 __attribute__((__always_inline__))
786 _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
788 return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
791 static __inline __m64 __attribute__((__always_inline__))
792 _m_pcmpgtb (__m64 __m1, __m64 __m2)
794 return _mm_cmpgt_pi8 (__m1, __m2);
797 /* Compare four 16-bit values. The result of the comparison is 0xFFFF if
798 the test is true and zero if false. */
799 static __inline __m64 __attribute__((__always_inline__))
800 _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
802 return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
805 static __inline __m64 __attribute__((__always_inline__))
806 _m_pcmpeqw (__m64 __m1, __m64 __m2)
808 return _mm_cmpeq_pi16 (__m1, __m2);
811 static __inline __m64 __attribute__((__always_inline__))
812 _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
814 return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
817 static __inline __m64 __attribute__((__always_inline__))
818 _m_pcmpgtw (__m64 __m1, __m64 __m2)
820 return _mm_cmpgt_pi16 (__m1, __m2);
823 /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
824 the test is true and zero if false. */
825 static __inline __m64 __attribute__((__always_inline__))
826 _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
828 return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
831 static __inline __m64 __attribute__((__always_inline__))
832 _m_pcmpeqd (__m64 __m1, __m64 __m2)
834 return _mm_cmpeq_pi32 (__m1, __m2);
837 static __inline __m64 __attribute__((__always_inline__))
838 _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
840 return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
843 static __inline __m64 __attribute__((__always_inline__))
844 _m_pcmpgtd (__m64 __m1, __m64 __m2)
846 return _mm_cmpgt_pi32 (__m1, __m2);
849 /* Creates a 64-bit zero. */
850 static __inline __m64 __attribute__((__always_inline__))
851 _mm_setzero_si64 (void)
853 return (__m64)0LL;
856 /* Creates a vector of two 32-bit values; I0 is least significant. */
857 static __inline __m64 __attribute__((__always_inline__))
858 _mm_set_pi32 (int __i1, int __i0)
860 return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
863 /* Creates a vector of four 16-bit values; W0 is least significant. */
864 static __inline __m64 __attribute__((__always_inline__))
865 _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
867 return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
870 /* Creates a vector of eight 8-bit values; B0 is least significant. */
871 static __inline __m64 __attribute__((__always_inline__))
872 _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
873 char __b3, char __b2, char __b1, char __b0)
875 return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3,
876 __b4, __b5, __b6, __b7);
879 /* Similar, but with the arguments in reverse order. */
880 static __inline __m64 __attribute__((__always_inline__))
881 _mm_setr_pi32 (int __i0, int __i1)
883 return _mm_set_pi32 (__i1, __i0);
886 static __inline __m64 __attribute__((__always_inline__))
887 _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
889 return _mm_set_pi16 (__w3, __w2, __w1, __w0);
892 static __inline __m64 __attribute__((__always_inline__))
893 _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
894 char __b4, char __b5, char __b6, char __b7)
896 return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
899 /* Creates a vector of two 32-bit values, both elements containing I. */
900 static __inline __m64 __attribute__((__always_inline__))
901 _mm_set1_pi32 (int __i)
903 return _mm_set_pi32 (__i, __i);
906 /* Creates a vector of four 16-bit values, all elements containing W. */
907 static __inline __m64 __attribute__((__always_inline__))
908 _mm_set1_pi16 (short __w)
910 return _mm_set_pi16 (__w, __w, __w, __w);
913 /* Creates a vector of eight 8-bit values, all elements containing B. */
914 static __inline __m64 __attribute__((__always_inline__))
915 _mm_set1_pi8 (char __b)
917 return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
920 #endif /* __MMX__ */
921 #endif /* _MMINTRIN_H_INCLUDED */