/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVXINTRIN_H
#define __AVXINTRIN_H
typedef double __v4df __attribute__ ((__vector_size__ (32)));
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));
/* We need an explicitly signed variant for char. Note that this shouldn't
 * appear in the interface though. */
typedef signed char __v32qs __attribute__((__vector_size__(32)));
typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_add_pd(__m256d __a, __m256d __b)
{
  return __a+__b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_add_ps(__m256 __a, __m256 __b)
{
  return __a+__b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_sub_pd(__m256d __a, __m256d __b)
{
  return __a-__b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_sub_ps(__m256 __a, __m256 __b)
{
  return __a-__b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_addsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_addsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_div_pd(__m256d __a, __m256d __b)
{
  return __a / __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_div_ps(__m256 __a, __m256 __b)
{
  return __a / __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_max_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_max_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_min_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_min_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_mul_pd(__m256d __a, __m256d __b)
{
  return __a * __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_mul_ps(__m256 __a, __m256 __b)
{
  return __a * __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_sqrt_pd(__m256d __a)
{
  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_sqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_rsqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_rcp_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}
#define _mm256_round_pd(V, M) __extension__ ({ \
    (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })

#define _mm256_round_ps(V, M) __extension__ ({ \
  (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })

#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
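
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included by the caller): _mm256_floor_pd is shorthand for _mm256_round_pd
 * with the _MM_FROUND_FLOOR mode, so both calls below yield the same values.
 *
 *   __m256d v = _mm256_set_pd(2.7, -2.7, 1.5, -1.5);
 *   __m256d f = _mm256_floor_pd(v);                    // { -2.0, 1.0, -3.0, 2.0 }, element 0 first
 *   __m256d g = _mm256_round_pd(v, _MM_FROUND_FLOOR);  // same values as f
 */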
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_and_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a & (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_and_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a & (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
  return (__m256d)(~(__v4di)__a & (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
  return (__m256)(~(__v8si)__a & (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_or_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a | (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_or_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a | (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_xor_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a ^ (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_xor_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a ^ (__v8si)__b);
}
/* Horizontal arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_hadd_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_hadd_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_hsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_hsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}
/* Vector permutations */
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_permutevar_pd(__m256d __a, __m256i __c)
{
  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_permutevar_ps(__m256 __a, __m256i __c)
{
  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}
#define _mm_permute_pd(A, C) __extension__ ({ \
  (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
                                   (__v2df)_mm_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1); })

#define _mm256_permute_pd(A, C) __extension__ ({ \
  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
                                   (__v4df)_mm256_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1, \
                                   2 + (((C) & 0x4) >> 2), \
                                   2 + (((C) & 0x8) >> 3)); })

#define _mm_permute_ps(A, C) __extension__ ({ \
  (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
                                  (__v4sf)_mm_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })

#define _mm256_permute_ps(A, C) __extension__ ({ \
  (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
                                  (__v8sf)_mm256_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
                                  4 + (((C) & 0x03) >> 0), \
                                  4 + (((C) & 0x0c) >> 2), \
                                  4 + (((C) & 0x30) >> 4), \
                                  4 + (((C) & 0xc0) >> 6)); })

#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
                                           (__v4df)(__m256d)(V2), (M)); })

#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
                                          (__v8sf)(__m256)(V2), (M)); })

#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
  (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
                                           (__v8si)(__m256i)(V2), (M)); })
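
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included and that a and b are caller-provided __m256d values): in
 * _mm256_permute2f128_pd(V1, V2, M), bits 0..1 of M pick the 128-bit half
 * that becomes the result's low lane and bits 4..5 pick the high lane
 * (0/1 select a half of V1, 2/3 a half of V2; setting bit 3 or bit 7 zeroes
 * the corresponding lane instead).
 *
 *   __m256d lo_halves = _mm256_permute2f128_pd(a, b, 0x20);  // { low(a), low(b) }
 *   __m256d swapped   = _mm256_permute2f128_pd(a, a, 0x01);  // halves of a swapped
 */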
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
                                   (__v4df)(__m256d)(V2), \
                                   (((M) & 0x01) ? 4 : 0), \
                                   (((M) & 0x02) ? 5 : 1), \
                                   (((M) & 0x04) ? 6 : 2), \
                                   (((M) & 0x08) ? 7 : 3)); })

#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
                                  (__v8sf)(__m256)(V2), \
                                  (((M) & 0x01) ?  8 : 0), \
                                  (((M) & 0x02) ?  9 : 1), \
                                  (((M) & 0x04) ? 10 : 2), \
                                  (((M) & 0x08) ? 11 : 3), \
                                  (((M) & 0x10) ? 12 : 4), \
                                  (((M) & 0x20) ? 13 : 5), \
                                  (((M) & 0x40) ? 14 : 6), \
                                  (((M) & 0x80) ? 15 : 7)); })
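
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included and a, b are caller-provided __m256d values): each bit of the
 * immediate selects one result element, 0 keeping it from V1 and 1 taking it
 * from V2.
 *
 *   // Elements 0 and 2 come from a, elements 1 and 3 from b (mask 0b1010).
 *   __m256d r = _mm256_blend_pd(a, b, 0x0a);
 */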
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
{
  return (__m256d)__builtin_ia32_blendvpd256(
    (__v4df)__a, (__v4df)__b, (__v4df)__c);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
{
  return (__m256)__builtin_ia32_blendvps256(
    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
}
/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
                                 (__v8sf)(__m256)(V2), (M)); })
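
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included and a, b are caller-provided __m256 values): the immediate follows
 * the DPPS convention and is applied separately within each 128-bit lane; the
 * high nibble selects which elements enter the products and the low nibble
 * selects which result elements receive the sum.
 *
 *   __m256 d = _mm256_dp_ps(a, b, 0xff);  // per-lane 4-element dot product,
 *                                         // broadcast to every element of the lane
 */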
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
  (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
                                  (__v8sf)(__m256)(b), \
                                  (mask) & 0x3, \
                                  ((mask) & 0xc) >> 2, \
                                  (((mask) & 0x30) >> 4) + 8, \
                                  (((mask) & 0xc0) >> 6) + 8, \
                                  ((mask) & 0x3) + 4, \
                                  (((mask) & 0xc) >> 2) + 4, \
                                  (((mask) & 0x30) >> 4) + 12, \
                                  (((mask) & 0xc0) >> 6) + 12); })

#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
                                   (__v4df)(__m256d)(b), \
                                   (mask) & 0x1, \
                                   (((mask) & 0x2) >> 1) + 4, \
                                   (((mask) & 0x4) >> 2) + 2, \
                                   (((mask) & 0x8) >> 3) + 6); })
#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling) */
#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling) */
#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling) */
#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling) */
#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling) */
#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling) */
#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling) */
#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling) */
#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling) */
#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unordered, signaling) */
#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling) */
#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling) */
#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling) */
#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling) */
#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling) */
#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling) */
#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling) */
#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling) */
#define _CMP_UNORD_S  0x13 /* Unordered (signaling) */
#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling) */
#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling) */
#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unordered, non-signaling) */
#define _CMP_ORD_S    0x17 /* Ordered (signaling) */
#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling) */
#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */
#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling) */
#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling) */
#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling) */
#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US  0x1f /* True (unordered, signaling) */
#define _mm_cmp_pd(a, b, c) __extension__ ({ \
  (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
                                (__v2df)(__m128d)(b), (c)); })

#define _mm_cmp_ps(a, b, c) __extension__ ({ \
  (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
                               (__v4sf)(__m128)(b), (c)); })

#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
  (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
                                   (__v4df)(__m256d)(b), (c)); })

#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
  (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
                                  (__v8sf)(__m256)(b), (c)); })

#define _mm_cmp_sd(a, b, c) __extension__ ({ \
  (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
                                (__v2df)(__m128d)(b), (c)); })

#define _mm_cmp_ss(a, b, c) __extension__ ({ \
  (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
                               (__v4sf)(__m128)(b), (c)); })
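
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included and a, b are caller-provided __m256d values): the comparisons
 * return all-ones or all-zero elements, so the result is typically fed into a
 * blend or a movemask.
 *
 *   __m256d mask = _mm256_cmp_pd(a, b, _CMP_LT_OQ);   // a < b, ordered, quiet
 *   int bits     = _mm256_movemask_pd(mask);          // one bit per element
 *   __m256d m    = _mm256_blendv_pd(b, a, mask);      // per-element min(a, b)
 */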
static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi32(__m256i __a, const int __imm)
{
  __v8si __b = (__v8si)__a;
  return __b[__imm & 7];
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi16(__m256i __a, const int __imm)
{
  __v16hi __b = (__v16hi)__a;
  return __b[__imm & 15];
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi8(__m256i __a, const int __imm)
{
  __v32qi __b = (__v32qi)__a;
  return __b[__imm & 31];
}

#ifdef __x86_64__
static __inline long long __DEFAULT_FN_ATTRS
_mm256_extract_epi64(__m256i __a, const int __imm)
{
  __v4di __b = (__v4di)__a;
  return __b[__imm & 3];
}
#endif
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
  __v8si __c = (__v8si)__a;
  __c[__imm & 7] = __b;
  return (__m256i)__c;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
  __v16hi __c = (__v16hi)__a;
  __c[__imm & 15] = __b;
  return (__m256i)__c;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
  __v32qi __c = (__v32qi)__a;
  __c[__imm & 31] = __b;
  return (__m256i)__c;
}

#ifdef __x86_64__
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
  __v4di __c = (__v4di)__a;
  __c[__imm & 3] = __b;
  return (__m256i)__c;
}
#endif
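
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included): the insert helpers operate on a copy, so the source vector is
 * not modified in place.
 *
 *   __m256i v  = _mm256_set1_epi32(7);
 *   __m256i v2 = _mm256_insert_epi32(v, 42, 3);   // element 3 of v2 is 42
 *   int x      = _mm256_extract_epi32(v2, 3);     // x == 42, v is unchanged
 */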
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtepi32_pd(__m128i __a)
{
  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_cvtepi32_ps(__m256i __a)
{
  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_cvtpd_ps(__m256d __a)
{
  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvtps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtps_pd(__m128 __a)
{
  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvttpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvtpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvttps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}
/* Vector replicate */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_movehdup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_moveldup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_movedup_pd(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}
/* Unpack and Interleave */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}
static __inline int __DEFAULT_FN_ATTRS
_mm_testz_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testz_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
}
/* Vector extract sign mask */
static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_pd(__m256d __a)
{
  return __builtin_ia32_movmskpd256((__v4df)__a);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_ps(__m256 __a)
{
  return __builtin_ia32_movmskps256((__v8sf)__a);
}
static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}
/* Vector load with broadcast */
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_broadcast_ss(float const *__a)
{
  float __f = *__a;
  return (__m128)(__v4sf){ __f, __f, __f, __f };
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
  double __d = *__a;
  return (__m256d)(__v4df){ __d, __d, __d, __d };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
  float __f = *__a;
  return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_pd(__m128d const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ps(__m128 const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
}
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_load_pd(double const *__p)
{
  return *(__m256d *)__p;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_load_ps(float const *__p)
{
  return *(__m256 *)__p;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu_pd(double const *__p)
{
  struct __loadu_pd {
    __m256d __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_pd*)__p)->__v;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu_ps(float const *__p)
{
  struct __loadu_ps {
    __m256 __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_load_si256(__m256i const *__p)
{
  return *__p;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu_si256(__m256i const *__p)
{
  struct __loadu_si256 {
    __m256i __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_si256*)__p)->__v;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_lddqu_si256(__m256i const *__p)
{
  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}
static __inline void __DEFAULT_FN_ATTRS
_mm256_store_pd(double *__p, __m256d __a)
{
  *(__m256d *)__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_store_ps(float *__p, __m256 __a)
{
  *(__m256 *)__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
  __builtin_ia32_storeupd256(__p, (__v4df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
  __builtin_ia32_storeups256(__p, (__v8sf)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_store_si256(__m256i *__p, __m256i __a)
{
  *__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
}
/* Conditional load ops */
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_maskload_pd(double const *__p, __m128i __m)
{
  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_maskload_pd(double const *__p, __m256i __m)
{
  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
                                               (__v4di)__m);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm_maskload_ps(float const *__p, __m128i __m)
{
  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_maskload_ps(float const *__p, __m256i __m)
{
  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
}
/* Conditional store ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
{
  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
{
  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
{
  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
{
  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
}
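
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included; `ptr` and `out` are hypothetical names for caller-provided double
 * arrays of at least four elements): only elements whose mask element has its
 * sign bit set are read or written; unselected memory locations are not
 * accessed, and unselected load elements come back as zero.
 *
 *   __m256i m = _mm256_set_epi64x(0, -1, 0, -1);   // select elements 0 and 2
 *   __m256d v = _mm256_maskload_pd(ptr, m);        // elements 1 and 3 are 0.0
 *   _mm256_maskstore_pd(out, m, v);                // writes elements 0 and 2 only
 */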
/* Cacheability support ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_pd(double *__a, __m256d __b)
{
  __builtin_ia32_movntpd256(__a, (__v4df)__b);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_ps(float *__p, __m256 __a)
{
  __builtin_ia32_movntps256(__p, (__v8sf)__a);
}
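
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included and `buf` is a hypothetical 32-byte-aligned float array): these
 * streaming stores use non-temporal hints and require 32-byte-aligned
 * destinations; a store fence is the usual way to order them before the data
 * is observed by another agent.
 *
 *   _mm256_stream_ps(buf, _mm256_set1_ps(1.0f));
 *   _mm_sfence();
 */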
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_undefined_pd()
{
  return (__m256d)__builtin_ia32_undef256();
}

static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_undefined_ps()
{
  return (__m256)__builtin_ia32_undef256();
}

static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_undefined_si256()
{
  return (__m256i)__builtin_ia32_undef256();
}
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __d, __c, __b, __a };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
    __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
  };
}
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __d, __c, __b, __a };
}
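
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included): the _mm256_set_* constructors above take their arguments from
 * the highest element down to element 0, while the _mm256_setr_* variants
 * below take them in memory order, so these two calls build the same vector.
 *
 *   __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
 *   __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
 */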
/* Create vectors with elements in reverse order */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __a, __b, __c, __d };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
                  short __w11, short __w10, short __w09, short __w08,
                  short __w07, short __w06, short __w05, short __w04,
                  short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
    __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
}
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
    __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
    __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
    __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00
  };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __a, __b, __c, __d };
}
/* Create vectors with repeated elements */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set1_pd(double __w)
{
  return (__m256d){ __w, __w, __w, __w };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set1_ps(float __w)
{
  return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi32(int __i)
{
  return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi16(short __w)
{
  return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi8(char __b)
{
  return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi64x(long long __q)
{
  return (__m256i)(__v4di){ __q, __q, __q, __q };
}
/* Create zeroed vectors */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setzero_pd(void)
{
  return (__m256d){ 0, 0, 0, 0 };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setzero_ps(void)
{
  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setzero_si256(void)
{
  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
}
/* Cast between vector types */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castpd_ps(__m256d __a)
{
  return (__m256)__a;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castpd_si256(__m256d __a)
{
  return (__m256i)__a;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castps_pd(__m256 __a)
{
  return (__m256d)__a;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castps_si256(__m256 __a)
{
  return (__m256i)__a;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castsi256_ps(__m256i __a)
{
  return (__m256)__a;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castsi256_pd(__m256i __a)
{
  return (__m256d)__a;
}

static __inline __m128d __DEFAULT_FN_ATTRS
_mm256_castpd256_pd128(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_castps256_ps128(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_castsi256_si128(__m256i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}
/*
   Vector insert.
   We use macros rather than inlines because we only want to accept
   invocations where the immediate M is a constant expression.
*/
#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_shufflevector( \
    (__v8sf)(__m256)(V1), \
    (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
    (((M) & 1) ? 0 : 8), \
    (((M) & 1) ? 1 : 9), \
    (((M) & 1) ? 2 : 10), \
    (((M) & 1) ? 3 : 11), \
    (((M) & 1) ? 8 : 4), \
    (((M) & 1) ? 9 : 5), \
    (((M) & 1) ? 10 : 6), \
    (((M) & 1) ? 11 : 7) );})

#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_shufflevector( \
    (__v4df)(__m256d)(V1), \
    (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
    (((M) & 1) ? 0 : 4), \
    (((M) & 1) ? 1 : 5), \
    (((M) & 1) ? 4 : 2), \
    (((M) & 1) ? 5 : 3) );})

#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
  (__m256i)__builtin_shufflevector( \
    (__v4di)(__m256i)(V1), \
    (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
    (((M) & 1) ? 0 : 4), \
    (((M) & 1) ? 1 : 5), \
    (((M) & 1) ? 4 : 2), \
    (((M) & 1) ? 5 : 3) );})
/*
   Vector extract.
   We use macros rather than inlines because we only want to accept
   invocations where the immediate M is a constant expression.
*/
#define _mm256_extractf128_ps(V, M) __extension__ ({ \
  (__m128)__builtin_shufflevector( \
    (__v8sf)(__m256)(V), \
    (__v8sf)(_mm256_setzero_ps()), \
    (((M) & 1) ? 4 : 0), \
    (((M) & 1) ? 5 : 1), \
    (((M) & 1) ? 6 : 2), \
    (((M) & 1) ? 7 : 3) );})

#define _mm256_extractf128_pd(V, M) __extension__ ({ \
  (__m128d)__builtin_shufflevector( \
    (__v4df)(__m256d)(V), \
    (__v4df)(_mm256_setzero_pd()), \
    (((M) & 1) ? 2 : 0), \
    (((M) & 1) ? 3 : 1) );})

#define _mm256_extractf128_si256(V, M) __extension__ ({ \
  (__m128i)__builtin_shufflevector( \
    (__v4di)(__m256i)(V), \
    (__v4di)(_mm256_setzero_si256()), \
    (((M) & 1) ? 2 : 0), \
    (((M) & 1) ? 3 : 1) );})
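
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included and `v` is a caller-provided __m256): because the immediate must
 * be a constant expression these are macros, and only the low bit of M is
 * used, selecting which 128-bit half is replaced or extracted.
 *
 *   __m128 hi = _mm256_extractf128_ps(v, 1);     // high half of v
 *   __m256 w  = _mm256_insertf128_ps(v, hi, 0);  // copy it into the low half
 */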
/* SIMD load ops (unaligned) */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));

  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((__packed__, __may_alias__));

  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((__packed__, __may_alias__));

  __m256i __v256 = _mm256_castsi128_si256(
    ((struct __loadu_si128*)__addr_lo)->__v);
  return _mm256_insertf128_si256(__v256,
    ((struct __loadu_si128*)__addr_hi)->__v, 1);
}
/* SIMD store ops (unaligned) */
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
  __m128 __v128;

  __v128 = _mm256_castps256_ps128(__a);
  __builtin_ia32_storeups(__addr_lo, __v128);
  __v128 = _mm256_extractf128_ps(__a, 1);
  __builtin_ia32_storeups(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
  __m128d __v128;

  __v128 = _mm256_castpd256_pd128(__a);
  __builtin_ia32_storeupd(__addr_lo, __v128);
  __v128 = _mm256_extractf128_pd(__a, 1);
  __builtin_ia32_storeupd(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
  __m128i __v128;

  __v128 = _mm256_castsi256_si128(__a);
  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
  __v128 = _mm256_extractf128_si256(__a, 1);
  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
}
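
/* Usage sketch (illustrative editorial comment; assumes <immintrin.h> is
 * included; `p` and `q` are hypothetical names for caller-provided, possibly
 * unaligned arrays of at least four floats each): the half-register helpers
 * above take the high-address half first, mirroring the _mm256_set_m128
 * constructors below.
 *
 *   __m256 v = _mm256_loadu2_m128(q, p);   // low half from p, high half from q
 *   _mm256_storeu2_m128(q, p, v);          // writes the halves back
 */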
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_m128 (__m128 __hi, __m128 __lo) {
  return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_m128d (__m128d __hi, __m128d __lo) {
  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_m128i (__m128i __hi, __m128i __lo) {
  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
  return _mm256_set_m128(__hi, __lo);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}
#undef __DEFAULT_FN_ATTRS

#endif /* __AVXINTRIN_H */