/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <assert.h>
#include <string.h>  // memcmp/strcmp, used below
#include <string>
#include <typeinfo>  // typeid, used below

#include "config/aom_dsp_rtcd.h"

#include "test/acm_random.h"
#include "aom_dsp/aom_simd.h"
#undef SIMD_INLINE
#define SIMD_INLINE static  // Don't enforce inlining
#include "aom_dsp/simd/v256_intrinsics_c.h"
// Machine tuned code goes into this file. This file is included from
// simd_cmp_sse2.cc, simd_cmp_ssse3.cc, etc., which define the macros
// ARCH (=neon, sse2, ssse3, etc), SIMD_NAMESPACE and ARCH_POSTFIX().
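// For illustration only (the exact contents of those files are an
// assumption here), an including translation unit looks roughly like:
//   #define ARCH SSE2
//   #define ARCH_POSTFIX(name) name##_sse2
//   #define SIMD_NAMESPACE simd_test_sse2
//   #include "test/simd_cmp_impl.h"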
#ifdef _MSC_VER
// Disable "value of intrinsic immediate argument 'value' is out of range
// 'lowerbound - upperbound'" warning. Visual Studio emits this warning even
// though the parameters are conditionally checked in e.g., v256_shr_n_byte.
// Adding a mask doesn't always appear to be sufficient.
#pragma warning(disable : 4556)
#endif
using libaom_test::ACMRandom;

namespace SIMD_NAMESPACE {
// Wrap templates around intrinsics using immediate values
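// The _n_ intrinsics require their shift/offset argument to be a
// compile-time immediate, so these wrappers bake the constant in as a
// template parameter; e.g. imm_v64_shl_n_8<3> is a plain one-argument
// function that shifts each byte left by the constant 3.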
template <int shift>
v64 imm_v64_shl_n_byte(v64 a) {
  return v64_shl_n_byte(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_byte(v64 a) {
  return v64_shr_n_byte(a, shift);
}
template <int shift>
v64 imm_v64_shl_n_8(v64 a) {
  return v64_shl_n_8(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_u8(v64 a) {
  return v64_shr_n_u8(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_s8(v64 a) {
  return v64_shr_n_s8(a, shift);
}
template <int shift>
v64 imm_v64_shl_n_16(v64 a) {
  return v64_shl_n_16(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_u16(v64 a) {
  return v64_shr_n_u16(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_s16(v64 a) {
  return v64_shr_n_s16(a, shift);
}
template <int shift>
v64 imm_v64_shl_n_32(v64 a) {
  return v64_shl_n_32(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_u32(v64 a) {
  return v64_shr_n_u32(a, shift);
}
template <int shift>
v64 imm_v64_shr_n_s32(v64 a) {
  return v64_shr_n_s32(a, shift);
}
template <int shift>
v64 imm_v64_align(v64 a, v64 b) {
  return v64_align(a, b, shift);
}
// Wrap templates around corresponding C implementations of the above
template <int shift>
c_v64 c_imm_v64_shl_n_byte(c_v64 a) {
  return c_v64_shl_n_byte(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_byte(c_v64 a) {
  return c_v64_shr_n_byte(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shl_n_8(c_v64 a) {
  return c_v64_shl_n_8(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_u8(c_v64 a) {
  return c_v64_shr_n_u8(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_s8(c_v64 a) {
  return c_v64_shr_n_s8(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shl_n_16(c_v64 a) {
  return c_v64_shl_n_16(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_u16(c_v64 a) {
  return c_v64_shr_n_u16(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_s16(c_v64 a) {
  return c_v64_shr_n_s16(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shl_n_32(c_v64 a) {
  return c_v64_shl_n_32(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_u32(c_v64 a) {
  return c_v64_shr_n_u32(a, shift);
}
template <int shift>
c_v64 c_imm_v64_shr_n_s32(c_v64 a) {
  return c_v64_shr_n_s32(a, shift);
}
template <int shift>
c_v64 c_imm_v64_align(c_v64 a, c_v64 b) {
  return c_v64_align(a, b, shift);
}
template <int shift>
v128 imm_v128_shl_n_byte(v128 a) {
  return v128_shl_n_byte(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_byte(v128 a) {
  return v128_shr_n_byte(a, shift);
}
template <int shift>
v128 imm_v128_shl_n_8(v128 a) {
  return v128_shl_n_8(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_u8(v128 a) {
  return v128_shr_n_u8(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_s8(v128 a) {
  return v128_shr_n_s8(a, shift);
}
template <int shift>
v128 imm_v128_shl_n_16(v128 a) {
  return v128_shl_n_16(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_u16(v128 a) {
  return v128_shr_n_u16(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_s16(v128 a) {
  return v128_shr_n_s16(a, shift);
}
template <int shift>
v128 imm_v128_shl_n_32(v128 a) {
  return v128_shl_n_32(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_u32(v128 a) {
  return v128_shr_n_u32(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_s32(v128 a) {
  return v128_shr_n_s32(a, shift);
}
template <int shift>
v128 imm_v128_shl_n_64(v128 a) {
  return v128_shl_n_64(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_u64(v128 a) {
  return v128_shr_n_u64(a, shift);
}
template <int shift>
v128 imm_v128_shr_n_s64(v128 a) {
  return v128_shr_n_s64(a, shift);
}
template <int shift>
v128 imm_v128_align(v128 a, v128 b) {
  return v128_align(a, b, shift);
}
template <int shift>
c_v128 c_imm_v128_shl_n_byte(c_v128 a) {
  return c_v128_shl_n_byte(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_byte(c_v128 a) {
  return c_v128_shr_n_byte(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shl_n_8(c_v128 a) {
  return c_v128_shl_n_8(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_u8(c_v128 a) {
  return c_v128_shr_n_u8(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_s8(c_v128 a) {
  return c_v128_shr_n_s8(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shl_n_16(c_v128 a) {
  return c_v128_shl_n_16(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_u16(c_v128 a) {
  return c_v128_shr_n_u16(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_s16(c_v128 a) {
  return c_v128_shr_n_s16(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shl_n_32(c_v128 a) {
  return c_v128_shl_n_32(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_u32(c_v128 a) {
  return c_v128_shr_n_u32(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_s32(c_v128 a) {
  return c_v128_shr_n_s32(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shl_n_64(c_v128 a) {
  return c_v128_shl_n_64(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_u64(c_v128 a) {
  return c_v128_shr_n_u64(a, shift);
}
template <int shift>
c_v128 c_imm_v128_shr_n_s64(c_v128 a) {
  return c_v128_shr_n_s64(a, shift);
}
template <int shift>
c_v128 c_imm_v128_align(c_v128 a, c_v128 b) {
  return c_v128_align(a, b, shift);
}
template <int shift>
v256 imm_v256_shl_n_word(v256 a) {
  return v256_shl_n_word(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_word(v256 a) {
  return v256_shr_n_word(a, shift);
}
template <int shift>
v256 imm_v256_shl_n_byte(v256 a) {
  return v256_shl_n_byte(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_byte(v256 a) {
  return v256_shr_n_byte(a, shift);
}
template <int shift>
v256 imm_v256_shl_n_8(v256 a) {
  return v256_shl_n_8(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_u8(v256 a) {
  return v256_shr_n_u8(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_s8(v256 a) {
  return v256_shr_n_s8(a, shift);
}
template <int shift>
v256 imm_v256_shl_n_16(v256 a) {
  return v256_shl_n_16(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_u16(v256 a) {
  return v256_shr_n_u16(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_s16(v256 a) {
  return v256_shr_n_s16(a, shift);
}
template <int shift>
v256 imm_v256_shl_n_32(v256 a) {
  return v256_shl_n_32(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_u32(v256 a) {
  return v256_shr_n_u32(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_s32(v256 a) {
  return v256_shr_n_s32(a, shift);
}
template <int shift>
v256 imm_v256_shl_n_64(v256 a) {
  return v256_shl_n_64(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_u64(v256 a) {
  return v256_shr_n_u64(a, shift);
}
template <int shift>
v256 imm_v256_shr_n_s64(v256 a) {
  return v256_shr_n_s64(a, shift);
}
template <int shift>
v256 imm_v256_align(v256 a, v256 b) {
  return v256_align(a, b, shift);
}
template <int shift>
c_v256 c_imm_v256_shl_n_word(c_v256 a) {
  return c_v256_shl_n_word(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_word(c_v256 a) {
  return c_v256_shr_n_word(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shl_n_byte(c_v256 a) {
  return c_v256_shl_n_byte(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_byte(c_v256 a) {
  return c_v256_shr_n_byte(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shl_n_8(c_v256 a) {
  return c_v256_shl_n_8(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_u8(c_v256 a) {
  return c_v256_shr_n_u8(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_s8(c_v256 a) {
  return c_v256_shr_n_s8(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shl_n_16(c_v256 a) {
  return c_v256_shl_n_16(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_u16(c_v256 a) {
  return c_v256_shr_n_u16(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_s16(c_v256 a) {
  return c_v256_shr_n_s16(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shl_n_32(c_v256 a) {
  return c_v256_shl_n_32(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_u32(c_v256 a) {
  return c_v256_shr_n_u32(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_s32(c_v256 a) {
  return c_v256_shr_n_s32(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shl_n_64(c_v256 a) {
  return c_v256_shl_n_64(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_u64(c_v256 a) {
  return c_v256_shr_n_u64(a, shift);
}
template <int shift>
c_v256 c_imm_v256_shr_n_s64(c_v256 a) {
  return c_v256_shr_n_s64(a, shift);
}
template <int shift>
c_v256 c_imm_v256_align(c_v256 a, c_v256 b) {
  return c_v256_align(a, b, shift);
}
// Wrappers around the SAD and SSD functions
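// The underlying intrinsics use an accumulator-style API (init /
// accumulate / sum); these wrappers collapse that into a single
// two-argument call so the functions fit the generic test machinery below.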
uint32_t v64_sad_u8(v64 a, v64 b) {
  return v64_sad_u8_sum(::v64_sad_u8(v64_sad_u8_init(), a, b));
}
uint32_t v64_ssd_u8(v64 a, v64 b) {
  return v64_ssd_u8_sum(::v64_ssd_u8(v64_ssd_u8_init(), a, b));
}

uint32_t c_v64_sad_u8(c_v64 a, c_v64 b) {
  return c_v64_sad_u8_sum(::c_v64_sad_u8(c_v64_sad_u8_init(), a, b));
}
uint32_t c_v64_ssd_u8(c_v64 a, c_v64 b) {
  return c_v64_ssd_u8_sum(::c_v64_ssd_u8(c_v64_ssd_u8_init(), a, b));
}

uint32_t v128_sad_u8(v128 a, v128 b) {
  return v128_sad_u8_sum(::v128_sad_u8(v128_sad_u8_init(), a, b));
}
uint32_t v128_ssd_u8(v128 a, v128 b) {
  return v128_ssd_u8_sum(::v128_ssd_u8(v128_ssd_u8_init(), a, b));
}
uint32_t c_v128_sad_u8(c_v128 a, c_v128 b) {
  return c_v128_sad_u8_sum(::c_v128_sad_u8(c_v128_sad_u8_init(), a, b));
}
uint32_t c_v128_ssd_u8(c_v128 a, c_v128 b) {
  return c_v128_ssd_u8_sum(::c_v128_ssd_u8(c_v128_ssd_u8_init(), a, b));
}
uint32_t v128_sad_u16(v128 a, v128 b) {
  return v128_sad_u16_sum(::v128_sad_u16(v128_sad_u16_init(), a, b));
}
uint64_t v128_ssd_s16(v128 a, v128 b) {
  return v128_ssd_s16_sum(::v128_ssd_s16(v128_ssd_s16_init(), a, b));
}
uint32_t c_v128_sad_u16(c_v128 a, c_v128 b) {
  return c_v128_sad_u16_sum(::c_v128_sad_u16(c_v128_sad_u16_init(), a, b));
}
uint64_t c_v128_ssd_s16(c_v128 a, c_v128 b) {
  return c_v128_ssd_s16_sum(::c_v128_ssd_s16(c_v128_ssd_s16_init(), a, b));
}
uint32_t v256_sad_u8(v256 a, v256 b) {
  return v256_sad_u8_sum(::v256_sad_u8(v256_sad_u8_init(), a, b));
}
uint32_t v256_ssd_u8(v256 a, v256 b) {
  return v256_ssd_u8_sum(::v256_ssd_u8(v256_ssd_u8_init(), a, b));
}
uint32_t c_v256_sad_u8(c_v256 a, c_v256 b) {
  return c_v256_sad_u8_sum(::c_v256_sad_u8(c_v256_sad_u8_init(), a, b));
}
uint32_t c_v256_ssd_u8(c_v256 a, c_v256 b) {
  return c_v256_ssd_u8_sum(::c_v256_ssd_u8(c_v256_ssd_u8_init(), a, b));
}
uint32_t v256_sad_u16(v256 a, v256 b) {
  return v256_sad_u16_sum(::v256_sad_u16(v256_sad_u16_init(), a, b));
}
uint64_t v256_ssd_s16(v256 a, v256 b) {
  return v256_ssd_s16_sum(::v256_ssd_s16(v256_ssd_s16_init(), a, b));
}
uint32_t c_v256_sad_u16(c_v256 a, c_v256 b) {
  return c_v256_sad_u16_sum(::c_v256_sad_u16(c_v256_sad_u16_init(), a, b));
}
uint64_t c_v256_ssd_s16(c_v256 a, c_v256 b) {
  return c_v256_ssd_s16_sum(::c_v256_ssd_s16(c_v256_ssd_s16_init(), a, b));
}
namespace {

typedef void (*fptr)();

typedef struct {
  const char *name;
  fptr ref;
  fptr simd;
} mapping;
#define MAP(name)                          \
  {                                        \
    #name, reinterpret_cast<fptr>(c_##name), \
        reinterpret_cast<fptr>(name)       \
  }
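// For example, MAP(v64_add_8) expands to the table entry
//   { "v64_add_8", reinterpret_cast<fptr>(c_v64_add_8),
//     reinterpret_cast<fptr>(v64_add_8) }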
const mapping m[] = { MAP(v64_sad_u8),
                      MAP(v64_ssd_u8),
                      MAP(v64_add_8),
                      MAP(v64_add_16),
                      MAP(v64_sadd_s8),
                      MAP(v64_sadd_u8),
                      MAP(v64_sadd_s16),
                      MAP(v64_add_32),
                      MAP(v64_sub_8),
                      MAP(v64_ssub_u8),
                      MAP(v64_ssub_s8),
                      MAP(v64_sub_16),
                      MAP(v64_ssub_s16),
                      MAP(v64_ssub_u16),
                      MAP(v64_sub_32),
                      MAP(v64_ziplo_8),
                      MAP(v64_ziphi_8),
                      MAP(v64_ziplo_16),
                      MAP(v64_ziphi_16),
                      MAP(v64_ziplo_32),
                      MAP(v64_ziphi_32),
                      MAP(v64_pack_s32_u16),
                      MAP(v64_pack_s32_s16),
                      MAP(v64_pack_s16_u8),
                      MAP(v64_pack_s16_s8),
                      MAP(v64_unziphi_8),
                      MAP(v64_unziplo_8),
                      MAP(v64_unziphi_16),
                      MAP(v64_unziplo_16),
                      MAP(v64_or),
                      MAP(v64_xor),
                      MAP(v64_and),
                      MAP(v64_andn),
                      MAP(v64_mullo_s16),
                      MAP(v64_mulhi_s16),
                      MAP(v64_mullo_s32),
                      MAP(v64_madd_s16),
                      MAP(v64_madd_us8),
                      MAP(v64_avg_u8),
                      MAP(v64_rdavg_u8),
                      MAP(v64_rdavg_u16),
                      MAP(v64_avg_u16),
                      MAP(v64_min_u8),
                      MAP(v64_max_u8),
                      MAP(v64_min_s8),
                      MAP(v64_max_s8),
                      MAP(v64_min_s16),
                      MAP(v64_max_s16),
                      MAP(v64_cmpgt_s8),
                      MAP(v64_cmplt_s8),
                      MAP(v64_cmpeq_8),
                      MAP(v64_cmpgt_s16),
                      MAP(v64_cmplt_s16),
                      MAP(v64_cmpeq_16),
                      MAP(v64_shuffle_8),
                      MAP(imm_v64_align<1>),
                      MAP(imm_v64_align<2>),
                      MAP(imm_v64_align<3>),
                      MAP(imm_v64_align<4>),
                      MAP(imm_v64_align<5>),
                      MAP(imm_v64_align<6>),
                      MAP(imm_v64_align<7>),
                      MAP(v64_abs_s8),
                      MAP(v64_abs_s16),
                      MAP(v64_unpacklo_u8_s16),
                      MAP(v64_unpackhi_u8_s16),
                      MAP(v64_unpacklo_s8_s16),
                      MAP(v64_unpackhi_s8_s16),
                      MAP(v64_unpacklo_u16_s32),
                      MAP(v64_unpacklo_s16_s32),
                      MAP(v64_unpackhi_u16_s32),
                      MAP(v64_unpackhi_s16_s32),
                      MAP(imm_v64_shr_n_byte<1>),
                      MAP(imm_v64_shr_n_byte<2>),
                      MAP(imm_v64_shr_n_byte<3>),
                      MAP(imm_v64_shr_n_byte<4>),
                      MAP(imm_v64_shr_n_byte<5>),
                      MAP(imm_v64_shr_n_byte<6>),
                      MAP(imm_v64_shr_n_byte<7>),
                      MAP(imm_v64_shl_n_byte<1>),
                      MAP(imm_v64_shl_n_byte<2>),
                      MAP(imm_v64_shl_n_byte<3>),
                      MAP(imm_v64_shl_n_byte<4>),
                      MAP(imm_v64_shl_n_byte<5>),
                      MAP(imm_v64_shl_n_byte<6>),
                      MAP(imm_v64_shl_n_byte<7>),
                      MAP(imm_v64_shl_n_8<1>),
                      MAP(imm_v64_shl_n_8<2>),
                      MAP(imm_v64_shl_n_8<3>),
                      MAP(imm_v64_shl_n_8<4>),
                      MAP(imm_v64_shl_n_8<5>),
                      MAP(imm_v64_shl_n_8<6>),
                      MAP(imm_v64_shl_n_8<7>),
                      MAP(imm_v64_shr_n_u8<1>),
                      MAP(imm_v64_shr_n_u8<2>),
                      MAP(imm_v64_shr_n_u8<3>),
                      MAP(imm_v64_shr_n_u8<4>),
                      MAP(imm_v64_shr_n_u8<5>),
                      MAP(imm_v64_shr_n_u8<6>),
                      MAP(imm_v64_shr_n_u8<7>),
                      MAP(imm_v64_shr_n_s8<1>),
                      MAP(imm_v64_shr_n_s8<2>),
                      MAP(imm_v64_shr_n_s8<3>),
                      MAP(imm_v64_shr_n_s8<4>),
                      MAP(imm_v64_shr_n_s8<5>),
                      MAP(imm_v64_shr_n_s8<6>),
                      MAP(imm_v64_shr_n_s8<7>),
                      MAP(imm_v64_shl_n_16<1>),
                      MAP(imm_v64_shl_n_16<2>),
                      MAP(imm_v64_shl_n_16<4>),
                      MAP(imm_v64_shl_n_16<6>),
                      MAP(imm_v64_shl_n_16<8>),
                      MAP(imm_v64_shl_n_16<10>),
                      MAP(imm_v64_shl_n_16<12>),
                      MAP(imm_v64_shl_n_16<14>),
                      MAP(imm_v64_shr_n_u16<1>),
                      MAP(imm_v64_shr_n_u16<2>),
                      MAP(imm_v64_shr_n_u16<4>),
                      MAP(imm_v64_shr_n_u16<6>),
                      MAP(imm_v64_shr_n_u16<8>),
                      MAP(imm_v64_shr_n_u16<10>),
                      MAP(imm_v64_shr_n_u16<12>),
                      MAP(imm_v64_shr_n_u16<14>),
                      MAP(imm_v64_shr_n_s16<1>),
                      MAP(imm_v64_shr_n_s16<2>),
                      MAP(imm_v64_shr_n_s16<4>),
                      MAP(imm_v64_shr_n_s16<6>),
                      MAP(imm_v64_shr_n_s16<8>),
                      MAP(imm_v64_shr_n_s16<10>),
                      MAP(imm_v64_shr_n_s16<12>),
                      MAP(imm_v64_shr_n_s16<14>),
                      MAP(imm_v64_shl_n_32<1>),
                      MAP(imm_v64_shl_n_32<4>),
                      MAP(imm_v64_shl_n_32<8>),
                      MAP(imm_v64_shl_n_32<12>),
                      MAP(imm_v64_shl_n_32<16>),
                      MAP(imm_v64_shl_n_32<20>),
                      MAP(imm_v64_shl_n_32<24>),
                      MAP(imm_v64_shl_n_32<28>),
                      MAP(imm_v64_shr_n_u32<1>),
                      MAP(imm_v64_shr_n_u32<4>),
                      MAP(imm_v64_shr_n_u32<8>),
                      MAP(imm_v64_shr_n_u32<12>),
                      MAP(imm_v64_shr_n_u32<16>),
                      MAP(imm_v64_shr_n_u32<20>),
                      MAP(imm_v64_shr_n_u32<24>),
                      MAP(imm_v64_shr_n_u32<28>),
                      MAP(imm_v64_shr_n_s32<1>),
                      MAP(imm_v64_shr_n_s32<4>),
                      MAP(imm_v64_shr_n_s32<8>),
                      MAP(imm_v64_shr_n_s32<12>),
                      MAP(imm_v64_shr_n_s32<16>),
                      MAP(imm_v64_shr_n_s32<20>),
                      MAP(imm_v64_shr_n_s32<24>),
                      MAP(imm_v64_shr_n_s32<28>),
                      MAP(v64_shl_8),
                      MAP(v64_shr_u8),
                      MAP(v64_shr_s8),
                      MAP(v64_shl_16),
                      MAP(v64_shr_u16),
                      MAP(v64_shr_s16),
                      MAP(v64_shl_32),
                      MAP(v64_shr_u32),
                      MAP(v64_shr_s32),
                      MAP(v64_hadd_u8),
                      MAP(v64_hadd_s16),
                      MAP(v64_dotp_s16),
                      MAP(v64_dotp_su8),
                      MAP(v64_u64),
                      MAP(v64_low_u32),
                      MAP(v64_high_u32),
                      MAP(v64_low_s32),
                      MAP(v64_high_s32),
                      MAP(v64_dup_8),
                      MAP(v64_dup_16),
                      MAP(v64_dup_32),
                      MAP(v64_from_32),
                      MAP(v64_zero),
                      MAP(v64_from_16),
                      MAP(v128_sad_u8),
                      MAP(v128_ssd_u8),
                      MAP(v128_sad_u16),
                      MAP(v128_ssd_s16),
                      MAP(v128_add_8),
                      MAP(v128_add_16),
                      MAP(v128_sadd_s8),
                      MAP(v128_sadd_u8),
                      MAP(v128_sadd_s16),
                      MAP(v128_add_32),
                      MAP(v128_add_64),
                      MAP(v128_sub_8),
                      MAP(v128_ssub_u8),
                      MAP(v128_ssub_s8),
                      MAP(v128_sub_16),
                      MAP(v128_ssub_s16),
                      MAP(v128_ssub_u16),
                      MAP(v128_sub_32),
                      MAP(v128_sub_64),
                      MAP(v128_ziplo_8),
                      MAP(v128_ziphi_8),
                      MAP(v128_ziplo_16),
                      MAP(v128_ziphi_16),
                      MAP(v128_ziplo_32),
                      MAP(v128_ziphi_32),
                      MAP(v128_ziplo_64),
                      MAP(v128_ziphi_64),
                      MAP(v128_unziphi_8),
                      MAP(v128_unziplo_8),
                      MAP(v128_unziphi_16),
                      MAP(v128_unziplo_16),
                      MAP(v128_unziphi_32),
                      MAP(v128_unziplo_32),
                      MAP(v128_pack_s32_u16),
                      MAP(v128_pack_s32_s16),
                      MAP(v128_pack_s16_u8),
                      MAP(v128_pack_s16_s8),
                      MAP(v128_or),
                      MAP(v128_xor),
                      MAP(v128_and),
                      MAP(v128_andn),
                      MAP(v128_mullo_s16),
                      MAP(v128_mulhi_s16),
                      MAP(v128_mullo_s32),
                      MAP(v128_madd_s16),
                      MAP(v128_madd_us8),
                      MAP(v128_avg_u8),
                      MAP(v128_rdavg_u8),
                      MAP(v128_rdavg_u16),
                      MAP(v128_avg_u16),
                      MAP(v128_min_u8),
                      MAP(v128_max_u8),
                      MAP(v128_min_s8),
                      MAP(v128_max_s8),
                      MAP(v128_min_s16),
                      MAP(v128_max_s16),
                      MAP(v128_min_s32),
                      MAP(v128_max_s32),
                      MAP(v128_cmpgt_s8),
                      MAP(v128_cmplt_s8),
                      MAP(v128_cmpeq_8),
                      MAP(v128_cmpgt_s16),
                      MAP(v128_cmpeq_16),
                      MAP(v128_cmplt_s16),
                      MAP(v128_cmpgt_s32),
                      MAP(v128_cmpeq_32),
                      MAP(v128_cmplt_s32),
                      MAP(v128_shuffle_8),
                      MAP(imm_v128_align<1>),
                      MAP(imm_v128_align<2>),
                      MAP(imm_v128_align<3>),
                      MAP(imm_v128_align<4>),
                      MAP(imm_v128_align<5>),
                      MAP(imm_v128_align<6>),
                      MAP(imm_v128_align<7>),
                      MAP(imm_v128_align<8>),
                      MAP(imm_v128_align<9>),
                      MAP(imm_v128_align<10>),
                      MAP(imm_v128_align<11>),
                      MAP(imm_v128_align<12>),
                      MAP(imm_v128_align<13>),
                      MAP(imm_v128_align<14>),
                      MAP(imm_v128_align<15>),
                      MAP(v128_abs_s8),
                      MAP(v128_abs_s16),
                      MAP(v128_padd_u8),
                      MAP(v128_padd_s16),
                      MAP(v128_unpacklo_u16_s32),
                      MAP(v128_unpacklo_s16_s32),
                      MAP(v128_unpackhi_u16_s32),
                      MAP(v128_unpackhi_s16_s32),
                      MAP(imm_v128_shr_n_byte<1>),
                      MAP(imm_v128_shr_n_byte<2>),
                      MAP(imm_v128_shr_n_byte<3>),
                      MAP(imm_v128_shr_n_byte<4>),
                      MAP(imm_v128_shr_n_byte<5>),
                      MAP(imm_v128_shr_n_byte<6>),
                      MAP(imm_v128_shr_n_byte<7>),
                      MAP(imm_v128_shr_n_byte<8>),
                      MAP(imm_v128_shr_n_byte<9>),
                      MAP(imm_v128_shr_n_byte<10>),
                      MAP(imm_v128_shr_n_byte<11>),
                      MAP(imm_v128_shr_n_byte<12>),
                      MAP(imm_v128_shr_n_byte<13>),
                      MAP(imm_v128_shr_n_byte<14>),
                      MAP(imm_v128_shr_n_byte<15>),
                      MAP(imm_v128_shl_n_byte<1>),
                      MAP(imm_v128_shl_n_byte<2>),
                      MAP(imm_v128_shl_n_byte<3>),
                      MAP(imm_v128_shl_n_byte<4>),
                      MAP(imm_v128_shl_n_byte<5>),
                      MAP(imm_v128_shl_n_byte<6>),
                      MAP(imm_v128_shl_n_byte<7>),
                      MAP(imm_v128_shl_n_byte<8>),
                      MAP(imm_v128_shl_n_byte<9>),
                      MAP(imm_v128_shl_n_byte<10>),
                      MAP(imm_v128_shl_n_byte<11>),
                      MAP(imm_v128_shl_n_byte<12>),
                      MAP(imm_v128_shl_n_byte<13>),
                      MAP(imm_v128_shl_n_byte<14>),
                      MAP(imm_v128_shl_n_byte<15>),
                      MAP(imm_v128_shl_n_8<1>),
                      MAP(imm_v128_shl_n_8<2>),
                      MAP(imm_v128_shl_n_8<3>),
                      MAP(imm_v128_shl_n_8<4>),
                      MAP(imm_v128_shl_n_8<5>),
                      MAP(imm_v128_shl_n_8<6>),
                      MAP(imm_v128_shl_n_8<7>),
                      MAP(imm_v128_shr_n_u8<1>),
                      MAP(imm_v128_shr_n_u8<2>),
                      MAP(imm_v128_shr_n_u8<3>),
                      MAP(imm_v128_shr_n_u8<4>),
                      MAP(imm_v128_shr_n_u8<5>),
                      MAP(imm_v128_shr_n_u8<6>),
                      MAP(imm_v128_shr_n_u8<7>),
                      MAP(imm_v128_shr_n_s8<1>),
                      MAP(imm_v128_shr_n_s8<2>),
                      MAP(imm_v128_shr_n_s8<3>),
                      MAP(imm_v128_shr_n_s8<4>),
                      MAP(imm_v128_shr_n_s8<5>),
                      MAP(imm_v128_shr_n_s8<6>),
                      MAP(imm_v128_shr_n_s8<7>),
                      MAP(imm_v128_shl_n_16<1>),
                      MAP(imm_v128_shl_n_16<2>),
                      MAP(imm_v128_shl_n_16<4>),
                      MAP(imm_v128_shl_n_16<6>),
                      MAP(imm_v128_shl_n_16<8>),
                      MAP(imm_v128_shl_n_16<10>),
                      MAP(imm_v128_shl_n_16<12>),
                      MAP(imm_v128_shl_n_16<14>),
                      MAP(imm_v128_shr_n_u16<1>),
                      MAP(imm_v128_shr_n_u16<2>),
                      MAP(imm_v128_shr_n_u16<4>),
                      MAP(imm_v128_shr_n_u16<6>),
                      MAP(imm_v128_shr_n_u16<8>),
                      MAP(imm_v128_shr_n_u16<10>),
                      MAP(imm_v128_shr_n_u16<12>),
                      MAP(imm_v128_shr_n_u16<14>),
                      MAP(imm_v128_shr_n_s16<1>),
                      MAP(imm_v128_shr_n_s16<2>),
                      MAP(imm_v128_shr_n_s16<4>),
                      MAP(imm_v128_shr_n_s16<6>),
                      MAP(imm_v128_shr_n_s16<8>),
                      MAP(imm_v128_shr_n_s16<10>),
                      MAP(imm_v128_shr_n_s16<12>),
                      MAP(imm_v128_shr_n_s16<14>),
                      MAP(imm_v128_shl_n_32<1>),
                      MAP(imm_v128_shl_n_32<4>),
                      MAP(imm_v128_shl_n_32<8>),
                      MAP(imm_v128_shl_n_32<12>),
                      MAP(imm_v128_shl_n_32<16>),
                      MAP(imm_v128_shl_n_32<20>),
                      MAP(imm_v128_shl_n_32<24>),
                      MAP(imm_v128_shl_n_32<28>),
                      MAP(imm_v128_shr_n_u32<1>),
                      MAP(imm_v128_shr_n_u32<4>),
                      MAP(imm_v128_shr_n_u32<8>),
                      MAP(imm_v128_shr_n_u32<12>),
                      MAP(imm_v128_shr_n_u32<16>),
                      MAP(imm_v128_shr_n_u32<20>),
                      MAP(imm_v128_shr_n_u32<24>),
                      MAP(imm_v128_shr_n_u32<28>),
                      MAP(imm_v128_shr_n_s32<1>),
                      MAP(imm_v128_shr_n_s32<4>),
                      MAP(imm_v128_shr_n_s32<8>),
                      MAP(imm_v128_shr_n_s32<12>),
                      MAP(imm_v128_shr_n_s32<16>),
                      MAP(imm_v128_shr_n_s32<20>),
                      MAP(imm_v128_shr_n_s32<24>),
                      MAP(imm_v128_shr_n_s32<28>),
                      MAP(imm_v128_shl_n_64<1>),
                      MAP(imm_v128_shl_n_64<4>),
                      MAP(imm_v128_shl_n_64<8>),
                      MAP(imm_v128_shl_n_64<12>),
                      MAP(imm_v128_shl_n_64<16>),
                      MAP(imm_v128_shl_n_64<20>),
                      MAP(imm_v128_shl_n_64<24>),
                      MAP(imm_v128_shl_n_64<28>),
                      MAP(imm_v128_shl_n_64<32>),
                      MAP(imm_v128_shl_n_64<36>),
                      MAP(imm_v128_shl_n_64<40>),
                      MAP(imm_v128_shl_n_64<44>),
                      MAP(imm_v128_shl_n_64<48>),
                      MAP(imm_v128_shl_n_64<52>),
                      MAP(imm_v128_shl_n_64<56>),
                      MAP(imm_v128_shl_n_64<60>),
                      MAP(imm_v128_shr_n_u64<1>),
                      MAP(imm_v128_shr_n_u64<4>),
                      MAP(imm_v128_shr_n_u64<8>),
                      MAP(imm_v128_shr_n_u64<12>),
                      MAP(imm_v128_shr_n_u64<16>),
                      MAP(imm_v128_shr_n_u64<20>),
                      MAP(imm_v128_shr_n_u64<24>),
                      MAP(imm_v128_shr_n_u64<28>),
                      MAP(imm_v128_shr_n_u64<32>),
                      MAP(imm_v128_shr_n_u64<36>),
                      MAP(imm_v128_shr_n_u64<40>),
                      MAP(imm_v128_shr_n_u64<44>),
                      MAP(imm_v128_shr_n_u64<48>),
                      MAP(imm_v128_shr_n_u64<52>),
                      MAP(imm_v128_shr_n_u64<56>),
                      MAP(imm_v128_shr_n_u64<60>),
                      MAP(imm_v128_shr_n_s64<1>),
                      MAP(imm_v128_shr_n_s64<4>),
                      MAP(imm_v128_shr_n_s64<8>),
                      MAP(imm_v128_shr_n_s64<12>),
                      MAP(imm_v128_shr_n_s64<16>),
                      MAP(imm_v128_shr_n_s64<20>),
                      MAP(imm_v128_shr_n_s64<24>),
                      MAP(imm_v128_shr_n_s64<28>),
                      MAP(imm_v128_shr_n_s64<32>),
                      MAP(imm_v128_shr_n_s64<36>),
                      MAP(imm_v128_shr_n_s64<40>),
                      MAP(imm_v128_shr_n_s64<44>),
                      MAP(imm_v128_shr_n_s64<48>),
                      MAP(imm_v128_shr_n_s64<52>),
                      MAP(imm_v128_shr_n_s64<56>),
                      MAP(imm_v128_shr_n_s64<60>),
                      MAP(v128_from_v64),
                      MAP(v128_zip_8),
                      MAP(v128_zip_16),
                      MAP(v128_zip_32),
                      MAP(v128_mul_s16),
                      MAP(v128_unpack_u8_s16),
                      MAP(v128_unpack_s8_s16),
                      MAP(v128_unpack_u16_s32),
                      MAP(v128_unpack_s16_s32),
                      MAP(v128_shl_8),
                      MAP(v128_shr_u8),
                      MAP(v128_shr_s8),
                      MAP(v128_shl_16),
                      MAP(v128_shr_u16),
                      MAP(v128_shr_s16),
                      MAP(v128_shl_32),
                      MAP(v128_shr_u32),
                      MAP(v128_shr_s32),
                      MAP(v128_shl_64),
                      MAP(v128_shr_u64),
                      MAP(v128_shr_s64),
                      MAP(v128_hadd_u8),
                      MAP(v128_dotp_su8),
                      MAP(v128_dotp_s16),
                      MAP(v128_dotp_s32),
                      MAP(v128_low_u32),
                      MAP(v128_low_v64),
                      MAP(v128_high_v64),
                      MAP(v128_from_64),
                      MAP(v128_from_32),
                      MAP(v128_movemask_8),
                      MAP(v128_zero),
                      MAP(v128_dup_8),
                      MAP(v128_dup_16),
                      MAP(v128_dup_32),
                      MAP(v128_dup_64),
                      MAP(v128_unpacklo_u8_s16),
                      MAP(v128_unpackhi_u8_s16),
                      MAP(v128_unpacklo_s8_s16),
                      MAP(v128_unpackhi_s8_s16),
                      MAP(v128_blend_8),
                      MAP(u32_load_unaligned),
                      MAP(u32_store_unaligned),
                      MAP(v64_load_unaligned),
                      MAP(v64_store_unaligned),
                      MAP(v128_load_unaligned),
                      MAP(v128_store_unaligned),
                      MAP(v256_sad_u8),
                      MAP(v256_ssd_u8),
                      MAP(v256_sad_u16),
                      MAP(v256_ssd_s16),
                      MAP(v256_hadd_u8),
                      MAP(v256_low_u64),
                      MAP(v256_dotp_su8),
                      MAP(v256_dotp_s16),
                      MAP(v256_dotp_s32),
                      MAP(v256_add_8),
                      MAP(v256_add_16),
                      MAP(v256_sadd_s8),
                      MAP(v256_sadd_u8),
                      MAP(v256_sadd_s16),
                      MAP(v256_add_32),
                      MAP(v256_add_64),
                      MAP(v256_sub_8),
                      MAP(v256_ssub_u8),
                      MAP(v256_ssub_s8),
                      MAP(v256_sub_16),
                      MAP(v256_ssub_u16),
                      MAP(v256_ssub_s16),
                      MAP(v256_sub_32),
                      MAP(v256_sub_64),
                      MAP(v256_ziplo_8),
                      MAP(v256_ziphi_8),
                      MAP(v256_ziplo_16),
                      MAP(v256_ziphi_16),
                      MAP(v256_ziplo_32),
                      MAP(v256_ziphi_32),
                      MAP(v256_ziplo_64),
                      MAP(v256_ziphi_64),
                      MAP(v256_unziphi_8),
                      MAP(v256_unziplo_8),
                      MAP(v256_unziphi_16),
                      MAP(v256_unziplo_16),
                      MAP(v256_unziphi_32),
                      MAP(v256_unziplo_32),
                      MAP(v256_unziphi_64),
                      MAP(v256_unziplo_64),
                      MAP(v256_pack_s32_u16),
                      MAP(v256_pack_s32_s16),
                      MAP(v256_pack_s16_u8),
                      MAP(v256_pack_s16_s8),
                      MAP(v256_or),
                      MAP(v256_xor),
                      MAP(v256_and),
                      MAP(v256_andn),
                      MAP(v256_mullo_s16),
                      MAP(v256_mulhi_s16),
                      MAP(v256_mullo_s32),
                      MAP(v256_madd_s16),
                      MAP(v256_madd_us8),
                      MAP(v256_avg_u8),
                      MAP(v256_rdavg_u8),
                      MAP(v256_rdavg_u16),
                      MAP(v256_avg_u16),
                      MAP(v256_min_u8),
                      MAP(v256_max_u8),
                      MAP(v256_min_s8),
                      MAP(v256_max_s8),
                      MAP(v256_min_s16),
                      MAP(v256_max_s16),
                      MAP(v256_min_s32),
                      MAP(v256_max_s32),
                      MAP(v256_cmpgt_s8),
                      MAP(v256_cmplt_s8),
                      MAP(v256_cmpeq_8),
                      MAP(v256_cmpgt_s16),
                      MAP(v256_cmplt_s16),
                      MAP(v256_cmpeq_16),
                      MAP(v256_cmpgt_s32),
                      MAP(v256_cmplt_s32),
                      MAP(v256_cmpeq_32),
                      MAP(v256_shuffle_8),
                      MAP(v256_pshuffle_8),
                      MAP(v256_wideshuffle_8),
                      MAP(imm_v256_align<1>),
                      MAP(imm_v256_align<2>),
                      MAP(imm_v256_align<3>),
                      MAP(imm_v256_align<4>),
                      MAP(imm_v256_align<5>),
                      MAP(imm_v256_align<6>),
                      MAP(imm_v256_align<7>),
                      MAP(imm_v256_align<8>),
                      MAP(imm_v256_align<9>),
                      MAP(imm_v256_align<10>),
                      MAP(imm_v256_align<11>),
                      MAP(imm_v256_align<12>),
                      MAP(imm_v256_align<13>),
                      MAP(imm_v256_align<14>),
                      MAP(imm_v256_align<15>),
                      MAP(imm_v256_align<16>),
                      MAP(imm_v256_align<17>),
                      MAP(imm_v256_align<18>),
                      MAP(imm_v256_align<19>),
                      MAP(imm_v256_align<20>),
                      MAP(imm_v256_align<21>),
                      MAP(imm_v256_align<22>),
                      MAP(imm_v256_align<23>),
                      MAP(imm_v256_align<24>),
                      MAP(imm_v256_align<25>),
                      MAP(imm_v256_align<26>),
                      MAP(imm_v256_align<27>),
                      MAP(imm_v256_align<28>),
                      MAP(imm_v256_align<29>),
                      MAP(imm_v256_align<30>),
                      MAP(imm_v256_align<31>),
                      MAP(v256_from_v128),
                      MAP(v256_zip_8),
                      MAP(v256_zip_16),
                      MAP(v256_zip_32),
                      MAP(v256_mul_s16),
                      MAP(v256_unpack_u8_s16),
                      MAP(v256_unpack_s8_s16),
                      MAP(v256_unpack_u16_s32),
                      MAP(v256_unpack_s16_s32),
                      MAP(v256_shl_8),
                      MAP(v256_shr_u8),
                      MAP(v256_shr_s8),
                      MAP(v256_shl_16),
                      MAP(v256_shr_u16),
                      MAP(v256_shr_s16),
                      MAP(v256_shl_32),
                      MAP(v256_shr_u32),
                      MAP(v256_shr_s32),
                      MAP(v256_shl_64),
                      MAP(v256_shr_u64),
                      MAP(v256_shr_s64),
                      MAP(v256_abs_s8),
                      MAP(v256_abs_s16),
                      MAP(v256_padd_u8),
                      MAP(v256_padd_s16),
                      MAP(v256_unpacklo_u16_s32),
                      MAP(v256_unpacklo_s16_s32),
                      MAP(v256_unpackhi_u16_s32),
                      MAP(v256_unpackhi_s16_s32),
                      MAP(imm_v256_shr_n_word<1>),
                      MAP(imm_v256_shr_n_word<2>),
                      MAP(imm_v256_shr_n_word<3>),
                      MAP(imm_v256_shr_n_word<4>),
                      MAP(imm_v256_shr_n_word<5>),
                      MAP(imm_v256_shr_n_word<6>),
                      MAP(imm_v256_shr_n_word<7>),
                      MAP(imm_v256_shr_n_word<8>),
                      MAP(imm_v256_shr_n_word<9>),
                      MAP(imm_v256_shr_n_word<10>),
                      MAP(imm_v256_shr_n_word<11>),
                      MAP(imm_v256_shr_n_word<12>),
                      MAP(imm_v256_shr_n_word<13>),
                      MAP(imm_v256_shr_n_word<14>),
                      MAP(imm_v256_shr_n_word<15>),
                      MAP(imm_v256_shl_n_word<1>),
                      MAP(imm_v256_shl_n_word<2>),
                      MAP(imm_v256_shl_n_word<3>),
                      MAP(imm_v256_shl_n_word<4>),
                      MAP(imm_v256_shl_n_word<5>),
                      MAP(imm_v256_shl_n_word<6>),
                      MAP(imm_v256_shl_n_word<7>),
                      MAP(imm_v256_shl_n_word<8>),
                      MAP(imm_v256_shl_n_word<9>),
                      MAP(imm_v256_shl_n_word<10>),
                      MAP(imm_v256_shl_n_word<11>),
                      MAP(imm_v256_shl_n_word<12>),
                      MAP(imm_v256_shl_n_word<13>),
                      MAP(imm_v256_shl_n_word<14>),
                      MAP(imm_v256_shl_n_word<15>),
                      MAP(imm_v256_shr_n_byte<1>),
                      MAP(imm_v256_shr_n_byte<2>),
                      MAP(imm_v256_shr_n_byte<3>),
                      MAP(imm_v256_shr_n_byte<4>),
                      MAP(imm_v256_shr_n_byte<5>),
                      MAP(imm_v256_shr_n_byte<6>),
                      MAP(imm_v256_shr_n_byte<7>),
                      MAP(imm_v256_shr_n_byte<8>),
                      MAP(imm_v256_shr_n_byte<9>),
                      MAP(imm_v256_shr_n_byte<10>),
                      MAP(imm_v256_shr_n_byte<11>),
                      MAP(imm_v256_shr_n_byte<12>),
                      MAP(imm_v256_shr_n_byte<13>),
                      MAP(imm_v256_shr_n_byte<14>),
                      MAP(imm_v256_shr_n_byte<15>),
                      MAP(imm_v256_shr_n_byte<16>),
                      MAP(imm_v256_shr_n_byte<17>),
                      MAP(imm_v256_shr_n_byte<18>),
                      MAP(imm_v256_shr_n_byte<19>),
                      MAP(imm_v256_shr_n_byte<20>),
                      MAP(imm_v256_shr_n_byte<21>),
                      MAP(imm_v256_shr_n_byte<22>),
                      MAP(imm_v256_shr_n_byte<23>),
                      MAP(imm_v256_shr_n_byte<24>),
                      MAP(imm_v256_shr_n_byte<25>),
                      MAP(imm_v256_shr_n_byte<26>),
                      MAP(imm_v256_shr_n_byte<27>),
                      MAP(imm_v256_shr_n_byte<28>),
                      MAP(imm_v256_shr_n_byte<29>),
                      MAP(imm_v256_shr_n_byte<30>),
                      MAP(imm_v256_shr_n_byte<31>),
                      MAP(imm_v256_shl_n_byte<1>),
                      MAP(imm_v256_shl_n_byte<2>),
                      MAP(imm_v256_shl_n_byte<3>),
                      MAP(imm_v256_shl_n_byte<4>),
                      MAP(imm_v256_shl_n_byte<5>),
                      MAP(imm_v256_shl_n_byte<6>),
                      MAP(imm_v256_shl_n_byte<7>),
                      MAP(imm_v256_shl_n_byte<8>),
                      MAP(imm_v256_shl_n_byte<9>),
                      MAP(imm_v256_shl_n_byte<10>),
                      MAP(imm_v256_shl_n_byte<11>),
                      MAP(imm_v256_shl_n_byte<12>),
                      MAP(imm_v256_shl_n_byte<13>),
                      MAP(imm_v256_shl_n_byte<14>),
                      MAP(imm_v256_shl_n_byte<15>),
                      MAP(imm_v256_shl_n_byte<16>),
                      MAP(imm_v256_shl_n_byte<17>),
                      MAP(imm_v256_shl_n_byte<18>),
                      MAP(imm_v256_shl_n_byte<19>),
                      MAP(imm_v256_shl_n_byte<20>),
                      MAP(imm_v256_shl_n_byte<21>),
                      MAP(imm_v256_shl_n_byte<22>),
                      MAP(imm_v256_shl_n_byte<23>),
                      MAP(imm_v256_shl_n_byte<24>),
                      MAP(imm_v256_shl_n_byte<25>),
                      MAP(imm_v256_shl_n_byte<26>),
                      MAP(imm_v256_shl_n_byte<27>),
                      MAP(imm_v256_shl_n_byte<28>),
                      MAP(imm_v256_shl_n_byte<29>),
                      MAP(imm_v256_shl_n_byte<30>),
                      MAP(imm_v256_shl_n_byte<31>),
                      MAP(imm_v256_shl_n_8<1>),
                      MAP(imm_v256_shl_n_8<2>),
                      MAP(imm_v256_shl_n_8<3>),
                      MAP(imm_v256_shl_n_8<4>),
                      MAP(imm_v256_shl_n_8<5>),
                      MAP(imm_v256_shl_n_8<6>),
                      MAP(imm_v256_shl_n_8<7>),
                      MAP(imm_v256_shr_n_u8<1>),
                      MAP(imm_v256_shr_n_u8<2>),
                      MAP(imm_v256_shr_n_u8<3>),
                      MAP(imm_v256_shr_n_u8<4>),
                      MAP(imm_v256_shr_n_u8<5>),
                      MAP(imm_v256_shr_n_u8<6>),
                      MAP(imm_v256_shr_n_u8<7>),
                      MAP(imm_v256_shr_n_s8<1>),
                      MAP(imm_v256_shr_n_s8<2>),
                      MAP(imm_v256_shr_n_s8<3>),
                      MAP(imm_v256_shr_n_s8<4>),
                      MAP(imm_v256_shr_n_s8<5>),
                      MAP(imm_v256_shr_n_s8<6>),
                      MAP(imm_v256_shr_n_s8<7>),
                      MAP(imm_v256_shl_n_16<1>),
                      MAP(imm_v256_shl_n_16<2>),
                      MAP(imm_v256_shl_n_16<4>),
                      MAP(imm_v256_shl_n_16<6>),
                      MAP(imm_v256_shl_n_16<8>),
                      MAP(imm_v256_shl_n_16<10>),
                      MAP(imm_v256_shl_n_16<12>),
                      MAP(imm_v256_shl_n_16<14>),
                      MAP(imm_v256_shr_n_u16<1>),
                      MAP(imm_v256_shr_n_u16<2>),
                      MAP(imm_v256_shr_n_u16<4>),
                      MAP(imm_v256_shr_n_u16<6>),
                      MAP(imm_v256_shr_n_u16<8>),
                      MAP(imm_v256_shr_n_u16<10>),
                      MAP(imm_v256_shr_n_u16<12>),
                      MAP(imm_v256_shr_n_u16<14>),
                      MAP(imm_v256_shr_n_s16<1>),
                      MAP(imm_v256_shr_n_s16<2>),
                      MAP(imm_v256_shr_n_s16<4>),
                      MAP(imm_v256_shr_n_s16<6>),
                      MAP(imm_v256_shr_n_s16<8>),
                      MAP(imm_v256_shr_n_s16<10>),
                      MAP(imm_v256_shr_n_s16<12>),
                      MAP(imm_v256_shr_n_s16<14>),
                      MAP(imm_v256_shl_n_32<1>),
                      MAP(imm_v256_shl_n_32<4>),
                      MAP(imm_v256_shl_n_32<8>),
                      MAP(imm_v256_shl_n_32<12>),
                      MAP(imm_v256_shl_n_32<16>),
                      MAP(imm_v256_shl_n_32<20>),
                      MAP(imm_v256_shl_n_32<24>),
                      MAP(imm_v256_shl_n_32<28>),
                      MAP(imm_v256_shr_n_u32<1>),
                      MAP(imm_v256_shr_n_u32<4>),
                      MAP(imm_v256_shr_n_u32<8>),
                      MAP(imm_v256_shr_n_u32<12>),
                      MAP(imm_v256_shr_n_u32<16>),
                      MAP(imm_v256_shr_n_u32<20>),
                      MAP(imm_v256_shr_n_u32<24>),
                      MAP(imm_v256_shr_n_u32<28>),
                      MAP(imm_v256_shr_n_s32<1>),
                      MAP(imm_v256_shr_n_s32<4>),
                      MAP(imm_v256_shr_n_s32<8>),
                      MAP(imm_v256_shr_n_s32<12>),
                      MAP(imm_v256_shr_n_s32<16>),
                      MAP(imm_v256_shr_n_s32<20>),
                      MAP(imm_v256_shr_n_s32<24>),
                      MAP(imm_v256_shr_n_s32<28>),
                      MAP(imm_v256_shl_n_64<1>),
                      MAP(imm_v256_shl_n_64<4>),
                      MAP(imm_v256_shl_n_64<8>),
                      MAP(imm_v256_shl_n_64<12>),
                      MAP(imm_v256_shl_n_64<16>),
                      MAP(imm_v256_shl_n_64<20>),
                      MAP(imm_v256_shl_n_64<24>),
                      MAP(imm_v256_shl_n_64<28>),
                      MAP(imm_v256_shl_n_64<32>),
                      MAP(imm_v256_shl_n_64<36>),
                      MAP(imm_v256_shl_n_64<40>),
                      MAP(imm_v256_shl_n_64<44>),
                      MAP(imm_v256_shl_n_64<48>),
                      MAP(imm_v256_shl_n_64<52>),
                      MAP(imm_v256_shl_n_64<56>),
                      MAP(imm_v256_shl_n_64<60>),
                      MAP(imm_v256_shr_n_u64<1>),
                      MAP(imm_v256_shr_n_u64<4>),
                      MAP(imm_v256_shr_n_u64<8>),
                      MAP(imm_v256_shr_n_u64<12>),
                      MAP(imm_v256_shr_n_u64<16>),
                      MAP(imm_v256_shr_n_u64<20>),
                      MAP(imm_v256_shr_n_u64<24>),
                      MAP(imm_v256_shr_n_u64<28>),
                      MAP(imm_v256_shr_n_u64<32>),
                      MAP(imm_v256_shr_n_u64<36>),
                      MAP(imm_v256_shr_n_u64<40>),
                      MAP(imm_v256_shr_n_u64<44>),
                      MAP(imm_v256_shr_n_u64<48>),
                      MAP(imm_v256_shr_n_u64<52>),
                      MAP(imm_v256_shr_n_u64<56>),
                      MAP(imm_v256_shr_n_u64<60>),
                      MAP(imm_v256_shr_n_s64<1>),
                      MAP(imm_v256_shr_n_s64<4>),
                      MAP(imm_v256_shr_n_s64<8>),
                      MAP(imm_v256_shr_n_s64<12>),
                      MAP(imm_v256_shr_n_s64<16>),
                      MAP(imm_v256_shr_n_s64<20>),
                      MAP(imm_v256_shr_n_s64<24>),
                      MAP(imm_v256_shr_n_s64<28>),
                      MAP(imm_v256_shr_n_s64<32>),
                      MAP(imm_v256_shr_n_s64<36>),
                      MAP(imm_v256_shr_n_s64<40>),
                      MAP(imm_v256_shr_n_s64<44>),
                      MAP(imm_v256_shr_n_s64<48>),
                      MAP(imm_v256_shr_n_s64<52>),
                      MAP(imm_v256_shr_n_s64<56>),
                      MAP(imm_v256_shr_n_s64<60>),
                      MAP(v256_movemask_8),
                      MAP(v256_zero),
                      MAP(v256_dup_8),
                      MAP(v256_dup_16),
                      MAP(v256_dup_32),
                      MAP(v256_dup_64),
                      MAP(v256_low_u32),
                      MAP(v256_low_v64),
                      MAP(v256_from_64),
                      MAP(v256_from_v64),
                      MAP(v256_ziplo_128),
                      MAP(v256_ziphi_128),
                      MAP(v256_unpacklo_u8_s16),
                      MAP(v256_unpackhi_u8_s16),
                      MAP(v256_unpacklo_s8_s16),
                      MAP(v256_unpackhi_s8_s16),
                      MAP(v256_blend_8),
                      { NULL, NULL, NULL } };
#undef MAP
// Map reference functions to machine tuned functions. Since the
// functions depend on machine tuned types, the non-machine tuned
// instantiations of the test can't refer to these functions directly,
// so we refer to them by name and do the mapping here.
void Map(const char *name, fptr *ref, fptr *simd) {
  unsigned int i;
  for (i = 0; m[i].name && strcmp(name, m[i].name); i++) {
  }

  *ref = m[i].ref;
  *simd = m[i].simd;
}
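// For example, Map("v64_add_8", &ref, &simd) sets ref to the C reference
// c_v64_add_8 and simd to the machine tuned v64_add_8. If the name is not
// found, the loop stops at the { NULL, NULL, NULL } sentinel and both
// pointers are set to NULL.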
// Used for printing errors in TestSimd1Arg, TestSimd2Args and TestSimd3Args
std::string Print(const uint8_t *a, int size) {
  std::string text = "0x";
  for (int i = 0; i < size; i++) {
    const uint8_t c = a[!CONFIG_BIG_ENDIAN ? size - 1 - i : i];
    // Same as snprintf(..., ..., "%02x", c)
    text += (c >> 4) + '0' + ((c >> 4) > 9) * ('a' - '0' - 10);
    text += (c & 15) + '0' + ((c & 15) > 9) * ('a' - '0' - 10);
  }

  return text;
}
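// For example, on a little-endian target a two-byte buffer { 0x0a, 0x1b }
// prints as "0x1b0a": bytes are emitted most significant first.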
// Used in TestSimd1Arg, TestSimd2Args and TestSimd3Args to restrict argument
// ranges
void SetMask(uint8_t *s, int size, uint32_t mask, uint32_t maskwidth) {
  switch (maskwidth) {
    case 0: {
      break;
    }
    case 8: {
      for (int i = 0; i < size; i++) s[i] &= mask;
      break;
    }
    case 16: {
      uint16_t *t = reinterpret_cast<uint16_t *>(s);
      assert(!(reinterpret_cast<uintptr_t>(s) & 1));
      for (int i = 0; i < size / 2; i++) t[i] &= mask;
      break;
    }
    case 32: {
      uint32_t *t = reinterpret_cast<uint32_t *>(s);
      assert(!(reinterpret_cast<uintptr_t>(s) & 3));
      for (int i = 0; i < size / 4; i++) t[i] &= mask;
      break;
    }
    case 64: {
      uint64_t *t = reinterpret_cast<uint64_t *>(s);
      assert(!(reinterpret_cast<uintptr_t>(s) & 7));
      for (int i = 0; i < size / 8; i++) t[i] &= mask;
      break;
    }
    default: {
      FAIL() << "Unsupported mask width";
      break;
    }
  }
}
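// For example, SetMask(s, 32, 15, 8) masks every byte of s down to the
// range 0..15, which is how callers keep e.g. shift amounts within legal
// bounds.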
// We need some extra load/store functions
void u64_store_aligned(void *p, uint64_t a) {
  v64_store_aligned(p, v64_from_64(a));
}
void s32_store_aligned(void *p, int32_t a) {
  u32_store_aligned(p, static_cast<uint32_t>(a));
}
void s64_store_aligned(void *p, int64_t a) {
  v64_store_aligned(p, v64_from_64(static_cast<uint64_t>(a)));
}

void c_u64_store_aligned(void *p, uint64_t a) {
  c_v64_store_aligned(p, c_v64_from_64(a));
}

void c_s32_store_aligned(void *p, int32_t a) {
  c_u32_store_aligned(p, static_cast<uint32_t>(a));
}

void c_s64_store_aligned(void *p, int64_t a) {
  c_v64_store_aligned(p, c_v64_from_64(static_cast<uint64_t>(a)));
}

uint64_t u64_load_aligned(const void *p) {
  return v64_u64(v64_load_aligned(p));
}
uint16_t u16_load_aligned(const void *p) {
  return *(reinterpret_cast<const uint16_t *>(p));
}
uint8_t u8_load_aligned(const void *p) {
  return *(reinterpret_cast<const uint8_t *>(p));
}

uint64_t c_u64_load_aligned(const void *p) {
  return c_v64_u64(c_v64_load_aligned(p));
}
uint16_t c_u16_load_aligned(const void *p) {
  return *(reinterpret_cast<const uint16_t *>(p));
}
uint8_t c_u8_load_aligned(const void *p) {
  return *(reinterpret_cast<const uint8_t *>(p));
}
// CompareSimd1Arg, CompareSimd2Args and CompareSimd3Args compare
// intrinsics taking 1, 2 or 3 arguments respectively with their
// corresponding C reference. Ideally, the loads and stores should
// have gone into the template parameter list, but v64 and v128 could
// be typedef'ed to the same type (which is the case on x86) and then
// we can't instantiate both v64 and v128, so the function return and
// argument types, including the always differing types in the C
// equivalent, are used instead. The function arguments must be void
// pointers and then go through a cast to avoid matching errors in the
// branches eliminated by the typeid tests in the calling function.
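// For example, when v64_abs_s8 is compared against c_v64_abs_s8,
// TestSimd1Arg below instantiates CompareSimd1Arg<v64, v64, c_v64, c_v64>
// and passes v64_store_aligned/v64_load_aligned and their c_-prefixed
// counterparts as the type-erased store and load functions.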
template <typename Ret, typename Arg, typename CRet, typename CArg>
int CompareSimd1Arg(fptr store, fptr load, fptr simd, void *d, fptr c_store,
                    fptr c_load, fptr c_simd, void *ref_d, const void *a) {
  void (*const my_store)(void *, Ret) = (void (*const)(void *, Ret))store;
  Arg (*const my_load)(const void *) = (Arg(*const)(const void *))load;
  Ret (*const my_simd)(Arg) = (Ret(*const)(Arg))simd;
  void (*const my_c_store)(void *, CRet) = (void (*const)(void *, CRet))c_store;
  CArg (*const my_c_load)(const void *) = (CArg(*const)(const void *))c_load;
  CRet (*const my_c_simd)(CArg) = (CRet(*const)(CArg))c_simd;

  // Call reference and intrinsic
  my_c_store(ref_d, my_c_simd(my_c_load(a)));
  my_store(d, my_simd(my_load(a)));

  // Compare results
  return memcmp(ref_d, d, sizeof(CRet));
}
template <typename Ret, typename Arg1, typename Arg2, typename CRet,
          typename CArg1, typename CArg2>
int CompareSimd2Args(fptr store, fptr load1, fptr load2, fptr simd, void *d,
                     fptr c_store, fptr c_load1, fptr c_load2, fptr c_simd,
                     void *ref_d, const void *a, const void *b) {
  void (*const my_store)(void *, Ret) = (void (*const)(void *, Ret))store;
  Arg1 (*const my_load1)(const void *) = (Arg1(*const)(const void *))load1;
  Arg2 (*const my_load2)(const void *) = (Arg2(*const)(const void *))load2;
  Ret (*const my_simd)(Arg1, Arg2) = (Ret(*const)(Arg1, Arg2))simd;
  void (*const my_c_store)(void *, CRet) = (void (*const)(void *, CRet))c_store;
  CArg1 (*const my_c_load1)(const void *) =
      (CArg1(*const)(const void *))c_load1;
  CArg2 (*const my_c_load2)(const void *) =
      (CArg2(*const)(const void *))c_load2;
  CRet (*const my_c_simd)(CArg1, CArg2) = (CRet(*const)(CArg1, CArg2))c_simd;

  // Call reference and intrinsic
  my_c_store(ref_d, my_c_simd(my_c_load1(a), my_c_load2(b)));
  my_store(d, my_simd(my_load1(a), my_load2(b)));

  // Compare results
  return memcmp(ref_d, d, sizeof(CRet));
}
template <typename Ret, typename Arg1, typename Arg2, typename Arg3,
          typename CRet, typename CArg1, typename CArg2, typename CArg3>
int CompareSimd3Args(fptr store, fptr load1, fptr load2, fptr load3, fptr simd,
                     void *d, fptr c_store, fptr c_load1, fptr c_load2,
                     fptr c_load3, fptr c_simd, void *ref_d, const void *a,
                     const void *b, const void *c) {
  void (*const my_store)(void *, Ret) = (void (*const)(void *, Ret))store;
  Arg1 (*const my_load1)(const void *) = (Arg1(*const)(const void *))load1;
  Arg2 (*const my_load2)(const void *) = (Arg2(*const)(const void *))load2;
  Arg3 (*const my_load3)(const void *) = (Arg3(*const)(const void *))load3;
  Ret (*const my_simd)(Arg1, Arg2, Arg3) = (Ret(*const)(Arg1, Arg2, Arg3))simd;
  void (*const my_c_store)(void *, CRet) = (void (*const)(void *, CRet))c_store;
  CArg1 (*const my_c_load1)(const void *) =
      (CArg1(*const)(const void *))c_load1;
  CArg2 (*const my_c_load2)(const void *) =
      (CArg2(*const)(const void *))c_load2;
  // Was CArg2 in the extracted text; CArg3 matches the template parameter
  // list and the machine tuned my_load3 above.
  CArg3 (*const my_c_load3)(const void *) =
      (CArg3(*const)(const void *))c_load3;
  CRet (*const my_c_simd)(CArg1, CArg2, CArg3) =
      (CRet(*const)(CArg1, CArg2, CArg3))c_simd;

  // Call reference and intrinsic
  my_c_store(ref_d, my_c_simd(my_c_load1(a), my_c_load2(b), my_c_load3(c)));
  my_store(d, my_simd(my_load1(a), my_load2(b), my_load3(c)));

  // Compare results
  return memcmp(ref_d, d, sizeof(CRet));
}
}  // namespace
template <typename CRet, typename CArg>
void TestSimd1Arg(uint32_t iterations, uint32_t mask, uint32_t maskwidth,
                  const char *name) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  fptr ref_simd;
  fptr simd;
  int error = 0;
  DECLARE_ALIGNED(32, uint8_t, s[32]);
  DECLARE_ALIGNED(32, uint8_t, d[32]);
  DECLARE_ALIGNED(32, uint8_t, ref_d[32]);
  assert(sizeof(CArg) <= 32 && sizeof(CRet) <= 32);
  memset(ref_d, 0, sizeof(ref_d));
  memset(d, 0, sizeof(d));

  Map(name, &ref_simd, &simd);
  if (simd == NULL || ref_simd == NULL) {
    FAIL() << "Internal error: Unknown intrinsic function " << name;
  }

  for (unsigned int count = 0;
       count < iterations && !error && !testing::Test::HasFailure(); count++) {
    for (unsigned int c = 0; c < sizeof(CArg); c++) s[c] = rnd.Rand8();

    if (maskwidth) {
      SetMask(s, sizeof(CArg), mask, maskwidth);
    }
    if (typeid(CRet) == typeid(c_v64) && typeid(CArg) == typeid(c_v64)) {
      // V64_V64
      error = CompareSimd1Arg<v64, v64, CRet, CArg>(
          reinterpret_cast<fptr>(v64_store_aligned),
          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v64_store_aligned),
          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v64) &&
               typeid(CArg) == typeid(uint8_t)) {
      // V64_U8
      error = CompareSimd1Arg<v64, uint8_t, CRet, CArg>(
          reinterpret_cast<fptr>(v64_store_aligned),
          reinterpret_cast<fptr>(u8_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v64_store_aligned),
          reinterpret_cast<fptr>(c_u8_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v64) &&
               typeid(CArg) == typeid(uint16_t)) {
      // V64_U16
      error = CompareSimd1Arg<v64, uint16_t, CRet, CArg>(
          reinterpret_cast<fptr>(v64_store_aligned),
          reinterpret_cast<fptr>(u16_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v64_store_aligned),
          reinterpret_cast<fptr>(c_u16_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v64) &&
               typeid(CArg) == typeid(uint32_t)) {
      // V64_U32
      error = CompareSimd1Arg<v64, uint32_t, CRet, CArg>(
          reinterpret_cast<fptr>(v64_store_aligned),
          reinterpret_cast<fptr>(u32_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v64_store_aligned),
          reinterpret_cast<fptr>(c_u32_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(uint64_t) &&
               typeid(CArg) == typeid(c_v64)) {
      // U64_V64
      error = CompareSimd1Arg<uint64_t, v64, CRet, CArg>(
          reinterpret_cast<fptr>(u64_store_aligned),
          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_u64_store_aligned),
          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(int64_t) &&
               typeid(CArg) == typeid(c_v64)) {
      // S64_V64
      error = CompareSimd1Arg<int64_t, v64, CRet, CArg>(
          reinterpret_cast<fptr>(s64_store_aligned),
          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_s64_store_aligned),
          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(uint32_t) &&
               typeid(CArg) == typeid(c_v64)) {
      // U32_V64
      error = CompareSimd1Arg<uint32_t, v64, CRet, CArg>(
          reinterpret_cast<fptr>(u32_store_aligned),
          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_u32_store_aligned),
          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(int32_t) &&
               typeid(CArg) == typeid(c_v64)) {
      // S32_V64
      error = CompareSimd1Arg<int32_t, v64, CRet, CArg>(
          reinterpret_cast<fptr>(s32_store_aligned),
          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_s32_store_aligned),
          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(uint32_t) &&
               typeid(CArg) == typeid(c_v128)) {
      // U32_V128
      error = CompareSimd1Arg<uint32_t, v128, CRet, CArg>(
          reinterpret_cast<fptr>(u32_store_aligned),
          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_u32_store_aligned),
          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(uint64_t) &&
               typeid(CArg) == typeid(c_v128)) {
      // U64_V128
      error = CompareSimd1Arg<uint64_t, v128, CRet, CArg>(
          reinterpret_cast<fptr>(u64_store_aligned),
          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_u64_store_aligned),
          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(uint64_t) &&
               typeid(CArg) == typeid(c_v256)) {
      // U64_V256
      error = CompareSimd1Arg<uint64_t, v256, CRet, CArg>(
          reinterpret_cast<fptr>(u64_store_aligned),
          reinterpret_cast<fptr>(v256_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_u64_store_aligned),
          reinterpret_cast<fptr>(c_v256_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v64) &&
               typeid(CArg) == typeid(c_v128)) {
      // V64_V128
      error = CompareSimd1Arg<v64, v128, CRet, CArg>(
          reinterpret_cast<fptr>(v64_store_aligned),
          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v64_store_aligned),
          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v128) &&
               typeid(CArg) == typeid(c_v128)) {
      // V128_V128
      error = CompareSimd1Arg<v128, v128, CRet, CArg>(
          reinterpret_cast<fptr>(v128_store_aligned),
          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v128_store_aligned),
          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v128) &&
               typeid(CArg) == typeid(c_v64)) {
      // V128_V64
      error = CompareSimd1Arg<v128, v64, CRet, CArg>(
          reinterpret_cast<fptr>(v128_store_aligned),
          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v128_store_aligned),
          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v128) &&
               typeid(CArg) == typeid(uint8_t)) {
      // V128_U8
      error = CompareSimd1Arg<v128, uint8_t, CRet, CArg>(
          reinterpret_cast<fptr>(v128_store_aligned),
          reinterpret_cast<fptr>(u8_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v128_store_aligned),
          reinterpret_cast<fptr>(c_u8_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v128) &&
               typeid(CArg) == typeid(uint16_t)) {
      // V128_U16
      error = CompareSimd1Arg<v128, uint16_t, CRet, CArg>(
          reinterpret_cast<fptr>(v128_store_aligned),
          reinterpret_cast<fptr>(u16_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v128_store_aligned),
          reinterpret_cast<fptr>(c_u16_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v128) &&
               typeid(CArg) == typeid(uint32_t)) {
      // V128_U32
      error = CompareSimd1Arg<v128, uint32_t, CRet, CArg>(
          reinterpret_cast<fptr>(v128_store_aligned),
          reinterpret_cast<fptr>(u32_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v128_store_aligned),
          reinterpret_cast<fptr>(c_u32_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v128) &&
               typeid(CArg) == typeid(uint64_t)) {
      // V128_U64
      error = CompareSimd1Arg<v128, uint64_t, CRet, CArg>(
          reinterpret_cast<fptr>(v128_store_aligned),
          reinterpret_cast<fptr>(u64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v128_store_aligned),
          reinterpret_cast<fptr>(c_u64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v256) &&
               typeid(CArg) == typeid(c_v256)) {
      // V256_V256
      error = CompareSimd1Arg<v256, v256, CRet, CArg>(
          reinterpret_cast<fptr>(v256_store_aligned),
          reinterpret_cast<fptr>(v256_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v256_store_aligned),
          reinterpret_cast<fptr>(c_v256_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v256) &&
               typeid(CArg) == typeid(c_v128)) {
      // V256_V128
      error = CompareSimd1Arg<v256, v128, CRet, CArg>(
          reinterpret_cast<fptr>(v256_store_aligned),
          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v256_store_aligned),
          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v256) &&
               typeid(CArg) == typeid(uint8_t)) {
      // V256_U8
      error = CompareSimd1Arg<v256, uint8_t, CRet, CArg>(
          reinterpret_cast<fptr>(v256_store_aligned),
          reinterpret_cast<fptr>(u8_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v256_store_aligned),
          reinterpret_cast<fptr>(c_u8_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v256) &&
               typeid(CArg) == typeid(uint16_t)) {
      // V256_U16
      error = CompareSimd1Arg<v256, uint16_t, CRet, CArg>(
          reinterpret_cast<fptr>(v256_store_aligned),
          reinterpret_cast<fptr>(u16_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v256_store_aligned),
          reinterpret_cast<fptr>(c_u16_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v256) &&
               typeid(CArg) == typeid(uint32_t)) {
      // V256_U32
      error = CompareSimd1Arg<v256, uint32_t, CRet, CArg>(
          reinterpret_cast<fptr>(v256_store_aligned),
          reinterpret_cast<fptr>(u32_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v256_store_aligned),
          reinterpret_cast<fptr>(c_u32_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v256) &&
               typeid(CArg) == typeid(uint64_t)) {
      // V256_U64
      error = CompareSimd1Arg<v256, uint64_t, CRet, CArg>(
          reinterpret_cast<fptr>(v256_store_aligned),
          reinterpret_cast<fptr>(u64_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v256_store_aligned),
          reinterpret_cast<fptr>(c_u64_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(uint32_t) &&
               typeid(CArg) == typeid(c_v256)) {
      // U32_V256
      error = CompareSimd1Arg<uint32_t, v256, CRet, CArg>(
          reinterpret_cast<fptr>(u32_store_aligned),
          reinterpret_cast<fptr>(v256_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_u32_store_aligned),
          reinterpret_cast<fptr>(c_v256_load_aligned), ref_simd, ref_d, s);
    } else if (typeid(CRet) == typeid(c_v64) &&
               typeid(CArg) == typeid(c_v256)) {
      // V64_V256
      error = CompareSimd1Arg<v64, v256, CRet, CArg>(
          reinterpret_cast<fptr>(v64_store_aligned),
          reinterpret_cast<fptr>(v256_load_aligned), simd, d,
          reinterpret_cast<fptr>(c_v64_store_aligned),
          reinterpret_cast<fptr>(c_v256_load_aligned), ref_simd, ref_d, s);
    } else {
      FAIL() << "Internal error: Unknown intrinsic function "
             << typeid(CRet).name() << " " << name << "("
             << typeid(CArg).name() << ")";
    }

    EXPECT_EQ(0, error) << "Error: mismatch for " << name << "("
                        << Print(s, sizeof(s)) << ") -> " << Print(d, sizeof(d))
                        << " (simd), " << Print(ref_d, sizeof(ref_d))
                        << " (ref)";
  }
}
1741 template <typename CRet, typename CArg1, typename CArg2>
1742 void TestSimd2Args(uint32_t iterations, uint32_t mask, uint32_t maskwidth,
1743 const char *name) {
1744 ACMRandom rnd(ACMRandom::DeterministicSeed());
1745 fptr ref_simd;
1746 fptr simd;
1747 int error = 0;
1748 DECLARE_ALIGNED(32, uint8_t, s1[32]);
1749 DECLARE_ALIGNED(32, uint8_t, s2[32]);
1750 DECLARE_ALIGNED(32, uint8_t, d[32]);
1751 DECLARE_ALIGNED(32, uint8_t, ref_d[32]);
1752 assert(sizeof(CArg1) <= 32 && sizeof(CArg2) <= 32 && sizeof(CRet) <= 32);
1753 memset(ref_d, 0, sizeof(ref_d));
1754 memset(d, 0, sizeof(d));
1756 Map(name, &ref_simd, &simd);
1757 if (simd == NULL || ref_simd == NULL) {
1758 FAIL() << "Internal error: Unknown intrinsic function " << name;
1761 for (unsigned int count = 0;
1762 count < iterations && !error && !testing::Test::HasFailure(); count++) {
1763 for (unsigned int c = 0; c < sizeof(CArg1); c++) s1[c] = rnd.Rand8();
1765 for (unsigned int c = 0; c < sizeof(CArg2); c++) s2[c] = rnd.Rand8();
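// If requested, constrain the second operand (typically a shift count or
// similar immediate) so that only defined argument ranges are exercised.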
1767 if (maskwidth) SetMask(s2, sizeof(CArg2), mask, maskwidth);
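// Dispatch on the C prototype: each branch pairs the load/store wrappers
// matching the argument and return types with the intrinsic under test. The
// reinterpret_casts erase the signatures to the generic fptr type;
// CompareSimd2Args restores them internally before calling.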
1769 if (typeid(CRet) == typeid(c_v64) && typeid(CArg1) == typeid(c_v64) &&
1770 typeid(CArg2) == typeid(c_v64)) {
1771 // V64_V64V64
1772 error = CompareSimd2Args<v64, v64, v64, CRet, CArg1, CArg2>(
1773 reinterpret_cast<fptr>(v64_store_aligned),
1774 reinterpret_cast<fptr>(v64_load_aligned),
1775 reinterpret_cast<fptr>(v64_load_aligned), simd, d,
1776 reinterpret_cast<fptr>(c_v64_store_aligned),
1777 reinterpret_cast<fptr>(c_v64_load_aligned),
1778 reinterpret_cast<fptr>(c_v64_load_aligned),
1779 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1780 } else if (typeid(CRet) == typeid(c_v64) &&
1781 typeid(CArg1) == typeid(uint32_t) &&
1782 typeid(CArg2) == typeid(uint32_t)) {
1783 // V64_U32U32
1784 error = CompareSimd2Args<v64, uint32_t, uint32_t, CRet, CArg1, CArg2>(
1785 reinterpret_cast<fptr>(v64_store_aligned),
1786 reinterpret_cast<fptr>(u32_load_aligned),
1787 reinterpret_cast<fptr>(u32_load_aligned), simd, d,
1788 reinterpret_cast<fptr>(c_v64_store_aligned),
1789 reinterpret_cast<fptr>(c_u32_load_aligned),
1790 reinterpret_cast<fptr>(c_u32_load_aligned),
1791 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1792 } else if (typeid(CRet) == typeid(uint32_t) &&
1793 typeid(CArg1) == typeid(c_v64) &&
1794 typeid(CArg2) == typeid(c_v64)) {
1795 // U32_V64V64
1796 error = CompareSimd2Args<uint32_t, v64, v64, CRet, CArg1, CArg2>(
1797 reinterpret_cast<fptr>(u32_store_aligned),
1798 reinterpret_cast<fptr>(v64_load_aligned),
1799 reinterpret_cast<fptr>(v64_load_aligned), simd, d,
1800 reinterpret_cast<fptr>(c_u32_store_aligned),
1801 reinterpret_cast<fptr>(c_v64_load_aligned),
1802 reinterpret_cast<fptr>(c_v64_load_aligned),
1803 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1804 } else if (typeid(CRet) == typeid(int64_t) &&
1805 typeid(CArg1) == typeid(c_v64) &&
1806 typeid(CArg2) == typeid(c_v64)) {
1807 // S64_V64V64
1808 error = CompareSimd2Args<int64_t, v64, v64, CRet, CArg1, CArg2>(
1809 reinterpret_cast<fptr>(s64_store_aligned),
1810 reinterpret_cast<fptr>(v64_load_aligned),
1811 reinterpret_cast<fptr>(v64_load_aligned), simd, d,
1812 reinterpret_cast<fptr>(c_s64_store_aligned),
1813 reinterpret_cast<fptr>(c_v64_load_aligned),
1814 reinterpret_cast<fptr>(c_v64_load_aligned),
1815 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1816 } else if (typeid(CRet) == typeid(c_v64) &&
1817 typeid(CArg1) == typeid(c_v64) &&
1818 typeid(CArg2) == typeid(uint32_t)) {
1819 // V64_V64U32
1820 error = CompareSimd2Args<v64, v64, uint32_t, CRet, CArg1, CArg2>(
1821 reinterpret_cast<fptr>(v64_store_aligned),
1822 reinterpret_cast<fptr>(v64_load_aligned),
1823 reinterpret_cast<fptr>(u32_load_aligned), simd, d,
1824 reinterpret_cast<fptr>(c_v64_store_aligned),
1825 reinterpret_cast<fptr>(c_v64_load_aligned),
1826 reinterpret_cast<fptr>(c_u32_load_aligned),
1827 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1828 } else if (typeid(CRet) == typeid(c_v128) &&
1829 typeid(CArg1) == typeid(c_v128) &&
1830 typeid(CArg2) == typeid(c_v128)) {
1831 // V128_V128V128
1832 error = CompareSimd2Args<v128, v128, v128, CRet, CArg1, CArg2>(
1833 reinterpret_cast<fptr>(v128_store_aligned),
1834 reinterpret_cast<fptr>(v128_load_aligned),
1835 reinterpret_cast<fptr>(v128_load_aligned), simd, d,
1836 reinterpret_cast<fptr>(c_v128_store_aligned),
1837 reinterpret_cast<fptr>(c_v128_load_aligned),
1838 reinterpret_cast<fptr>(c_v128_load_aligned),
1839 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1840 } else if (typeid(CRet) == typeid(uint32_t) &&
1841 typeid(CArg1) == typeid(c_v128) &&
1842 typeid(CArg2) == typeid(c_v128)) {
1843 // U32_V128V128
1844 error = CompareSimd2Args<uint32_t, v128, v128, CRet, CArg1, CArg2>(
1845 reinterpret_cast<fptr>(u32_store_aligned),
1846 reinterpret_cast<fptr>(v128_load_aligned),
1847 reinterpret_cast<fptr>(v128_load_aligned), simd, d,
1848 reinterpret_cast<fptr>(c_u32_store_aligned),
1849 reinterpret_cast<fptr>(c_v128_load_aligned),
1850 reinterpret_cast<fptr>(c_v128_load_aligned),
1851 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1852 } else if (typeid(CRet) == typeid(uint64_t) &&
1853 typeid(CArg1) == typeid(c_v128) &&
1854 typeid(CArg2) == typeid(c_v128)) {
1855 // U64_V128V128
1856 error = CompareSimd2Args<uint64_t, v128, v128, CRet, CArg1, CArg2>(
1857 reinterpret_cast<fptr>(u64_store_aligned),
1858 reinterpret_cast<fptr>(v128_load_aligned),
1859 reinterpret_cast<fptr>(v128_load_aligned), simd, d,
1860 reinterpret_cast<fptr>(c_u64_store_aligned),
1861 reinterpret_cast<fptr>(c_v128_load_aligned),
1862 reinterpret_cast<fptr>(c_v128_load_aligned),
1863 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1864 } else if (typeid(CRet) == typeid(int64_t) &&
1865 typeid(CArg1) == typeid(c_v128) &&
1866 typeid(CArg2) == typeid(c_v128)) {
1867 // S64_V128V128
1868 error = CompareSimd2Args<int64_t, v128, v128, CRet, CArg1, CArg2>(
1869 reinterpret_cast<fptr>(s64_store_aligned),
1870 reinterpret_cast<fptr>(v128_load_aligned),
1871 reinterpret_cast<fptr>(v128_load_aligned), simd, d,
1872 reinterpret_cast<fptr>(c_s64_store_aligned),
1873 reinterpret_cast<fptr>(c_v128_load_aligned),
1874 reinterpret_cast<fptr>(c_v128_load_aligned),
1875 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1876 } else if (typeid(CRet) == typeid(c_v128) &&
1877 typeid(CArg1) == typeid(uint64_t) &&
1878 typeid(CArg2) == typeid(uint64_t)) {
1879 // V128_U64U64
1880 error = CompareSimd2Args<v128, uint64_t, uint64_t, CRet, CArg1, CArg2>(
1881 reinterpret_cast<fptr>(v128_store_aligned),
1882 reinterpret_cast<fptr>(u64_load_aligned),
1883 reinterpret_cast<fptr>(u64_load_aligned), simd, d,
1884 reinterpret_cast<fptr>(c_v128_store_aligned),
1885 reinterpret_cast<fptr>(c_u64_load_aligned),
1886 reinterpret_cast<fptr>(c_u64_load_aligned),
1887 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1888 } else if (typeid(CRet) == typeid(c_v128) &&
1889 typeid(CArg1) == typeid(c_v64) &&
1890 typeid(CArg2) == typeid(c_v64)) {
1891 // V128_V64V64
1892 error = CompareSimd2Args<v128, v64, v64, CRet, CArg1, CArg2>(
1893 reinterpret_cast<fptr>(v128_store_aligned),
1894 reinterpret_cast<fptr>(v64_load_aligned),
1895 reinterpret_cast<fptr>(v64_load_aligned), simd, d,
1896 reinterpret_cast<fptr>(c_v128_store_aligned),
1897 reinterpret_cast<fptr>(c_v64_load_aligned),
1898 reinterpret_cast<fptr>(c_v64_load_aligned),
1899 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1900 } else if (typeid(CRet) == typeid(c_v128) &&
1901 typeid(CArg1) == typeid(c_v128) &&
1902 typeid(CArg2) == typeid(uint32_t)) {
1903 // V128_V128U32
1904 error = CompareSimd2Args<v128, v128, uint32_t, CRet, CArg1, CArg2>(
1905 reinterpret_cast<fptr>(v128_store_aligned),
1906 reinterpret_cast<fptr>(v128_load_aligned),
1907 reinterpret_cast<fptr>(u32_load_aligned), simd, d,
1908 reinterpret_cast<fptr>(c_v128_store_aligned),
1909 reinterpret_cast<fptr>(c_v128_load_aligned),
1910 reinterpret_cast<fptr>(c_u32_load_aligned),
1911 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1912 } else if (typeid(CRet) == typeid(c_v256) &&
1913 typeid(CArg1) == typeid(c_v256) &&
1914 typeid(CArg2) == typeid(c_v256)) {
1915 // V256_V256V256
1916 error = CompareSimd2Args<v256, v256, v256, CRet, CArg1, CArg2>(
1917 reinterpret_cast<fptr>(v256_store_aligned),
1918 reinterpret_cast<fptr>(v256_load_aligned),
1919 reinterpret_cast<fptr>(v256_load_aligned), simd, d,
1920 reinterpret_cast<fptr>(c_v256_store_aligned),
1921 reinterpret_cast<fptr>(c_v256_load_aligned),
1922 reinterpret_cast<fptr>(c_v256_load_aligned),
1923 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1924 } else if (typeid(CRet) == typeid(uint64_t) &&
1925 typeid(CArg1) == typeid(c_v256) &&
1926 typeid(CArg2) == typeid(c_v256)) {
1927 // U64_V256V256
1928 error = CompareSimd2Args<uint64_t, v256, v256, CRet, CArg1, CArg2>(
1929 reinterpret_cast<fptr>(u64_store_aligned),
1930 reinterpret_cast<fptr>(v256_load_aligned),
1931 reinterpret_cast<fptr>(v256_load_aligned), simd, d,
1932 reinterpret_cast<fptr>(c_u64_store_aligned),
1933 reinterpret_cast<fptr>(c_v256_load_aligned),
1934 reinterpret_cast<fptr>(c_v256_load_aligned),
1935 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1936 } else if (typeid(CRet) == typeid(int64_t) &&
1937 typeid(CArg1) == typeid(c_v256) &&
1938 typeid(CArg2) == typeid(c_v256)) {
1939 // S64_V256V256
1940 error = CompareSimd2Args<int64_t, v256, v256, CRet, CArg1, CArg2>(
1941 reinterpret_cast<fptr>(s64_store_aligned),
1942 reinterpret_cast<fptr>(v256_load_aligned),
1943 reinterpret_cast<fptr>(v256_load_aligned), simd, d,
1944 reinterpret_cast<fptr>(c_s64_store_aligned),
1945 reinterpret_cast<fptr>(c_v256_load_aligned),
1946 reinterpret_cast<fptr>(c_v256_load_aligned),
1947 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1948 } else if (typeid(CRet) == typeid(uint32_t) &&
1949 typeid(CArg1) == typeid(c_v256) &&
1950 typeid(CArg2) == typeid(c_v256)) {
1951 // U32_V256V256
1952 error = CompareSimd2Args<uint32_t, v256, v256, CRet, CArg1, CArg2>(
1953 reinterpret_cast<fptr>(u32_store_aligned),
1954 reinterpret_cast<fptr>(v256_load_aligned),
1955 reinterpret_cast<fptr>(v256_load_aligned), simd, d,
1956 reinterpret_cast<fptr>(c_u32_store_aligned),
1957 reinterpret_cast<fptr>(c_v256_load_aligned),
1958 reinterpret_cast<fptr>(c_v256_load_aligned),
1959 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1960 } else if (typeid(CRet) == typeid(c_v256) &&
1961 typeid(CArg1) == typeid(c_v128) &&
1962 typeid(CArg2) == typeid(c_v128)) {
1963 // V256_V128V128
1964 error = CompareSimd2Args<v256, v128, v128, CRet, CArg1, CArg2>(
1965 reinterpret_cast<fptr>(v256_store_aligned),
1966 reinterpret_cast<fptr>(v128_load_aligned),
1967 reinterpret_cast<fptr>(v128_load_aligned), simd, d,
1968 reinterpret_cast<fptr>(c_v256_store_aligned),
1969 reinterpret_cast<fptr>(c_v128_load_aligned),
1970 reinterpret_cast<fptr>(c_v128_load_aligned),
1971 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1972 } else if (typeid(CRet) == typeid(c_v256) &&
1973 typeid(CArg1) == typeid(c_v256) &&
1974 typeid(CArg2) == typeid(uint32_t)) {
1975 // V256_V256U32
1976 error = CompareSimd2Args<v256, v256, uint32_t, CRet, CArg1, CArg2>(
1977 reinterpret_cast<fptr>(v256_store_aligned),
1978 reinterpret_cast<fptr>(v256_load_aligned),
1979 reinterpret_cast<fptr>(u32_load_aligned), simd, d,
1980 reinterpret_cast<fptr>(c_v256_store_aligned),
1981 reinterpret_cast<fptr>(c_v256_load_aligned),
1982 reinterpret_cast<fptr>(c_u32_load_aligned),
1983 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
1985 } else {
1986 FAIL() << "Internal error: Unknown intrinsic function "
1987 << typeid(CRet).name() << " " << name << "("
1988 << typeid(CArg1).name() << ", " << typeid(CArg2).name() << ")";
1989 }
1990 }
1992 EXPECT_EQ(0, error) << "Error: mismatch for " << name << "("
1993 << Print(s1, sizeof(s1)) << ", " << Print(s2, sizeof(s2))
1994 << ") -> " << Print(d, sizeof(d)) << " (simd), "
1995 << Print(ref_d, sizeof(ref_d)) << " (ref)";
1996 }
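// Illustrative usage (a sketch, not from the original file): a binary
// intrinsic taking a shift count would be exercised with a mask limiting the
// count to a defined range, e.g.
//   TestSimd2Args<c_v64, c_v64, uint32_t>(kIterations, 7U, 32U, "v64_shl_8");
// (kIterations is a hypothetical constant; assuming SetMask applies the mask
// to the 32-bit operand, mask 7 keeps the shift within 0..7 for 8-bit lanes.)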
1998 template <typename CRet, typename CArg1, typename CArg2, typename CArg3>
1999 void TestSimd3Args(uint32_t iterations, uint32_t mask, uint32_t maskwidth,
2000 const char *name) {
2001 ACMRandom rnd(ACMRandom::DeterministicSeed());
2002 fptr ref_simd;
2003 fptr simd;
2004 int error = 0;
2005 DECLARE_ALIGNED(32, uint8_t, s1[32]);
2006 DECLARE_ALIGNED(32, uint8_t, s2[32]);
2007 DECLARE_ALIGNED(32, uint8_t, s3[32]);
2008 DECLARE_ALIGNED(32, uint8_t, d[32]);
2009 DECLARE_ALIGNED(32, uint8_t, ref_d[32]);
2010 assert(sizeof(CArg1) <= 32 && sizeof(CArg2) <= 32 && sizeof(CArg3) <= 32 &&
2011 sizeof(CRet) <= 32);
2012 memset(ref_d, 0, sizeof(ref_d));
2013 memset(d, 0, sizeof(d));
2015 Map(name, &ref_simd, &simd);
2016 if (simd == NULL || ref_simd == NULL) {
2017 FAIL() << "Internal error: Unknown intrinsic function " << name;
2018 }
2020 for (unsigned int count = 0;
2021 count < iterations && !error && !testing::Test::HasFailure(); count++) {
2022 for (unsigned int c = 0; c < sizeof(CArg1); c++) s1[c] = rnd.Rand8();
2024 for (unsigned int c = 0; c < sizeof(CArg2); c++) s2[c] = rnd.Rand8();
2026 for (unsigned int c = 0; c < sizeof(CArg3); c++) s3[c] = rnd.Rand8();
2028 if (maskwidth) SetMask(s3, sizeof(CArg3), mask, maskwidth);
2030 if (typeid(CRet) == typeid(c_v128) && typeid(CArg1) == typeid(c_v128) &&
2031 typeid(CArg2) == typeid(c_v128) && typeid(CArg3) == typeid(c_v128)) {
2032 // V128_V128V128V128
2033 error =
2034 CompareSimd3Args<v128, v128, v128, v128, CRet, CArg1, CArg2, CArg3>(
2035 reinterpret_cast<fptr>(v128_store_aligned),
2036 reinterpret_cast<fptr>(v128_load_aligned),
2037 reinterpret_cast<fptr>(v128_load_aligned),
2038 reinterpret_cast<fptr>(v128_load_aligned), simd, d,
2039 reinterpret_cast<fptr>(c_v128_store_aligned),
2040 reinterpret_cast<fptr>(c_v128_load_aligned),
2041 reinterpret_cast<fptr>(c_v128_load_aligned),
2042 reinterpret_cast<fptr>(c_v128_load_aligned),
2043 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2, s3);
2044 } else if (typeid(CRet) == typeid(c_v256) &&
2045 typeid(CArg1) == typeid(c_v256) &&
2046 typeid(CArg2) == typeid(c_v256) &&
2047 typeid(CArg3) == typeid(c_v256)) {
2048 // V256_V256V256V256
2049 error =
2050 CompareSimd3Args<v256, v256, v256, v256, CRet, CArg1, CArg2, CArg3>(
2051 reinterpret_cast<fptr>(v256_store_aligned),
2052 reinterpret_cast<fptr>(v256_load_aligned),
2053 reinterpret_cast<fptr>(v256_load_aligned),
2054 reinterpret_cast<fptr>(v256_load_aligned), simd, d,
2055 reinterpret_cast<fptr>(c_v256_store_aligned),
2056 reinterpret_cast<fptr>(c_v256_load_aligned),
2057 reinterpret_cast<fptr>(c_v256_load_aligned),
2058 reinterpret_cast<fptr>(c_v256_load_aligned),
2059 reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2, s3);
2060 } else {
2061 FAIL() << "Internal error: Unknown intrinsic function "
2062 << typeid(CRet).name() << " " << name << "("
2063 << typeid(CArg1).name() << ", " << typeid(CArg2).name() << ", "
2064 << typeid(CArg3).name() << ")";
2065 }
2066 }
2068 EXPECT_EQ(0, error) << "Error: mismatch for " << name << "("
2069 << Print(s1, sizeof(s1)) << ", " << Print(s2, sizeof(s2))
2070 << ", " << Print(s3, sizeof(s3)) << ") -> "
2071 << Print(d, sizeof(d)) << " (simd), "
2072 << Print(ref_d, sizeof(ref_d)) << " (ref)";
2073 }
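// Illustrative usage (a sketch, not from the original file): only the two
// all-vector ternary prototypes above are supported, so a call would look
// roughly like
//   TestSimd3Args<c_v128, c_v128, c_v128, c_v128>(kIterations, 0U, 0U,
//                                                 "some_v128_ternary_op");
// where "some_v128_ternary_op" stands for whatever ternary intrinsic the
// Map() table knows about (hypothetical name).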
2075 // Instantiations to make the functions callable from other files
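// (Each explicit instantiation below forces the compiler to emit code for
// that specialization in this translation unit, so test code in other
// translation units can link against it.)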
2076 template void TestSimd1Arg<c_v64, uint8_t>(uint32_t, uint32_t, uint32_t,
2077 const char *);
2078 template void TestSimd1Arg<c_v64, uint16_t>(uint32_t, uint32_t, uint32_t,
2079 const char *);
2080 template void TestSimd1Arg<c_v64, uint32_t>(uint32_t, uint32_t, uint32_t,
2081 const char *);
2082 template void TestSimd1Arg<c_v64, c_v64>(uint32_t, uint32_t, uint32_t,
2083 const char *);
2084 template void TestSimd1Arg<uint32_t, c_v64>(uint32_t, uint32_t, uint32_t,
2085 const char *);
2086 template void TestSimd1Arg<int32_t, c_v64>(uint32_t, uint32_t, uint32_t,
2087 const char *);
2088 template void TestSimd1Arg<uint64_t, c_v64>(uint32_t, uint32_t, uint32_t,
2089 const char *);
2090 template void TestSimd1Arg<int64_t, c_v64>(uint32_t, uint32_t, uint32_t,
2091 const char *);
2092 template void TestSimd2Args<c_v64, uint32_t, uint32_t>(uint32_t, uint32_t,
2093 uint32_t, const char *);
2094 template void TestSimd2Args<c_v64, c_v64, c_v64>(uint32_t, uint32_t, uint32_t,
2095 const char *);
2096 template void TestSimd2Args<c_v64, c_v64, uint32_t>(uint32_t, uint32_t,
2097 uint32_t, const char *);
2098 template void TestSimd2Args<int64_t, c_v64, c_v64>(uint32_t, uint32_t, uint32_t,
2099 const char *);
2100 template void TestSimd2Args<uint32_t, c_v64, c_v64>(uint32_t, uint32_t,
2101 uint32_t, const char *);
2102 template void TestSimd1Arg<c_v128, c_v128>(uint32_t, uint32_t, uint32_t,
2103 const char *);
2104 template void TestSimd1Arg<c_v128, uint8_t>(uint32_t, uint32_t, uint32_t,
2105 const char *);
2106 template void TestSimd1Arg<c_v128, uint16_t>(uint32_t, uint32_t, uint32_t,
2107 const char *);
2108 template void TestSimd1Arg<c_v128, uint32_t>(uint32_t, uint32_t, uint32_t,
2109 const char *);
2110 template void TestSimd1Arg<c_v128, uint64_t>(uint32_t, uint32_t, uint32_t,
2111 const char *);
2112 template void TestSimd1Arg<c_v128, c_v64>(uint32_t, uint32_t, uint32_t,
2113 const char *);
2114 template void TestSimd1Arg<uint32_t, c_v128>(uint32_t, uint32_t, uint32_t,
2115 const char *);
2116 template void TestSimd1Arg<uint64_t, c_v128>(uint32_t, uint32_t, uint32_t,
2117 const char *);
2118 template void TestSimd1Arg<c_v64, c_v128>(uint32_t, uint32_t, uint32_t,
2119 const char *);
2120 template void TestSimd2Args<c_v128, c_v128, c_v128>(uint32_t, uint32_t,
2121 uint32_t, const char *);
2122 template void TestSimd2Args<c_v128, c_v128, uint32_t>(uint32_t, uint32_t,
2123 uint32_t, const char *);
2124 template void TestSimd2Args<c_v128, uint64_t, uint64_t>(uint32_t, uint32_t,
2125 uint32_t, const char *);
2126 template void TestSimd2Args<c_v128, c_v64, c_v64>(uint32_t, uint32_t, uint32_t,
2127 const char *);
2128 template void TestSimd2Args<uint64_t, c_v128, c_v128>(uint32_t, uint32_t,
2129 uint32_t, const char *);
2130 template void TestSimd2Args<int64_t, c_v128, c_v128>(uint32_t, uint32_t,
2131 uint32_t, const char *);
2132 template void TestSimd2Args<uint32_t, c_v128, c_v128>(uint32_t, uint32_t,
2133 uint32_t, const char *);
2134 template void TestSimd3Args<c_v128, c_v128, c_v128, c_v128>(uint32_t, uint32_t,
2135 uint32_t,
2136 const char *);
2137 template void TestSimd1Arg<c_v256, c_v128>(uint32_t, uint32_t, uint32_t,
2138 const char *);
2139 template void TestSimd1Arg<c_v256, c_v256>(uint32_t, uint32_t, uint32_t,
2140 const char *);
2141 template void TestSimd1Arg<uint64_t, c_v256>(uint32_t, uint32_t, uint32_t,
2142 const char *);
2143 template void TestSimd1Arg<c_v256, uint8_t>(uint32_t, uint32_t, uint32_t,
2144 const char *);
2145 template void TestSimd1Arg<c_v256, uint16_t>(uint32_t, uint32_t, uint32_t,
2146 const char *);
2147 template void TestSimd1Arg<c_v256, uint32_t>(uint32_t, uint32_t, uint32_t,
2148 const char *);
2149 template void TestSimd1Arg<c_v256, uint64_t>(uint32_t, uint32_t, uint32_t,
2150 const char *);
2151 template void TestSimd1Arg<uint32_t, c_v256>(uint32_t, uint32_t, uint32_t,
2152 const char *);
2153 template void TestSimd1Arg<c_v64, c_v256>(uint32_t, uint32_t, uint32_t,
2154 const char *);
2155 template void TestSimd2Args<c_v256, c_v128, c_v128>(uint32_t, uint32_t,
2156 uint32_t, const char *);
2157 template void TestSimd2Args<c_v256, c_v256, c_v256>(uint32_t, uint32_t,
2158 uint32_t, const char *);
2159 template void TestSimd2Args<c_v256, c_v256, uint32_t>(uint32_t, uint32_t,
2160 uint32_t, const char *);
2161 template void TestSimd2Args<uint64_t, c_v256, c_v256>(uint32_t, uint32_t,
2162 uint32_t, const char *);
2163 template void TestSimd2Args<int64_t, c_v256, c_v256>(uint32_t, uint32_t,
2164 uint32_t, const char *);
2165 template void TestSimd2Args<uint32_t, c_v256, c_v256>(uint32_t, uint32_t,
2166 uint32_t, const char *);
2167 template void TestSimd3Args<c_v256, c_v256, c_v256, c_v256>(uint32_t, uint32_t,
2168 uint32_t,
2169 const char *);
2171 } // namespace SIMD_NAMESPACE