/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
16 #include "third_party/googletest/src/googletest/include/gtest/gtest.h"
17 #include "test/acm_random.h"
18 #include "test/clear_system_state.h"
19 #include "test/register_state_check.h"
20 #include "test/util.h"
22 #include "./aom_config.h"
23 #include "./aom_dsp_rtcd.h"
24 #include "aom/aom_codec.h"
25 #include "aom/aom_integer.h"
26 #include "aom_dsp/aom_filter.h"
27 #include "aom_mem/aom_mem.h"

using libaom_test::ACMRandom;

namespace {
const int number_of_iterations = 200;

typedef unsigned int (*MaskedSubPixelVarianceFunc)(
    const uint8_t *src, int src_stride, int xoffset, int yoffset,
    const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
    const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);

typedef std::tr1::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc>
    MaskedSubPixelVarianceParam;
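
// Each test parameter pairs an optimized implementation (GET_PARAM(0)) with
// the C reference implementation (GET_PARAM(1)); the two must agree exactly
// on both the returned variance and the computed SSE.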

class MaskedSubPixelVarianceTest
    : public ::testing::TestWithParam<MaskedSubPixelVarianceParam> {
 public:
  virtual ~MaskedSubPixelVarianceTest() {}
  virtual void SetUp() {
    opt_func_ = GET_PARAM(0);
    ref_func_ = GET_PARAM(1);
  }

  virtual void TearDown() { libaom_test::ClearSystemState(); }

 protected:
  MaskedSubPixelVarianceFunc opt_func_;
  MaskedSubPixelVarianceFunc ref_func_;
};

TEST_P(MaskedSubPixelVarianceTest, OperationCheck) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  // Note: We pad out the input array to a multiple of 16 bytes wide, so that
  // consecutive rows keep the 16-byte alignment.
  DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  int err_count = 0;
  int first_failure = -1;
  int src_stride = (MAX_SB_SIZE + 16);
  int ref_stride = (MAX_SB_SIZE + 16);
  int msk_stride = (MAX_SB_SIZE + 16);
  int xoffset;
  int yoffset;
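
  // Try x/y offsets of 0 (no filtering), 4 (half-pel), and a random sub-pel
  // value, over fresh random inputs on every iteration; both mask polarities
  // are checked for each combination.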
  for (int i = 0; i < number_of_iterations; ++i) {
    int xoffsets[] = { 0, 4, rnd(BIL_SUBPEL_SHIFTS) };
    int yoffsets[] = { 0, 4, rnd(BIL_SUBPEL_SHIFTS) };
    for (int j = 0; j < (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16); j++) {
      src_ptr[j] = rnd.Rand8();
      ref_ptr[j] = rnd.Rand8();
      second_pred_ptr[j] = rnd.Rand8();
      msk_ptr[j] = rnd(65);
    }
    for (int k = 0; k < 3; k++) {
      for (int l = 0; l < 3; l++) {
        xoffset = xoffsets[k];
        yoffset = yoffsets[l];
        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
                              ref_stride, second_pred_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src_ptr, src_stride, xoffset, yoffset,
                                  ref_ptr, ref_stride, second_pred_ptr, msk_ptr,
                                  msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) first_failure = i;
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count)
      << "Error: Masked Sub Pixel Variance Test OperationCheck, "
      << "C output doesn't match SSSE3 output. "
      << "First failed at test case " << first_failure;
}

TEST_P(MaskedSubPixelVarianceTest, ExtremeValues) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  int first_failure_x = -1;
  int first_failure_y = -1;
  int err_count = 0;
  int first_failure = -1;
  int src_stride = (MAX_SB_SIZE + 16);
  int ref_stride = (MAX_SB_SIZE + 16);
  int msk_stride = (MAX_SB_SIZE + 16);
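
  // Fill each buffer with either all-zero or its maximum value (255 for the
  // pixel buffers, 64 for the mask) in all 16 combinations, at every
  // sub-pixel offset, to stress potential overflow paths in the optimized
  // code.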
  for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
    for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
      for (int i = 0; i < 16; ++i) {
        memset(src_ptr, (i & 0x1) ? 255 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));
        memset(ref_ptr, (i & 0x2) ? 255 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));
        memset(second_pred_ptr, (i & 0x4) ? 255 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));
        memset(msk_ptr, (i & 0x8) ? 64 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));

        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
                              ref_stride, second_pred_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src_ptr, src_stride, xoffset, yoffset,
                                  ref_ptr, ref_stride, second_pred_ptr, msk_ptr,
                                  msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) {
              first_failure = i;
              first_failure_x = xoffset;
              first_failure_y = yoffset;
            }
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues, "
                          << "C output doesn't match SSSE3 output. "
                          << "First failed at test case " << first_failure
                          << " x_offset = " << first_failure_x
                          << " y_offset = " << first_failure_y;
}

typedef std::tr1::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc,
                        aom_bit_depth_t>
    HighbdMaskedSubPixelVarianceParam;

class HighbdMaskedSubPixelVarianceTest
    : public ::testing::TestWithParam<HighbdMaskedSubPixelVarianceParam> {
 public:
  virtual ~HighbdMaskedSubPixelVarianceTest() {}
  virtual void SetUp() {
    opt_func_ = GET_PARAM(0);
    ref_func_ = GET_PARAM(1);
    bit_depth_ = GET_PARAM(2);
  }

  virtual void TearDown() { libaom_test::ClearSystemState(); }

 protected:
  MaskedSubPixelVarianceFunc opt_func_;
  MaskedSubPixelVarianceFunc ref_func_;
  aom_bit_depth_t bit_depth_;
};

TEST_P(HighbdMaskedSubPixelVarianceTest, OperationCheck) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
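  // Note: 16-bit rows are padded to a multiple of 8 samples, so consecutive
  // rows keep the 16-byte alignment.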
  DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
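  // The high-bit-depth API passes 16-bit sample buffers through uint8_t
  // pointers; CONVERT_TO_BYTEPTR performs that wrapping.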
  uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
  uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
  uint8_t *second_pred8_ptr = CONVERT_TO_BYTEPTR(second_pred_ptr);
  int err_count = 0;
  int first_failure = -1;
  int first_failure_x = -1;
  int first_failure_y = -1;
  int src_stride = (MAX_SB_SIZE + 8);
  int ref_stride = (MAX_SB_SIZE + 8);
  int msk_stride = (MAX_SB_SIZE + 8);
  int xoffset, yoffset;

  for (int i = 0; i < number_of_iterations; ++i) {
    for (int j = 0; j < (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8); j++) {
      src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
      ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
      second_pred_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
      msk_ptr[j] = rnd(65);
    }
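    // Sweep every sub-pixel offset pair and both mask polarities.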
    for (xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
      for (yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src8_ptr, src_stride, xoffset, yoffset, ref8_ptr,
                              ref_stride, second_pred8_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src8_ptr, src_stride, xoffset, yoffset,
                                  ref8_ptr, ref_stride, second_pred8_ptr,
                                  msk_ptr, msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) {
              first_failure = i;
              first_failure_x = xoffset;
              first_failure_y = yoffset;
            }
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count)
      << "Error: Masked Sub Pixel Variance Test OperationCheck, "
      << "C output doesn't match SSSE3 output. "
      << "First failed at test case " << first_failure
      << " x_offset = " << first_failure_x << " y_offset = " << first_failure_y;
}

TEST_P(HighbdMaskedSubPixelVarianceTest, ExtremeValues) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
  uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
  uint8_t *second_pred8_ptr = CONVERT_TO_BYTEPTR(second_pred_ptr);
  int first_failure_x = -1;
  int first_failure_y = -1;
  int err_count = 0;
  int first_failure = -1;
  int src_stride = (MAX_SB_SIZE + 8);
  int ref_stride = (MAX_SB_SIZE + 8);
  int msk_stride = (MAX_SB_SIZE + 8);
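
  // As in the 8-bit ExtremeValues test, drive every buffer to all-zero or its
  // maximum value ((1 << bit_depth_) - 1 for pixels, 64 for the mask) in all
  // 16 combinations, at every sub-pixel offset.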
  for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
    for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
      for (int i = 0; i < 16; ++i) {
        aom_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
                     (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));
        aom_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
                     (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));
        aom_memset16(second_pred_ptr, (i & 0x4) ? ((1 << bit_depth_) - 1) : 0,
                     (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));
        memset(msk_ptr, (i & 0x8) ? 64 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));

        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src8_ptr, src_stride, xoffset, yoffset, ref8_ptr,
                              ref_stride, second_pred8_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src8_ptr, src_stride, xoffset, yoffset,
                                  ref8_ptr, ref_stride, second_pred8_ptr,
                                  msk_ptr, msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) {
              first_failure = i;
              first_failure_x = xoffset;
              first_failure_y = yoffset;
            }
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues, "
                          << "C output doesn't match SSSE3 output. "
                          << "First failed at test case " << first_failure
                          << " x_offset = " << first_failure_x
                          << " y_offset = " << first_failure_y;
}

using std::tr1::make_tuple;

#if HAVE_SSSE3

const MaskedSubPixelVarianceParam sub_pel_var_test[] = {
#if CONFIG_EXT_PARTITION
  make_tuple(&aom_masked_sub_pixel_variance128x128_ssse3,
             &aom_masked_sub_pixel_variance128x128_c),
  make_tuple(&aom_masked_sub_pixel_variance128x64_ssse3,
             &aom_masked_sub_pixel_variance128x64_c),
  make_tuple(&aom_masked_sub_pixel_variance64x128_ssse3,
             &aom_masked_sub_pixel_variance64x128_c),
#endif  // CONFIG_EXT_PARTITION
  make_tuple(&aom_masked_sub_pixel_variance64x64_ssse3,
             &aom_masked_sub_pixel_variance64x64_c),
  make_tuple(&aom_masked_sub_pixel_variance64x32_ssse3,
             &aom_masked_sub_pixel_variance64x32_c),
  make_tuple(&aom_masked_sub_pixel_variance32x64_ssse3,
             &aom_masked_sub_pixel_variance32x64_c),
  make_tuple(&aom_masked_sub_pixel_variance32x32_ssse3,
             &aom_masked_sub_pixel_variance32x32_c),
  make_tuple(&aom_masked_sub_pixel_variance32x16_ssse3,
             &aom_masked_sub_pixel_variance32x16_c),
  make_tuple(&aom_masked_sub_pixel_variance16x32_ssse3,
             &aom_masked_sub_pixel_variance16x32_c),
  make_tuple(&aom_masked_sub_pixel_variance16x16_ssse3,
             &aom_masked_sub_pixel_variance16x16_c),
  make_tuple(&aom_masked_sub_pixel_variance16x8_ssse3,
             &aom_masked_sub_pixel_variance16x8_c),
  make_tuple(&aom_masked_sub_pixel_variance8x16_ssse3,
             &aom_masked_sub_pixel_variance8x16_c),
  make_tuple(&aom_masked_sub_pixel_variance8x8_ssse3,
             &aom_masked_sub_pixel_variance8x8_c),
  make_tuple(&aom_masked_sub_pixel_variance8x4_ssse3,
             &aom_masked_sub_pixel_variance8x4_c),
  make_tuple(&aom_masked_sub_pixel_variance4x8_ssse3,
             &aom_masked_sub_pixel_variance4x8_c),
  make_tuple(&aom_masked_sub_pixel_variance4x4_ssse3,
             &aom_masked_sub_pixel_variance4x4_c)
};

INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
                        ::testing::ValuesIn(sub_pel_var_test));

const HighbdMaskedSubPixelVarianceParam hbd_sub_pel_var_test[] = {
#if CONFIG_EXT_PARTITION
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance128x128_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance128x128_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance128x64_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance128x64_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x128_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance64x128_c, AOM_BITS_8),
#endif  // CONFIG_EXT_PARTITION
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x64_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance64x64_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x32_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance64x32_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x64_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance32x64_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x32_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance32x32_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x16_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance32x16_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x32_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance16x32_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x16_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance16x16_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x8_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance16x8_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x16_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance8x16_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x8_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance8x8_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x4_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance8x4_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance4x8_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance4x8_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance4x4_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance4x4_c, AOM_BITS_8),
#if CONFIG_EXT_PARTITION
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x128_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance128x128_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x64_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance128x64_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x128_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance64x128_c, AOM_BITS_10),
#endif  // CONFIG_EXT_PARTITION
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x64_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance64x64_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x32_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance64x32_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x64_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance32x64_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x32_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance32x32_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x16_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance32x16_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x32_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance16x32_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x16_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance16x16_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x8_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance16x8_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x16_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance8x16_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x8_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance8x8_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x4_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance8x4_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x8_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance4x8_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x4_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance4x4_c, AOM_BITS_10),
#if CONFIG_EXT_PARTITION
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x128_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance128x128_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x64_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance128x64_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x128_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance64x128_c, AOM_BITS_12),
#endif  // CONFIG_EXT_PARTITION
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x64_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance64x64_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x32_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance64x32_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x64_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance32x64_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x32_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance32x32_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x16_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance32x16_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x32_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance16x32_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x16_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance16x16_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x8_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance16x8_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x16_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance8x16_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x8_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance8x8_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x4_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance8x4_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x8_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance4x8_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x4_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance4x4_c, AOM_BITS_12)
};

INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
                        ::testing::ValuesIn(hbd_sub_pel_var_test));
#endif  // HAVE_SSSE3
}  // namespace