// aom.git: test/masked_variance_test.cc
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "third_party/googletest/src/googletest/include/gtest/gtest.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"

#include "aom/aom_codec.h"
#include "aom/aom_integer.h"
#include "aom_dsp/aom_filter.h"
#include "aom_mem/aom_mem.h"

using libaom_test::ACMRandom;

namespace {
const int number_of_iterations = 200;

typedef unsigned int (*MaskedSubPixelVarianceFunc)(
    const uint8_t *src, int src_stride, int xoffset, int yoffset,
    const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
    const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);

typedef ::testing::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc>
    MaskedSubPixelVarianceParam;
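
// A brief note on the signature under test (see the aom_dsp masked variance
// implementations for the exact contract): one of the two pixel blocks is
// bilinearly filtered by the sub-pel offsets (xoffset, yoffset) and blended
// with 'second_pred' under the per-pixel mask 'msk' (these tests use mask
// weights in [0, 64], with 'invert_mask' selecting which operand the weight
// applies to); the function returns the variance of that compound prediction
// against the other block and writes the sum of squared errors to *sse.
// Each test parameter pairs an optimized implementation (GET_PARAM(0)) with
// the matching C reference (GET_PARAM(1)).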
class MaskedSubPixelVarianceTest
    : public ::testing::TestWithParam<MaskedSubPixelVarianceParam> {
 public:
  virtual ~MaskedSubPixelVarianceTest() {}
  virtual void SetUp() {
    opt_func_ = GET_PARAM(0);
    ref_func_ = GET_PARAM(1);
  }

  virtual void TearDown() { libaom_test::ClearSystemState(); }

 protected:
  MaskedSubPixelVarianceFunc opt_func_;
  MaskedSubPixelVarianceFunc ref_func_;
};

TEST_P(MaskedSubPixelVarianceTest, OperationCheck) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  // Note: We pad out the input array to a multiple of 16 bytes wide, so that
  // consecutive rows keep the 16-byte alignment.
  DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  int err_count = 0;
  int first_failure = -1;
  int src_stride = (MAX_SB_SIZE + 16);
  int ref_stride = (MAX_SB_SIZE + 16);
  int msk_stride = (MAX_SB_SIZE + 16);
  int xoffset;
  int yoffset;

  for (int i = 0; i < number_of_iterations; ++i) {
    int xoffsets[] = { 0, 4, rnd(BIL_SUBPEL_SHIFTS) };
    int yoffsets[] = { 0, 4, rnd(BIL_SUBPEL_SHIFTS) };
    for (int j = 0; j < (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16); j++) {
      src_ptr[j] = rnd.Rand8();
      ref_ptr[j] = rnd.Rand8();
      second_pred_ptr[j] = rnd.Rand8();
      msk_ptr[j] = rnd(65);
    }
    for (int k = 0; k < 3; k++) {
      for (int l = 0; l < 3; l++) {
        xoffset = xoffsets[k];
        yoffset = yoffsets[l];
        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
                              ref_stride, second_pred_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src_ptr, src_stride, xoffset, yoffset,
                                  ref_ptr, ref_stride, second_pred_ptr, msk_ptr,
                                  msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) first_failure = i;
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count)
      << "Error: Masked Sub Pixel Variance Test OperationCheck,"
      << "C output doesn't match SSSE3 output. "
      << "First failed at test case " << first_failure;
}

TEST_P(MaskedSubPixelVarianceTest, ExtremeValues) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16)]);
  int first_failure_x = -1;
  int first_failure_y = -1;
  int err_count = 0;
  int first_failure = -1;
  int src_stride = (MAX_SB_SIZE + 16);
  int ref_stride = (MAX_SB_SIZE + 16);
  int msk_stride = (MAX_SB_SIZE + 16);

  for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
    for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
      for (int i = 0; i < 16; ++i) {
        memset(src_ptr, (i & 0x1) ? 255 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));
        memset(ref_ptr, (i & 0x2) ? 255 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));
        memset(second_pred_ptr, (i & 0x4) ? 255 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));
        memset(msk_ptr, (i & 0x8) ? 64 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 16));

        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
                              ref_stride, second_pred_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src_ptr, src_stride, xoffset, yoffset,
                                  ref_ptr, ref_stride, second_pred_ptr, msk_ptr,
                                  msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) {
              first_failure = i;
              first_failure_x = xoffset;
              first_failure_y = yoffset;
            }
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues,"
                          << "C output doesn't match SSSE3 output. "
                          << "First failed at test case " << first_failure
                          << " x_offset = " << first_failure_x
                          << " y_offset = " << first_failure_y;
}

typedef ::testing::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc,
                         aom_bit_depth_t>
    HighbdMaskedSubPixelVarianceParam;
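
// The high-bitdepth variants share the same signature, but the pixel buffers
// are uint16_t arrays passed through CONVERT_TO_BYTEPTR() (as in the tests
// below), and the extra tuple element records the bit depth (8, 10 or 12)
// used to bound the random sample values.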
class HighbdMaskedSubPixelVarianceTest
    : public ::testing::TestWithParam<HighbdMaskedSubPixelVarianceParam> {
 public:
  virtual ~HighbdMaskedSubPixelVarianceTest() {}
  virtual void SetUp() {
    opt_func_ = GET_PARAM(0);
    ref_func_ = GET_PARAM(1);
    bit_depth_ = GET_PARAM(2);
  }

  virtual void TearDown() { libaom_test::ClearSystemState(); }

 protected:
  MaskedSubPixelVarianceFunc opt_func_;
  MaskedSubPixelVarianceFunc ref_func_;
  aom_bit_depth_t bit_depth_;
};

TEST_P(HighbdMaskedSubPixelVarianceTest, OperationCheck) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
  uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
  uint8_t *second_pred8_ptr = CONVERT_TO_BYTEPTR(second_pred_ptr);
  int err_count = 0;
  int first_failure = -1;
  int first_failure_x = -1;
  int first_failure_y = -1;
  int src_stride = (MAX_SB_SIZE + 8);
  int ref_stride = (MAX_SB_SIZE + 8);
  int msk_stride = (MAX_SB_SIZE + 8);
  int xoffset, yoffset;

  for (int i = 0; i < number_of_iterations; ++i) {
    for (int j = 0; j < (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8); j++) {
      src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
      ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
      second_pred_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
      msk_ptr[j] = rnd(65);
    }
    for (xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
      for (yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src8_ptr, src_stride, xoffset, yoffset, ref8_ptr,
                              ref_stride, second_pred8_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src8_ptr, src_stride, xoffset, yoffset,
                                  ref8_ptr, ref_stride, second_pred8_ptr,
                                  msk_ptr, msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) {
              first_failure = i;
              first_failure_x = xoffset;
              first_failure_y = yoffset;
            }
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count)
      << "Error: Masked Sub Pixel Variance Test OperationCheck,"
      << "C output doesn't match SSSE3 output. "
      << "First failed at test case " << first_failure
      << " x_offset = " << first_failure_x << " y_offset = " << first_failure_y;
}

TEST_P(HighbdMaskedSubPixelVarianceTest, ExtremeValues) {
  unsigned int ref_ret, opt_ret;
  unsigned int ref_sse, opt_sse;
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  DECLARE_ALIGNED(16, uint16_t,
                  second_pred_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8)]);
  uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
  uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
  uint8_t *second_pred8_ptr = CONVERT_TO_BYTEPTR(second_pred_ptr);
  int first_failure_x = -1;
  int first_failure_y = -1;
  int err_count = 0;
  int first_failure = -1;
  int src_stride = (MAX_SB_SIZE + 8);
  int ref_stride = (MAX_SB_SIZE + 8);
  int msk_stride = (MAX_SB_SIZE + 8);

  for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
    for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
      for (int i = 0; i < 16; ++i) {
        aom_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
                     (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));
        aom_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
                     (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));
        aom_memset16(second_pred_ptr, (i & 0x4) ? ((1 << bit_depth_) - 1) : 0,
                     (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));
        memset(msk_ptr, (i & 0x8) ? 64 : 0,
               (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 8));

        for (int invert_mask = 0; invert_mask < 2; ++invert_mask) {
          ref_ret = ref_func_(src8_ptr, src_stride, xoffset, yoffset, ref8_ptr,
                              ref_stride, second_pred8_ptr, msk_ptr, msk_stride,
                              invert_mask, &ref_sse);
          ASM_REGISTER_STATE_CHECK(
              opt_ret = opt_func_(src8_ptr, src_stride, xoffset, yoffset,
                                  ref8_ptr, ref_stride, second_pred8_ptr,
                                  msk_ptr, msk_stride, invert_mask, &opt_sse));

          if (opt_ret != ref_ret || opt_sse != ref_sse) {
            err_count++;
            if (first_failure == -1) {
              first_failure = i;
              first_failure_x = xoffset;
              first_failure_y = yoffset;
            }
          }
        }
      }
    }
  }

  EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues,"
                          << "C output doesn't match SSSE3 output. "
                          << "First failed at test case " << first_failure
                          << " x_offset = " << first_failure_x
                          << " y_offset = " << first_failure_y;
}

using ::testing::make_tuple;

#if HAVE_SSSE3

const MaskedSubPixelVarianceParam sub_pel_var_test[] = {
  make_tuple(&aom_masked_sub_pixel_variance128x128_ssse3,
             &aom_masked_sub_pixel_variance128x128_c),
  make_tuple(&aom_masked_sub_pixel_variance128x64_ssse3,
             &aom_masked_sub_pixel_variance128x64_c),
  make_tuple(&aom_masked_sub_pixel_variance64x128_ssse3,
             &aom_masked_sub_pixel_variance64x128_c),
  make_tuple(&aom_masked_sub_pixel_variance64x64_ssse3,
             &aom_masked_sub_pixel_variance64x64_c),
  make_tuple(&aom_masked_sub_pixel_variance64x32_ssse3,
             &aom_masked_sub_pixel_variance64x32_c),
  make_tuple(&aom_masked_sub_pixel_variance32x64_ssse3,
             &aom_masked_sub_pixel_variance32x64_c),
  make_tuple(&aom_masked_sub_pixel_variance32x32_ssse3,
             &aom_masked_sub_pixel_variance32x32_c),
  make_tuple(&aom_masked_sub_pixel_variance32x16_ssse3,
             &aom_masked_sub_pixel_variance32x16_c),
  make_tuple(&aom_masked_sub_pixel_variance16x32_ssse3,
             &aom_masked_sub_pixel_variance16x32_c),
  make_tuple(&aom_masked_sub_pixel_variance16x16_ssse3,
             &aom_masked_sub_pixel_variance16x16_c),
  make_tuple(&aom_masked_sub_pixel_variance16x8_ssse3,
             &aom_masked_sub_pixel_variance16x8_c),
  make_tuple(&aom_masked_sub_pixel_variance8x16_ssse3,
             &aom_masked_sub_pixel_variance8x16_c),
  make_tuple(&aom_masked_sub_pixel_variance8x8_ssse3,
             &aom_masked_sub_pixel_variance8x8_c),
  make_tuple(&aom_masked_sub_pixel_variance8x4_ssse3,
             &aom_masked_sub_pixel_variance8x4_c),
  make_tuple(&aom_masked_sub_pixel_variance4x8_ssse3,
             &aom_masked_sub_pixel_variance4x8_c),
  make_tuple(&aom_masked_sub_pixel_variance4x4_ssse3,
             &aom_masked_sub_pixel_variance4x4_c)
};

INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
                        ::testing::ValuesIn(sub_pel_var_test));

const HighbdMaskedSubPixelVarianceParam hbd_sub_pel_var_test[] = {
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance128x128_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance128x128_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance128x64_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance128x64_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x128_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance64x128_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x64_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance64x64_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x32_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance64x32_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x64_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance32x64_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x32_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance32x32_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x16_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance32x16_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x32_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance16x32_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x16_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance16x16_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x8_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance16x8_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x16_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance8x16_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x8_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance8x8_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x4_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance8x4_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance4x8_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance4x8_c, AOM_BITS_8),
  make_tuple(&aom_highbd_8_masked_sub_pixel_variance4x4_ssse3,
             &aom_highbd_8_masked_sub_pixel_variance4x4_c, AOM_BITS_8),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x128_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance128x128_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x64_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance128x64_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x128_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance64x128_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x64_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance64x64_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x32_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance64x32_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x64_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance32x64_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x32_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance32x32_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x16_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance32x16_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x32_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance16x32_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x16_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance16x16_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x8_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance16x8_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x16_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance8x16_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x8_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance8x8_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x4_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance8x4_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x8_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance4x8_c, AOM_BITS_10),
  make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x4_ssse3,
             &aom_highbd_10_masked_sub_pixel_variance4x4_c, AOM_BITS_10),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x128_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance128x128_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x64_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance128x64_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x128_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance64x128_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x64_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance64x64_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x32_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance64x32_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x64_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance32x64_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x32_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance32x32_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x16_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance32x16_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x32_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance16x32_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x16_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance16x16_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x8_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance16x8_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x16_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance8x16_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x8_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance8x8_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x4_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance8x4_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x8_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance4x8_c, AOM_BITS_12),
  make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x4_ssse3,
             &aom_highbd_12_masked_sub_pixel_variance4x4_c, AOM_BITS_12)
};

INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
                        ::testing::ValuesIn(hbd_sub_pel_var_test));
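
// Note: assuming the usual libaom gtest binary, these comparisons can be run
// in isolation with a name filter, e.g.:
//   test_libaom --gtest_filter='*MaskedSubPixelVariance*'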
#endif  // HAVE_SSSE3
}  // namespace