1 /*
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
11 #include <cstdlib>
12 #include <new>
14 #include "third_party/googletest/src/include/gtest/gtest.h"
16 #include "./vpx_config.h"
17 #include "./vpx_dsp_rtcd.h"
18 #include "test/acm_random.h"
19 #include "test/clear_system_state.h"
20 #include "test/register_state_check.h"
21 #include "vpx/vpx_codec.h"
22 #include "vpx/vpx_integer.h"
23 #include "vpx_mem/vpx_mem.h"
24 #include "vpx_ports/mem.h"
26 namespace {
28 typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
29 const uint8_t *b, int b_stride,
30 unsigned int *sse);
31 typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
32 int xoffset, int yoffset,
33 const uint8_t *b, int b_stride,
34 unsigned int *sse);
35 typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
36 int xoffset, int yoffset,
37 const uint8_t *b, int b_stride,
38 uint32_t *sse,
39 const uint8_t *second_pred);
40 typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
41 const uint8_t *b, int b_stride);
42 typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
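// Note (illustrative, matching the calls in the test bodies below): a
// VarianceMxNFunc is invoked as fn(src, src_stride, ref, ref_stride, &sse)
// and returns the variance while writing the sum of squared errors to *sse;
// Get4x4SseFunc returns the SSE directly and takes no output pointer.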
45 using ::std::tr1::get;
46 using ::std::tr1::make_tuple;
47 using ::std::tr1::tuple;
48 using libvpx_test::ACMRandom;
50 // Truncate high bit depth results by downshifting (with rounding) by:
51 // 2 * (bit_depth - 8) for sse
52 // (bit_depth - 8) for se
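// Worked example (illustrative values only): for 12-bit data, an accumulated
// sse of 1000 becomes (1000 + 128) >> 8 = 4 and an se of 100 becomes
// (100 + 8) >> 4 = 6, bringing both back to an 8-bit scale.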
53 static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
54 switch (bit_depth) {
55 case VPX_BITS_12:
56 *sse = (*sse + 128) >> 8;
57 *se = (*se + 8) >> 4;
58 break;
59 case VPX_BITS_10:
60 *sse = (*sse + 8) >> 4;
61 *se = (*se + 2) >> 2;
62 break;
63 case VPX_BITS_8:
64 default:
65 break;
69 static unsigned int mb_ss_ref(const int16_t *src) {
70 unsigned int res = 0;
71 for (int i = 0; i < 256; ++i) {
72 res += src[i] * src[i];
74 return res;
77 static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
78 int l2w, int l2h, int src_stride_coeff,
79 int ref_stride_coeff, uint32_t *sse_ptr,
80 bool use_high_bit_depth_,
81 vpx_bit_depth_t bit_depth) {
82 int64_t se = 0;
83 uint64_t sse = 0;
84 const int w = 1 << l2w;
85 const int h = 1 << l2h;
86 for (int y = 0; y < h; y++) {
87 for (int x = 0; x < w; x++) {
88 int diff;
89 if (!use_high_bit_depth_) {
90 diff = ref[w * y * ref_stride_coeff + x] -
91 src[w * y * src_stride_coeff + x];
92 se += diff;
93 sse += diff * diff;
94 #if CONFIG_VP9_HIGHBITDEPTH
95 } else {
96 diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
97 CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
98 se += diff;
99 sse += diff * diff;
100 #endif // CONFIG_VP9_HIGHBITDEPTH
104 RoundHighBitDepth(bit_depth, &se, &sse);
105 *sse_ptr = static_cast<uint32_t>(sse);
106 return static_cast<uint32_t>(sse -
107 ((static_cast<int64_t>(se) * se) >>
108 (l2w + l2h)));
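// The return value above uses the identity var = sse - se^2 / N with
// N = w * h = 2^(l2w + l2h). For example (illustrative numbers only), a 4x4
// block with sse = 400 and se = 40 gives 400 - (1600 >> 4) = 300.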
111 /* The subpel reference functions differ from the codec version in one aspect:
112 * they calculate the bilinear factors directly instead of using a lookup table
113 * and therefore upshift xoff and yoff by 1. Only every other calculated value
114 * is used so the codec version shrinks the table to save space and maintain
115 * compatibility with vp8.
116  */
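// Illustrative example: a test offset of xoff = 4 (out of 8) becomes 8 after
// the upshift in subpel_variance_ref below, so the bilinear weights are 8/16
// and 8/16, i.e. an exact (rounded) half-pel average of the two taps.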
117 static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
118 int l2w, int l2h, int xoff, int yoff,
119 uint32_t *sse_ptr,
120 bool use_high_bit_depth_,
121 vpx_bit_depth_t bit_depth) {
122 int64_t se = 0;
123 uint64_t sse = 0;
124 const int w = 1 << l2w;
125 const int h = 1 << l2h;
127 xoff <<= 1;
128 yoff <<= 1;
130 for (int y = 0; y < h; y++) {
131 for (int x = 0; x < w; x++) {
132 // Bilinear interpolation at a 16th pel step.
133 if (!use_high_bit_depth_) {
134 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
135 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
136 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
137 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
138 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
139 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
140 const int r = a + (((b - a) * yoff + 8) >> 4);
141 const int diff = r - src[w * y + x];
142 se += diff;
143 sse += diff * diff;
144 #if CONFIG_VP9_HIGHBITDEPTH
145 } else {
146 uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
147 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
148 const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
149 const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
150 const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
151 const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
152 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
153 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
154 const int r = a + (((b - a) * yoff + 8) >> 4);
155 const int diff = r - src16[w * y + x];
156 se += diff;
157 sse += diff * diff;
158 #endif // CONFIG_VP9_HIGHBITDEPTH
162 RoundHighBitDepth(bit_depth, &se, &sse);
163 *sse_ptr = static_cast<uint32_t>(sse);
164 return static_cast<uint32_t>(sse -
165 ((static_cast<int64_t>(se) * se) >>
166 (l2w + l2h)));
169 class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
170 public:
171 SumOfSquaresTest() : func_(GetParam()) {}
173 virtual ~SumOfSquaresTest() {
174 libvpx_test::ClearSystemState();
177 protected:
178 void ConstTest();
179 void RefTest();
181 SumOfSquaresFunction func_;
182 ACMRandom rnd_;
185 void SumOfSquaresTest::ConstTest() {
186 int16_t mem[256];
187 unsigned int res;
188 for (int v = 0; v < 256; ++v) {
189 for (int i = 0; i < 256; ++i) {
190 mem[i] = v;
192 ASM_REGISTER_STATE_CHECK(res = func_(mem));
193 EXPECT_EQ(256u * (v * v), res);
197 void SumOfSquaresTest::RefTest() {
198 int16_t mem[256];
199 for (int i = 0; i < 100; ++i) {
200 for (int j = 0; j < 256; ++j) {
201 mem[j] = rnd_.Rand8() - rnd_.Rand8();
204 const unsigned int expected = mb_ss_ref(mem);
205 unsigned int res;
206 ASM_REGISTER_STATE_CHECK(res = func_(mem));
207 EXPECT_EQ(expected, res);
211 template<typename VarianceFunctionType>
212 class VarianceTest
213 : public ::testing::TestWithParam<tuple<int, int,
214 VarianceFunctionType, int> > {
215 public:
216 virtual void SetUp() {
217 const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
218 log2width_ = get<0>(params);
219 width_ = 1 << log2width_;
220 log2height_ = get<1>(params);
221 height_ = 1 << log2height_;
222 variance_ = get<2>(params);
223 if (get<3>(params)) {
224 bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
225 use_high_bit_depth_ = true;
226 } else {
227 bit_depth_ = VPX_BITS_8;
228 use_high_bit_depth_ = false;
230 mask_ = (1 << bit_depth_) - 1;
232 rnd_.Reset(ACMRandom::DeterministicSeed());
233 block_size_ = width_ * height_;
234 if (!use_high_bit_depth_) {
235 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
236 ref_ = new uint8_t[block_size_ * 2];
237 #if CONFIG_VP9_HIGHBITDEPTH
238 } else {
239 src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
240 vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
241 ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
242 #endif // CONFIG_VP9_HIGHBITDEPTH
244 ASSERT_TRUE(src_ != NULL);
245 ASSERT_TRUE(ref_ != NULL);
248 virtual void TearDown() {
249 if (!use_high_bit_depth_) {
250 vpx_free(src_);
251 delete[] ref_;
252 #if CONFIG_VP9_HIGHBITDEPTH
253 } else {
254 vpx_free(CONVERT_TO_SHORTPTR(src_));
255 delete[] CONVERT_TO_SHORTPTR(ref_);
256 #endif // CONFIG_VP9_HIGHBITDEPTH
258 libvpx_test::ClearSystemState();
261 protected:
262 void ZeroTest();
263 void RefTest();
264 void RefStrideTest();
265 void OneQuarterTest();
267 ACMRandom rnd_;
268 uint8_t *src_;
269 uint8_t *ref_;
270 int width_, log2width_;
271 int height_, log2height_;
272 vpx_bit_depth_t bit_depth_;
273 int mask_;
274 bool use_high_bit_depth_;
275 int block_size_;
276 VarianceFunctionType variance_;
279 template<typename VarianceFunctionType>
280 void VarianceTest<VarianceFunctionType>::ZeroTest() {
281 for (int i = 0; i <= 255; ++i) {
282 if (!use_high_bit_depth_) {
283 memset(src_, i, block_size_);
284 #if CONFIG_VP9_HIGHBITDEPTH
285 } else {
286 vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
287 block_size_);
288 #endif // CONFIG_VP9_HIGHBITDEPTH
290 for (int j = 0; j <= 255; ++j) {
291 if (!use_high_bit_depth_) {
292 memset(ref_, j, block_size_);
293 #if CONFIG_VP9_HIGHBITDEPTH
294 } else {
295 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
296 block_size_);
297 #endif // CONFIG_VP9_HIGHBITDEPTH
299 unsigned int sse;
300 unsigned int var;
301 ASM_REGISTER_STATE_CHECK(
302 var = variance_(src_, width_, ref_, width_, &sse));
303 EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
308 template<typename VarianceFunctionType>
309 void VarianceTest<VarianceFunctionType>::RefTest() {
310 for (int i = 0; i < 10; ++i) {
311 for (int j = 0; j < block_size_; j++) {
312 if (!use_high_bit_depth_) {
313 src_[j] = rnd_.Rand8();
314 ref_[j] = rnd_.Rand8();
315 #if CONFIG_VP9_HIGHBITDEPTH
316 } else {
317 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
318 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
319 #endif // CONFIG_VP9_HIGHBITDEPTH
322 unsigned int sse1, sse2;
323 unsigned int var1;
324 const int stride_coeff = 1;
325 ASM_REGISTER_STATE_CHECK(
326 var1 = variance_(src_, width_, ref_, width_, &sse1));
327 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
328 log2height_, stride_coeff,
329 stride_coeff, &sse2,
330 use_high_bit_depth_, bit_depth_);
331 EXPECT_EQ(sse1, sse2);
332 EXPECT_EQ(var1, var2);
336 template<typename VarianceFunctionType>
337 void VarianceTest<VarianceFunctionType>::RefStrideTest() {
338 for (int i = 0; i < 10; ++i) {
339 int ref_stride_coeff = i % 2;
340 int src_stride_coeff = (i >> 1) % 2;
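// A stride coefficient of 0 collapses the buffer to a single repeated row
// (effective stride 0), while 1 uses the natural stride of width_ pixels.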
341 for (int j = 0; j < block_size_; j++) {
342 int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
343 int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
344 if (!use_high_bit_depth_) {
345 src_[src_ind] = rnd_.Rand8();
346 ref_[ref_ind] = rnd_.Rand8();
347 #if CONFIG_VP9_HIGHBITDEPTH
348 } else {
349 CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
350 CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
351 #endif // CONFIG_VP9_HIGHBITDEPTH
354 unsigned int sse1, sse2;
355 unsigned int var1;
357 ASM_REGISTER_STATE_CHECK(
358 var1 = variance_(src_, width_ * src_stride_coeff,
359 ref_, width_ * ref_stride_coeff, &sse1));
360 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
361 log2height_, src_stride_coeff,
362 ref_stride_coeff, &sse2,
363 use_high_bit_depth_, bit_depth_);
364 EXPECT_EQ(sse1, sse2);
365 EXPECT_EQ(var1, var2);
369 template<typename VarianceFunctionType>
370 void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
371 const int half = block_size_ / 2;
372 if (!use_high_bit_depth_) {
373 memset(src_, 255, block_size_);
374 memset(ref_, 255, half);
375 memset(ref_ + half, 0, half);
376 #if CONFIG_VP9_HIGHBITDEPTH
377 } else {
378 vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
379 block_size_);
380 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
381 vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
382 #endif // CONFIG_VP9_HIGHBITDEPTH
384 unsigned int sse;
385 unsigned int var;
386 ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
387 const unsigned int expected = block_size_ * 255 * 255 / 4;
388 EXPECT_EQ(expected, var);
391 template<typename MseFunctionType>
392 class MseTest
393 : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
394 public:
395 virtual void SetUp() {
396 const tuple<int, int, MseFunctionType>& params = this->GetParam();
397 log2width_ = get<0>(params);
398 width_ = 1 << log2width_;
399 log2height_ = get<1>(params);
400 height_ = 1 << log2height_;
401 mse_ = get<2>(params);
403 rnd.Reset(ACMRandom::DeterministicSeed());
404 block_size_ = width_ * height_;
405 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
406 ref_ = new uint8_t[block_size_];
407 ASSERT_TRUE(src_ != NULL);
408 ASSERT_TRUE(ref_ != NULL);
411 virtual void TearDown() {
412 vpx_free(src_);
413 delete[] ref_;
414 libvpx_test::ClearSystemState();
417 protected:
418 void RefTest_mse();
419 void RefTest_sse();
420 void MaxTest_mse();
421 void MaxTest_sse();
423 ACMRandom rnd;
424 uint8_t* src_;
425 uint8_t* ref_;
426 int width_, log2width_;
427 int height_, log2height_;
428 int block_size_;
429 MseFunctionType mse_;
432 template<typename MseFunctionType>
433 void MseTest<MseFunctionType>::RefTest_mse() {
434 for (int i = 0; i < 10; ++i) {
435 for (int j = 0; j < block_size_; j++) {
436 src_[j] = rnd.Rand8();
437 ref_[j] = rnd.Rand8();
439 unsigned int sse1, sse2;
440 const int stride_coeff = 1;
441 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
442 variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
443 stride_coeff, &sse2, false, VPX_BITS_8);
444 EXPECT_EQ(sse1, sse2);
448 template<typename MseFunctionType>
449 void MseTest<MseFunctionType>::RefTest_sse() {
450 for (int i = 0; i < 10; ++i) {
451 for (int j = 0; j < block_size_; j++) {
452 src_[j] = rnd.Rand8();
453 ref_[j] = rnd.Rand8();
455 unsigned int sse2;
456 unsigned int var1;
457 const int stride_coeff = 1;
458 ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
459 variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
460 stride_coeff, &sse2, false, VPX_BITS_8);
461 EXPECT_EQ(var1, sse2);
465 template<typename MseFunctionType>
466 void MseTest<MseFunctionType>::MaxTest_mse() {
467 memset(src_, 255, block_size_);
468 memset(ref_, 0, block_size_);
469 unsigned int sse;
470 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
471 const unsigned int expected = block_size_ * 255 * 255;
472 EXPECT_EQ(expected, sse);
475 template<typename MseFunctionType>
476 void MseTest<MseFunctionType>::MaxTest_sse() {
477 memset(src_, 255, block_size_);
478 memset(ref_, 0, block_size_);
479 unsigned int var;
480 ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
481 const unsigned int expected = block_size_ * 255 * 255;
482 EXPECT_EQ(expected, var);
485 static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
486 const uint8_t *src,
487 const uint8_t *second_pred,
488 int l2w, int l2h,
489 int xoff, int yoff,
490 uint32_t *sse_ptr,
491 bool use_high_bit_depth,
492 vpx_bit_depth_t bit_depth) {
493 int64_t se = 0;
494 uint64_t sse = 0;
495 const int w = 1 << l2w;
496 const int h = 1 << l2h;
498 xoff <<= 1;
499 yoff <<= 1;
501 for (int y = 0; y < h; y++) {
502 for (int x = 0; x < w; x++) {
503 // Bilinear interpolation at a 16th pel step.
504 if (!use_high_bit_depth) {
505 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
506 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
507 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
508 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
509 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
510 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
511 const int r = a + (((b - a) * yoff + 8) >> 4);
512 const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
513 se += diff;
514 sse += diff * diff;
515 #if CONFIG_VP9_HIGHBITDEPTH
516 } else {
517 uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
518 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
519 uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
520 const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
521 const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
522 const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
523 const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
524 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
525 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
526 const int r = a + (((b - a) * yoff + 8) >> 4);
527 const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
528 se += diff;
529 sse += diff * diff;
530 #endif // CONFIG_VP9_HIGHBITDEPTH
534 RoundHighBitDepth(bit_depth, &se, &sse);
535 *sse_ptr = static_cast<uint32_t>(sse);
536 return static_cast<uint32_t>(sse -
537 ((static_cast<int64_t>(se) * se) >>
538 (l2w + l2h)));
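// As above, except the bilinear result r is first averaged with the compound
// prediction: (r + second_pred + 1) >> 1 rounds the mean of the two
// predictions up, e.g. (5 + 6 + 1) >> 1 = 6 (illustrative values).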
541 template<typename SubpelVarianceFunctionType>
542 class SubpelVarianceTest
543 : public ::testing::TestWithParam<tuple<int, int,
544 SubpelVarianceFunctionType, int> > {
545 public:
546 virtual void SetUp() {
547 const tuple<int, int, SubpelVarianceFunctionType, int>& params =
548 this->GetParam();
549 log2width_ = get<0>(params);
550 width_ = 1 << log2width_;
551 log2height_ = get<1>(params);
552 height_ = 1 << log2height_;
553 subpel_variance_ = get<2>(params);
554 if (get<3>(params)) {
555 bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
556 use_high_bit_depth_ = true;
557 } else {
558 bit_depth_ = VPX_BITS_8;
559 use_high_bit_depth_ = false;
561 mask_ = (1 << bit_depth_) - 1;
563 rnd_.Reset(ACMRandom::DeterministicSeed());
564 block_size_ = width_ * height_;
565 if (!use_high_bit_depth_) {
566 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
567 sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
568 ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
569 #if CONFIG_VP9_HIGHBITDEPTH
570 } else {
571 src_ = CONVERT_TO_BYTEPTR(
572 reinterpret_cast<uint16_t *>(
573 vpx_memalign(16, block_size_*sizeof(uint16_t))));
574 sec_ = CONVERT_TO_BYTEPTR(
575 reinterpret_cast<uint16_t *>(
576 vpx_memalign(16, block_size_*sizeof(uint16_t))));
577 ref_ = CONVERT_TO_BYTEPTR(
578 new uint16_t[block_size_ + width_ + height_ + 1]);
579 #endif // CONFIG_VP9_HIGHBITDEPTH
581 ASSERT_TRUE(src_ != NULL);
582 ASSERT_TRUE(sec_ != NULL);
583 ASSERT_TRUE(ref_ != NULL);
586 virtual void TearDown() {
587 if (!use_high_bit_depth_) {
588 vpx_free(src_);
589 delete[] ref_;
590 vpx_free(sec_);
591 #if CONFIG_VP9_HIGHBITDEPTH
592 } else {
593 vpx_free(CONVERT_TO_SHORTPTR(src_));
594 delete[] CONVERT_TO_SHORTPTR(ref_);
595 vpx_free(CONVERT_TO_SHORTPTR(sec_));
596 #endif // CONFIG_VP9_HIGHBITDEPTH
598 libvpx_test::ClearSystemState();
601 protected:
602 void RefTest();
603 void ExtremeRefTest();
605 ACMRandom rnd_;
606 uint8_t *src_;
607 uint8_t *ref_;
608 uint8_t *sec_;
609 bool use_high_bit_depth_;
610 vpx_bit_depth_t bit_depth_;
611 int width_, log2width_;
612 int height_, log2height_;
613 int block_size_, mask_;
614 SubpelVarianceFunctionType subpel_variance_;
617 template<typename SubpelVarianceFunctionType>
618 void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
619 for (int x = 0; x < 8; ++x) {
620 for (int y = 0; y < 8; ++y) {
621 if (!use_high_bit_depth_) {
622 for (int j = 0; j < block_size_; j++) {
623 src_[j] = rnd_.Rand8();
625 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
626 ref_[j] = rnd_.Rand8();
628 #if CONFIG_VP9_HIGHBITDEPTH
629 } else {
630 for (int j = 0; j < block_size_; j++) {
631 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
633 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
634 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
636 #endif // CONFIG_VP9_HIGHBITDEPTH
638 unsigned int sse1, sse2;
639 unsigned int var1;
640 ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
641 src_, width_, &sse1));
642 const unsigned int var2 = subpel_variance_ref(ref_, src_,
643 log2width_, log2height_,
644 x, y, &sse2,
645 use_high_bit_depth_,
646 bit_depth_);
647 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
648 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
653 template<typename SubpelVarianceFunctionType>
654 void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
655 // Compare against reference.
656 // Src: Set the first half of values to 0, the second half to the maximum.
657 // Ref: Set the first half of values to the maximum, the second half to 0.
658 for (int x = 0; x < 8; ++x) {
659 for (int y = 0; y < 8; ++y) {
660 const int half = block_size_ / 2;
661 if (!use_high_bit_depth_) {
662 memset(src_, 0, half);
663 memset(src_ + half, 255, half);
664 memset(ref_, 255, half);
665 memset(ref_ + half, 0, half + width_ + height_ + 1);
666 #if CONFIG_VP9_HIGHBITDEPTH
667 } else {
668 vpx_memset16(CONVERT_TO_SHORTPTR(src_), 0, half);
669 vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, mask_, half);
670 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), mask_, half);
671 vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0,
672 half + width_ + height_ + 1);
673 #endif // CONFIG_VP9_HIGHBITDEPTH
675 unsigned int sse1, sse2;
676 unsigned int var1;
677 ASM_REGISTER_STATE_CHECK(
678 var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
679 const unsigned int var2 =
680 subpel_variance_ref(ref_, src_, log2width_, log2height_,
681 x, y, &sse2, use_high_bit_depth_, bit_depth_);
682 EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
683 EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
688 template<>
689 void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
690 for (int x = 0; x < 8; ++x) {
691 for (int y = 0; y < 8; ++y) {
692 if (!use_high_bit_depth_) {
693 for (int j = 0; j < block_size_; j++) {
694 src_[j] = rnd_.Rand8();
695 sec_[j] = rnd_.Rand8();
697 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
698 ref_[j] = rnd_.Rand8();
700 #if CONFIG_VP9_HIGHBITDEPTH
701 } else {
702 for (int j = 0; j < block_size_; j++) {
703 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
704 CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
706 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
707 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
709 #endif // CONFIG_VP9_HIGHBITDEPTH
711 unsigned int sse1, sse2;
712 unsigned int var1;
713 ASM_REGISTER_STATE_CHECK(
714 var1 = subpel_variance_(ref_, width_ + 1, x, y,
715 src_, width_, &sse1, sec_));
716 const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
717 log2width_, log2height_,
718 x, y, &sse2,
719 use_high_bit_depth_,
720 bit_depth_);
721 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
722 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
727 typedef MseTest<Get4x4SseFunc> VpxSseTest;
728 typedef MseTest<VarianceMxNFunc> VpxMseTest;
729 typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
730 typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
731 typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
733 TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
734 TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
735 TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
736 TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
737 TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
738 TEST_P(VpxVarianceTest, Ref) { RefTest(); }
739 TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
740 TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
741 TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
742 TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
743 TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
744 TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
745 TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
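// In the variance-style make_tuple(...) entries below, the fields are
// (log2(width), log2(height), function pointer, bit depth); a bit depth of 0
// selects the plain 8-bit path in the fixtures above. The MSE/SSE tuples
// omit the bit-depth field.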
747 INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
748 ::testing::Values(vpx_get_mb_ss_c));
750 INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
751 ::testing::Values(make_tuple(2, 2,
752 &vpx_get4x4sse_cs_c)));
754 INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
755 ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_c),
756 make_tuple(4, 3, &vpx_mse16x8_c),
757 make_tuple(3, 4, &vpx_mse8x16_c),
758 make_tuple(3, 3, &vpx_mse8x8_c)));
760 INSTANTIATE_TEST_CASE_P(
761 C, VpxVarianceTest,
762 ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_c, 0),
763 make_tuple(6, 5, &vpx_variance64x32_c, 0),
764 make_tuple(5, 6, &vpx_variance32x64_c, 0),
765 make_tuple(5, 5, &vpx_variance32x32_c, 0),
766 make_tuple(5, 4, &vpx_variance32x16_c, 0),
767 make_tuple(4, 5, &vpx_variance16x32_c, 0),
768 make_tuple(4, 4, &vpx_variance16x16_c, 0),
769 make_tuple(4, 3, &vpx_variance16x8_c, 0),
770 make_tuple(3, 4, &vpx_variance8x16_c, 0),
771 make_tuple(3, 3, &vpx_variance8x8_c, 0),
772 make_tuple(3, 2, &vpx_variance8x4_c, 0),
773 make_tuple(2, 3, &vpx_variance4x8_c, 0),
774 make_tuple(2, 2, &vpx_variance4x4_c, 0)));
776 INSTANTIATE_TEST_CASE_P(
777 C, VpxSubpelVarianceTest,
778 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_c, 0),
779 make_tuple(6, 5, &vpx_sub_pixel_variance64x32_c, 0),
780 make_tuple(5, 6, &vpx_sub_pixel_variance32x64_c, 0),
781 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_c, 0),
782 make_tuple(5, 4, &vpx_sub_pixel_variance32x16_c, 0),
783 make_tuple(4, 5, &vpx_sub_pixel_variance16x32_c, 0),
784 make_tuple(4, 4, &vpx_sub_pixel_variance16x16_c, 0),
785 make_tuple(4, 3, &vpx_sub_pixel_variance16x8_c, 0),
786 make_tuple(3, 4, &vpx_sub_pixel_variance8x16_c, 0),
787 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_c, 0),
788 make_tuple(3, 2, &vpx_sub_pixel_variance8x4_c, 0),
789 make_tuple(2, 3, &vpx_sub_pixel_variance4x8_c, 0),
790 make_tuple(2, 2, &vpx_sub_pixel_variance4x4_c, 0)));
792 INSTANTIATE_TEST_CASE_P(
793 C, VpxSubpelAvgVarianceTest,
794 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_c, 0),
795 make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_c, 0),
796 make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_c, 0),
797 make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_c, 0),
798 make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_c, 0),
799 make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_c, 0),
800 make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_c, 0),
801 make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_c, 0),
802 make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_c, 0),
803 make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_c, 0),
804 make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_c, 0),
805 make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_c, 0),
806 make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_c, 0)));
808 #if CONFIG_VP9_HIGHBITDEPTH
809 typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
810 typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
811 typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
812 typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
813 VpxHBDSubpelAvgVarianceTest;
815 TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
816 TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
817 TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
818 TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
819 TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
820 TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
821 TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
822 TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
823 TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
825 /* TODO(debargha): This test does not support the highbd version
826 INSTANTIATE_TEST_CASE_P(
827 C, VpxHBDMseTest,
828 ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_c),
829 make_tuple(4, 4, &vpx_highbd_12_mse16x8_c),
830 make_tuple(4, 4, &vpx_highbd_12_mse8x16_c),
831 make_tuple(4, 4, &vpx_highbd_12_mse8x8_c),
832 make_tuple(4, 4, &vpx_highbd_10_mse16x16_c),
833 make_tuple(4, 4, &vpx_highbd_10_mse16x8_c),
834 make_tuple(4, 4, &vpx_highbd_10_mse8x16_c),
835 make_tuple(4, 4, &vpx_highbd_10_mse8x8_c),
836 make_tuple(4, 4, &vpx_highbd_8_mse16x16_c),
837 make_tuple(4, 4, &vpx_highbd_8_mse16x8_c),
838 make_tuple(4, 4, &vpx_highbd_8_mse8x16_c),
839 make_tuple(4, 4, &vpx_highbd_8_mse8x8_c)));
840 */
842 INSTANTIATE_TEST_CASE_P(
843 C, VpxHBDVarianceTest,
844 ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_c, 12),
845 make_tuple(6, 5, &vpx_highbd_12_variance64x32_c, 12),
846 make_tuple(5, 6, &vpx_highbd_12_variance32x64_c, 12),
847 make_tuple(5, 5, &vpx_highbd_12_variance32x32_c, 12),
848 make_tuple(5, 4, &vpx_highbd_12_variance32x16_c, 12),
849 make_tuple(4, 5, &vpx_highbd_12_variance16x32_c, 12),
850 make_tuple(4, 4, &vpx_highbd_12_variance16x16_c, 12),
851 make_tuple(4, 3, &vpx_highbd_12_variance16x8_c, 12),
852 make_tuple(3, 4, &vpx_highbd_12_variance8x16_c, 12),
853 make_tuple(3, 3, &vpx_highbd_12_variance8x8_c, 12),
854 make_tuple(3, 2, &vpx_highbd_12_variance8x4_c, 12),
855 make_tuple(2, 3, &vpx_highbd_12_variance4x8_c, 12),
856 make_tuple(2, 2, &vpx_highbd_12_variance4x4_c, 12),
857 make_tuple(6, 6, &vpx_highbd_10_variance64x64_c, 10),
858 make_tuple(6, 5, &vpx_highbd_10_variance64x32_c, 10),
859 make_tuple(5, 6, &vpx_highbd_10_variance32x64_c, 10),
860 make_tuple(5, 5, &vpx_highbd_10_variance32x32_c, 10),
861 make_tuple(5, 4, &vpx_highbd_10_variance32x16_c, 10),
862 make_tuple(4, 5, &vpx_highbd_10_variance16x32_c, 10),
863 make_tuple(4, 4, &vpx_highbd_10_variance16x16_c, 10),
864 make_tuple(4, 3, &vpx_highbd_10_variance16x8_c, 10),
865 make_tuple(3, 4, &vpx_highbd_10_variance8x16_c, 10),
866 make_tuple(3, 3, &vpx_highbd_10_variance8x8_c, 10),
867 make_tuple(3, 2, &vpx_highbd_10_variance8x4_c, 10),
868 make_tuple(2, 3, &vpx_highbd_10_variance4x8_c, 10),
869 make_tuple(2, 2, &vpx_highbd_10_variance4x4_c, 10),
870 make_tuple(6, 6, &vpx_highbd_8_variance64x64_c, 8),
871 make_tuple(6, 5, &vpx_highbd_8_variance64x32_c, 8),
872 make_tuple(5, 6, &vpx_highbd_8_variance32x64_c, 8),
873 make_tuple(5, 5, &vpx_highbd_8_variance32x32_c, 8),
874 make_tuple(5, 4, &vpx_highbd_8_variance32x16_c, 8),
875 make_tuple(4, 5, &vpx_highbd_8_variance16x32_c, 8),
876 make_tuple(4, 4, &vpx_highbd_8_variance16x16_c, 8),
877 make_tuple(4, 3, &vpx_highbd_8_variance16x8_c, 8),
878 make_tuple(3, 4, &vpx_highbd_8_variance8x16_c, 8),
879 make_tuple(3, 3, &vpx_highbd_8_variance8x8_c, 8),
880 make_tuple(3, 2, &vpx_highbd_8_variance8x4_c, 8),
881 make_tuple(2, 3, &vpx_highbd_8_variance4x8_c, 8),
882 make_tuple(2, 2, &vpx_highbd_8_variance4x4_c, 8)));
884 INSTANTIATE_TEST_CASE_P(
885 C, VpxHBDSubpelVarianceTest,
886 ::testing::Values(
887 make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_c, 8),
888 make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_c, 8),
889 make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_c, 8),
890 make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_c, 8),
891 make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_c, 8),
892 make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_c, 8),
893 make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_c, 8),
894 make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_c, 8),
895 make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_c, 8),
896 make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_c, 8),
897 make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_c, 8),
898 make_tuple(2, 3, &vpx_highbd_8_sub_pixel_variance4x8_c, 8),
899 make_tuple(2, 2, &vpx_highbd_8_sub_pixel_variance4x4_c, 8),
900 make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_c, 10),
901 make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_c, 10),
902 make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_c, 10),
903 make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_c, 10),
904 make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_c, 10),
905 make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_c, 10),
906 make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_c, 10),
907 make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_c, 10),
908 make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_c, 10),
909 make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_c, 10),
910 make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_c, 10),
911 make_tuple(2, 3, &vpx_highbd_10_sub_pixel_variance4x8_c, 10),
912 make_tuple(2, 2, &vpx_highbd_10_sub_pixel_variance4x4_c, 10),
913 make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_c, 12),
914 make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_c, 12),
915 make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_c, 12),
916 make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_c, 12),
917 make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_c, 12),
918 make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_c, 12),
919 make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_c, 12),
920 make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_c, 12),
921 make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_c, 12),
922 make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_c, 12),
923 make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_c, 12),
924 make_tuple(2, 3, &vpx_highbd_12_sub_pixel_variance4x8_c, 12),
925 make_tuple(2, 2, &vpx_highbd_12_sub_pixel_variance4x4_c, 12)));
927 INSTANTIATE_TEST_CASE_P(
928 C, VpxHBDSubpelAvgVarianceTest,
929 ::testing::Values(
930 make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_c, 8),
931 make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_c, 8),
932 make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_c, 8),
933 make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_c, 8),
934 make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_c, 8),
935 make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_c, 8),
936 make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_c, 8),
937 make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_c, 8),
938 make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_c, 8),
939 make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_c, 8),
940 make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_c, 8),
941 make_tuple(2, 3, &vpx_highbd_8_sub_pixel_avg_variance4x8_c, 8),
942 make_tuple(2, 2, &vpx_highbd_8_sub_pixel_avg_variance4x4_c, 8),
943 make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_c, 10),
944 make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_c, 10),
945 make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_c, 10),
946 make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_c, 10),
947 make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_c, 10),
948 make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_c, 10),
949 make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_c, 10),
950 make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_c, 10),
951 make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_c, 10),
952 make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_c, 10),
953 make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_c, 10),
954 make_tuple(2, 3, &vpx_highbd_10_sub_pixel_avg_variance4x8_c, 10),
955 make_tuple(2, 2, &vpx_highbd_10_sub_pixel_avg_variance4x4_c, 10),
956 make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_c, 12),
957 make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_c, 12),
958 make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_c, 12),
959 make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_c, 12),
960 make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_c, 12),
961 make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_c, 12),
962 make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_c, 12),
963 make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_c, 12),
964 make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_c, 12),
965 make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_c, 12),
966 make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_c, 12),
967 make_tuple(2, 3, &vpx_highbd_12_sub_pixel_avg_variance4x8_c, 12),
968 make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_c, 12)));
969 #endif // CONFIG_VP9_HIGHBITDEPTH
971 #if HAVE_MMX
972 INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
973 ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_mmx)));
975 INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
976 ::testing::Values(vpx_get_mb_ss_mmx));
978 INSTANTIATE_TEST_CASE_P(
979 MMX, VpxVarianceTest,
980 ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_mmx, 0),
981 make_tuple(4, 3, &vpx_variance16x8_mmx, 0),
982 make_tuple(3, 4, &vpx_variance8x16_mmx, 0),
983 make_tuple(3, 3, &vpx_variance8x8_mmx, 0),
984 make_tuple(2, 2, &vpx_variance4x4_mmx, 0)));
986 INSTANTIATE_TEST_CASE_P(
987 MMX, VpxSubpelVarianceTest,
988 ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_mmx, 0),
989 make_tuple(4, 3, &vpx_sub_pixel_variance16x8_mmx, 0),
990 make_tuple(3, 4, &vpx_sub_pixel_variance8x16_mmx, 0),
991 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_mmx, 0),
992 make_tuple(2, 2, &vpx_sub_pixel_variance4x4_mmx, 0)));
993 #endif // HAVE_MMX
995 #if HAVE_SSE2
996 INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
997 ::testing::Values(vpx_get_mb_ss_sse2));
999 INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
1000 ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_sse2),
1001 make_tuple(4, 3, &vpx_mse16x8_sse2),
1002 make_tuple(3, 4, &vpx_mse8x16_sse2),
1003 make_tuple(3, 3, &vpx_mse8x8_sse2)));
1005 INSTANTIATE_TEST_CASE_P(
1006 SSE2, VpxVarianceTest,
1007 ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_sse2, 0),
1008 make_tuple(6, 5, &vpx_variance64x32_sse2, 0),
1009 make_tuple(5, 6, &vpx_variance32x64_sse2, 0),
1010 make_tuple(5, 5, &vpx_variance32x32_sse2, 0),
1011 make_tuple(5, 4, &vpx_variance32x16_sse2, 0),
1012 make_tuple(4, 5, &vpx_variance16x32_sse2, 0),
1013 make_tuple(4, 4, &vpx_variance16x16_sse2, 0),
1014 make_tuple(4, 3, &vpx_variance16x8_sse2, 0),
1015 make_tuple(3, 4, &vpx_variance8x16_sse2, 0),
1016 make_tuple(3, 3, &vpx_variance8x8_sse2, 0),
1017 make_tuple(3, 2, &vpx_variance8x4_sse2, 0),
1018 make_tuple(2, 3, &vpx_variance4x8_sse2, 0),
1019 make_tuple(2, 2, &vpx_variance4x4_sse2, 0)));
1021 #if CONFIG_USE_X86INC
1022 INSTANTIATE_TEST_CASE_P(
1023 SSE2, VpxSubpelVarianceTest,
1024 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_sse2, 0),
1025 make_tuple(6, 5, &vpx_sub_pixel_variance64x32_sse2, 0),
1026 make_tuple(5, 6, &vpx_sub_pixel_variance32x64_sse2, 0),
1027 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_sse2, 0),
1028 make_tuple(5, 4, &vpx_sub_pixel_variance32x16_sse2, 0),
1029 make_tuple(4, 5, &vpx_sub_pixel_variance16x32_sse2, 0),
1030 make_tuple(4, 4, &vpx_sub_pixel_variance16x16_sse2, 0),
1031 make_tuple(4, 3, &vpx_sub_pixel_variance16x8_sse2, 0),
1032 make_tuple(3, 4, &vpx_sub_pixel_variance8x16_sse2, 0),
1033 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_sse2, 0),
1034 make_tuple(3, 2, &vpx_sub_pixel_variance8x4_sse2, 0),
1035 make_tuple(2, 3, &vpx_sub_pixel_variance4x8_sse, 0),
1036 make_tuple(2, 2, &vpx_sub_pixel_variance4x4_sse, 0)));
1038 INSTANTIATE_TEST_CASE_P(
1039 SSE2, VpxSubpelAvgVarianceTest,
1040 ::testing::Values(
1041 make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_sse2, 0),
1042 make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_sse2, 0),
1043 make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_sse2, 0),
1044 make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_sse2, 0),
1045 make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_sse2, 0),
1046 make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_sse2, 0),
1047 make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_sse2, 0),
1048 make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_sse2, 0),
1049 make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_sse2, 0),
1050 make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_sse2, 0),
1051 make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_sse2, 0),
1052 make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_sse, 0),
1053 make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_sse, 0)));
1054 #endif // CONFIG_USE_X86INC
1056 #if CONFIG_VP9_HIGHBITDEPTH
1057 /* TODO(debargha): This test does not support the highbd version
1058 INSTANTIATE_TEST_CASE_P(
1059 SSE2, VpxHBDMseTest,
1060 ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_sse2),
1061 make_tuple(4, 3, &vpx_highbd_12_mse16x8_sse2),
1062 make_tuple(3, 4, &vpx_highbd_12_mse8x16_sse2),
1063 make_tuple(3, 3, &vpx_highbd_12_mse8x8_sse2),
1064 make_tuple(4, 4, &vpx_highbd_10_mse16x16_sse2),
1065 make_tuple(4, 3, &vpx_highbd_10_mse16x8_sse2),
1066 make_tuple(3, 4, &vpx_highbd_10_mse8x16_sse2),
1067 make_tuple(3, 3, &vpx_highbd_10_mse8x8_sse2),
1068 make_tuple(4, 4, &vpx_highbd_8_mse16x16_sse2),
1069 make_tuple(4, 3, &vpx_highbd_8_mse16x8_sse2),
1070 make_tuple(3, 4, &vpx_highbd_8_mse8x16_sse2),
1071 make_tuple(3, 3, &vpx_highbd_8_mse8x8_sse2)));
1072 */
1074 INSTANTIATE_TEST_CASE_P(
1075 SSE2, VpxHBDVarianceTest,
1076 ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_sse2, 12),
1077 make_tuple(6, 5, &vpx_highbd_12_variance64x32_sse2, 12),
1078 make_tuple(5, 6, &vpx_highbd_12_variance32x64_sse2, 12),
1079 make_tuple(5, 5, &vpx_highbd_12_variance32x32_sse2, 12),
1080 make_tuple(5, 4, &vpx_highbd_12_variance32x16_sse2, 12),
1081 make_tuple(4, 5, &vpx_highbd_12_variance16x32_sse2, 12),
1082 make_tuple(4, 4, &vpx_highbd_12_variance16x16_sse2, 12),
1083 make_tuple(4, 3, &vpx_highbd_12_variance16x8_sse2, 12),
1084 make_tuple(3, 4, &vpx_highbd_12_variance8x16_sse2, 12),
1085 make_tuple(3, 3, &vpx_highbd_12_variance8x8_sse2, 12),
1086 make_tuple(6, 6, &vpx_highbd_10_variance64x64_sse2, 10),
1087 make_tuple(6, 5, &vpx_highbd_10_variance64x32_sse2, 10),
1088 make_tuple(5, 6, &vpx_highbd_10_variance32x64_sse2, 10),
1089 make_tuple(5, 5, &vpx_highbd_10_variance32x32_sse2, 10),
1090 make_tuple(5, 4, &vpx_highbd_10_variance32x16_sse2, 10),
1091 make_tuple(4, 5, &vpx_highbd_10_variance16x32_sse2, 10),
1092 make_tuple(4, 4, &vpx_highbd_10_variance16x16_sse2, 10),
1093 make_tuple(4, 3, &vpx_highbd_10_variance16x8_sse2, 10),
1094 make_tuple(3, 4, &vpx_highbd_10_variance8x16_sse2, 10),
1095 make_tuple(3, 3, &vpx_highbd_10_variance8x8_sse2, 10),
1096 make_tuple(6, 6, &vpx_highbd_8_variance64x64_sse2, 8),
1097 make_tuple(6, 5, &vpx_highbd_8_variance64x32_sse2, 8),
1098 make_tuple(5, 6, &vpx_highbd_8_variance32x64_sse2, 8),
1099 make_tuple(5, 5, &vpx_highbd_8_variance32x32_sse2, 8),
1100 make_tuple(5, 4, &vpx_highbd_8_variance32x16_sse2, 8),
1101 make_tuple(4, 5, &vpx_highbd_8_variance16x32_sse2, 8),
1102 make_tuple(4, 4, &vpx_highbd_8_variance16x16_sse2, 8),
1103 make_tuple(4, 3, &vpx_highbd_8_variance16x8_sse2, 8),
1104 make_tuple(3, 4, &vpx_highbd_8_variance8x16_sse2, 8),
1105 make_tuple(3, 3, &vpx_highbd_8_variance8x8_sse2, 8)));
1107 #if CONFIG_USE_X86INC
1108 INSTANTIATE_TEST_CASE_P(
1109 SSE2, VpxHBDSubpelVarianceTest,
1110 ::testing::Values(
1111 make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_sse2, 12),
1112 make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_sse2, 12),
1113 make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_sse2, 12),
1114 make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_sse2, 12),
1115 make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_sse2, 12),
1116 make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_sse2, 12),
1117 make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_sse2, 12),
1118 make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_sse2, 12),
1119 make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_sse2, 12),
1120 make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_sse2, 12),
1121 make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_sse2, 12),
1122 make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_sse2, 10),
1123 make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_sse2, 10),
1124 make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_sse2, 10),
1125 make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_sse2, 10),
1126 make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_sse2, 10),
1127 make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_sse2, 10),
1128 make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_sse2, 10),
1129 make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_sse2, 10),
1130 make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_sse2, 10),
1131 make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_sse2, 10),
1132 make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_sse2, 10),
1133 make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_sse2, 8),
1134 make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_sse2, 8),
1135 make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_sse2, 8),
1136 make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_sse2, 8),
1137 make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_sse2, 8),
1138 make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_sse2, 8),
1139 make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_sse2, 8),
1140 make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_sse2, 8),
1141 make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_sse2, 8),
1142 make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_sse2, 8),
1143 make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_sse2, 8)));
1145 INSTANTIATE_TEST_CASE_P(
1146 SSE2, VpxHBDSubpelAvgVarianceTest,
1147 ::testing::Values(
1148 make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_sse2, 12),
1149 make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_sse2, 12),
1150 make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_sse2, 12),
1151 make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_sse2, 12),
1152 make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_sse2, 12),
1153 make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_sse2, 12),
1154 make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_sse2, 12),
1155 make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_sse2, 12),
1156 make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_sse2, 12),
1157 make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_sse2, 12),
1158 make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_sse2, 12),
1159 make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_sse2, 10),
1160 make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_sse2, 10),
1161 make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_sse2, 10),
1162 make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_sse2, 10),
1163 make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_sse2, 10),
1164 make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_sse2, 10),
1165 make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_sse2, 10),
1166 make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_sse2, 10),
1167 make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_sse2, 10),
1168 make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_sse2, 10),
1169 make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_sse2, 10),
1170 make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_sse2, 8),
1171 make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_sse2, 8),
1172 make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_sse2, 8),
1173 make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_sse2, 8),
1174 make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_sse2, 8),
1175 make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_sse2, 8),
1176 make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_sse2, 8),
1177 make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_sse2, 8),
1178 make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_sse2, 8),
1179 make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
1180 make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
1181 #endif // CONFIG_USE_X86INC
1182 #endif // CONFIG_VP9_HIGHBITDEPTH
1183 #endif // HAVE_SSE2
1185 #if HAVE_SSSE3
1186 #if CONFIG_USE_X86INC
1187 INSTANTIATE_TEST_CASE_P(
1188 SSSE3, VpxSubpelVarianceTest,
1189 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_ssse3, 0),
1190 make_tuple(6, 5, &vpx_sub_pixel_variance64x32_ssse3, 0),
1191 make_tuple(5, 6, &vpx_sub_pixel_variance32x64_ssse3, 0),
1192 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_ssse3, 0),
1193 make_tuple(5, 4, &vpx_sub_pixel_variance32x16_ssse3, 0),
1194 make_tuple(4, 5, &vpx_sub_pixel_variance16x32_ssse3, 0),
1195 make_tuple(4, 4, &vpx_sub_pixel_variance16x16_ssse3, 0),
1196 make_tuple(4, 3, &vpx_sub_pixel_variance16x8_ssse3, 0),
1197 make_tuple(3, 4, &vpx_sub_pixel_variance8x16_ssse3, 0),
1198 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_ssse3, 0),
1199 make_tuple(3, 2, &vpx_sub_pixel_variance8x4_ssse3, 0),
1200 make_tuple(2, 3, &vpx_sub_pixel_variance4x8_ssse3, 0),
1201 make_tuple(2, 2, &vpx_sub_pixel_variance4x4_ssse3, 0)));
1203 INSTANTIATE_TEST_CASE_P(
1204 SSSE3, VpxSubpelAvgVarianceTest,
1205 ::testing::Values(
1206 make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_ssse3, 0),
1207 make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_ssse3, 0),
1208 make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_ssse3, 0),
1209 make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_ssse3, 0),
1210 make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_ssse3, 0),
1211 make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_ssse3, 0),
1212 make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_ssse3, 0),
1213 make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_ssse3, 0),
1214 make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_ssse3, 0),
1215 make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_ssse3, 0),
1216 make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_ssse3, 0),
1217 make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_ssse3, 0),
1218 make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_ssse3, 0)));
1219 #endif // CONFIG_USE_X86INC
1220 #endif // HAVE_SSSE3
1222 #if HAVE_AVX2
1223 INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
1224 ::testing::Values(make_tuple(4, 4,
1225 &vpx_mse16x16_avx2)));
1227 INSTANTIATE_TEST_CASE_P(
1228 AVX2, VpxVarianceTest,
1229 ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_avx2, 0),
1230 make_tuple(6, 5, &vpx_variance64x32_avx2, 0),
1231 make_tuple(5, 5, &vpx_variance32x32_avx2, 0),
1232 make_tuple(5, 4, &vpx_variance32x16_avx2, 0),
1233 make_tuple(4, 4, &vpx_variance16x16_avx2, 0)));
1235 INSTANTIATE_TEST_CASE_P(
1236 AVX2, VpxSubpelVarianceTest,
1237 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_avx2, 0),
1238 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_avx2, 0)));
1240 INSTANTIATE_TEST_CASE_P(
1241 AVX2, VpxSubpelAvgVarianceTest,
1242 ::testing::Values(
1243 make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_avx2, 0),
1244 make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_avx2, 0)));
1245 #endif // HAVE_AVX2
1247 #if HAVE_MEDIA
1248 INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
1249 ::testing::Values(make_tuple(4, 4,
1250 &vpx_mse16x16_media)));
1252 INSTANTIATE_TEST_CASE_P(
1253 MEDIA, VpxVarianceTest,
1254 ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_media, 0),
1255 make_tuple(3, 3, &vpx_variance8x8_media, 0)));
1257 INSTANTIATE_TEST_CASE_P(
1258 MEDIA, VpxSubpelVarianceTest,
1259 ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_media, 0),
1260 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_media, 0)));
1261 #endif // HAVE_MEDIA
1263 #if HAVE_NEON
1264 INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
1265 ::testing::Values(make_tuple(2, 2,
1266 &vpx_get4x4sse_cs_neon)));
1268 INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
1269 ::testing::Values(make_tuple(4, 4,
1270 &vpx_mse16x16_neon)));
1272 INSTANTIATE_TEST_CASE_P(
1273 NEON, VpxVarianceTest,
1274 ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_neon, 0),
1275 make_tuple(6, 5, &vpx_variance64x32_neon, 0),
1276 make_tuple(5, 6, &vpx_variance32x64_neon, 0),
1277 make_tuple(5, 5, &vpx_variance32x32_neon, 0),
1278 make_tuple(4, 4, &vpx_variance16x16_neon, 0),
1279 make_tuple(4, 3, &vpx_variance16x8_neon, 0),
1280 make_tuple(3, 4, &vpx_variance8x16_neon, 0),
1281 make_tuple(3, 3, &vpx_variance8x8_neon, 0)));
1283 INSTANTIATE_TEST_CASE_P(
1284 NEON, VpxSubpelVarianceTest,
1285 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_neon, 0),
1286 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_neon, 0),
1287 make_tuple(4, 4, &vpx_sub_pixel_variance16x16_neon, 0),
1288 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_neon, 0)));
1289 #endif // HAVE_NEON
1291 #if HAVE_MSA
1292 INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
1293 ::testing::Values(vpx_get_mb_ss_msa));
1295 INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
1296 ::testing::Values(make_tuple(2, 2,
1297 &vpx_get4x4sse_cs_msa)));
1299 INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
1300 ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_msa),
1301 make_tuple(4, 3, &vpx_mse16x8_msa),
1302 make_tuple(3, 4, &vpx_mse8x16_msa),
1303 make_tuple(3, 3, &vpx_mse8x8_msa)));
1305 INSTANTIATE_TEST_CASE_P(
1306 MSA, VpxVarianceTest,
1307 ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_msa, 0),
1308 make_tuple(6, 5, &vpx_variance64x32_msa, 0),
1309 make_tuple(5, 6, &vpx_variance32x64_msa, 0),
1310 make_tuple(5, 5, &vpx_variance32x32_msa, 0),
1311 make_tuple(5, 4, &vpx_variance32x16_msa, 0),
1312 make_tuple(4, 5, &vpx_variance16x32_msa, 0),
1313 make_tuple(4, 4, &vpx_variance16x16_msa, 0),
1314 make_tuple(4, 3, &vpx_variance16x8_msa, 0),
1315 make_tuple(3, 4, &vpx_variance8x16_msa, 0),
1316 make_tuple(3, 3, &vpx_variance8x8_msa, 0),
1317 make_tuple(3, 2, &vpx_variance8x4_msa, 0),
1318 make_tuple(2, 3, &vpx_variance4x8_msa, 0),
1319 make_tuple(2, 2, &vpx_variance4x4_msa, 0)));
1321 INSTANTIATE_TEST_CASE_P(
1322 MSA, VpxSubpelVarianceTest,
1323 ::testing::Values(make_tuple(2, 2, &vpx_sub_pixel_variance4x4_msa, 0),
1324 make_tuple(2, 3, &vpx_sub_pixel_variance4x8_msa, 0),
1325 make_tuple(3, 2, &vpx_sub_pixel_variance8x4_msa, 0),
1326 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_msa, 0),
1327 make_tuple(3, 4, &vpx_sub_pixel_variance8x16_msa, 0),
1328 make_tuple(4, 3, &vpx_sub_pixel_variance16x8_msa, 0),
1329 make_tuple(4, 4, &vpx_sub_pixel_variance16x16_msa, 0),
1330 make_tuple(4, 5, &vpx_sub_pixel_variance16x32_msa, 0),
1331 make_tuple(5, 4, &vpx_sub_pixel_variance32x16_msa, 0),
1332 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_msa, 0),
1333 make_tuple(5, 6, &vpx_sub_pixel_variance32x64_msa, 0),
1334 make_tuple(6, 5, &vpx_sub_pixel_variance64x32_msa, 0),
1335 make_tuple(6, 6, &vpx_sub_pixel_variance64x64_msa, 0)));
1337 INSTANTIATE_TEST_CASE_P(
1338 MSA, VpxSubpelAvgVarianceTest,
1339 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_msa, 0),
1340 make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_msa, 0),
1341 make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_msa, 0),
1342 make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_msa, 0),
1343 make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_msa, 0),
1344 make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_msa, 0),
1345 make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_msa, 0),
1346 make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_msa, 0),
1347 make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_msa, 0),
1348 make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_msa, 0),
1349 make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_msa, 0),
1350 make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_msa, 0),
1351 make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_msa, 0)));
1352 #endif // HAVE_MSA
1353 } // namespace