[gecko.git] media/libvpx/I256a37c6.patch
1 # HG changeset patch
2 # Parent 677ab41568f1a8427e3e43a6ce9b0d7c822b1f7e
3 # User Timothy B. Terriberry <tterribe@vt.edu>
4 Move SAD and variance functions to common
6 Upstream Change-Id: I256a37c6de079fe92ce744b1f11e16526d06b50a
8 This patch contains substantial differences compared to the upstream
9 one, as it still uses the old RTCD framework and does not include
10 the extra short-circuiting work done in upstream change
11 I05ce5b2d34e6d45fb3ec2a450aa99c4f3343bf3a.
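As background for the diffs below: the routines being moved compute block-matching metrics (sum of absolute differences, variance, MSE), and the old RTCD framework reaches them through per-module tables of function pointers filled in at init time, so SIMD variants can replace the plain-C versions when runtime CPU detection finds support. The sketch that follows is illustrative only and is not libvpx code; identifiers such as my_sad16x16_c and example_variance_vtable_t are hypothetical. It merely shows the general shape of a C SAD routine and of the vtable wiring that this patch relocates from the encoder-side RTCD struct to the common one.

    #include <stdlib.h>

    /* Illustrative sketch only (hypothetical names, not libvpx code):
     * a plain-C sum-of-absolute-differences over a 16x16 block, in the
     * general style of the vp8_sad*_c routines this patch moves. */
    static unsigned int my_sad16x16_c(const unsigned char *src_ptr, int src_stride,
                                      const unsigned char *ref_ptr, int ref_stride)
    {
        unsigned int sad = 0;
        int r, c;

        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
                sad += abs(src_ptr[c] - ref_ptr[c]);

            src_ptr += src_stride;
            ref_ptr += ref_stride;
        }

        return sad;
    }

    /* The old RTCD framework keeps such routines behind a function-pointer
     * table that is filled in at init; runtime CPU detection then overwrites
     * entries with SIMD variants, much like the rtcd->variance.* assignments
     * in the diffs below. */
    typedef unsigned int (*example_sad_fn_t)(const unsigned char *, int,
                                             const unsigned char *, int);

    typedef struct
    {
        example_sad_fn_t sad16x16;
    } example_variance_vtable_t;

    static void example_variance_init(example_variance_vtable_t *vt)
    {
        vt->sad16x16 = my_sad16x16_c;  /* generic C fallback; SIMD versions
                                          would be assigned here when the
                                          CPU supports them */
    }

The patch itself only moves where this table lives (a vp8_variance_rtcd_vtable_t member added to VP8_COMMON_RTCD instead of the encoder's RTCD struct) and updates the VARIANCE_INVOKE call sites from &cpi->rtcd.variance to &cpi->common.rtcd.variance.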
13 diff --git a/media/libvpx/vp8/common/arm/arm_systemdependent.c b/media/libvpx/vp8/common/arm/arm_systemdependent.c
14 --- a/media/libvpx/vp8/common/arm/arm_systemdependent.c
15 +++ b/media/libvpx/vp8/common/arm/arm_systemdependent.c
16 @@ -11,16 +11,17 @@
18 #include "vpx_config.h"
19 #include "vpx_ports/arm.h"
20 #include "vp8/common/pragmas.h"
21 #include "vp8/common/subpixel.h"
22 #include "vp8/common/loopfilter.h"
23 #include "vp8/common/recon.h"
24 #include "vp8/common/idct.h"
25 +#include "vp8/common/variance.h"
26 #include "vp8/common/onyxc_int.h"
28 void vp8_arch_arm_common_init(VP8_COMMON *ctx)
30 #if CONFIG_RUNTIME_CPU_DETECT
31 VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
32 int flags = arm_cpu_caps();
33 rtcd->flags = flags;
34 @@ -63,16 +64,41 @@ void vp8_arch_arm_common_init(VP8_COMMON
35 rtcd->recon.copy8x4 = vp8_copy_mem8x4_v6;
36 rtcd->recon.intra4x4_predict = vp8_intra4x4_predict_armv6;
38 rtcd->dequant.block = vp8_dequantize_b_v6;
39 rtcd->dequant.idct_add = vp8_dequant_idct_add_v6;
40 rtcd->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_v6;
41 rtcd->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_v6;
43 + rtcd->variance.sad16x16 = vp8_sad16x16_armv6;
44 + /*rtcd->variance.sad16x8 = vp8_sad16x8_c;
45 + rtcd->variance.sad8x16 = vp8_sad8x16_c;
46 + rtcd->variance.sad8x8 = vp8_sad8x8_c;
47 + rtcd->variance.sad4x4 = vp8_sad4x4_c;*/
49 + /*rtcd->variance.var4x4 = vp8_variance4x4_c;*/
50 + rtcd->variance.var8x8 = vp8_variance8x8_armv6;
51 + /*rtcd->variance.var8x16 = vp8_variance8x16_c;
52 + rtcd->variance.var16x8 = vp8_variance16x8_c;*/
53 + rtcd->variance.var16x16 = vp8_variance16x16_armv6;
55 + /*rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
56 + rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
57 + /*rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
58 + rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
59 + rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
60 + rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
61 + rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
62 + rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
64 + rtcd->variance.mse16x16 = vp8_mse16x16_armv6;
65 + /*rtcd->variance.getmbss = vp8_get_mb_ss_c;*/
67 + /*rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_c;*/
69 #endif
71 #if HAVE_ARMV7
72 if (flags & HAS_NEON)
74 rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_neon;
75 rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_neon;
76 @@ -103,13 +129,38 @@ void vp8_arch_arm_common_init(VP8_COMMON
77 rtcd->recon.build_intra_predictors_mby_s =
78 vp8_build_intra_predictors_mby_s_neon;
80 rtcd->dequant.block = vp8_dequantize_b_neon;
81 rtcd->dequant.idct_add = vp8_dequant_idct_add_neon;
82 rtcd->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_neon;
83 rtcd->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_neon;
85 + rtcd->variance.sad16x16 = vp8_sad16x16_neon;
86 + rtcd->variance.sad16x8 = vp8_sad16x8_neon;
87 + rtcd->variance.sad8x16 = vp8_sad8x16_neon;
88 + rtcd->variance.sad8x8 = vp8_sad8x8_neon;
89 + rtcd->variance.sad4x4 = vp8_sad4x4_neon;
91 + /*rtcd->variance.var4x4 = vp8_variance4x4_c;*/
92 + rtcd->variance.var8x8 = vp8_variance8x8_neon;
93 + rtcd->variance.var8x16 = vp8_variance8x16_neon;
94 + rtcd->variance.var16x8 = vp8_variance16x8_neon;
95 + rtcd->variance.var16x16 = vp8_variance16x16_neon;
97 + /*rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
98 + rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
99 + /*rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
100 + rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
101 + rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
102 + rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
103 + rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
104 + rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
106 + rtcd->variance.mse16x16 = vp8_mse16x16_neon;
107 + /*rtcd->variance.getmbss = vp8_get_mb_ss_c;*/
109 + rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_neon;
111 #endif
113 #endif
115 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_mse16x16_armv6.asm
116 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm
117 rename to media/libvpx/vp8/common/arm/armv6/vp8_mse16x16_armv6.asm
118 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_sad16x16_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_sad16x16_armv6.asm
119 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_sad16x16_armv6.asm
120 rename to media/libvpx/vp8/common/arm/armv6/vp8_sad16x16_armv6.asm
121 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance16x16_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
122 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance16x16_armv6.asm
123 rename to media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
124 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance8x8_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
125 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance8x8_armv6.asm
126 rename to media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
127 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
128 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
129 rename to media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
130 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
131 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
132 rename to media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
133 diff --git a/media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
134 rename from media/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
135 rename to media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
136 diff --git a/media/libvpx/vp8/encoder/arm/neon/sad16_neon.asm b/media/libvpx/vp8/common/arm/neon/sad16_neon.asm
137 rename from media/libvpx/vp8/encoder/arm/neon/sad16_neon.asm
138 rename to media/libvpx/vp8/common/arm/neon/sad16_neon.asm
139 diff --git a/media/libvpx/vp8/encoder/arm/neon/sad8_neon.asm b/media/libvpx/vp8/common/arm/neon/sad8_neon.asm
140 rename from media/libvpx/vp8/encoder/arm/neon/sad8_neon.asm
141 rename to media/libvpx/vp8/common/arm/neon/sad8_neon.asm
142 diff --git a/media/libvpx/vp8/encoder/arm/neon/variance_neon.asm b/media/libvpx/vp8/common/arm/neon/variance_neon.asm
143 rename from media/libvpx/vp8/encoder/arm/neon/variance_neon.asm
144 rename to media/libvpx/vp8/common/arm/neon/variance_neon.asm
145 diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_mse16x16_neon.asm
146 rename from media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm
147 rename to media/libvpx/vp8/common/arm/neon/vp8_mse16x16_neon.asm
148 diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
149 rename from media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm
150 rename to media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
151 diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16s_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm
152 rename from media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance16x16s_neon.asm
153 rename to media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm
154 diff --git a/media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance8x8_neon.asm b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
155 rename from media/libvpx/vp8/encoder/arm/neon/vp8_subpixelvariance8x8_neon.asm
156 rename to media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
157 diff --git a/media/libvpx/vp8/encoder/arm/variance_arm.c b/media/libvpx/vp8/common/arm/variance_arm.c
158 rename from media/libvpx/vp8/encoder/arm/variance_arm.c
159 rename to media/libvpx/vp8/common/arm/variance_arm.c
160 --- a/media/libvpx/vp8/encoder/arm/variance_arm.c
161 +++ b/media/libvpx/vp8/common/arm/variance_arm.c
162 @@ -4,17 +4,17 @@
163 * Use of this source code is governed by a BSD-style license
164 * that can be found in the LICENSE file in the root of the source
165 * tree. An additional intellectual property rights grant can be found
166 * in the file PATENTS. All contributing project authors may
167 * be found in the AUTHORS file in the root of the source tree.
170 #include "vpx_config.h"
171 -#include "vp8/encoder/variance.h"
172 +#include "vp8/common/variance.h"
173 #include "vp8/common/filter.h"
175 #if HAVE_ARMV6
176 #include "vp8/common/arm/bilinearfilter_arm.h"
178 unsigned int vp8_sub_pixel_variance8x8_armv6
180 const unsigned char *src_ptr,
181 diff --git a/media/libvpx/vp8/encoder/arm/variance_arm.h b/media/libvpx/vp8/common/arm/variance_arm.h
182 rename from media/libvpx/vp8/encoder/arm/variance_arm.h
183 rename to media/libvpx/vp8/common/arm/variance_arm.h
184 diff --git a/media/libvpx/vp8/common/generic/systemdependent.c b/media/libvpx/vp8/common/generic/systemdependent.c
185 --- a/media/libvpx/vp8/common/generic/systemdependent.c
186 +++ b/media/libvpx/vp8/common/generic/systemdependent.c
187 @@ -9,16 +9,17 @@
191 #include "vpx_config.h"
192 #include "vp8/common/subpixel.h"
193 #include "vp8/common/loopfilter.h"
194 #include "vp8/common/recon.h"
195 #include "vp8/common/idct.h"
196 +#include "vp8/common/variance.h"
197 #include "vp8/common/onyxc_int.h"
199 #if CONFIG_MULTITHREAD
200 #if HAVE_UNISTD_H
201 #include <unistd.h>
202 #elif defined(_WIN32)
203 #include <windows.h>
204 typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO);
205 @@ -110,16 +111,67 @@ void vp8_machine_specific_config(VP8_COM
206 rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_c;
207 rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_c;
208 rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_c;
209 rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_c;
210 rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_c;
211 rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_c;
212 rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_c;
214 + rtcd->variance.sad16x16 = vp8_sad16x16_c;
215 + rtcd->variance.sad16x8 = vp8_sad16x8_c;
216 + rtcd->variance.sad8x16 = vp8_sad8x16_c;
217 + rtcd->variance.sad8x8 = vp8_sad8x8_c;
218 + rtcd->variance.sad4x4 = vp8_sad4x4_c;
220 + rtcd->variance.sad16x16x3 = vp8_sad16x16x3_c;
221 + rtcd->variance.sad16x8x3 = vp8_sad16x8x3_c;
222 + rtcd->variance.sad8x16x3 = vp8_sad8x16x3_c;
223 + rtcd->variance.sad8x8x3 = vp8_sad8x8x3_c;
224 + rtcd->variance.sad4x4x3 = vp8_sad4x4x3_c;
226 + rtcd->variance.sad16x16x8 = vp8_sad16x16x8_c;
227 + rtcd->variance.sad16x8x8 = vp8_sad16x8x8_c;
228 + rtcd->variance.sad8x16x8 = vp8_sad8x16x8_c;
229 + rtcd->variance.sad8x8x8 = vp8_sad8x8x8_c;
230 + rtcd->variance.sad4x4x8 = vp8_sad4x4x8_c;
232 + rtcd->variance.sad16x16x4d = vp8_sad16x16x4d_c;
233 + rtcd->variance.sad16x8x4d = vp8_sad16x8x4d_c;
234 + rtcd->variance.sad8x16x4d = vp8_sad8x16x4d_c;
235 + rtcd->variance.sad8x8x4d = vp8_sad8x8x4d_c;
236 + rtcd->variance.sad4x4x4d = vp8_sad4x4x4d_c;
237 +#if ARCH_X86 || ARCH_X86_64
238 + rtcd->variance.copy32xn = vp8_copy32xn_c;
239 +#endif
240 + rtcd->variance.var4x4 = vp8_variance4x4_c;
241 + rtcd->variance.var8x8 = vp8_variance8x8_c;
242 + rtcd->variance.var8x16 = vp8_variance8x16_c;
243 + rtcd->variance.var16x8 = vp8_variance16x8_c;
244 + rtcd->variance.var16x16 = vp8_variance16x16_c;
246 + rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
247 + rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
248 + rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
249 + rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
250 + rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
251 + rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
252 + rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
253 + rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
254 + rtcd->variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
256 + rtcd->variance.mse16x16 = vp8_mse16x16_c;
257 + rtcd->variance.getmbss = vp8_get_mb_ss_c;
259 + rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
260 +#if CONFIG_INTERNAL_STATS
261 + rtcd->variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
262 + rtcd->variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
263 +#endif
265 #if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
266 rtcd->postproc.down = vp8_mbpost_proc_down_c;
267 rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;
268 rtcd->postproc.downacross = vp8_post_proc_down_and_across_c;
269 rtcd->postproc.addnoise = vp8_plane_add_noise_c;
270 rtcd->postproc.blend_mb_inner = vp8_blend_mb_inner_c;
271 rtcd->postproc.blend_mb_outer = vp8_blend_mb_outer_c;
272 rtcd->postproc.blend_b = vp8_blend_b_c;
273 diff --git a/media/libvpx/vp8/common/onyxc_int.h b/media/libvpx/vp8/common/onyxc_int.h
274 --- a/media/libvpx/vp8/common/onyxc_int.h
275 +++ b/media/libvpx/vp8/common/onyxc_int.h
276 @@ -14,16 +14,17 @@
278 #include "vpx_config.h"
279 #include "vpx/internal/vpx_codec_internal.h"
280 #include "loopfilter.h"
281 #include "entropymv.h"
282 #include "entropy.h"
283 #include "idct.h"
284 #include "recon.h"
285 +#include "variance.h"
286 #if CONFIG_POSTPROC
287 #include "postproc.h"
288 #endif
289 #include "dequantize.h"
291 /*#ifdef PACKET_TESTING*/
292 #include "header.h"
293 /*#endif*/
294 @@ -74,16 +75,17 @@ typedef enum
295 typedef struct VP8_COMMON_RTCD
297 #if CONFIG_RUNTIME_CPU_DETECT
298 vp8_dequant_rtcd_vtable_t dequant;
299 vp8_idct_rtcd_vtable_t idct;
300 vp8_recon_rtcd_vtable_t recon;
301 vp8_subpix_rtcd_vtable_t subpix;
302 vp8_loopfilter_rtcd_vtable_t loopfilter;
303 + vp8_variance_rtcd_vtable_t variance;
304 #if CONFIG_POSTPROC
305 vp8_postproc_rtcd_vtable_t postproc;
306 #endif
307 int flags;
308 #else
309 int unused;
310 #endif
311 } VP8_COMMON_RTCD;
312 diff --git a/media/libvpx/vp8/common/postproc.c b/media/libvpx/vp8/common/postproc.c
313 --- a/media/libvpx/vp8/common/postproc.c
314 +++ b/media/libvpx/vp8/common/postproc.c
315 @@ -12,17 +12,17 @@
316 #include "vpx_config.h"
317 #include "vpx_scale/yv12config.h"
318 #include "postproc.h"
319 #include "common.h"
320 #include "recon.h"
321 #include "vpx_scale/yv12extend.h"
322 #include "vpx_scale/vpxscale.h"
323 #include "systemdependent.h"
324 -#include "../encoder/variance.h"
325 +#include "variance.h"
327 #include <math.h>
328 #include <stdlib.h>
329 #include <stdio.h>
331 #define RGB_TO_YUV(t) \
332 ( (0.257*(float)(t>>16)) + (0.504*(float)(t>>8&0xff)) + (0.098*(float)(t&0xff)) + 16), \
333 (-(0.148*(float)(t>>16)) - (0.291*(float)(t>>8&0xff)) + (0.439*(float)(t&0xff)) + 128), \
334 diff --git a/media/libvpx/vp8/encoder/sad_c.c b/media/libvpx/vp8/common/sad_c.c
335 rename from media/libvpx/vp8/encoder/sad_c.c
336 rename to media/libvpx/vp8/common/sad_c.c
337 diff --git a/media/libvpx/vp8/encoder/variance.h b/media/libvpx/vp8/common/variance.h
338 rename from media/libvpx/vp8/encoder/variance.h
339 rename to media/libvpx/vp8/common/variance.h
340 --- a/media/libvpx/vp8/encoder/variance.h
341 +++ b/media/libvpx/vp8/common/variance.h
342 @@ -78,31 +78,31 @@
344 const unsigned char *src_ptr, \
345 int source_stride, \
346 int xoffset, \
347 int yoffset, \
348 const unsigned char *ref_ptr, \
349 int Refstride, \
350 unsigned int *sse \
351 - );
354 #define prototype_ssimpf(sym) \
355 void (sym) \
357 unsigned char *s, \
358 int sp, \
359 unsigned char *r, \
360 int rp, \
361 unsigned long *sum_s, \
362 unsigned long *sum_r, \
363 unsigned long *sum_sq_s, \
364 unsigned long *sum_sq_r, \
365 unsigned long *sum_sxr \
366 - );
369 #define prototype_getmbss(sym) unsigned int (sym)(const short *)
371 #define prototype_get16x16prederror(sym)\
372 unsigned int (sym)\
374 const unsigned char *src_ptr, \
375 int source_stride, \
376 @@ -318,22 +318,22 @@ extern prototype_variance(vp8_variance_m
377 #ifndef vp8_variance_get4x4sse_cs
378 #define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_c
379 #endif
380 extern prototype_get16x16prederror(vp8_variance_get4x4sse_cs);
382 #ifndef vp8_ssimpf_8x8
383 #define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_c
384 #endif
385 -extern prototype_ssimpf(vp8_ssimpf_8x8)
386 +extern prototype_ssimpf(vp8_ssimpf_8x8);
388 #ifndef vp8_ssimpf_16x16
389 #define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_c
390 #endif
391 -extern prototype_ssimpf(vp8_ssimpf_16x16)
392 +extern prototype_ssimpf(vp8_ssimpf_16x16);
394 typedef prototype_sad(*vp8_sad_fn_t);
395 typedef prototype_sad_multi_same_address(*vp8_sad_multi_fn_t);
396 typedef prototype_sad_multi_same_address_1(*vp8_sad_multi1_fn_t);
397 typedef prototype_sad_multi_dif_address(*vp8_sad_multi_d_fn_t);
398 typedef prototype_variance(*vp8_variance_fn_t);
399 typedef prototype_variance2(*vp8_variance2_fn_t);
400 typedef prototype_subpixvariance(*vp8_subpixvariance_fn_t);
401 diff --git a/media/libvpx/vp8/encoder/variance_c.c b/media/libvpx/vp8/common/variance_c.c
402 rename from media/libvpx/vp8/encoder/variance_c.c
403 rename to media/libvpx/vp8/common/variance_c.c
404 --- a/media/libvpx/vp8/encoder/variance_c.c
405 +++ b/media/libvpx/vp8/common/variance_c.c
406 @@ -5,17 +5,17 @@
407 * that can be found in the LICENSE file in the root of the source
408 * tree. An additional intellectual property rights grant can be found
409 * in the file PATENTS. All contributing project authors may
410 * be found in the AUTHORS file in the root of the source tree.
414 #include "variance.h"
415 -#include "vp8/common/filter.h"
416 +#include "filter.h"
419 unsigned int vp8_get_mb_ss_c
421 const short *src_ptr
424 unsigned int i = 0, sum = 0;
425 @@ -451,8 +451,34 @@ unsigned int vp8_sub_pixel_variance8x16_
426 VFilter = vp8_bilinear_filters[yoffset];
429 var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
430 var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
432 return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
435 +unsigned int vp8_get4x4sse_cs_c
437 + const unsigned char *src_ptr,
438 + int source_stride,
439 + const unsigned char *ref_ptr,
440 + int recon_stride
443 + int distortion = 0;
444 + int r, c;
446 + for (r = 0; r < 4; r++)
448 + for (c = 0; c < 4; c++)
450 + int diff = src_ptr[c] - ref_ptr[c];
451 + distortion += diff * diff;
454 + src_ptr += source_stride;
455 + ref_ptr += recon_stride;
458 + return distortion;
460 diff --git a/media/libvpx/vp8/encoder/x86/sad_mmx.asm b/media/libvpx/vp8/common/x86/sad_mmx.asm
461 rename from media/libvpx/vp8/encoder/x86/sad_mmx.asm
462 rename to media/libvpx/vp8/common/x86/sad_mmx.asm
463 diff --git a/media/libvpx/vp8/encoder/x86/sad_sse2.asm b/media/libvpx/vp8/common/x86/sad_sse2.asm
464 rename from media/libvpx/vp8/encoder/x86/sad_sse2.asm
465 rename to media/libvpx/vp8/common/x86/sad_sse2.asm
466 diff --git a/media/libvpx/vp8/encoder/x86/sad_sse3.asm b/media/libvpx/vp8/common/x86/sad_sse3.asm
467 rename from media/libvpx/vp8/encoder/x86/sad_sse3.asm
468 rename to media/libvpx/vp8/common/x86/sad_sse3.asm
469 diff --git a/media/libvpx/vp8/encoder/x86/sad_sse4.asm b/media/libvpx/vp8/common/x86/sad_sse4.asm
470 rename from media/libvpx/vp8/encoder/x86/sad_sse4.asm
471 rename to media/libvpx/vp8/common/x86/sad_sse4.asm
472 diff --git a/media/libvpx/vp8/encoder/x86/sad_ssse3.asm b/media/libvpx/vp8/common/x86/sad_ssse3.asm
473 rename from media/libvpx/vp8/encoder/x86/sad_ssse3.asm
474 rename to media/libvpx/vp8/common/x86/sad_ssse3.asm
475 diff --git a/media/libvpx/vp8/encoder/x86/variance_impl_mmx.asm b/media/libvpx/vp8/common/x86/variance_impl_mmx.asm
476 rename from media/libvpx/vp8/encoder/x86/variance_impl_mmx.asm
477 rename to media/libvpx/vp8/common/x86/variance_impl_mmx.asm
478 diff --git a/media/libvpx/vp8/encoder/x86/variance_impl_sse2.asm b/media/libvpx/vp8/common/x86/variance_impl_sse2.asm
479 rename from media/libvpx/vp8/encoder/x86/variance_impl_sse2.asm
480 rename to media/libvpx/vp8/common/x86/variance_impl_sse2.asm
481 diff --git a/media/libvpx/vp8/encoder/x86/variance_impl_ssse3.asm b/media/libvpx/vp8/common/x86/variance_impl_ssse3.asm
482 rename from media/libvpx/vp8/encoder/x86/variance_impl_ssse3.asm
483 rename to media/libvpx/vp8/common/x86/variance_impl_ssse3.asm
484 diff --git a/media/libvpx/vp8/encoder/x86/variance_mmx.c b/media/libvpx/vp8/common/x86/variance_mmx.c
485 rename from media/libvpx/vp8/encoder/x86/variance_mmx.c
486 rename to media/libvpx/vp8/common/x86/variance_mmx.c
487 --- a/media/libvpx/vp8/encoder/x86/variance_mmx.c
488 +++ b/media/libvpx/vp8/common/x86/variance_mmx.c
489 @@ -4,17 +4,17 @@
490 * Use of this source code is governed by a BSD-style license
491 * that can be found in the LICENSE file in the root of the source
492 * tree. An additional intellectual property rights grant can be found
493 * in the file PATENTS. All contributing project authors may
494 * be found in the AUTHORS file in the root of the source tree.
497 #include "vpx_config.h"
498 -#include "vp8/encoder/variance.h"
499 +#include "vp8/common/variance.h"
500 #include "vp8/common/pragmas.h"
501 #include "vpx_ports/mem.h"
502 #include "vp8/common/x86/filter_x86.h"
504 extern void filter_block1d_h6_mmx
506 const unsigned char *src_ptr,
507 unsigned short *output_ptr,
508 diff --git a/media/libvpx/vp8/encoder/x86/variance_sse2.c b/media/libvpx/vp8/common/x86/variance_sse2.c
509 rename from media/libvpx/vp8/encoder/x86/variance_sse2.c
510 rename to media/libvpx/vp8/common/x86/variance_sse2.c
511 --- a/media/libvpx/vp8/encoder/x86/variance_sse2.c
512 +++ b/media/libvpx/vp8/common/x86/variance_sse2.c
513 @@ -4,17 +4,17 @@
514 * Use of this source code is governed by a BSD-style license
515 * that can be found in the LICENSE file in the root of the source
516 * tree. An additional intellectual property rights grant can be found
517 * in the file PATENTS. All contributing project authors may
518 * be found in the AUTHORS file in the root of the source tree.
521 #include "vpx_config.h"
522 -#include "vp8/encoder/variance.h"
523 +#include "vp8/common/variance.h"
524 #include "vp8/common/pragmas.h"
525 #include "vpx_ports/mem.h"
526 #include "vp8/common/x86/filter_x86.h"
528 extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
529 extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
530 extern void filter_block1d8_h6_sse2(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
531 extern void filter_block1d8_v6_sse2(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
532 diff --git a/media/libvpx/vp8/encoder/x86/variance_ssse3.c b/media/libvpx/vp8/common/x86/variance_ssse3.c
533 rename from media/libvpx/vp8/encoder/x86/variance_ssse3.c
534 rename to media/libvpx/vp8/common/x86/variance_ssse3.c
535 --- a/media/libvpx/vp8/encoder/x86/variance_ssse3.c
536 +++ b/media/libvpx/vp8/common/x86/variance_ssse3.c
537 @@ -4,17 +4,17 @@
538 * Use of this source code is governed by a BSD-style license
539 * that can be found in the LICENSE file in the root of the source
540 * tree. An additional intellectual property rights grant can be found
541 * in the file PATENTS. All contributing project authors may
542 * be found in the AUTHORS file in the root of the source tree.
545 #include "vpx_config.h"
546 -#include "vp8/encoder/variance.h"
547 +#include "vp8/common/variance.h"
548 #include "vp8/common/pragmas.h"
549 #include "vpx_ports/mem.h"
551 extern unsigned int vp8_get16x16var_sse2
553 const unsigned char *src_ptr,
554 int source_stride,
555 const unsigned char *ref_ptr,
556 diff --git a/media/libvpx/vp8/encoder/x86/variance_x86.h b/media/libvpx/vp8/common/x86/variance_x86.h
557 rename from media/libvpx/vp8/encoder/x86/variance_x86.h
558 rename to media/libvpx/vp8/common/x86/variance_x86.h
559 --- a/media/libvpx/vp8/encoder/x86/variance_x86.h
560 +++ b/media/libvpx/vp8/common/x86/variance_x86.h
561 @@ -135,18 +135,18 @@ extern prototype_subpixvariance(vp8_sub_
562 extern prototype_variance(vp8_variance_halfpixvar16x16_h_wmt);
563 extern prototype_variance(vp8_variance_halfpixvar16x16_v_wmt);
564 extern prototype_variance(vp8_variance_halfpixvar16x16_hv_wmt);
565 extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_wmt);
566 extern prototype_getmbss(vp8_get_mb_ss_sse2);
567 extern prototype_variance(vp8_mse16x16_wmt);
568 extern prototype_variance2(vp8_get8x8var_sse2);
569 extern prototype_variance2(vp8_get16x16var_sse2);
570 -extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2)
571 -extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2)
572 +extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2);
573 +extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2);
575 #if !CONFIG_RUNTIME_CPU_DETECT
576 #undef vp8_variance_sad4x4
577 #define vp8_variance_sad4x4 vp8_sad4x4_wmt
579 #undef vp8_variance_sad8x8
580 #define vp8_variance_sad8x8 vp8_sad8x8_wmt
582 diff --git a/media/libvpx/vp8/common/x86/x86_systemdependent.c b/media/libvpx/vp8/common/x86/x86_systemdependent.c
583 --- a/media/libvpx/vp8/common/x86/x86_systemdependent.c
584 +++ b/media/libvpx/vp8/common/x86/x86_systemdependent.c
585 @@ -10,16 +10,17 @@
588 #include "vpx_config.h"
589 #include "vpx_ports/x86.h"
590 #include "vp8/common/subpixel.h"
591 #include "vp8/common/loopfilter.h"
592 #include "vp8/common/recon.h"
593 #include "vp8/common/idct.h"
594 +#include "vp8/common/variance.h"
595 #include "vp8/common/pragmas.h"
596 #include "vp8/common/onyxc_int.h"
598 void vp8_arch_x86_common_init(VP8_COMMON *ctx)
600 #if CONFIG_RUNTIME_CPU_DETECT
601 VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
602 int flags = x86_simd_caps();
603 @@ -62,16 +63,43 @@ void vp8_arch_x86_common_init(VP8_COMMON
604 rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_mmx;
605 rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_mmx;
606 rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_mmx;
607 rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_mmx;
608 rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_mmx;
609 rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_mmx;
610 rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_mmx;
612 + rtcd->variance.sad16x16 = vp8_sad16x16_mmx;
613 + rtcd->variance.sad16x8 = vp8_sad16x8_mmx;
614 + rtcd->variance.sad8x16 = vp8_sad8x16_mmx;
615 + rtcd->variance.sad8x8 = vp8_sad8x8_mmx;
616 + rtcd->variance.sad4x4 = vp8_sad4x4_mmx;
618 + rtcd->variance.var4x4 = vp8_variance4x4_mmx;
619 + rtcd->variance.var8x8 = vp8_variance8x8_mmx;
620 + rtcd->variance.var8x16 = vp8_variance8x16_mmx;
621 + rtcd->variance.var16x8 = vp8_variance16x8_mmx;
622 + rtcd->variance.var16x16 = vp8_variance16x16_mmx;
624 + rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
625 + rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
626 + rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
627 + rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
628 + rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
629 + rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
630 + rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
631 + rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
632 + rtcd->variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
634 + rtcd->variance.mse16x16 = vp8_mse16x16_mmx;
635 + rtcd->variance.getmbss = vp8_get_mb_ss_mmx;
637 + rtcd->variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;
639 #if CONFIG_POSTPROC
640 rtcd->postproc.down = vp8_mbpost_proc_down_mmx;
641 /*rtcd->postproc.across = vp8_mbpost_proc_across_ip_c;*/
642 rtcd->postproc.downacross = vp8_post_proc_down_and_across_mmx;
643 rtcd->postproc.addnoise = vp8_plane_add_noise_mmx;
644 #endif
647 @@ -105,26 +133,81 @@ void vp8_arch_x86_common_init(VP8_COMMON
648 rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_sse2;
649 rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_sse2;
650 rtcd->loopfilter.normal_b_h = vp8_loop_filter_bh_sse2;
651 rtcd->loopfilter.simple_mb_v = vp8_loop_filter_simple_vertical_edge_sse2;
652 rtcd->loopfilter.simple_b_v = vp8_loop_filter_bvs_sse2;
653 rtcd->loopfilter.simple_mb_h = vp8_loop_filter_simple_horizontal_edge_sse2;
654 rtcd->loopfilter.simple_b_h = vp8_loop_filter_bhs_sse2;
656 + rtcd->variance.sad16x16 = vp8_sad16x16_wmt;
657 + rtcd->variance.sad16x8 = vp8_sad16x8_wmt;
658 + rtcd->variance.sad8x16 = vp8_sad8x16_wmt;
659 + rtcd->variance.sad8x8 = vp8_sad8x8_wmt;
660 + rtcd->variance.sad4x4 = vp8_sad4x4_wmt;
661 + rtcd->variance.copy32xn = vp8_copy32xn_sse2;
663 + rtcd->variance.var4x4 = vp8_variance4x4_wmt;
664 + rtcd->variance.var8x8 = vp8_variance8x8_wmt;
665 + rtcd->variance.var8x16 = vp8_variance8x16_wmt;
666 + rtcd->variance.var16x8 = vp8_variance16x8_wmt;
667 + rtcd->variance.var16x16 = vp8_variance16x16_wmt;
669 + rtcd->variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
670 + rtcd->variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
671 + rtcd->variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
672 + rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
673 + rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
674 + rtcd->variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
675 + rtcd->variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
676 + rtcd->variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
677 + rtcd->variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
679 + rtcd->variance.mse16x16 = vp8_mse16x16_wmt;
680 + rtcd->variance.getmbss = vp8_get_mb_ss_sse2;
682 + /* rtcd->variance.get4x4sse_cs not implemented for wmt */;
684 +#if CONFIG_INTERNAL_STATS
685 +#if ARCH_X86_64
686 + rtcd->variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
687 + rtcd->variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
688 +#endif
689 +#endif
691 #if CONFIG_POSTPROC
692 rtcd->postproc.down = vp8_mbpost_proc_down_xmm;
693 rtcd->postproc.across = vp8_mbpost_proc_across_ip_xmm;
694 rtcd->postproc.downacross = vp8_post_proc_down_and_across_xmm;
695 rtcd->postproc.addnoise = vp8_plane_add_noise_wmt;
696 #endif
699 #endif
701 +#if HAVE_SSE3
703 + if (flags & HAS_SSE3)
705 + rtcd->variance.sad16x16 = vp8_sad16x16_sse3;
706 + rtcd->variance.sad16x16x3 = vp8_sad16x16x3_sse3;
707 + rtcd->variance.sad16x8x3 = vp8_sad16x8x3_sse3;
708 + rtcd->variance.sad8x16x3 = vp8_sad8x16x3_sse3;
709 + rtcd->variance.sad8x8x3 = vp8_sad8x8x3_sse3;
710 + rtcd->variance.sad4x4x3 = vp8_sad4x4x3_sse3;
711 + rtcd->variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
712 + rtcd->variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
713 + rtcd->variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
714 + rtcd->variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
715 + rtcd->variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
716 + rtcd->variance.copy32xn = vp8_copy32xn_sse3;
719 +#endif
721 #if HAVE_SSSE3
723 if (flags & HAS_SSSE3)
725 rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_ssse3;
726 rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_ssse3;
727 rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_ssse3;
728 rtcd->subpix.sixtap4x4 = vp8_sixtap_predict4x4_ssse3;
729 @@ -134,13 +217,30 @@ void vp8_arch_x86_common_init(VP8_COMMON
730 rtcd->recon.build_intra_predictors_mbuv =
731 vp8_build_intra_predictors_mbuv_ssse3;
732 rtcd->recon.build_intra_predictors_mbuv_s =
733 vp8_build_intra_predictors_mbuv_s_ssse3;
734 rtcd->recon.build_intra_predictors_mby =
735 vp8_build_intra_predictors_mby_ssse3;
736 rtcd->recon.build_intra_predictors_mby_s =
737 vp8_build_intra_predictors_mby_s_ssse3;
739 + rtcd->variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
740 + rtcd->variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
742 + rtcd->variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
743 + rtcd->variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
745 +#endif
747 +#if HAVE_SSE4_1
748 + if (flags & HAS_SSE4_1)
750 + rtcd->variance.sad16x16x8 = vp8_sad16x16x8_sse4;
751 + rtcd->variance.sad16x8x8 = vp8_sad16x8x8_sse4;
752 + rtcd->variance.sad8x16x8 = vp8_sad8x16x8_sse4;
753 + rtcd->variance.sad8x8x8 = vp8_sad8x8x8_sse4;
754 + rtcd->variance.sad4x4x8 = vp8_sad4x4x8_sse4;
756 #endif
758 #endif
760 diff --git a/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c b/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c
761 --- a/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c
762 +++ b/media/libvpx/vp8/encoder/arm/arm_csystemdependent.c
763 @@ -6,17 +6,16 @@
764 * tree. An additional intellectual property rights grant can be found
765 * in the file PATENTS. All contributing project authors may
766 * be found in the AUTHORS file in the root of the source tree.
770 #include "vpx_config.h"
771 #include "vpx_ports/arm.h"
772 -#include "vp8/encoder/variance.h"
773 #include "vp8/encoder/onyx_int.h"
775 extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
776 extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
777 extern void vp8_yv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
779 void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
781 @@ -27,42 +26,16 @@ void vp8_arch_arm_encoder_init(VP8_COMP
782 if (flags & HAS_EDSP)
785 #endif
787 #if HAVE_ARMV6
788 if (flags & HAS_MEDIA)
790 - cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
791 - /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
792 - cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
793 - cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
794 - cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
796 - /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
797 - cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
798 - /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
799 - cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
800 - cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
802 - /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
803 - cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
804 - /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
805 - cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
806 - cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
807 - cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
808 - cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
809 - cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
811 - cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
812 - /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
814 - /*cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;*/
816 cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
817 cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
818 cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
819 cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_armv6;
820 cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_armv6;
822 /*cpi->rtcd.encodemb.berr = vp8_block_error_c;
823 cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
824 @@ -74,42 +47,16 @@ void vp8_arch_arm_encoder_init(VP8_COMP
825 /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;*/
826 cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_armv6;
828 #endif
830 #if HAVE_ARMV7
831 if (flags & HAS_NEON)
833 - cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
834 - cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
835 - cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
836 - cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
837 - cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
839 - /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
840 - cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
841 - cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
842 - cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
843 - cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
845 - /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
846 - cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
847 - /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
848 - cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
849 - cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
850 - cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
851 - cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
852 - cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
854 - cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
855 - /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
857 - cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_neon;
859 cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
860 cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
861 cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
862 cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_neon;
863 cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_neon;
865 /*cpi->rtcd.encodemb.berr = vp8_block_error_c;
866 cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
867 diff --git a/media/libvpx/vp8/encoder/encodeframe.c b/media/libvpx/vp8/encoder/encodeframe.c
868 --- a/media/libvpx/vp8/encoder/encodeframe.c
869 +++ b/media/libvpx/vp8/encoder/encodeframe.c
870 @@ -93,17 +93,17 @@ static unsigned int tt_activity_measure(
871 unsigned int sse;
872 /* TODO: This could also be done over smaller areas (8x8), but that would
873 * require extensive changes elsewhere, as lambda is assumed to be fixed
874 * over an entire MB in most of the code.
875 * Another option is to compute four 8x8 variances, and pick a single
876 * lambda using a non-linear combination (e.g., the smallest, or second
877 * smallest, etc.).
879 - act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
880 + act = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)(x->src.y_buffer,
881 x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
882 act = act<<4;
884 /* If the region is flat, lower the activity some more. */
885 if (act < 8<<12)
886 act = act < 5<<12 ? act : 5<<12;
888 return act;
889 diff --git a/media/libvpx/vp8/encoder/encodeintra.c b/media/libvpx/vp8/encoder/encodeintra.c
890 --- a/media/libvpx/vp8/encoder/encodeintra.c
891 +++ b/media/libvpx/vp8/encoder/encodeintra.c
892 @@ -50,17 +50,17 @@ int vp8_encode_intra(VP8_COMP *cpi, MACR
894 for (i = 0; i < 16; i++)
896 x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
897 vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
901 - intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
902 + intra_pred_var = VARIANCE_INVOKE(&cpi->common.rtcd.variance, getmbss)(x->src_diff);
904 return intra_pred_var;
907 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
908 MACROBLOCK *x, int ib)
910 BLOCKD *b = &x->e_mbd.block[ib];
911 diff --git a/media/libvpx/vp8/encoder/firstpass.c b/media/libvpx/vp8/encoder/firstpass.c
912 --- a/media/libvpx/vp8/encoder/firstpass.c
913 +++ b/media/libvpx/vp8/encoder/firstpass.c
914 @@ -7,17 +7,17 @@
915 * in the file PATENTS. All contributing project authors may
916 * be found in the AUTHORS file in the root of the source tree.
919 #include "math.h"
920 #include "limits.h"
921 #include "block.h"
922 #include "onyx_int.h"
923 -#include "variance.h"
924 +#include "vp8/common/variance.h"
925 #include "encodeintra.h"
926 #include "vp8/common/setupintrarecon.h"
927 #include "mcomp.h"
928 #include "firstpass.h"
929 #include "vpx_scale/vpxscale.h"
930 #include "encodemb.h"
931 #include "vp8/common/extend.h"
932 #include "vp8/common/systemdependent.h"
933 @@ -404,17 +404,17 @@ static void zz_motion_search( VP8_COMP *
934 unsigned char *ref_ptr;
935 int ref_stride=d->pre_stride;
937 // Set up pointers for this macro block recon buffer
938 xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
940 ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );
942 - VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
943 + VARIANCE_INVOKE(IF_RTCD(&cpi->common.rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
946 static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
947 int_mv *ref_mv, MV *best_mv,
948 YV12_BUFFER_CONFIG *recon_buffer,
949 int *best_motion_err, int recon_yoffset )
951 MACROBLOCKD *const xd = & x->e_mbd;
952 @@ -428,17 +428,17 @@ static void first_pass_motion_search(VP8
953 int tmp_err;
954 int step_param = 3; //3; // Dont search over full range for first pass
955 int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; //3;
956 int n;
957 vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
958 int new_mv_mode_penalty = 256;
960 // override the default variance function to use MSE
961 - v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
962 + v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->common.rtcd.variance), mse16x16);
964 // Set up pointers for this macro block recon buffer
965 xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
967 // Initial step/diamond search centred on best mv
968 tmp_mv.as_int = 0;
969 ref_mv_full.as_mv.col = ref_mv->as_mv.col>>3;
970 ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3;
971 diff --git a/media/libvpx/vp8/encoder/generic/csystemdependent.c b/media/libvpx/vp8/encoder/generic/csystemdependent.c
972 --- a/media/libvpx/vp8/encoder/generic/csystemdependent.c
973 +++ b/media/libvpx/vp8/encoder/generic/csystemdependent.c
974 @@ -5,78 +5,31 @@
975 * that can be found in the LICENSE file in the root of the source
976 * tree. An additional intellectual property rights grant can be found
977 * in the file PATENTS. All contributing project authors may
978 * be found in the AUTHORS file in the root of the source tree.
982 #include "vpx_config.h"
983 -#include "vp8/encoder/variance.h"
984 #include "vp8/encoder/onyx_int.h"
987 void vp8_arch_x86_encoder_init(VP8_COMP *cpi);
988 void vp8_arch_arm_encoder_init(VP8_COMP *cpi);
990 void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc,
991 YV12_BUFFER_CONFIG *dst_ybc);
992 extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
993 YV12_BUFFER_CONFIG *dst_ybc);
995 void vp8_cmachine_specific_config(VP8_COMP *cpi)
997 #if CONFIG_RUNTIME_CPU_DETECT
998 cpi->rtcd.common = &cpi->common.rtcd;
999 - cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
1000 - cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
1001 - cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
1002 - cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
1003 - cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
1005 - cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
1006 - cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
1007 - cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
1008 - cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
1009 - cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
1011 - cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
1012 - cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
1013 - cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
1014 - cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
1015 - cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
1017 - cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
1018 - cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
1019 - cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
1020 - cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
1021 - cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
1022 -#if ARCH_X86 || ARCH_X86_64
1023 - cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
1024 -#endif
1025 - cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
1026 - cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
1027 - cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
1028 - cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
1029 - cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
1031 - cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
1032 - cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
1033 - cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
1034 - cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
1035 - cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
1036 - cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
1037 - cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
1038 - cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
1039 - cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
1041 - cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
1042 - cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
1044 - cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
1046 cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
1047 cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
1048 cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
1049 cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
1050 cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
1052 cpi->rtcd.encodemb.berr = vp8_block_error_c;
1053 @@ -91,20 +44,16 @@ void vp8_cmachine_specific_config(VP8_CO
1054 cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;
1055 cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c;
1056 cpi->rtcd.search.full_search = vp8_full_search_sad;
1057 cpi->rtcd.search.refining_search = vp8_refining_search_sad;
1058 cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
1059 #if !(CONFIG_REALTIME_ONLY)
1060 cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
1061 #endif
1062 -#if CONFIG_INTERNAL_STATS
1063 - cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
1064 - cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
1065 -#endif
1066 #endif
1068 // Pure C:
1069 vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
1071 #if ARCH_X86 || ARCH_X86_64
1072 vp8_arch_x86_encoder_init(cpi);
1073 #endif
1074 diff --git a/media/libvpx/vp8/encoder/mcomp.h b/media/libvpx/vp8/encoder/mcomp.h
1075 --- a/media/libvpx/vp8/encoder/mcomp.h
1076 +++ b/media/libvpx/vp8/encoder/mcomp.h
1077 @@ -8,17 +8,17 @@
1078 * be found in the AUTHORS file in the root of the source tree.
1082 #ifndef __INC_MCOMP_H
1083 #define __INC_MCOMP_H
1085 #include "block.h"
1086 -#include "variance.h"
1087 +#include "vp8/common/variance.h"
1089 #ifdef ENTROPY_STATS
1090 extern void init_mv_ref_counts();
1091 extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
1092 #endif
1095 #define MAX_MVSEARCH_STEPS 8 // The maximum number of steps in a step search given the largest allowed initial step
1096 diff --git a/media/libvpx/vp8/encoder/onyx_if.c b/media/libvpx/vp8/encoder/onyx_if.c
1097 --- a/media/libvpx/vp8/encoder/onyx_if.c
1098 +++ b/media/libvpx/vp8/encoder/onyx_if.c
1099 @@ -1948,72 +1948,72 @@ struct VP8_COMP* vp8_create_compressor(V
1100 #ifdef ENTROPY_STATS
1101 init_mv_ref_counts();
1102 #endif
1104 #if CONFIG_MULTITHREAD
1105 vp8cx_create_encoder_threads(cpi);
1106 #endif
1108 - cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
1109 - cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
1110 - cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
1111 - cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
1112 - cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
1113 - cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
1114 - cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
1115 - cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
1116 - cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
1118 - cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
1119 - cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
1120 - cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
1121 + cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16);
1122 + cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16);
1123 + cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar16x16);
1124 + cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->common.rtcd.variance, halfpixvar16x16_h);
1125 + cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->common.rtcd.variance, halfpixvar16x16_v);
1126 + cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->common.rtcd.variance, halfpixvar16x16_hv);
1127 + cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16x3);
1128 + cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16x8);
1129 + cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x16x4d);
1131 + cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8);
1132 + cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x8);
1133 + cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar16x8);
1134 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
1135 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
1136 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
1137 - cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
1138 - cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
1139 - cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
1141 - cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
1142 - cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
1143 - cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
1144 + cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8x3);
1145 + cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8x8);
1146 + cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad16x8x4d);
1148 + cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16);
1149 + cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var8x16);
1150 + cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar8x16);
1151 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
1152 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
1153 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
1154 - cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
1155 - cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
1156 - cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
1158 - cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
1159 - cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
1160 - cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
1161 + cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16x3);
1162 + cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16x8);
1163 + cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x16x4d);
1165 + cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8);
1166 + cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var8x8);
1167 + cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar8x8);
1168 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
1169 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
1170 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
1171 - cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
1172 - cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
1173 - cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
1175 - cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
1176 - cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
1177 - cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
1178 + cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8x3);
1179 + cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8x8);
1180 + cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad8x8x4d);
1182 + cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4);
1183 + cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var4x4);
1184 + cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->common.rtcd.variance, subpixvar4x4);
1185 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
1186 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
1187 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
1188 - cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
1189 - cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
1190 - cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
1191 + cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4x3);
1192 + cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4x8);
1193 + cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->common.rtcd.variance, sad4x4x4d);
1195 #if ARCH_X86 || ARCH_X86_64
1196 - cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
1197 - cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
1198 - cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
1199 - cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
1200 - cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
1201 + cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
1202 + cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
1203 + cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
1204 + cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
1205 + cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->common.rtcd.variance, copy32xn);
1206 #endif
1208 cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
1209 cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);
1210 cpi->refining_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, refining_search);
1212 // make sure frame 1 is okay
1213 cpi->error_bins[0] = cpi->common.MBs;
1214 @@ -2410,38 +2410,38 @@ static void generate_psnr_packet(VP8_COM
1215 int i;
1216 unsigned int width = cpi->common.Width;
1217 unsigned int height = cpi->common.Height;
1219 pkt.kind = VPX_CODEC_PSNR_PKT;
1220 sse = calc_plane_error(orig->y_buffer, orig->y_stride,
1221 recon->y_buffer, recon->y_stride,
1222 width, height,
1223 - IF_RTCD(&cpi->rtcd.variance));
1224 + IF_RTCD(&cpi->common.rtcd.variance));
1225 pkt.data.psnr.sse[0] = sse;
1226 pkt.data.psnr.sse[1] = sse;
1227 pkt.data.psnr.samples[0] = width * height;
1228 pkt.data.psnr.samples[1] = width * height;
1230 width = (width + 1) / 2;
1231 height = (height + 1) / 2;
1233 sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
1234 recon->u_buffer, recon->uv_stride,
1235 width, height,
1236 - IF_RTCD(&cpi->rtcd.variance));
1237 + IF_RTCD(&cpi->common.rtcd.variance));
1238 pkt.data.psnr.sse[0] += sse;
1239 pkt.data.psnr.sse[2] = sse;
1240 pkt.data.psnr.samples[0] += width * height;
1241 pkt.data.psnr.samples[2] = width * height;
1243 sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
1244 recon->v_buffer, recon->uv_stride,
1245 width, height,
1246 - IF_RTCD(&cpi->rtcd.variance));
1247 + IF_RTCD(&cpi->common.rtcd.variance));
1248 pkt.data.psnr.sse[0] += sse;
1249 pkt.data.psnr.sse[3] = sse;
1250 pkt.data.psnr.samples[0] += width * height;
1251 pkt.data.psnr.samples[3] = width * height;
1253 for (i = 0; i < 4; i++)
1254 pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0,
1255 pkt.data.psnr.sse[i]);
1256 @@ -3821,17 +3821,17 @@ static void encode_frame_to_data_rate
1258 #if !(CONFIG_REALTIME_ONLY)
1259 // Special case handling for forced key frames
1260 if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
1261 {
1262 int last_q = Q;
1263 int kf_err = vp8_calc_ss_err(cpi->Source,
1264 &cm->yv12_fb[cm->new_fb_idx],
1265 - IF_RTCD(&cpi->rtcd.variance));
1266 + IF_RTCD(&cpi->common.rtcd.variance));
1268 // The key frame is not good enough
1269 if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
1270 {
1271 // Lower q_high
1272 q_high = (Q > q_low) ? (Q - 1) : q_low;
1274 // Adjust Q
1275 @@ -4018,17 +4018,17 @@ static void encode_frame_to_data_rate
1277 // Special case code to reduce pulsing when key frames are forced at a
1278 // fixed interval. Note the reconstruction error if it is the frame before
1279 // the force key frame
1280 if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
1281 {
1282 cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
1283 &cm->yv12_fb[cm->new_fb_idx],
1284 - IF_RTCD(&cpi->rtcd.variance));
1285 + IF_RTCD(&cpi->common.rtcd.variance));
1286 }
1288 /* This frame's MVs are saved and will be used in next frame's MV predictor.
1289 * Last frame has one more line(add to bottom) and one more column(add to
1290 * right) than cm->mip. The edge elements are initialized to 0.
1291 */
1292 #if CONFIG_MULTI_RES_ENCODING
1293 if(!cpi->oxcf.mr_encoder_id && cm->show_frame)
1294 @@ -4963,25 +4963,25 @@ int vp8_get_compressed_data(VP8_COMP *cp
1295 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
1296 int y_samples = orig->y_height * orig->y_width ;
1297 int uv_samples = orig->uv_height * orig->uv_width ;
1298 int t_samples = y_samples + 2 * uv_samples;
1299 int64_t sq_error, sq_error2;
1301 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
1302 recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
1303 - IF_RTCD(&cpi->rtcd.variance));
1304 + IF_RTCD(&cpi->common.rtcd.variance));
1306 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
1307 recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
1308 - IF_RTCD(&cpi->rtcd.variance));
1309 + IF_RTCD(&cpi->common.rtcd.variance));
1311 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
1312 recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
1313 - IF_RTCD(&cpi->rtcd.variance));
1314 + IF_RTCD(&cpi->common.rtcd.variance));
1316 sq_error = ye + ue + ve;
1318 frame_psnr = vp8_mse2psnr(t_samples, 255.0, sq_error);
1320 cpi->total_y += vp8_mse2psnr(y_samples, 255.0, ye);
1321 cpi->total_u += vp8_mse2psnr(uv_samples, 255.0, ue);
1322 cpi->total_v += vp8_mse2psnr(uv_samples, 255.0, ve);
1323 @@ -4991,39 +4991,39 @@ int vp8_get_compressed_data(VP8_COMP *cp
1324 double frame_psnr2, frame_ssim2 = 0;
1325 double weight = 0;
1327 vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
1328 vp8_clear_system_state();
1330 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
1331 pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
1332 - IF_RTCD(&cpi->rtcd.variance));
1333 + IF_RTCD(&cpi->common.rtcd.variance));
1335 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
1336 pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
1337 - IF_RTCD(&cpi->rtcd.variance));
1338 + IF_RTCD(&cpi->common.rtcd.variance));
1340 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
1341 pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
1342 - IF_RTCD(&cpi->rtcd.variance));
1343 + IF_RTCD(&cpi->common.rtcd.variance));
1345 sq_error2 = ye + ue + ve;
1347 frame_psnr2 = vp8_mse2psnr(t_samples, 255.0, sq_error2);
1349 cpi->totalp_y += vp8_mse2psnr(y_samples, 255.0, ye);
1350 cpi->totalp_u += vp8_mse2psnr(uv_samples, 255.0, ue);
1351 cpi->totalp_v += vp8_mse2psnr(uv_samples, 255.0, ve);
1352 cpi->total_sq_error2 += sq_error2;
1353 cpi->totalp += frame_psnr2;
1355 frame_ssim2 = vp8_calc_ssim(cpi->Source,
1356 &cm->post_proc_buffer, 1, &weight,
1357 - IF_RTCD(&cpi->rtcd.variance));
1358 + IF_RTCD(&cpi->common.rtcd.variance));
1360 cpi->summed_quality += frame_ssim2 * weight;
1361 cpi->summed_weights += weight;
1363 if (cpi->oxcf.number_of_layers > 1)
1364 {
1365 int i;
1367 @@ -5043,17 +5043,17 @@ int vp8_get_compressed_data(VP8_COMP *cp
1372 if (cpi->b_calculate_ssimg)
1373 {
1374 double y, u, v, frame_all;
1375 frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
1376 - &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
1377 + &y, &u, &v, IF_RTCD(&cpi->common.rtcd.variance));
1379 if (cpi->oxcf.number_of_layers > 1)
1380 {
1381 int i;
1383 for (i=cpi->current_layer;
1384 i<cpi->oxcf.number_of_layers; i++)
1385 {
1386 diff --git a/media/libvpx/vp8/encoder/onyx_int.h b/media/libvpx/vp8/encoder/onyx_int.h
1387 --- a/media/libvpx/vp8/encoder/onyx_int.h
1388 +++ b/media/libvpx/vp8/encoder/onyx_int.h
1389 @@ -13,17 +13,17 @@
1390 #define __INC_VP8_INT_H
1392 #include <stdio.h>
1393 #include "vpx_config.h"
1394 #include "vp8/common/onyx.h"
1395 #include "treewriter.h"
1396 #include "tokenize.h"
1397 #include "vp8/common/onyxc_int.h"
1398 -#include "variance.h"
1399 +#include "vp8/common/variance.h"
1400 #include "dct.h"
1401 #include "encodemb.h"
1402 #include "quantize.h"
1403 #include "vp8/common/entropy.h"
1404 #include "vp8/common/threading.h"
1405 #include "vpx_ports/mem.h"
1406 #include "vpx/internal/vpx_codec_internal.h"
1407 #include "mcomp.h"
1408 @@ -220,17 +220,16 @@ typedef struct
1409 int ithread;
1410 void *ptr1;
1411 } LPFTHREAD_DATA;
1414 typedef struct VP8_ENCODER_RTCD
1415 {
1416 VP8_COMMON_RTCD *common;
1417 - vp8_variance_rtcd_vtable_t variance;
1418 vp8_fdct_rtcd_vtable_t fdct;
1419 vp8_encodemb_rtcd_vtable_t encodemb;
1420 vp8_quantize_rtcd_vtable_t quantize;
1421 vp8_search_rtcd_vtable_t search;
1422 vp8_temporal_rtcd_vtable_t temporal;
1423 } VP8_ENCODER_RTCD;
1425 enum
1426 diff --git a/media/libvpx/vp8/encoder/pickinter.c b/media/libvpx/vp8/encoder/pickinter.c
1427 --- a/media/libvpx/vp8/encoder/pickinter.c
1428 +++ b/media/libvpx/vp8/encoder/pickinter.c
1429 @@ -16,17 +16,17 @@
1430 #include "encodeintra.h"
1431 #include "vp8/common/entropymode.h"
1432 #include "pickinter.h"
1433 #include "vp8/common/findnearmv.h"
1434 #include "encodemb.h"
1435 #include "vp8/common/reconinter.h"
1436 #include "vp8/common/reconintra.h"
1437 #include "vp8/common/reconintra4x4.h"
1438 -#include "variance.h"
1439 +#include "vp8/common/variance.h"
1440 #include "mcomp.h"
1441 #include "rdopt.h"
1442 #include "vpx_mem/vpx_mem.h"
1444 #if CONFIG_RUNTIME_CPU_DETECT
1445 #define IF_RTCD(x) (x)
1446 #else
1447 #define IF_RTCD(x) NULL
1448 @@ -90,42 +90,16 @@ static int get_inter_mbpred_error(MACROB
1449 else
1450 {
1451 return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
1452 }
1457 -unsigned int vp8_get4x4sse_cs_c
1458 -(
1459 - const unsigned char *src_ptr,
1460 - int source_stride,
1461 - const unsigned char *ref_ptr,
1462 - int recon_stride
1463 -)
1464 -{
1465 - int distortion = 0;
1466 - int r, c;
1467 -
1468 - for (r = 0; r < 4; r++)
1469 - {
1470 - for (c = 0; c < 4; c++)
1471 - {
1472 - int diff = src_ptr[c] - ref_ptr[c];
1473 - distortion += diff * diff;
1474 - }
1475 -
1476 - src_ptr += source_stride;
1477 - ref_ptr += recon_stride;
1478 - }
1479 -
1480 - return distortion;
1481 -}
1482 -
1483 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
1484 {
1485 unsigned char *sptr;
1486 unsigned char *dptr;
1487 sptr = (*(be->base_src) + be->src);
1488 dptr = b->predictor;
1490 return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16);
1491 @@ -153,17 +127,17 @@ static int pick_intra4x4block(
1492 for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++)
1493 {
1494 int this_rd;
1496 rate = mode_costs[mode];
1497 RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
1498 (*(b->base_dst) + b->dst, b->dst_stride,
1499 mode, b->predictor, 16);
1500 - distortion = get_prediction_error(be, b, &rtcd->variance);
1501 + distortion = get_prediction_error(be, b, &rtcd->common->variance);
1502 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1504 if (this_rd < best_rd)
1505 {
1506 *bestrate = rate;
1507 *bestdistortion = distortion;
1508 best_rd = this_rd;
1509 *best_mode = mode;
1510 @@ -671,17 +645,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi,
1511 if (distortion2 == INT_MAX)
1512 {
1513 this_rd = INT_MAX;
1514 }
1515 else
1516 {
1517 rate2 += rate;
1518 distortion2 = VARIANCE_INVOKE
1519 - (&cpi->rtcd.variance, var16x16)(
1520 + (&cpi->common.rtcd.variance, var16x16)(
1521 *(b->base_src), b->src_stride,
1522 x->e_mbd.predictor, 16, &sse);
1523 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
1525 if (this_rd < best_intra_rd)
1526 {
1527 best_intra_rd = this_rd;
1528 *returnintra = distortion2;
1529 @@ -696,17 +670,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi,
1530 break;
1532 case DC_PRED:
1533 case V_PRED:
1534 case H_PRED:
1535 case TM_PRED:
1536 RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
1537 (&x->e_mbd);
1538 - distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
1539 + distortion2 = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)
1540 (*(b->base_src), b->src_stride,
1541 x->e_mbd.predictor, 16, &sse);
1542 rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
1543 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
1545 if (this_rd < best_intra_rd)
1546 {
1547 best_intra_rd = this_rd;
1548 @@ -933,17 +907,17 @@ void vp8_pick_inter_mode(VP8_COMP *cpi,
1550 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
1552 if (sse < x->encode_breakout)
1553 {
1554 // Check u and v to make sure skip is ok
1555 int sse2 = 0;
1557 - sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
1558 + sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->common.rtcd.variance));
1560 if (sse2 * 2 < x->encode_breakout)
1561 x->skip = 1;
1562 else
1563 x->skip = 0;
1566 break;
1567 @@ -1067,17 +1041,17 @@ void vp8_pick_intra_mode(VP8_COMP *cpi,
1569 pick_intra_mbuv_mode(x);
1571 for (mode = DC_PRED; mode <= TM_PRED; mode ++)
1572 {
1573 x->e_mbd.mode_info_context->mbmi.mode = mode;
1574 RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
1575 (&x->e_mbd);
1576 - distortion = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
1577 + distortion = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)
1578 (*(b->base_src), b->src_stride, x->e_mbd.predictor, 16, &sse);
1579 rate = x->mbmode_cost[x->e_mbd.frame_type][mode];
1580 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1582 if (error16x16 > this_rd)
1583 {
1584 error16x16 = this_rd;
1585 best_mode = mode;
1586 diff --git a/media/libvpx/vp8/encoder/picklpf.c b/media/libvpx/vp8/encoder/picklpf.c
1587 --- a/media/libvpx/vp8/encoder/picklpf.c
1588 +++ b/media/libvpx/vp8/encoder/picklpf.c
1589 @@ -179,30 +179,30 @@ void vp8cx_pick_filter_level_fast(YV12_B
1591 // Get the err using the previous frame's filter value.
1593 /* Copy the unfiltered / processed recon buffer to the new buffer */
1594 vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
1595 vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
1597 best_err = calc_partial_ssl_err(sd, cm->frame_to_show,
1598 - IF_RTCD(&cpi->rtcd.variance));
1599 + IF_RTCD(&cpi->common.rtcd.variance));
1601 filt_val -= 1 + (filt_val > 10);
1603 // Search lower filter levels
1604 while (filt_val >= min_filter_level)
1605 {
1606 // Apply the loop filter
1607 vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
1608 vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
1610 // Get the err for filtered frame
1611 filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
1612 - IF_RTCD(&cpi->rtcd.variance));
1613 + IF_RTCD(&cpi->common.rtcd.variance));
1615 // Update the best case record or exit loop.
1616 if (filt_err < best_err)
1617 {
1618 best_err = filt_err;
1619 best_filt_val = filt_val;
1620 }
1621 else
1622 @@ -224,17 +224,17 @@ void vp8cx_pick_filter_level_fast(YV12_B
1624 // Apply the loop filter
1625 vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
1627 vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
1629 // Get the err for filtered frame
1630 filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
1631 - IF_RTCD(&cpi->rtcd.variance));
1632 + IF_RTCD(&cpi->common.rtcd.variance));
1634 // Update the best case record or exit loop.
1635 if (filt_err < best_err)
1636 {
1637 // Do not raise filter level if improvement is < 1 part in 4096
1638 best_err = filt_err - (filt_err >> 10);
1640 best_filt_val = filt_val;
1641 @@ -318,17 +318,17 @@ void vp8cx_pick_filter_level(YV12_BUFFER
1643 /* Copy the unfiltered / processed recon buffer to the new buffer */
1644 vp8_yv12_copy_y_ptr(saved_frame, cm->frame_to_show);
1646 vp8cx_set_alt_lf_level(cpi, filt_mid);
1647 vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
1649 best_err = vp8_calc_ss_err(sd, cm->frame_to_show,
1650 - IF_RTCD(&cpi->rtcd.variance));
1651 + IF_RTCD(&cpi->common.rtcd.variance));
1653 ss_err[filt_mid] = best_err;
1655 filt_best = filt_mid;
1657 while (filter_step > 0)
1658 {
1659 Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
1660 @@ -345,17 +345,17 @@ void vp8cx_pick_filter_level(YV12_BUFFER
1661 if(ss_err[filt_low] == 0)
1662 {
1663 // Get Low filter error score
1664 vp8_yv12_copy_y_ptr(saved_frame, cm->frame_to_show);
1665 vp8cx_set_alt_lf_level(cpi, filt_low);
1666 vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
1668 filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
1669 - IF_RTCD(&cpi->rtcd.variance));
1670 + IF_RTCD(&cpi->common.rtcd.variance));
1671 ss_err[filt_low] = filt_err;
1672 }
1673 else
1674 filt_err = ss_err[filt_low];
1676 // If value is close to the best so far then bias towards a lower loop filter value.
1677 if ((filt_err - Bias) < best_err)
1678 {
1679 @@ -372,17 +372,17 @@ void vp8cx_pick_filter_level(YV12_BUFFER
1681 if(ss_err[filt_high] == 0)
1682 {
1683 vp8_yv12_copy_y_ptr(saved_frame, cm->frame_to_show);
1684 vp8cx_set_alt_lf_level(cpi, filt_high);
1685 vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
1687 filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
1688 - IF_RTCD(&cpi->rtcd.variance));
1689 + IF_RTCD(&cpi->common.rtcd.variance));
1690 ss_err[filt_high] = filt_err;
1691 }
1692 else
1693 filt_err = ss_err[filt_high];
1695 // Was it better than the previous best?
1696 if (filt_err < (best_err - Bias))
1697 {
1698 diff --git a/media/libvpx/vp8/encoder/rdopt.c b/media/libvpx/vp8/encoder/rdopt.c
1699 --- a/media/libvpx/vp8/encoder/rdopt.c
1700 +++ b/media/libvpx/vp8/encoder/rdopt.c
1701 @@ -23,17 +23,17 @@
1702 #include "vp8/common/entropymode.h"
1703 #include "vp8/common/reconinter.h"
1704 #include "vp8/common/reconintra.h"
1705 #include "vp8/common/reconintra4x4.h"
1706 #include "vp8/common/findnearmv.h"
1707 #include "encodemb.h"
1708 #include "quantize.h"
1709 #include "vp8/common/idct.h"
1710 -#include "variance.h"
1711 +#include "vp8/common/variance.h"
1712 #include "mcomp.h"
1713 #include "rdopt.h"
1714 #include "vpx_mem/vpx_mem.h"
1715 #include "dct.h"
1716 #include "vp8/common/systemdependent.h"
1718 #if CONFIG_RUNTIME_CPU_DETECT
1719 #define IF_RTCD(x) (x)
1720 @@ -2132,30 +2132,30 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cp
1721 unsigned int sse;
1722 unsigned int var;
1723 int threshold = (xd->block[0].dequant[1]
1724 * xd->block[0].dequant[1] >>4);
1726 if(threshold < x->encode_breakout)
1727 threshold = x->encode_breakout;
1729 - var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
1730 + var = VARIANCE_INVOKE(&cpi->common.rtcd.variance, var16x16)
1731 (*(b->base_src), b->src_stride,
1732 x->e_mbd.predictor, 16, &sse);
1734 if (sse < threshold)
1735 {
1736 unsigned int q2dc = xd->block[24].dequant[0];
1737 /* If theres is no codeable 2nd order dc
1738 or a very small uniform pixel change change */
1739 if ((sse - var < q2dc * q2dc >>4) ||
1740 (sse /2 > var && sse-var < 64))
1741 {
1742 // Check u and v to make sure skip is ok
1743 - int sse2= VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
1744 + int sse2= VP8_UVSSE(x, IF_RTCD(&cpi->common.rtcd.variance));
1745 if (sse2 * 2 < threshold)
1746 {
1747 x->skip = 1;
1748 distortion2 = sse + sse2;
1749 rate2 = 500;
1751 /* for best_yrd calculation */
1752 rate_uv = 0;
1753 diff --git a/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c b/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c
1754 --- a/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c
1755 +++ b/media/libvpx/vp8/encoder/x86/x86_csystemdependent.c
1756 @@ -6,17 +6,16 @@
1757 * tree. An additional intellectual property rights grant can be found
1758 * in the file PATENTS. All contributing project authors may
1759 * be found in the AUTHORS file in the root of the source tree.
1760 */
1763 #include "vpx_config.h"
1764 #include "vpx_ports/x86.h"
1765 -#include "vp8/encoder/variance.h"
1766 #include "vp8/encoder/onyx_int.h"
1769 #if HAVE_MMX
1770 void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
1771 {
1772 vp8_short_fdct4x4_mmx(input, output, pitch);
1773 vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
1774 @@ -122,43 +121,16 @@ void vp8_arch_x86_encoder_init(VP8_COMP
1775 * you modify any of the function mappings present in this file, be sure
1776 * to also update them in static mapings (<arch>/filename_<arch>.h)
1777 */
1779 /* Override default functions with fastest ones for this CPU. */
1780 #if HAVE_MMX
1781 if (flags & HAS_MMX)
1782 {
1783 - cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
1784 - cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
1785 - cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
1786 - cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
1787 - cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
1789 - cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
1790 - cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
1791 - cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
1792 - cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
1793 - cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx;
1795 - cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
1796 - cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
1797 - cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
1798 - cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
1799 - cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
1800 - cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
1801 - cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
1802 - cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
1803 - cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
1805 - cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
1806 - cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
1808 - cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;
1810 cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
1811 cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
1812 cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
1813 cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
1815 cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
1817 cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
1818 @@ -170,44 +142,16 @@ void vp8_arch_x86_encoder_init(VP8_COMP
1820 /*cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_mmx;*/
1821 }
1822 #endif
1824 #if HAVE_SSE2
1825 if (flags & HAS_SSE2)
1826 {
1827 - cpi->rtcd.variance.sad16x16 = vp8_sad16x16_wmt;
1828 - cpi->rtcd.variance.sad16x8 = vp8_sad16x8_wmt;
1829 - cpi->rtcd.variance.sad8x16 = vp8_sad8x16_wmt;
1830 - cpi->rtcd.variance.sad8x8 = vp8_sad8x8_wmt;
1831 - cpi->rtcd.variance.sad4x4 = vp8_sad4x4_wmt;
1832 - cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse2;
1834 - cpi->rtcd.variance.var4x4 = vp8_variance4x4_wmt;
1835 - cpi->rtcd.variance.var8x8 = vp8_variance8x8_wmt;
1836 - cpi->rtcd.variance.var8x16 = vp8_variance8x16_wmt;
1837 - cpi->rtcd.variance.var16x8 = vp8_variance16x8_wmt;
1838 - cpi->rtcd.variance.var16x16 = vp8_variance16x16_wmt;
1840 - cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
1841 - cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
1842 - cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
1843 - cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
1844 - cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
1845 - cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
1846 - cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
1847 - cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
1848 - cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
1850 - cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
1851 - cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
1853 - /* cpi->rtcd.variance.get4x4sse_cs not implemented for wmt */;
1855 cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
1856 cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
1857 cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
1858 cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;
1860 cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2 ;
1862 cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
1863 @@ -219,69 +163,40 @@ void vp8_arch_x86_encoder_init(VP8_COMP
1865 cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;
1866 cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_sse2;
1868 #if !(CONFIG_REALTIME_ONLY)
1869 cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
1870 #endif
1872 -#if CONFIG_INTERNAL_STATS
1873 -#if ARCH_X86_64
1874 - cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
1875 - cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
1876 -#endif
1877 -#endif
1878 }
1879 #endif
1881 #if HAVE_SSE3
1882 if (flags & HAS_SSE3)
1883 {
1884 - cpi->rtcd.variance.sad16x16 = vp8_sad16x16_sse3;
1885 - cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_sse3;
1886 - cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_sse3;
1887 - cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_sse3;
1888 - cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_sse3;
1889 - cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_sse3;
1890 cpi->rtcd.search.full_search = vp8_full_search_sadx3;
1891 - cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
1892 - cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
1893 - cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
1894 - cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
1895 - cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
1896 - cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse3;
1897 cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
1898 cpi->rtcd.search.refining_search = vp8_refining_search_sadx4;
1899 }
1900 #endif
1902 #if HAVE_SSSE3
1903 if (flags & HAS_SSSE3)
1904 {
1905 - cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
1906 - cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
1908 - cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
1909 - cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
1911 cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_ssse3;
1912 }
1913 #endif
1917 #if HAVE_SSE4_1
1918 if (flags & HAS_SSE4_1)
1919 {
1920 - cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_sse4;
1921 - cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_sse4;
1922 - cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_sse4;
1923 - cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
1924 - cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
1925 cpi->rtcd.search.full_search = vp8_full_search_sadx8;
1927 cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse4;
1928 }
1929 #endif
1931 #endif