/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <math.h>

#include "./vp9_rtcd.h"

#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_variance.h"
#include "vp9/encoder/vp9_aq_variance.h"
#define LAST_FRAME_MODE_MASK    ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
                                 (1 << INTRA_FRAME))
#define GOLDEN_FRAME_MODE_MASK  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
                                 (1 << INTRA_FRAME))
#define ALT_REF_MODE_MASK       ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
                                 (1 << INTRA_FRAME))

#define SECOND_REF_FRAME_MASK   ((1 << ALTREF_FRAME) | 0x01)

#define MIN_EARLY_TERM_INDEX    3
#define NEW_MV_DISCOUNT_FACTOR  8
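/* Example (sketch): the per-reference masks above are bitmasks over the
 * MV_REFERENCE_FRAME enum, so a mode-search loop can cheaply drop candidate
 * modes whose reference frame is disabled for the current pass, e.g.
 *
 *   if (mode_skip_mask & (1 << ref_frame))
 *     continue;  // reference frame masked out; skip this candidate
 */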
typedef struct {
  PREDICTION_MODE mode;
  MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;

typedef struct {
  MV_REFERENCE_FRAME ref_frame[2];
} REF_DEFINITION;

struct rdcost_block_args {
  MACROBLOCK *x;
  ENTROPY_CONTEXT t_above[16];
  ENTROPY_CONTEXT t_left[16];
  int rate;
  int64_t dist;
  int64_t sse;
  int this_rate;
  int64_t this_dist;
  int64_t this_sse;
  int64_t this_rd;
  int64_t best_rd;
  int skip;
  int use_fast_coef_costing;
  const scan_order *so;
};
#define LAST_NEW_MV_INDEX 6
static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
  {NEARESTMV, {LAST_FRAME,   NONE}},
  {NEARESTMV, {ALTREF_FRAME, NONE}},
  {NEARESTMV, {GOLDEN_FRAME, NONE}},

  {DC_PRED,   {INTRA_FRAME,  NONE}},

  {NEWMV,     {LAST_FRAME,   NONE}},
  {NEWMV,     {ALTREF_FRAME, NONE}},
  {NEWMV,     {GOLDEN_FRAME, NONE}},

  {NEARMV,    {LAST_FRAME,   NONE}},
  {NEARMV,    {ALTREF_FRAME, NONE}},
  {NEARMV,    {GOLDEN_FRAME, NONE}},

  {ZEROMV,    {LAST_FRAME,   NONE}},
  {ZEROMV,    {GOLDEN_FRAME, NONE}},
  {ZEROMV,    {ALTREF_FRAME, NONE}},

  {NEARESTMV, {LAST_FRAME,   ALTREF_FRAME}},
  {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},

  {TM_PRED,   {INTRA_FRAME,  NONE}},

  {NEARMV,    {LAST_FRAME,   ALTREF_FRAME}},
  {NEWMV,     {LAST_FRAME,   ALTREF_FRAME}},
  {NEARMV,    {GOLDEN_FRAME, ALTREF_FRAME}},
  {NEWMV,     {GOLDEN_FRAME, ALTREF_FRAME}},

  {ZEROMV,    {LAST_FRAME,   ALTREF_FRAME}},
  {ZEROMV,    {GOLDEN_FRAME, ALTREF_FRAME}},

  {H_PRED,    {INTRA_FRAME,  NONE}},
  {V_PRED,    {INTRA_FRAME,  NONE}},
  {D135_PRED, {INTRA_FRAME,  NONE}},
  {D207_PRED, {INTRA_FRAME,  NONE}},
  {D153_PRED, {INTRA_FRAME,  NONE}},
  {D63_PRED,  {INTRA_FRAME,  NONE}},
  {D117_PRED, {INTRA_FRAME,  NONE}},
  {D45_PRED,  {INTRA_FRAME,  NONE}},
};
static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
  {{LAST_FRAME,   NONE}},
  {{GOLDEN_FRAME, NONE}},
  {{ALTREF_FRAME, NONE}},
  {{LAST_FRAME,   ALTREF_FRAME}},
  {{GOLDEN_FRAME, ALTREF_FRAME}},
  {{INTRA_FRAME,  NONE}},
};
static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
                           int m, int n, int min_plane, int max_plane) {
  int i;

  for (i = min_plane; i < max_plane; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &x->e_mbd.plane[i];

    p->coeff = ctx->coeff_pbuf[i][m];
    p->qcoeff = ctx->qcoeff_pbuf[i][m];
    pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
    p->eobs = ctx->eobs_pbuf[i][m];

    ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
    ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
    ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
    ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];

    ctx->coeff_pbuf[i][n] = p->coeff;
    ctx->qcoeff_pbuf[i][n] = p->qcoeff;
    ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
    ctx->eobs_pbuf[i][n] = p->eobs;
  }
}
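/* Usage note: swap_block_ptr() performs a three-way rotation between the
 * macroblock's active coefficient/eob pointers and slots m and n of the mode
 * context, so the best-so-far coefficients can be kept without copying the
 * buffers themselves.  A typical call is the one in rd_pick_intra_sbuv_mode()
 * below: swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE). */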
static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize,
                            MACROBLOCK *x, MACROBLOCKD *xd,
                            int *out_rate_sum, int64_t *out_dist_sum,
                            int *skip_txfm_sb, int64_t *skip_sse_sb) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  int i;
  int64_t rate_sum = 0;
  int64_t dist_sum = 0;
  const int ref = xd->mi[0].src_mi->mbmi.ref_frame[0];
  unsigned int sse;
  unsigned int var = 0;
  unsigned int sum_sse = 0;
  int64_t total_sse = 0;
  int skip_flag = 1;
  const int shift = 6;
  int rate;
  int64_t dist;

  x->pred_sse[ref] = 0;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    const TX_SIZE max_tx_size = max_txsize_lookup[bs];
    const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
    const int64_t dc_thr = p->quant_thred[0] >> shift;
    const int64_t ac_thr = p->quant_thred[1] >> shift;
    // The low thresholds are used to measure if the prediction errors are
    // low enough so that we can skip the mode search.
    const int64_t low_dc_thr = MIN(50, dc_thr >> 2);
    const int64_t low_ac_thr = MIN(80, ac_thr >> 2);
    int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int idx, idy;
    int lw = b_width_log2_lookup[unit_size] + 2;
    int lh = b_height_log2_lookup[unit_size] + 2;

    sum_sse = 0;

    for (idy = 0; idy < bh; ++idy) {
      for (idx = 0; idx < bw; ++idx) {
        uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
        uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lh);
        int block_idx = (idy << 1) + idx;
        int low_err_skip = 0;

        var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
                                        dst, pd->dst.stride, &sse);
        x->bsse[(i << 2) + block_idx] = sse;
        sum_sse += sse;

        x->skip_txfm[(i << 2) + block_idx] = 0;
        if (!x->select_tx_size) {
          // Check if all ac coefficients can be quantized to zero.
          if (var < ac_thr || var == 0) {
            x->skip_txfm[(i << 2) + block_idx] = 2;

            // Check if dc coefficient can be quantized to zero.
            if (sse - var < dc_thr || sse == var) {
              x->skip_txfm[(i << 2) + block_idx] = 1;

              if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
                low_err_skip = 1;
            }
          }
        }

        if (skip_flag && !low_err_skip)
          skip_flag = 0;

        if (i == 0)
          x->pred_sse[ref] += sse;
      }
    }

    total_sse += sum_sse;

    // Fast approximate the modelling function.
    if (cpi->oxcf.speed > 4) {
      int64_t rate;
      const int64_t square_error = sum_sse;
      int quantizer = (pd->dequant[1] >> 3);
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        quantizer >>= (xd->bd - 8);
      }
#endif  // CONFIG_VP9_HIGHBITDEPTH

      if (quantizer < 120)
        rate = (square_error * (280 - quantizer)) >> 8;
      else
        rate = 0;
      dist = (square_error * quantizer) >> 8;
      rate_sum += rate;
      dist_sum += dist;
    } else {
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp9_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
                                     pd->dequant[1] >> (xd->bd - 5),
                                     &rate, &dist);
      } else {
        vp9_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
                                     pd->dequant[1] >> 3, &rate, &dist);
      }
#else
      vp9_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
                                   pd->dequant[1] >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      rate_sum += rate;
      dist_sum += dist;
    }
  }

  *skip_txfm_sb = skip_flag;
  *skip_sse_sb = total_sse << 4;
  *out_rate_sum = (int)rate_sum;
  *out_dist_sum = dist_sum << 4;
}
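/* Worked example for the fast model above: with sum_sse = 4096 and
 * pd->dequant[1] = 64 (so quantizer = 64 >> 3 = 8),
 *   rate = (4096 * (280 - 8)) >> 8 = 4352
 *   dist = (4096 * 8) >> 8        = 128
 * i.e. a coarser quantizer trades estimated rate for estimated distortion
 * without running the full transform/quantization path. */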
int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                          intptr_t block_size, int64_t *ssz) {
  int i;
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += coeff[i] * coeff[i];
  }

  *ssz = sqcoeff;
  return error;
}

#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
                                 const tran_low_t *dqcoeff,
                                 intptr_t block_size,
                                 int64_t *ssz, int bd) {
  int i;
  int64_t error = 0, sqcoeff = 0;
  int shift = 2 * (bd - 8);
  int rounding = shift > 0 ? 1 << (shift - 1) : 0;

  for (i = 0; i < block_size; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
  }
  assert(error >= 0 && sqcoeff >= 0);
  error = (error + rounding) >> shift;
  sqcoeff = (sqcoeff + rounding) >> shift;

  *ssz = sqcoeff;
  return error;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
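/* Example of the high-bitdepth renormalization above: for 10-bit input,
 * bd = 10 gives shift = 2 * (10 - 8) = 4 and rounding = 1 << 3 = 8, so the
 * accumulated squared error is returned as (error + 8) >> 4, bringing it
 * back to the 8-bit scale used by the rest of the rd code. */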
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
 * decide whether to include cost of a trailing EOB node or not (i.e. we
 * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
  { 1, 2, 3, 4,  3,   16 - 13, 0 },
  { 1, 2, 3, 4, 11,   64 - 21, 0 },
  { 1, 2, 3, 4, 11,  256 - 21, 0 },
  { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
static INLINE int cost_coeffs(MACROBLOCK *x,
                              int plane, int block,
                              ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
                              TX_SIZE tx_size,
                              const int16_t *scan, const int16_t *nb,
                              int use_fast_coef_costing) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  const struct macroblock_plane *p = &x->plane[plane];
  const struct macroblockd_plane *pd = &xd->plane[plane];
  const PLANE_TYPE type = pd->plane_type;
  const int16_t *band_count = &band_counts[tx_size][1];
  const int eob = p->eobs[block];
  const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
                   x->token_costs[tx_size][type][is_inter_block(mbmi)];
  uint8_t token_cache[32 * 32];
  int pt = combine_entropy_contexts(*A, *L);
  int c, cost;
#if CONFIG_VP9_HIGHBITDEPTH
  const int16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
#else
  const int16_t *cat6_high_cost = vp9_get_high_cost_table(8);
#endif

  // Check for consistency of tx_size with mode info
  assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size
                              : get_uv_tx_size(mbmi, pd) == tx_size);

  if (eob == 0) {
    // single eob token
    cost = token_costs[0][0][pt][EOB_TOKEN];
    c = 0;
  } else {
    int band_left = *band_count++;

    // dc token
    int v = qcoeff[0];
    int16_t prev_t;
    EXTRABIT e;
    vp9_get_token_extra(v, &prev_t, &e);
    cost = (*token_costs)[0][pt][prev_t] +
        vp9_get_cost(prev_t, e, cat6_high_cost);

    token_cache[0] = vp9_pt_energy_class[prev_t];
    ++token_costs;

    // ac tokens
    for (c = 1; c < eob; c++) {
      const int rc = scan[c];
      int16_t t;

      v = qcoeff[rc];
      vp9_get_token_extra(v, &t, &e);
      if (use_fast_coef_costing) {
        cost += (*token_costs)[!prev_t][!prev_t][t] +
            vp9_get_cost(t, e, cat6_high_cost);
      } else {
        pt = get_coef_context(nb, token_cache, c);
        cost += (*token_costs)[!prev_t][pt][t] +
            vp9_get_cost(t, e, cat6_high_cost);
        token_cache[rc] = vp9_pt_energy_class[t];
      }
      prev_t = t;
      if (!--band_left) {
        band_left = *band_count++;
        ++token_costs;
      }
    }

    // eob token
    if (band_left) {
      if (use_fast_coef_costing) {
        cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
      } else {
        pt = get_coef_context(nb, token_cache, c);
        cost += (*token_costs)[0][pt][EOB_TOKEN];
      }
    }
  }

  // is eob first coefficient;
  *A = *L = (c > 0);

  return cost;
}
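/* Note: the trailing '0' terminator in band_counts[] (see above) makes
 * band_left become zero exactly when the block's final scan position is
 * reached, so the EOB cost at the end of cost_coeffs() is only added when
 * the block ended early and an explicit EOB token must be coded. */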
#if CONFIG_VP9_HIGHBITDEPTH
static void dist_block(int plane, int block, TX_SIZE tx_size,
                       struct rdcost_block_args* args, int bd) {
#else
static void dist_block(int plane, int block, TX_SIZE tx_size,
                       struct rdcost_block_args* args) {
#endif  // CONFIG_VP9_HIGHBITDEPTH
  const int ss_txfrm_size = tx_size << 1;
  MACROBLOCK* const x = args->x;
  MACROBLOCKD* const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  int64_t this_sse;
  int shift = tx_size == TX_32X32 ? 0 : 2;
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
#if CONFIG_VP9_HIGHBITDEPTH
  args->dist = vp9_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
                                      &this_sse, bd) >> shift;
#else
  args->dist = vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
                               &this_sse) >> shift;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  args->sse = this_sse >> shift;

  if (x->skip_encode && !is_inter_block(&xd->mi[0].src_mi->mbmi)) {
    // TODO(jingning): tune the model to better capture the distortion.
    int64_t p = (pd->dequant[1] * pd->dequant[1] *
                 (1 << ss_txfrm_size)) >> (shift + 2);
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      p >>= ((xd->bd - 8) * 2);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    args->dist += (p >> 4);
    args->sse += (p >> 4);
  }
}
static void rate_block(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, struct rdcost_block_args* args) {
  int x_idx, y_idx;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x_idx, &y_idx);

  args->rate = cost_coeffs(args->x, plane, block, args->t_above + x_idx,
                           args->t_left + y_idx, tx_size,
                           args->so->scan, args->so->neighbors,
                           args->use_fast_coef_costing);
}
static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
                          TX_SIZE tx_size, void *arg) {
  struct rdcost_block_args *args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  int64_t rd1, rd2, rd;

  if (args->skip)
    return;

  if (!is_inter_block(mbmi)) {
    struct encode_b_args arg = {x, NULL, &mbmi->skip};
    vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &arg);
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      dist_block(plane, block, tx_size, args, xd->bd);
    } else {
      dist_block(plane, block, tx_size, args, 8);
    }
#else
    dist_block(plane, block, tx_size, args);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  } else if (max_txsize_lookup[plane_bsize] == tx_size) {
    if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] == 0) {
      // full forward transform and quantization
      vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        dist_block(plane, block, tx_size, args, xd->bd);
      } else {
        dist_block(plane, block, tx_size, args, 8);
      }
#else
      dist_block(plane, block, tx_size, args);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] == 2) {
      // compute DC coefficient
      tran_low_t *const coeff   = BLOCK_OFFSET(x->plane[plane].coeff, block);
      tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
      vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size);
      args->sse  = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      args->dist = args->sse;
      if (x->plane[plane].eobs[block]) {
        int64_t dc_correct = coeff[0] * coeff[0] -
            (coeff[0] - dqcoeff[0]) * (coeff[0] - dqcoeff[0]);
#if CONFIG_VP9_HIGHBITDEPTH
        dc_correct >>= ((xd->bd - 8) * 2);
#endif
        if (tx_size != TX_32X32)
          dc_correct >>= 2;

        args->dist = MAX(0, args->sse - dc_correct);
      }
    } else {
      // skip forward transform
      x->plane[plane].eobs[block] = 0;
      args->sse  = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      args->dist = args->sse;
    }
  } else {
    // full forward transform and quantization
    vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      dist_block(plane, block, tx_size, args, xd->bd);
    } else {
      dist_block(plane, block, tx_size, args, 8);
    }
#else
    dist_block(plane, block, tx_size, args);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  rate_block(plane, block, plane_bsize, tx_size, args);
  rd1 = RDCOST(x->rdmult, x->rddiv, args->rate, args->dist);
  rd2 = RDCOST(x->rdmult, x->rddiv, 0, args->sse);

  // TODO(jingning): temporarily enabled only for luma component
  rd = MIN(rd1, rd2);
  if (plane == 0)
    x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
                                    (rd1 > rd2 && !xd->lossless);

  args->this_rate += args->rate;
  args->this_dist += args->dist;
  args->this_sse  += args->sse;
  args->this_rd += rd;

  if (args->this_rd > args->best_rd) {
    args->skip = 1;
    return;
  }
}
static void txfm_rd_in_plane(MACROBLOCK *x,
                             int *rate, int64_t *distortion,
                             int *skippable, int64_t *sse,
                             int64_t ref_best_rd, int plane,
                             BLOCK_SIZE bsize, TX_SIZE tx_size,
                             int use_fast_coef_casting) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  struct rdcost_block_args args;
  vp9_zero(args);
  args.x = x;
  args.best_rd = ref_best_rd;
  args.use_fast_coef_costing = use_fast_coef_casting;

  if (plane == 0)
    xd->mi[0].src_mi->mbmi.tx_size = tx_size;

  vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);

  args.so = get_scan(xd, tx_size, pd->plane_type, 0);

  vp9_foreach_transformed_block_in_plane(xd, bsize, plane,
                                         block_rd_txfm, &args);
  if (args.skip) {
    *rate = INT_MAX;
    *distortion = INT64_MAX;
    *sse = INT64_MAX;
    *skippable = 0;
  } else {
    *distortion = args.this_dist;
    *rate = args.this_rate;
    *sse = args.this_sse;
    *skippable = vp9_is_skippable_in_plane(x, bsize, plane);
  }
}
static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x,
                                   int *rate, int64_t *distortion,
                                   int *skip, int64_t *sse,
                                   int64_t ref_best_rd,
                                   BLOCK_SIZE bs) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP9_COMMON *const cm = &cpi->common;
  const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;

  mbmi->tx_size = MIN(max_tx_size, largest_tx_size);

  txfm_rd_in_plane(x, rate, distortion, skip,
                   sse, ref_best_rd, 0, bs,
                   mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
                                   int *rate,
                                   int64_t *distortion,
                                   int *skip,
                                   int64_t *psse,
                                   int64_t tx_cache[TX_MODES],
                                   int64_t ref_best_rd,
                                   BLOCK_SIZE bs) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  vp9_prob skip_prob = vp9_get_skip_prob(cm, xd);
  int r[TX_SIZES][2], s[TX_SIZES];
  int64_t d[TX_SIZES], sse[TX_SIZES];
  int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX},
                             {INT64_MAX, INT64_MAX},
                             {INT64_MAX, INT64_MAX},
                             {INT64_MAX, INT64_MAX}};
  int n, m;
  int s0, s1;
  const TX_SIZE max_mode_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
  int64_t best_rd = INT64_MAX;
  TX_SIZE best_tx = max_tx_size;

  const vp9_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
  assert(skip_prob > 0);
  s0 = vp9_cost_bit(skip_prob, 0);
  s1 = vp9_cost_bit(skip_prob, 1);

  for (n = max_tx_size; n >= 0; n--) {
    txfm_rd_in_plane(x, &r[n][0], &d[n], &s[n],
                     &sse[n], ref_best_rd, 0, bs, n,
                     cpi->sf.use_fast_coef_costing);
    r[n][1] = r[n][0];
    if (r[n][0] < INT_MAX) {
      for (m = 0; m <= n - (n == (int) max_tx_size); m++) {
        if (m == n)
          r[n][1] += vp9_cost_zero(tx_probs[m]);
        else
          r[n][1] += vp9_cost_one(tx_probs[m]);
      }
    }
    if (d[n] == INT64_MAX) {
      rd[n][0] = rd[n][1] = INT64_MAX;
    } else if (s[n]) {
      rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
    } else {
      rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
      rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
    }

    // Early termination in transform size search.
    if (cpi->sf.tx_size_search_breakout &&
        (rd[n][1] == INT64_MAX ||
        (n < (int) max_tx_size && rd[n][1] > rd[n + 1][1]) ||
        s[n] == 1))
      break;

    if (rd[n][1] < best_rd) {
      best_tx = n;
      best_rd = rd[n][1];
    }
  }
  mbmi->tx_size = cm->tx_mode == TX_MODE_SELECT ?
                      best_tx : MIN(max_tx_size, max_mode_tx_size);

  *distortion = d[mbmi->tx_size];
  *rate       = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT];
  *skip       = s[mbmi->tx_size];
  *psse       = sse[mbmi->tx_size];

  tx_cache[ONLY_4X4] = rd[TX_4X4][0];
  tx_cache[ALLOW_8X8] = rd[TX_8X8][0];
  tx_cache[ALLOW_16X16] = rd[MIN(max_tx_size, TX_16X16)][0];
  tx_cache[ALLOW_32X32] = rd[MIN(max_tx_size, TX_32X32)][0];

  if (max_tx_size == TX_32X32 && best_tx == TX_32X32) {
    tx_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
  } else if (max_tx_size >= TX_16X16 && best_tx == TX_16X16) {
    tx_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
  } else if (rd[TX_8X8][1] < rd[TX_4X4][1]) {
    tx_cache[TX_MODE_SELECT] = rd[TX_8X8][1];
  } else {
    tx_cache[TX_MODE_SELECT] = rd[TX_4X4][1];
  }
}
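/* Note on the tx_cache fill above: rd[n][0] is the rd cost when the
 * transform size is implied by a fixed tx_mode (no size bits coded), while
 * rd[n][1] additionally includes the tx_probs signalling cost that
 * TX_MODE_SELECT pays -- hence tx_cache[TX_MODE_SELECT] is taken from the
 * [1] column. */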
static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                            int64_t *distortion, int *skip,
                            int64_t *psse, BLOCK_SIZE bs,
                            int64_t txfm_cache[TX_MODES],
                            int64_t ref_best_rd) {
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t sse;
  int64_t *ret_sse = psse ? psse : &sse;

  assert(bs == xd->mi[0].src_mi->mbmi.sb_type);

  if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
    vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
    choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                           bs);
  } else {
    choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
                           txfm_cache, ref_best_rd, bs);
  }
}
static int conditional_skipintra(PREDICTION_MODE mode,
                                 PREDICTION_MODE best_intra_mode) {
  if (mode == D117_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  if (mode == D63_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D207_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D153_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  return 0;
}
*cpi
, MACROBLOCK
*x
, int ib
,
749 PREDICTION_MODE
*best_mode
,
750 const int *bmode_costs
,
751 ENTROPY_CONTEXT
*a
, ENTROPY_CONTEXT
*l
,
752 int *bestrate
, int *bestratey
,
753 int64_t *bestdistortion
,
754 BLOCK_SIZE bsize
, int64_t rd_thresh
) {
755 PREDICTION_MODE mode
;
756 MACROBLOCKD
*const xd
= &x
->e_mbd
;
757 int64_t best_rd
= rd_thresh
;
759 struct macroblock_plane
*p
= &x
->plane
[0];
760 struct macroblockd_plane
*pd
= &xd
->plane
[0];
761 const int src_stride
= p
->src
.stride
;
762 const int dst_stride
= pd
->dst
.stride
;
763 const uint8_t *src_init
= &p
->src
.buf
[vp9_raster_block_offset(BLOCK_8X8
, ib
,
765 uint8_t *dst_init
= &pd
->dst
.buf
[vp9_raster_block_offset(BLOCK_8X8
, ib
,
767 ENTROPY_CONTEXT ta
[2], tempa
[2];
768 ENTROPY_CONTEXT tl
[2], templ
[2];
770 const int num_4x4_blocks_wide
= num_4x4_blocks_wide_lookup
[bsize
];
771 const int num_4x4_blocks_high
= num_4x4_blocks_high_lookup
[bsize
];
773 uint8_t best_dst
[8 * 8];
774 #if CONFIG_VP9_HIGHBITDEPTH
775 uint16_t best_dst16
[8 * 8];
780 vpx_memcpy(ta
, a
, sizeof(ta
));
781 vpx_memcpy(tl
, l
, sizeof(tl
));
782 xd
->mi
[0].src_mi
->mbmi
.tx_size
= TX_4X4
;
784 #if CONFIG_VP9_HIGHBITDEPTH
785 if (xd
->cur_buf
->flags
& YV12_FLAG_HIGHBITDEPTH
) {
786 for (mode
= DC_PRED
; mode
<= TM_PRED
; ++mode
) {
789 int64_t distortion
= 0;
790 int rate
= bmode_costs
[mode
];
792 if (!(cpi
->sf
.intra_y_mode_mask
[TX_4X4
] & (1 << mode
)))
795 // Only do the oblique modes if the best so far is
796 // one of the neighboring directional modes
797 if (cpi
->sf
.mode_search_skip_flags
& FLAG_SKIP_INTRA_DIRMISMATCH
) {
798 if (conditional_skipintra(mode
, *best_mode
))
802 vpx_memcpy(tempa
, ta
, sizeof(ta
));
803 vpx_memcpy(templ
, tl
, sizeof(tl
));
805 for (idy
= 0; idy
< num_4x4_blocks_high
; ++idy
) {
806 for (idx
= 0; idx
< num_4x4_blocks_wide
; ++idx
) {
807 const int block
= ib
+ idy
* 2 + idx
;
808 const uint8_t *const src
= &src_init
[idx
* 4 + idy
* 4 * src_stride
];
809 uint8_t *const dst
= &dst_init
[idx
* 4 + idy
* 4 * dst_stride
];
810 int16_t *const src_diff
= vp9_raster_block_offset_int16(BLOCK_8X8
,
813 tran_low_t
*const coeff
= BLOCK_OFFSET(x
->plane
[0].coeff
, block
);
814 xd
->mi
[0].src_mi
->bmi
[block
].as_mode
= mode
;
815 vp9_predict_intra_block(xd
, block
, 1,
817 x
->skip_encode
? src
: dst
,
818 x
->skip_encode
? src_stride
: dst_stride
,
819 dst
, dst_stride
, idx
, idy
, 0);
820 vp9_highbd_subtract_block(4, 4, src_diff
, 8, src
, src_stride
,
821 dst
, dst_stride
, xd
->bd
);
823 const scan_order
*so
= &vp9_default_scan_orders
[TX_4X4
];
824 vp9_highbd_fwht4x4(src_diff
, coeff
, 8);
825 vp9_regular_quantize_b_4x4(x
, 0, block
, so
->scan
, so
->iscan
);
826 ratey
+= cost_coeffs(x
, 0, block
, tempa
+ idx
, templ
+ idy
, TX_4X4
,
827 so
->scan
, so
->neighbors
,
828 cpi
->sf
.use_fast_coef_costing
);
829 if (RDCOST(x
->rdmult
, x
->rddiv
, ratey
, distortion
) >= best_rd
)
831 vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd
->dqcoeff
, block
),
833 p
->eobs
[block
], xd
->bd
);
836 const TX_TYPE tx_type
= get_tx_type_4x4(PLANE_TYPE_Y
, xd
, block
);
837 const scan_order
*so
= &vp9_scan_orders
[TX_4X4
][tx_type
];
838 vp9_highbd_fht4x4(src_diff
, coeff
, 8, tx_type
);
839 vp9_regular_quantize_b_4x4(x
, 0, block
, so
->scan
, so
->iscan
);
840 ratey
+= cost_coeffs(x
, 0, block
, tempa
+ idx
, templ
+ idy
, TX_4X4
,
841 so
->scan
, so
->neighbors
,
842 cpi
->sf
.use_fast_coef_costing
);
843 distortion
+= vp9_highbd_block_error(
844 coeff
, BLOCK_OFFSET(pd
->dqcoeff
, block
),
845 16, &unused
, xd
->bd
) >> 2;
846 if (RDCOST(x
->rdmult
, x
->rddiv
, ratey
, distortion
) >= best_rd
)
848 vp9_highbd_iht4x4_add(tx_type
, BLOCK_OFFSET(pd
->dqcoeff
, block
),
849 dst
, dst_stride
, p
->eobs
[block
], xd
->bd
);
855 this_rd
= RDCOST(x
->rdmult
, x
->rddiv
, rate
, distortion
);
857 if (this_rd
< best_rd
) {
860 *bestdistortion
= distortion
;
863 vpx_memcpy(a
, tempa
, sizeof(tempa
));
864 vpx_memcpy(l
, templ
, sizeof(templ
));
865 for (idy
= 0; idy
< num_4x4_blocks_high
* 4; ++idy
) {
866 vpx_memcpy(best_dst16
+ idy
* 8,
867 CONVERT_TO_SHORTPTR(dst_init
+ idy
* dst_stride
),
868 num_4x4_blocks_wide
* 4 * sizeof(uint16_t));
874 if (best_rd
>= rd_thresh
|| x
->skip_encode
)
877 for (idy
= 0; idy
< num_4x4_blocks_high
* 4; ++idy
) {
878 vpx_memcpy(CONVERT_TO_SHORTPTR(dst_init
+ idy
* dst_stride
),
879 best_dst16
+ idy
* 8,
880 num_4x4_blocks_wide
* 4 * sizeof(uint16_t));
885 #endif // CONFIG_VP9_HIGHBITDEPTH
887 for (mode
= DC_PRED
; mode
<= TM_PRED
; ++mode
) {
890 int64_t distortion
= 0;
891 int rate
= bmode_costs
[mode
];
893 if (!(cpi
->sf
.intra_y_mode_mask
[TX_4X4
] & (1 << mode
)))
896 // Only do the oblique modes if the best so far is
897 // one of the neighboring directional modes
898 if (cpi
->sf
.mode_search_skip_flags
& FLAG_SKIP_INTRA_DIRMISMATCH
) {
899 if (conditional_skipintra(mode
, *best_mode
))
903 vpx_memcpy(tempa
, ta
, sizeof(ta
));
904 vpx_memcpy(templ
, tl
, sizeof(tl
));
906 for (idy
= 0; idy
< num_4x4_blocks_high
; ++idy
) {
907 for (idx
= 0; idx
< num_4x4_blocks_wide
; ++idx
) {
908 const int block
= ib
+ idy
* 2 + idx
;
909 const uint8_t *const src
= &src_init
[idx
* 4 + idy
* 4 * src_stride
];
910 uint8_t *const dst
= &dst_init
[idx
* 4 + idy
* 4 * dst_stride
];
911 int16_t *const src_diff
=
912 vp9_raster_block_offset_int16(BLOCK_8X8
, block
, p
->src_diff
);
913 tran_low_t
*const coeff
= BLOCK_OFFSET(x
->plane
[0].coeff
, block
);
914 xd
->mi
[0].src_mi
->bmi
[block
].as_mode
= mode
;
915 vp9_predict_intra_block(xd
, block
, 1,
917 x
->skip_encode
? src
: dst
,
918 x
->skip_encode
? src_stride
: dst_stride
,
919 dst
, dst_stride
, idx
, idy
, 0);
920 vp9_subtract_block(4, 4, src_diff
, 8, src
, src_stride
, dst
, dst_stride
);
923 const scan_order
*so
= &vp9_default_scan_orders
[TX_4X4
];
924 vp9_fwht4x4(src_diff
, coeff
, 8);
925 vp9_regular_quantize_b_4x4(x
, 0, block
, so
->scan
, so
->iscan
);
926 ratey
+= cost_coeffs(x
, 0, block
, tempa
+ idx
, templ
+ idy
, TX_4X4
,
927 so
->scan
, so
->neighbors
,
928 cpi
->sf
.use_fast_coef_costing
);
929 if (RDCOST(x
->rdmult
, x
->rddiv
, ratey
, distortion
) >= best_rd
)
931 vp9_iwht4x4_add(BLOCK_OFFSET(pd
->dqcoeff
, block
), dst
, dst_stride
,
935 const TX_TYPE tx_type
= get_tx_type_4x4(PLANE_TYPE_Y
, xd
, block
);
936 const scan_order
*so
= &vp9_scan_orders
[TX_4X4
][tx_type
];
937 vp9_fht4x4(src_diff
, coeff
, 8, tx_type
);
938 vp9_regular_quantize_b_4x4(x
, 0, block
, so
->scan
, so
->iscan
);
939 ratey
+= cost_coeffs(x
, 0, block
, tempa
+ idx
, templ
+ idy
, TX_4X4
,
940 so
->scan
, so
->neighbors
,
941 cpi
->sf
.use_fast_coef_costing
);
942 distortion
+= vp9_block_error(coeff
, BLOCK_OFFSET(pd
->dqcoeff
, block
),
944 if (RDCOST(x
->rdmult
, x
->rddiv
, ratey
, distortion
) >= best_rd
)
946 vp9_iht4x4_add(tx_type
, BLOCK_OFFSET(pd
->dqcoeff
, block
),
947 dst
, dst_stride
, p
->eobs
[block
]);
953 this_rd
= RDCOST(x
->rdmult
, x
->rddiv
, rate
, distortion
);
955 if (this_rd
< best_rd
) {
958 *bestdistortion
= distortion
;
961 vpx_memcpy(a
, tempa
, sizeof(tempa
));
962 vpx_memcpy(l
, templ
, sizeof(templ
));
963 for (idy
= 0; idy
< num_4x4_blocks_high
* 4; ++idy
)
964 vpx_memcpy(best_dst
+ idy
* 8, dst_init
+ idy
* dst_stride
,
965 num_4x4_blocks_wide
* 4);
971 if (best_rd
>= rd_thresh
|| x
->skip_encode
)
974 for (idy
= 0; idy
< num_4x4_blocks_high
* 4; ++idy
)
975 vpx_memcpy(dst_init
+ idy
* dst_stride
, best_dst
+ idy
* 8,
976 num_4x4_blocks_wide
* 4);
static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
                                            int *rate, int *rate_y,
                                            int64_t *distortion,
                                            int64_t best_rd) {
  int i, j;
  const MACROBLOCKD *const xd = &mb->e_mbd;
  MODE_INFO *const mic = xd->mi[0].src_mi;
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
  int cost = 0;
  int64_t total_distortion = 0;
  int tot_rate_y = 0;
  int64_t total_rd = 0;
  ENTROPY_CONTEXT t_above[4], t_left[4];
  const int *bmode_costs = cpi->mbmode_cost;

  vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
  vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));

  // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      PREDICTION_MODE best_mode = DC_PRED;
      int r = INT_MAX, ry = INT_MAX;
      int64_t d = INT64_MAX, this_rd = INT64_MAX;
      i = idy * 2 + idx;
      if (cpi->common.frame_type == KEY_FRAME) {
        const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
        const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);

        bmode_costs  = cpi->y_mode_costs[A][L];
      }

      this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
                                      t_above + idx, t_left + idy, &r, &ry, &d,
                                      bsize, best_rd - total_rd);
      if (this_rd >= best_rd - total_rd)
        return INT64_MAX;

      total_rd += this_rd;
      cost += r;
      total_distortion += d;
      tot_rate_y += ry;

      mic->bmi[i].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_high; ++j)
        mic->bmi[i + j * 2].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_wide; ++j)
        mic->bmi[i + j].as_mode = best_mode;

      if (total_rd >= best_rd)
        return INT64_MAX;
    }
  }

  *rate = cost;
  *rate_y = tot_rate_y;
  *distortion = total_distortion;
  mic->mbmi.mode = mic->bmi[3].as_mode;

  return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
}
// This function is used only for intra_only frames
static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                      int *rate, int *rate_tokenonly,
                                      int64_t *distortion, int *skippable,
                                      BLOCK_SIZE bsize,
                                      int64_t tx_cache[TX_MODES],
                                      int64_t best_rd) {
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mic = xd->mi[0].src_mi;
  int this_rate, this_rate_tokenonly, s;
  int64_t this_distortion, this_rd;
  TX_SIZE best_tx = TX_4X4;
  int i;
  int *bmode_costs;
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];

  if (cpi->sf.tx_size_search_method == USE_FULL_RD)
    for (i = 0; i < TX_MODES; i++)
      tx_cache[i] = INT64_MAX;

  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
  /* Y Search for intra prediction mode */
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    int64_t local_tx_cache[TX_MODES];

    if (cpi->sf.use_nonrd_pick_mode) {
      // These speed features are turned on in hybrid non-RD and RD mode
      // for key frame coding in the context of real-time setting.
      if (conditional_skipintra(mode, mode_selected))
        continue;
      if (*skippable)
        break;
    }

    mic->mbmi.mode = mode;

    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
                    &s, NULL, bsize, local_tx_cache, best_rd);

    if (this_rate_tokenonly == INT_MAX)
      continue;

    this_rate = this_rate_tokenonly + bmode_costs[mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected   = mode;
      best_rd         = this_rd;
      best_tx         = mic->mbmi.tx_size;
      *rate           = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion     = this_distortion;
      *skippable      = s;
    }

    if (cpi->sf.tx_size_search_method == USE_FULL_RD && this_rd < INT64_MAX) {
      for (i = 0; i < TX_MODES && local_tx_cache[i] < INT64_MAX; i++) {
        const int64_t adj_rd = this_rd + local_tx_cache[i] -
            local_tx_cache[cpi->common.tx_mode];
        if (adj_rd < tx_cache[i]) {
          tx_cache[i] = adj_rd;
        }
      }
    }
  }

  mic->mbmi.mode = mode_selected;
  mic->mbmi.tx_size = best_tx;

  return best_rd;
}
1127 // 1: rd cost values are valid.
1128 static int super_block_uvrd(const VP9_COMP
*cpi
, MACROBLOCK
*x
,
1129 int *rate
, int64_t *distortion
, int *skippable
,
1130 int64_t *sse
, BLOCK_SIZE bsize
,
1131 int64_t ref_best_rd
) {
1132 MACROBLOCKD
*const xd
= &x
->e_mbd
;
1133 MB_MODE_INFO
*const mbmi
= &xd
->mi
[0].src_mi
->mbmi
;
1134 const TX_SIZE uv_tx_size
= get_uv_tx_size(mbmi
, &xd
->plane
[1]);
1136 int pnrate
= 0, pnskip
= 1;
1137 int64_t pndist
= 0, pnsse
= 0;
1138 int is_cost_valid
= 1;
1140 if (ref_best_rd
< 0)
1143 if (is_inter_block(mbmi
) && is_cost_valid
) {
1145 for (plane
= 1; plane
< MAX_MB_PLANE
; ++plane
)
1146 vp9_subtract_plane(x
, bsize
, plane
);
1154 for (plane
= 1; plane
< MAX_MB_PLANE
; ++plane
) {
1155 txfm_rd_in_plane(x
, &pnrate
, &pndist
, &pnskip
, &pnsse
,
1156 ref_best_rd
, plane
, bsize
, uv_tx_size
,
1157 cpi
->sf
.use_fast_coef_costing
);
1158 if (pnrate
== INT_MAX
) {
1163 *distortion
+= pndist
;
1165 *skippable
&= pnskip
;
1168 if (!is_cost_valid
) {
1171 *distortion
= INT64_MAX
;
1176 return is_cost_valid
;
static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                       PICK_MODE_CONTEXT *ctx,
                                       int *rate, int *rate_tokenonly,
                                       int64_t *distortion, int *skippable,
                                       BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
  MACROBLOCKD *xd = &x->e_mbd;
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  int64_t best_rd = INT64_MAX, this_rd;
  int this_rate_tokenonly, this_rate, s;
  int64_t this_distortion, this_sse;

  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
      continue;

    xd->mi[0].src_mi->mbmi.uv_mode = mode;

    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
                          &this_distortion, &s, &this_sse, bsize, best_rd))
      continue;
    this_rate = this_rate_tokenonly +
        cpi->intra_uv_mode_cost[cpi->common.frame_type][mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected   = mode;
      best_rd         = this_rd;
      *rate           = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion     = this_distortion;
      *skippable      = s;
      if (!x->select_tx_size)
        swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
    }
  }

  xd->mi[0].src_mi->mbmi.uv_mode = mode_selected;
  return best_rd;
}
*cpi
, MACROBLOCK
*x
,
1222 int *rate
, int *rate_tokenonly
,
1223 int64_t *distortion
, int *skippable
,
1225 const VP9_COMMON
*cm
= &cpi
->common
;
1228 x
->e_mbd
.mi
[0].src_mi
->mbmi
.uv_mode
= DC_PRED
;
1229 vpx_memset(x
->skip_txfm
, 0, sizeof(x
->skip_txfm
));
1230 super_block_uvrd(cpi
, x
, rate_tokenonly
, distortion
,
1231 skippable
, &unused
, bsize
, INT64_MAX
);
1232 *rate
= *rate_tokenonly
+ cpi
->intra_uv_mode_cost
[cm
->frame_type
][DC_PRED
];
1233 return RDCOST(x
->rdmult
, x
->rddiv
, *rate
, *distortion
);
static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
                                 PICK_MODE_CONTEXT *ctx,
                                 BLOCK_SIZE bsize, TX_SIZE max_tx_size,
                                 int *rate_uv, int *rate_uv_tokenonly,
                                 int64_t *dist_uv, int *skip_uv,
                                 PREDICTION_MODE *mode_uv) {
  // Use an estimated rd for uv_intra based on DC_PRED if the
  // appropriate speed flag is set.
  if (cpi->sf.use_uv_intra_rd_estimate) {
    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
                   skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
  // Else do a proper rd search for each possible transform size that may
  // be considered in the main rd loop.
  } else {
    rd_pick_intra_sbuv_mode(cpi, x, ctx,
                            rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
                            bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
  }
  *mode_uv = x->e_mbd.mi[0].src_mi->mbmi.uv_mode;
}
static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
                       int mode_context) {
  assert(is_inter_mode(mode));
  return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
}
static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                BLOCK_SIZE bsize,
                                int_mv *frame_mv,
                                int mi_row, int mi_col,
                                int_mv single_newmv[MAX_REF_FRAMES],
                                int *rate_mv);
static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCKD *xd, int i,
                                PREDICTION_MODE mode, int_mv this_mv[2],
                                int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                int_mv seg_mvs[MAX_REF_FRAMES],
                                int_mv *best_ref_mv[2], const int *mvjcost,
                                int *mvcost[2]) {
  MODE_INFO *const mic = xd->mi[0].src_mi;
  const MB_MODE_INFO *const mbmi = &mic->mbmi;
  int thismvcost = 0;
  int idx, idy;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
  const int is_compound = has_second_ref(mbmi);

  switch (mode) {
    case NEWMV:
      this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
      thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
      if (is_compound) {
        this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
        thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
                                      mvjcost, mvcost, MV_COST_WEIGHT_SUB);
      }
      break;
    case NEARMV:
    case NEARESTMV:
      this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
      if (is_compound)
        this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
      break;
    case ZEROMV:
      this_mv[0].as_int = 0;
      if (is_compound)
        this_mv[1].as_int = 0;
      break;
    default:
      break;
  }

  mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
  if (is_compound)
    mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;

  mic->bmi[i].as_mode = mode;

  for (idy = 0; idy < num_4x4_blocks_high; ++idy)
    for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
      vpx_memcpy(&mic->bmi[i + idy * 2 + idx],
                 &mic->bmi[i], sizeof(mic->bmi[i]));

  return cost_mv_ref(cpi, mode, mbmi->mode_context[mbmi->ref_frame[0]]) +
            thismvcost;
}
static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
                                       MACROBLOCK *x,
                                       int64_t best_yrd,
                                       int i,
                                       int *labelyrate,
                                       int64_t *distortion, int64_t *sse,
                                       ENTROPY_CONTEXT *ta,
                                       ENTROPY_CONTEXT *tl,
                                       int mi_row, int mi_col) {
  int k;
  MACROBLOCKD *xd = &x->e_mbd;
  struct macroblockd_plane *const pd = &xd->plane[0];
  struct macroblock_plane *const p = &x->plane[0];
  MODE_INFO *const mi = xd->mi[0].src_mi;
  const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
  const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
  int idx, idy;

  const uint8_t *const src =
      &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
  uint8_t *const dst = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i,
                                                            pd->dst.stride)];
  int64_t thisdistortion = 0, thissse = 0;
  int thisrate = 0, ref;
  const scan_order *so = &vp9_default_scan_orders[TX_4X4];
  const int is_compound = has_second_ref(&mi->mbmi);
  const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const uint8_t *pre = &pd->pre[ref].buf[vp9_raster_block_offset(BLOCK_8X8, i,
                                               pd->pre[ref].stride)];
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_highbd_build_inter_predictor(pre, pd->pre[ref].stride,
                                       dst, pd->dst.stride,
                                       &mi->bmi[i].as_mv[ref].as_mv,
                                       &xd->block_refs[ref]->sf, width, height,
                                       ref, kernel, MV_PRECISION_Q3,
                                       mi_col * MI_SIZE + 4 * (i % 2),
                                       mi_row * MI_SIZE + 4 * (i / 2), xd->bd);
    } else {
      vp9_build_inter_predictor(pre, pd->pre[ref].stride,
                                dst, pd->dst.stride,
                                &mi->bmi[i].as_mv[ref].as_mv,
                                &xd->block_refs[ref]->sf, width, height, ref,
                                kernel, MV_PRECISION_Q3,
                                mi_col * MI_SIZE + 4 * (i % 2),
                                mi_row * MI_SIZE + 4 * (i / 2));
    }
#else
    vp9_build_inter_predictor(pre, pd->pre[ref].stride,
                              dst, pd->dst.stride,
                              &mi->bmi[i].as_mv[ref].as_mv,
                              &xd->block_refs[ref]->sf, width, height, ref,
                              kernel, MV_PRECISION_Q3,
                              mi_col * MI_SIZE + 4 * (i % 2),
                              mi_row * MI_SIZE + 4 * (i / 2));
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vp9_highbd_subtract_block(
        height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
        8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
  } else {
    vp9_subtract_block(
        height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
        8, src, p->src.stride, dst, pd->dst.stride);
  }
#else
  vp9_subtract_block(height, width,
                     vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                     8, src, p->src.stride, dst, pd->dst.stride);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  k = i;
  for (idy = 0; idy < height / 4; ++idy) {
    for (idx = 0; idx < width / 4; ++idx) {
      int64_t ssz, rd, rd1, rd2;
      tran_low_t* coeff;

      k += (idy * 2 + idx);
      coeff = BLOCK_OFFSET(p->coeff, k);
      x->fwd_txm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
                    coeff, 8);
      vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        thisdistortion += vp9_highbd_block_error(coeff,
                                                 BLOCK_OFFSET(pd->dqcoeff, k),
                                                 16, &ssz, xd->bd);
      } else {
        thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
                                          16, &ssz);
      }
#else
      thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
                                        16, &ssz);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      thissse += ssz;
      thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4,
                              so->scan, so->neighbors,
                              cpi->sf.use_fast_coef_costing);
      rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
      rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
      rd = MIN(rd1, rd2);
      if (rd >= best_yrd)
        return INT64_MAX;
    }
  }

  *distortion = thisdistortion >> 2;
  *labelyrate = thisrate;
  *sse = thissse >> 2;

  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
[2];
1454 ENTROPY_CONTEXT tl
[2];
1466 PREDICTION_MODE modes
[4];
1467 SEG_RDSTAT rdstat
[4][INTER_MODES
];
1471 static INLINE
int mv_check_bounds(const MACROBLOCK
*x
, const MV
*mv
) {
1472 return (mv
->row
>> 3) < x
->mv_row_min
||
1473 (mv
->row
>> 3) > x
->mv_row_max
||
1474 (mv
->col
>> 3) < x
->mv_col_min
||
1475 (mv
->col
>> 3) > x
->mv_col_max
;
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
  MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0].src_mi->mbmi;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &x->e_mbd.plane[0];

  p->src.buf = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i,
                                                   p->src.stride)];
  assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
  pd->pre[0].buf = &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i,
                                                           pd->pre[0].stride)];
  if (has_second_ref(mbmi))
    pd->pre[1].buf = &pd->pre[1].buf[vp9_raster_block_offset(BLOCK_8X8, i,
                                                           pd->pre[1].stride)];
}
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
                                  struct buf_2d orig_pre[2]) {
  MB_MODE_INFO *mbmi = &x->e_mbd.mi[0].src_mi->mbmi;
  x->plane[0].src = orig_src;
  x->e_mbd.plane[0].pre[0] = orig_pre[0];
  if (has_second_ref(mbmi))
    x->e_mbd.plane[0].pre[1] = orig_pre[1];
}
static INLINE int mv_has_subpel(const MV *mv) {
  return (mv->row & 0x0F) || (mv->col & 0x0F);
}
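/* Note: RDCOST(), used throughout this file, folds a rate term (weighted by
 * x->rdmult) and a distortion term (scaled by x->rddiv) into a single
 * comparable Lagrangian cost, conceptually J = lambda * rate + distortion;
 * see vp9_rd.h for the exact fixed-point macro. */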
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive then clean up or remove
static int check_best_zero_mv(
    const VP9_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
    const MV_REFERENCE_FRAME ref_frames[2]) {
  if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
      frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
      (ref_frames[1] == NONE ||
       frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
    int rfc = mode_context[ref_frames[0]];
    int c1 = cost_mv_ref(cpi, NEARMV, rfc);
    int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
    int c3 = cost_mv_ref(cpi, ZEROMV, rfc);

    if (this_mode == NEARMV) {
      if (c1 > c3) return 0;
    } else if (this_mode == NEARESTMV) {
      if (c2 > c3) return 0;
    } else {
      assert(this_mode == ZEROMV);
      if (ref_frames[1] == NONE) {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
          return 0;
      } else {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARMV][ref_frames[1]].as_int == 0))
          return 0;
      }
    }
  }
  return 1;
}
*cpi
, MACROBLOCK
*x
,
1544 const TileInfo
* const tile
,
1545 int_mv
*best_ref_mv
,
1546 int_mv
*second_best_ref_mv
,
1547 int64_t best_rd
, int *returntotrate
,
1549 int64_t *returndistortion
,
1550 int *skippable
, int64_t *psse
,
1552 int_mv seg_mvs
[4][MAX_REF_FRAMES
],
1553 BEST_SEG_INFO
*bsi_buf
, int filter_idx
,
1554 int mi_row
, int mi_col
) {
1556 BEST_SEG_INFO
*bsi
= bsi_buf
+ filter_idx
;
1557 MACROBLOCKD
*xd
= &x
->e_mbd
;
1558 MODE_INFO
*mi
= xd
->mi
[0].src_mi
;
1559 MB_MODE_INFO
*mbmi
= &mi
->mbmi
;
1561 int k
, br
= 0, idx
, idy
;
1562 int64_t bd
= 0, block_sse
= 0;
1563 PREDICTION_MODE this_mode
;
1564 VP9_COMMON
*cm
= &cpi
->common
;
1565 struct macroblock_plane
*const p
= &x
->plane
[0];
1566 struct macroblockd_plane
*const pd
= &xd
->plane
[0];
1567 const int label_count
= 4;
1568 int64_t this_segment_rd
= 0;
1569 int label_mv_thresh
;
1570 int segmentyrate
= 0;
1571 const BLOCK_SIZE bsize
= mbmi
->sb_type
;
1572 const int num_4x4_blocks_wide
= num_4x4_blocks_wide_lookup
[bsize
];
1573 const int num_4x4_blocks_high
= num_4x4_blocks_high_lookup
[bsize
];
1574 ENTROPY_CONTEXT t_above
[2], t_left
[2];
1575 int subpelmv
= 1, have_ref
= 0;
1576 const int has_second_rf
= has_second_ref(mbmi
);
1577 const int inter_mode_mask
= cpi
->sf
.inter_mode_mask
[bsize
];
1581 bsi
->segment_rd
= best_rd
;
1582 bsi
->ref_mv
[0] = best_ref_mv
;
1583 bsi
->ref_mv
[1] = second_best_ref_mv
;
1584 bsi
->mvp
.as_int
= best_ref_mv
->as_int
;
1585 bsi
->mvthresh
= mvthresh
;
1587 for (i
= 0; i
< 4; i
++)
1588 bsi
->modes
[i
] = ZEROMV
;
1590 vpx_memcpy(t_above
, pd
->above_context
, sizeof(t_above
));
1591 vpx_memcpy(t_left
, pd
->left_context
, sizeof(t_left
));
1593 // 64 makes this threshold really big effectively
1594 // making it so that we very rarely check mvs on
1595 // segments. setting this to 1 would make mv thresh
1596 // roughly equal to what it is for macroblocks
1597 label_mv_thresh
= 1 * bsi
->mvthresh
/ label_count
;
1599 // Segmentation method overheads
1600 for (idy
= 0; idy
< 2; idy
+= num_4x4_blocks_high
) {
1601 for (idx
= 0; idx
< 2; idx
+= num_4x4_blocks_wide
) {
1602 // TODO(jingning,rbultje): rewrite the rate-distortion optimization
1603 // loop for 4x4/4x8/8x4 block coding. to be replaced with new rd loop
1604 int_mv mode_mv
[MB_MODE_COUNT
][2];
1605 int_mv frame_mv
[MB_MODE_COUNT
][MAX_REF_FRAMES
];
1606 PREDICTION_MODE mode_selected
= ZEROMV
;
1607 int64_t best_rd
= INT64_MAX
;
1608 const int i
= idy
* 2 + idx
;
1611 for (ref
= 0; ref
< 1 + has_second_rf
; ++ref
) {
1612 const MV_REFERENCE_FRAME frame
= mbmi
->ref_frame
[ref
];
1613 frame_mv
[ZEROMV
][frame
].as_int
= 0;
1614 vp9_append_sub8x8_mvs_for_idx(cm
, xd
, tile
, i
, ref
, mi_row
, mi_col
,
1615 &frame_mv
[NEARESTMV
][frame
],
1616 &frame_mv
[NEARMV
][frame
]);
1619 // search for the best motion vector on this segment
1620 for (this_mode
= NEARESTMV
; this_mode
<= NEWMV
; ++this_mode
) {
1621 const struct buf_2d orig_src
= x
->plane
[0].src
;
1622 struct buf_2d orig_pre
[2];
1624 mode_idx
= INTER_OFFSET(this_mode
);
1625 bsi
->rdstat
[i
][mode_idx
].brdcost
= INT64_MAX
;
1626 if (!(inter_mode_mask
& (1 << this_mode
)))
1629 if (!check_best_zero_mv(cpi
, mbmi
->mode_context
, frame_mv
,
1630 this_mode
, mbmi
->ref_frame
))
1633 vpx_memcpy(orig_pre
, pd
->pre
, sizeof(orig_pre
));
1634 vpx_memcpy(bsi
->rdstat
[i
][mode_idx
].ta
, t_above
,
1635 sizeof(bsi
->rdstat
[i
][mode_idx
].ta
));
1636 vpx_memcpy(bsi
->rdstat
[i
][mode_idx
].tl
, t_left
,
1637 sizeof(bsi
->rdstat
[i
][mode_idx
].tl
));
1639 // motion search for newmv (single predictor case only)
1640 if (!has_second_rf
&& this_mode
== NEWMV
&&
1641 seg_mvs
[i
][mbmi
->ref_frame
[0]].as_int
== INVALID_MV
) {
1642 MV
*const new_mv
= &mode_mv
[NEWMV
][0].as_mv
;
1644 int thissme
, bestsme
= INT_MAX
;
1645 int sadpb
= x
->sadperbit4
;
1650 /* Is the best so far sufficiently good that we cant justify doing
1651 * and new motion search. */
1652 if (best_rd
< label_mv_thresh
)
1655 if (cpi
->oxcf
.mode
!= BEST
) {
1656 // use previous block's result as next block's MV predictor.
1658 bsi
->mvp
.as_int
= mi
->bmi
[i
- 1].as_mv
[0].as_int
;
1660 bsi
->mvp
.as_int
= mi
->bmi
[i
- 2].as_mv
[0].as_int
;
1664 max_mv
= x
->max_mv_context
[mbmi
->ref_frame
[0]];
1666 max_mv
= MAX(abs(bsi
->mvp
.as_mv
.row
), abs(bsi
->mvp
.as_mv
.col
)) >> 3;
1668 if (cpi
->sf
.mv
.auto_mv_step_size
&& cm
->show_frame
) {
1669 // Take wtd average of the step_params based on the last frame's
1670 // max mv magnitude and the best ref mvs of the current block for
1671 // the given reference.
1672 step_param
= (vp9_init_search_range(max_mv
) +
1673 cpi
->mv_step_param
) / 2;
1675 step_param
= cpi
->mv_step_param
;
1678 mvp_full
.row
= bsi
->mvp
.as_mv
.row
>> 3;
1679 mvp_full
.col
= bsi
->mvp
.as_mv
.col
>> 3;
1681 if (cpi
->sf
.adaptive_motion_search
) {
1682 mvp_full
.row
= x
->pred_mv
[mbmi
->ref_frame
[0]].row
>> 3;
1683 mvp_full
.col
= x
->pred_mv
[mbmi
->ref_frame
[0]].col
>> 3;
1684 step_param
= MAX(step_param
, 8);
1687 // adjust src pointer for this block
1690 vp9_set_mv_search_range(x
, &bsi
->ref_mv
[0]->as_mv
);
1692 bestsme
= vp9_full_pixel_search(
1693 cpi
, x
, bsize
, &mvp_full
, step_param
, sadpb
,
1694 cpi
->sf
.mv
.subpel_search_method
!= SUBPEL_TREE
? cost_list
: NULL
,
1695 &bsi
->ref_mv
[0]->as_mv
, new_mv
,
1698 // Should we do a full search (best quality only)
1699 if (cpi
->oxcf
.mode
== BEST
) {
1700 int_mv
*const best_mv
= &mi
->bmi
[i
].as_mv
[0];
1701 /* Check if mvp_full is within the range. */
1702 clamp_mv(&mvp_full
, x
->mv_col_min
, x
->mv_col_max
,
1703 x
->mv_row_min
, x
->mv_row_max
);
1704 thissme
= cpi
->full_search_sad(x
, &mvp_full
,
1705 sadpb
, 16, &cpi
->fn_ptr
[bsize
],
1706 &bsi
->ref_mv
[0]->as_mv
,
1708 cost_list
[1] = cost_list
[2] = cost_list
[3] = cost_list
[4] = INT_MAX
;
1709 if (thissme
< bestsme
) {
1711 *new_mv
= best_mv
->as_mv
;
1713 // The full search result is actually worse so re-instate the
1714 // previous best vector
1715 best_mv
->as_mv
= *new_mv
;
1719 if (bestsme
< INT_MAX
) {
1721 cpi
->find_fractional_mv_step(
1724 &bsi
->ref_mv
[0]->as_mv
,
1725 cm
->allow_high_precision_mv
,
1726 x
->errorperbit
, &cpi
->fn_ptr
[bsize
],
1727 cpi
->sf
.mv
.subpel_force_stop
,
1728 cpi
->sf
.mv
.subpel_iters_per_step
,
1729 cond_cost_list(cpi
, cost_list
),
1730 x
->nmvjointcost
, x
->mvcost
,
1732 &x
->pred_sse
[mbmi
->ref_frame
[0]],
1735 // save motion search result for use in compound prediction
1736 seg_mvs
[i
][mbmi
->ref_frame
[0]].as_mv
= *new_mv
;
1739 if (cpi
->sf
.adaptive_motion_search
)
1740 x
->pred_mv
[mbmi
->ref_frame
[0]] = *new_mv
;
1742 // restore src pointers
1743 mi_buf_restore(x
, orig_src
, orig_pre
);
1746 if (has_second_rf
) {
1747 if (seg_mvs
[i
][mbmi
->ref_frame
[1]].as_int
== INVALID_MV
||
1748 seg_mvs
[i
][mbmi
->ref_frame
[0]].as_int
== INVALID_MV
)
1752 if (has_second_rf
&& this_mode
== NEWMV
&&
1753 mbmi
->interp_filter
== EIGHTTAP
) {
1754 // adjust src pointers
1756 if (cpi
->sf
.comp_inter_joint_search_thresh
<= bsize
) {
1758 joint_motion_search(cpi
, x
, bsize
, frame_mv
[this_mode
],
1759 mi_row
, mi_col
, seg_mvs
[i
],
1761 seg_mvs
[i
][mbmi
->ref_frame
[0]].as_int
=
1762 frame_mv
[this_mode
][mbmi
->ref_frame
[0]].as_int
;
1763 seg_mvs
[i
][mbmi
->ref_frame
[1]].as_int
=
1764 frame_mv
[this_mode
][mbmi
->ref_frame
[1]].as_int
;
1766 // restore src pointers
1767 mi_buf_restore(x
, orig_src
, orig_pre
);
1770 bsi
->rdstat
[i
][mode_idx
].brate
=
1771 set_and_cost_bmi_mvs(cpi
, xd
, i
, this_mode
, mode_mv
[this_mode
],
1772 frame_mv
, seg_mvs
[i
], bsi
->ref_mv
,
1773 x
->nmvjointcost
, x
->mvcost
);
1775 for (ref
= 0; ref
< 1 + has_second_rf
; ++ref
) {
1776 bsi
->rdstat
[i
][mode_idx
].mvs
[ref
].as_int
=
1777 mode_mv
[this_mode
][ref
].as_int
;
1778 if (num_4x4_blocks_wide
> 1)
1779 bsi
->rdstat
[i
+ 1][mode_idx
].mvs
[ref
].as_int
=
1780 mode_mv
[this_mode
][ref
].as_int
;
1781 if (num_4x4_blocks_high
> 1)
1782 bsi
->rdstat
[i
+ 2][mode_idx
].mvs
[ref
].as_int
=
1783 mode_mv
[this_mode
][ref
].as_int
;
1786 // Trap vectors that reach beyond the UMV borders
1787 if (mv_check_bounds(x
, &mode_mv
[this_mode
][0].as_mv
) ||
1789 mv_check_bounds(x
, &mode_mv
[this_mode
][1].as_mv
)))
1792 if (filter_idx
> 0) {
1793 BEST_SEG_INFO
*ref_bsi
= bsi_buf
;
1797 for (ref
= 0; ref
< 1 + has_second_rf
; ++ref
) {
1798 subpelmv
|= mv_has_subpel(&mode_mv
[this_mode
][ref
].as_mv
);
1799 have_ref
&= mode_mv
[this_mode
][ref
].as_int
==
1800 ref_bsi
->rdstat
[i
][mode_idx
].mvs
[ref
].as_int
;
1803 if (filter_idx
> 1 && !subpelmv
&& !have_ref
) {
1804 ref_bsi
= bsi_buf
+ 1;
1806 for (ref
= 0; ref
< 1 + has_second_rf
; ++ref
)
1807 have_ref
&= mode_mv
[this_mode
][ref
].as_int
==
1808 ref_bsi
->rdstat
[i
][mode_idx
].mvs
[ref
].as_int
;
1811 if (!subpelmv
&& have_ref
&&
1812 ref_bsi
->rdstat
[i
][mode_idx
].brdcost
< INT64_MAX
) {
1813 vpx_memcpy(&bsi
->rdstat
[i
][mode_idx
], &ref_bsi
->rdstat
[i
][mode_idx
],
1814 sizeof(SEG_RDSTAT
));
1815 if (num_4x4_blocks_wide
> 1)
1816 bsi
->rdstat
[i
+ 1][mode_idx
].eobs
=
1817 ref_bsi
->rdstat
[i
+ 1][mode_idx
].eobs
;
1818 if (num_4x4_blocks_high
> 1)
1819 bsi
->rdstat
[i
+ 2][mode_idx
].eobs
=
1820 ref_bsi
->rdstat
[i
+ 2][mode_idx
].eobs
;
1822 if (bsi
->rdstat
[i
][mode_idx
].brdcost
< best_rd
) {
1823 mode_selected
= this_mode
;
1824 best_rd
= bsi
->rdstat
[i
][mode_idx
].brdcost
;
1830 bsi
->rdstat
[i
][mode_idx
].brdcost
=
1831 encode_inter_mb_segment(cpi
, x
,
1832 bsi
->segment_rd
- this_segment_rd
, i
,
1833 &bsi
->rdstat
[i
][mode_idx
].byrate
,
1834 &bsi
->rdstat
[i
][mode_idx
].bdist
,
1835 &bsi
->rdstat
[i
][mode_idx
].bsse
,
1836 bsi
->rdstat
[i
][mode_idx
].ta
,
1837 bsi
->rdstat
[i
][mode_idx
].tl
,
1839 if (bsi
->rdstat
[i
][mode_idx
].brdcost
< INT64_MAX
) {
1840 bsi
->rdstat
[i
][mode_idx
].brdcost
+= RDCOST(x
->rdmult
, x
->rddiv
,
1841 bsi
->rdstat
[i
][mode_idx
].brate
, 0);
1842 bsi
->rdstat
[i
][mode_idx
].brate
+= bsi
->rdstat
[i
][mode_idx
].byrate
;
1843 bsi
->rdstat
[i
][mode_idx
].eobs
= p
->eobs
[i
];
1844 if (num_4x4_blocks_wide
> 1)
1845 bsi
->rdstat
[i
+ 1][mode_idx
].eobs
= p
->eobs
[i
+ 1];
1846 if (num_4x4_blocks_high
> 1)
1847 bsi
->rdstat
[i
+ 2][mode_idx
].eobs
= p
->eobs
[i
+ 2];
1850 if (bsi
->rdstat
[i
][mode_idx
].brdcost
< best_rd
) {
1851 mode_selected
= this_mode
;
1852 best_rd
= bsi
->rdstat
[i
][mode_idx
].brdcost
;
1854 } /*for each 4x4 mode*/
1856 if (best_rd
== INT64_MAX
) {
1858 for (iy
= i
+ 1; iy
< 4; ++iy
)
1859 for (midx
= 0; midx
< INTER_MODES
; ++midx
)
1860 bsi
->rdstat
[iy
][midx
].brdcost
= INT64_MAX
;
1861 bsi
->segment_rd
= INT64_MAX
;
1865 mode_idx
= INTER_OFFSET(mode_selected
);
1866 vpx_memcpy(t_above
, bsi
->rdstat
[i
][mode_idx
].ta
, sizeof(t_above
));
1867 vpx_memcpy(t_left
, bsi
->rdstat
[i
][mode_idx
].tl
, sizeof(t_left
));
1869 set_and_cost_bmi_mvs(cpi
, xd
, i
, mode_selected
, mode_mv
[mode_selected
],
1870 frame_mv
, seg_mvs
[i
], bsi
->ref_mv
, x
->nmvjointcost
,
1873 br
+= bsi
->rdstat
[i
][mode_idx
].brate
;
1874 bd
+= bsi
->rdstat
[i
][mode_idx
].bdist
;
1875 block_sse
+= bsi
->rdstat
[i
][mode_idx
].bsse
;
1876 segmentyrate
+= bsi
->rdstat
[i
][mode_idx
].byrate
;
1877 this_segment_rd
+= bsi
->rdstat
[i
][mode_idx
].brdcost
;
1879 if (this_segment_rd
> bsi
->segment_rd
) {
1881 for (iy
= i
+ 1; iy
< 4; ++iy
)
1882 for (midx
= 0; midx
< INTER_MODES
; ++midx
)
1883 bsi
->rdstat
[iy
][midx
].brdcost
= INT64_MAX
;
1884 bsi
->segment_rd
= INT64_MAX
;
1888 } /* for each label */
1892 bsi
->segment_yrate
= segmentyrate
;
1893 bsi
->segment_rd
= this_segment_rd
;
1894 bsi
->sse
= block_sse
;
1896 // update the coding decisions
1897 for (k
= 0; k
< 4; ++k
)
1898 bsi
->modes
[k
] = mi
->bmi
[k
].as_mode
;
1900 if (bsi
->segment_rd
> best_rd
)
1902 /* set it to the best */
1903 for (i
= 0; i
< 4; i
++) {
1904 mode_idx
= INTER_OFFSET(bsi
->modes
[i
]);
1905 mi
->bmi
[i
].as_mv
[0].as_int
= bsi
->rdstat
[i
][mode_idx
].mvs
[0].as_int
;
1906 if (has_second_ref(mbmi
))
1907 mi
->bmi
[i
].as_mv
[1].as_int
= bsi
->rdstat
[i
][mode_idx
].mvs
[1].as_int
;
1908 x
->plane
[0].eobs
[i
] = bsi
->rdstat
[i
][mode_idx
].eobs
;
1909 mi
->bmi
[i
].as_mode
= bsi
->modes
[i
];
1913 * used to set mbmi->mv.as_int
1915 *returntotrate
= bsi
->r
;
1916 *returndistortion
= bsi
->d
;
1917 *returnyrate
= bsi
->segment_yrate
;
1918 *skippable
= vp9_is_skippable_in_plane(x
, BLOCK_8X8
, 0);
1920 mbmi
->mode
= bsi
->modes
[3];
1922 return bsi
->segment_rd
;
1925 static void estimate_ref_frame_costs(const VP9_COMMON
*cm
,
1926 const MACROBLOCKD
*xd
,
1928 unsigned int *ref_costs_single
,
1929 unsigned int *ref_costs_comp
,
1930 vp9_prob
*comp_mode_p
) {
1931 int seg_ref_active
= vp9_segfeature_active(&cm
->seg
, segment_id
,
1933 if (seg_ref_active
) {
1934 vpx_memset(ref_costs_single
, 0, MAX_REF_FRAMES
* sizeof(*ref_costs_single
));
1935 vpx_memset(ref_costs_comp
, 0, MAX_REF_FRAMES
* sizeof(*ref_costs_comp
));
1938 vp9_prob intra_inter_p
= vp9_get_intra_inter_prob(cm
, xd
);
1939 vp9_prob comp_inter_p
= 128;
1941 if (cm
->reference_mode
== REFERENCE_MODE_SELECT
) {
1942 comp_inter_p
= vp9_get_reference_mode_prob(cm
, xd
);
1943 *comp_mode_p
= comp_inter_p
;
1948 ref_costs_single
[INTRA_FRAME
] = vp9_cost_bit(intra_inter_p
, 0);
1950 if (cm
->reference_mode
!= COMPOUND_REFERENCE
) {
1951 vp9_prob ref_single_p1
= vp9_get_pred_prob_single_ref_p1(cm
, xd
);
1952 vp9_prob ref_single_p2
= vp9_get_pred_prob_single_ref_p2(cm
, xd
);
1953 unsigned int base_cost
= vp9_cost_bit(intra_inter_p
, 1);
1955 if (cm
->reference_mode
== REFERENCE_MODE_SELECT
)
1956 base_cost
+= vp9_cost_bit(comp_inter_p
, 0);
1958 ref_costs_single
[LAST_FRAME
] = ref_costs_single
[GOLDEN_FRAME
] =
1959 ref_costs_single
[ALTREF_FRAME
] = base_cost
;
1960 ref_costs_single
[LAST_FRAME
] += vp9_cost_bit(ref_single_p1
, 0);
1961 ref_costs_single
[GOLDEN_FRAME
] += vp9_cost_bit(ref_single_p1
, 1);
1962 ref_costs_single
[ALTREF_FRAME
] += vp9_cost_bit(ref_single_p1
, 1);
1963 ref_costs_single
[GOLDEN_FRAME
] += vp9_cost_bit(ref_single_p2
, 0);
1964 ref_costs_single
[ALTREF_FRAME
] += vp9_cost_bit(ref_single_p2
, 1);
1966 ref_costs_single
[LAST_FRAME
] = 512;
1967 ref_costs_single
[GOLDEN_FRAME
] = 512;
1968 ref_costs_single
[ALTREF_FRAME
] = 512;
1970 if (cm
->reference_mode
!= SINGLE_REFERENCE
) {
1971 vp9_prob ref_comp_p
= vp9_get_pred_prob_comp_ref_p(cm
, xd
);
1972 unsigned int base_cost
= vp9_cost_bit(intra_inter_p
, 1);
1974 if (cm
->reference_mode
== REFERENCE_MODE_SELECT
)
1975 base_cost
+= vp9_cost_bit(comp_inter_p
, 1);
1977 ref_costs_comp
[LAST_FRAME
] = base_cost
+ vp9_cost_bit(ref_comp_p
, 0);
1978 ref_costs_comp
[GOLDEN_FRAME
] = base_cost
+ vp9_cost_bit(ref_comp_p
, 1);
1980 ref_costs_comp
[LAST_FRAME
] = 512;
1981 ref_costs_comp
[GOLDEN_FRAME
] = 512;
1986 static void store_coding_context(MACROBLOCK
*x
, PICK_MODE_CONTEXT
*ctx
,
1988 int64_t comp_pred_diff
[REFERENCE_MODES
],
1989 const int64_t tx_size_diff
[TX_MODES
],
1990 int64_t best_filter_diff
[SWITCHABLE_FILTER_CONTEXTS
],
1992 MACROBLOCKD
*const xd
= &x
->e_mbd
;
1994 // Take a snapshot of the coding context so it can be
1995 // restored if we decide to encode this way
1996 ctx
->skip
= x
->skip
;
1997 ctx
->skippable
= skippable
;
1998 ctx
->best_mode_index
= mode_index
;
1999 ctx
->mic
= *xd
->mi
[0].src_mi
;
2000 ctx
->single_pred_diff
= (int)comp_pred_diff
[SINGLE_REFERENCE
];
2001 ctx
->comp_pred_diff
= (int)comp_pred_diff
[COMPOUND_REFERENCE
];
2002 ctx
->hybrid_pred_diff
= (int)comp_pred_diff
[REFERENCE_MODE_SELECT
];
2004 vpx_memcpy(ctx
->tx_rd_diff
, tx_size_diff
, sizeof(ctx
->tx_rd_diff
));
2005 vpx_memcpy(ctx
->best_filter_diff
, best_filter_diff
,
2006 sizeof(*best_filter_diff
) * SWITCHABLE_FILTER_CONTEXTS
);
2009 static void setup_buffer_inter(VP9_COMP
*cpi
, MACROBLOCK
*x
,
2010 const TileInfo
*const tile
,
2011 MV_REFERENCE_FRAME ref_frame
,
2012 BLOCK_SIZE block_size
,
2013 int mi_row
, int mi_col
,
2014 int_mv frame_nearest_mv
[MAX_REF_FRAMES
],
2015 int_mv frame_near_mv
[MAX_REF_FRAMES
],
2016 struct buf_2d yv12_mb
[4][MAX_MB_PLANE
]) {
2017 const VP9_COMMON
*cm
= &cpi
->common
;
2018 const YV12_BUFFER_CONFIG
*yv12
= get_ref_frame_buffer(cpi
, ref_frame
);
2019 MACROBLOCKD
*const xd
= &x
->e_mbd
;
2020 MODE_INFO
*const mi
= xd
->mi
[0].src_mi
;
2021 int_mv
*const candidates
= mi
->mbmi
.ref_mvs
[ref_frame
];
2022 const struct scale_factors
*const sf
= &cm
->frame_refs
[ref_frame
- 1].sf
;
2024 // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
2025 // use the UV scaling factors.
2026 vp9_setup_pred_block(xd
, yv12_mb
[ref_frame
], yv12
, mi_row
, mi_col
, sf
, sf
);
2028 // Gets an initial list of candidate vectors from neighbours and orders them
2029 vp9_find_mv_refs(cm
, xd
, tile
, mi
, ref_frame
, candidates
, mi_row
, mi_col
);
2031 // Candidate refinement carried out at encoder and decoder
2032 vp9_find_best_ref_mvs(xd
, cm
->allow_high_precision_mv
, candidates
,
2033 &frame_nearest_mv
[ref_frame
],
2034 &frame_near_mv
[ref_frame
]);
2036 // Further refinement that is encode side only to test the top few candidates
2037 // in full and choose the best as the centre point for subsequent searches.
2038 // The current implementation doesn't support scaling.
2039 if (!vp9_is_scaled(sf
) && block_size
>= BLOCK_8X8
)
2040 vp9_mv_pred(cpi
, x
, yv12_mb
[ref_frame
][0].buf
, yv12
->y_stride
,
2041 ref_frame
, block_size
);
2044 static void single_motion_search(VP9_COMP
*cpi
, MACROBLOCK
*x
,
2046 int mi_row
, int mi_col
,
2047 int_mv
*tmp_mv
, int *rate_mv
) {
2048 MACROBLOCKD
*xd
= &x
->e_mbd
;
2049 const VP9_COMMON
*cm
= &cpi
->common
;
2050 MB_MODE_INFO
*mbmi
= &xd
->mi
[0].src_mi
->mbmi
;
2051 struct buf_2d backup_yv12
[MAX_MB_PLANE
] = {{0, 0}};
2052 int bestsme
= INT_MAX
;
2054 int sadpb
= x
->sadperbit16
;
2056 int ref
= mbmi
->ref_frame
[0];
2057 MV ref_mv
= mbmi
->ref_mvs
[ref
][0].as_mv
;
2059 int tmp_col_min
= x
->mv_col_min
;
2060 int tmp_col_max
= x
->mv_col_max
;
2061 int tmp_row_min
= x
->mv_row_min
;
2062 int tmp_row_max
= x
->mv_row_max
;
2065 const YV12_BUFFER_CONFIG
*scaled_ref_frame
= vp9_get_scaled_ref_frame(cpi
,
2069 pred_mv
[0] = mbmi
->ref_mvs
[ref
][0].as_mv
;
2070 pred_mv
[1] = mbmi
->ref_mvs
[ref
][1].as_mv
;
2071 pred_mv
[2] = x
->pred_mv
[ref
];
2073 if (scaled_ref_frame
) {
2075 // Swap out the reference frame for a version that's been scaled to
2076 // match the resolution of the current frame, allowing the existing
2077 // motion search code to be used without additional modifications.
2078 for (i
= 0; i
< MAX_MB_PLANE
; i
++)
2079 backup_yv12
[i
] = xd
->plane
[i
].pre
[0];
2081 vp9_setup_pre_planes(xd
, 0, scaled_ref_frame
, mi_row
, mi_col
, NULL
);
2084 vp9_set_mv_search_range(x
, &ref_mv
);
2086 // Work out the size of the first step in the mv step search.
2087 // 0 here is maximum length first step. 1 is MAX >> 1 etc.
2088 if (cpi
->sf
.mv
.auto_mv_step_size
&& cm
->show_frame
) {
2089 // Take wtd average of the step_params based on the last frame's
2090 // max mv magnitude and that based on the best ref mvs of the current
2091 // block for the given reference.
2092 step_param
= (vp9_init_search_range(x
->max_mv_context
[ref
]) +
2093 cpi
->mv_step_param
) / 2;
2095 step_param
= cpi
->mv_step_param
;
2098 if (cpi
->sf
.adaptive_motion_search
&& bsize
< BLOCK_64X64
) {
2099 int boffset
= 2 * (b_width_log2_lookup
[BLOCK_64X64
] -
2100 MIN(b_height_log2_lookup
[bsize
], b_width_log2_lookup
[bsize
]));
2101 step_param
= MAX(step_param
, boffset
);
2104 if (cpi
->sf
.adaptive_motion_search
) {
2105 int bwl
= b_width_log2_lookup
[bsize
];
2106 int bhl
= b_height_log2_lookup
[bsize
];
2108 int tlevel
= x
->pred_mv_sad
[ref
] >> (bwl
+ bhl
+ 4);
2113 for (i
= LAST_FRAME
; i
<= ALTREF_FRAME
&& cm
->show_frame
; ++i
) {
2114 if ((x
->pred_mv_sad
[ref
] >> 3) > x
->pred_mv_sad
[i
]) {
2115 x
->pred_mv
[ref
].row
= 0;
2116 x
->pred_mv
[ref
].col
= 0;
2117 tmp_mv
->as_int
= INVALID_MV
;
2119 if (scaled_ref_frame
) {
2121 for (i
= 0; i
< MAX_MB_PLANE
; i
++)
2122 xd
->plane
[i
].pre
[0] = backup_yv12
[i
];
2129 mvp_full
= pred_mv
[x
->mv_best_ref_index
[ref
]];
2134 bestsme
= vp9_full_pixel_search(cpi
, x
, bsize
, &mvp_full
, step_param
, sadpb
,
2135 cond_cost_list(cpi
, cost_list
),
2136 &ref_mv
, &tmp_mv
->as_mv
, INT_MAX
, 1);
2138 x
->mv_col_min
= tmp_col_min
;
2139 x
->mv_col_max
= tmp_col_max
;
2140 x
->mv_row_min
= tmp_row_min
;
2141 x
->mv_row_max
= tmp_row_max
;
2143 if (bestsme
< INT_MAX
) {
2144 int dis
; /* TODO: use dis in distortion calculation later. */
2145 cpi
->find_fractional_mv_step(x
, &tmp_mv
->as_mv
, &ref_mv
,
2146 cm
->allow_high_precision_mv
,
2148 &cpi
->fn_ptr
[bsize
],
2149 cpi
->sf
.mv
.subpel_force_stop
,
2150 cpi
->sf
.mv
.subpel_iters_per_step
,
2151 cond_cost_list(cpi
, cost_list
),
2152 x
->nmvjointcost
, x
->mvcost
,
2153 &dis
, &x
->pred_sse
[ref
], NULL
, 0, 0);
2155 *rate_mv
= vp9_mv_bit_cost(&tmp_mv
->as_mv
, &ref_mv
,
2156 x
->nmvjointcost
, x
->mvcost
, MV_COST_WEIGHT
);
2158 if (cpi
->sf
.adaptive_motion_search
)
2159 x
->pred_mv
[ref
] = tmp_mv
->as_mv
;
2161 if (scaled_ref_frame
) {
2163 for (i
= 0; i
< MAX_MB_PLANE
; i
++)
2164 xd
->plane
[i
].pre
[0] = backup_yv12
[i
];
2168 static void joint_motion_search(VP9_COMP
*cpi
, MACROBLOCK
*x
,
2171 int mi_row
, int mi_col
,
2172 int_mv single_newmv
[MAX_REF_FRAMES
],
2174 const int pw
= 4 * num_4x4_blocks_wide_lookup
[bsize
];
2175 const int ph
= 4 * num_4x4_blocks_high_lookup
[bsize
];
2176 MACROBLOCKD
*xd
= &x
->e_mbd
;
2177 MB_MODE_INFO
*mbmi
= &xd
->mi
[0].src_mi
->mbmi
;
2178 const int refs
[2] = { mbmi
->ref_frame
[0],
2179 mbmi
->ref_frame
[1] < 0 ? 0 : mbmi
->ref_frame
[1] };
2182 // Prediction buffer from second frame.
2183 #if CONFIG_VP9_HIGHBITDEPTH
2184 uint8_t *second_pred
;
2185 uint8_t *second_pred_alloc
;
2187 uint8_t *second_pred
= vpx_memalign(16, pw
* ph
* sizeof(uint8_t));
2188 #endif // CONFIG_VP9_HIGHBITDEPTH
2189 const InterpKernel
*kernel
= vp9_get_interp_kernel(mbmi
->interp_filter
);
2191 // Do joint motion search in compound mode to get more accurate mv.
2192 struct buf_2d backup_yv12
[2][MAX_MB_PLANE
];
2193 struct buf_2d scaled_first_yv12
= xd
->plane
[0].pre
[0];
2194 int last_besterr
[2] = {INT_MAX
, INT_MAX
};
2195 const YV12_BUFFER_CONFIG
*const scaled_ref_frame
[2] = {
2196 vp9_get_scaled_ref_frame(cpi
, mbmi
->ref_frame
[0]),
2197 vp9_get_scaled_ref_frame(cpi
, mbmi
->ref_frame
[1])
2199 #if CONFIG_VP9_HIGHBITDEPTH
2200 if (xd
->cur_buf
->flags
& YV12_FLAG_HIGHBITDEPTH
) {
2201 second_pred_alloc
= vpx_memalign(16, pw
* ph
* sizeof(uint16_t));
2202 second_pred
= CONVERT_TO_BYTEPTR(second_pred_alloc
);
2204 second_pred_alloc
= vpx_memalign(16, pw
* ph
* sizeof(uint8_t));
2205 second_pred
= second_pred_alloc
;
2207 #endif // CONFIG_VP9_HIGHBITDEPTH
2209 for (ref
= 0; ref
< 2; ++ref
) {
2210 ref_mv
[ref
] = mbmi
->ref_mvs
[refs
[ref
]][0];
2212 if (scaled_ref_frame
[ref
]) {
2214 // Swap out the reference frame for a version that's been scaled to
2215 // match the resolution of the current frame, allowing the existing
2216 // motion search code to be used without additional modifications.
2217 for (i
= 0; i
< MAX_MB_PLANE
; i
++)
2218 backup_yv12
[ref
][i
] = xd
->plane
[i
].pre
[ref
];
2219 vp9_setup_pre_planes(xd
, ref
, scaled_ref_frame
[ref
], mi_row
, mi_col
,
2223 frame_mv
[refs
[ref
]].as_int
= single_newmv
[refs
[ref
]].as_int
;
2226 // Allow joint search multiple times iteratively for each ref frame
2227 // and break out the search loop if it couldn't find better mv.
2228 for (ite
= 0; ite
< 4; ite
++) {
2229 struct buf_2d ref_yv12
[2];
2230 int bestsme
= INT_MAX
;
2231 int sadpb
= x
->sadperbit16
;
2233 int search_range
= 3;
2235 int tmp_col_min
= x
->mv_col_min
;
2236 int tmp_col_max
= x
->mv_col_max
;
2237 int tmp_row_min
= x
->mv_row_min
;
2238 int tmp_row_max
= x
->mv_row_max
;
2241 // Initialized here because of compiler problem in Visual Studio.
2242 ref_yv12
[0] = xd
->plane
[0].pre
[0];
2243 ref_yv12
[1] = xd
->plane
[0].pre
[1];
2245 // Get pred block from second frame.
2246 #if CONFIG_VP9_HIGHBITDEPTH
2247 if (xd
->cur_buf
->flags
& YV12_FLAG_HIGHBITDEPTH
) {
2248 vp9_highbd_build_inter_predictor(ref_yv12
[!id
].buf
,
2249 ref_yv12
[!id
].stride
,
2251 &frame_mv
[refs
[!id
]].as_mv
,
2252 &xd
->block_refs
[!id
]->sf
,
2254 kernel
, MV_PRECISION_Q3
,
2255 mi_col
* MI_SIZE
, mi_row
* MI_SIZE
,
2258 vp9_build_inter_predictor(ref_yv12
[!id
].buf
,
2259 ref_yv12
[!id
].stride
,
2261 &frame_mv
[refs
[!id
]].as_mv
,
2262 &xd
->block_refs
[!id
]->sf
,
2264 kernel
, MV_PRECISION_Q3
,
2265 mi_col
* MI_SIZE
, mi_row
* MI_SIZE
);
2268 vp9_build_inter_predictor(ref_yv12
[!id
].buf
,
2269 ref_yv12
[!id
].stride
,
2271 &frame_mv
[refs
[!id
]].as_mv
,
2272 &xd
->block_refs
[!id
]->sf
,
2274 kernel
, MV_PRECISION_Q3
,
2275 mi_col
* MI_SIZE
, mi_row
* MI_SIZE
);
2276 #endif // CONFIG_VP9_HIGHBITDEPTH
2278 // Compound motion search on first ref frame.
2280 xd
->plane
[0].pre
[0] = ref_yv12
[id
];
2281 vp9_set_mv_search_range(x
, &ref_mv
[id
].as_mv
);
2283 // Use mv result from single mode as mvp.
2284 tmp_mv
= frame_mv
[refs
[id
]].as_mv
;
2289 // Small-range full-pixel motion search
2290 bestsme
= vp9_refining_search_8p_c(x
, &tmp_mv
, sadpb
,
2292 &cpi
->fn_ptr
[bsize
],
2293 &ref_mv
[id
].as_mv
, second_pred
);
2294 if (bestsme
< INT_MAX
)
2295 bestsme
= vp9_get_mvpred_av_var(x
, &tmp_mv
, &ref_mv
[id
].as_mv
,
2296 second_pred
, &cpi
->fn_ptr
[bsize
], 1);
2298 x
->mv_col_min
= tmp_col_min
;
2299 x
->mv_col_max
= tmp_col_max
;
2300 x
->mv_row_min
= tmp_row_min
;
2301 x
->mv_row_max
= tmp_row_max
;
2303 if (bestsme
< INT_MAX
) {
2304 int dis
; /* TODO: use dis in distortion calculation later. */
2306 bestsme
= cpi
->find_fractional_mv_step(
2309 cpi
->common
.allow_high_precision_mv
,
2311 &cpi
->fn_ptr
[bsize
],
2312 0, cpi
->sf
.mv
.subpel_iters_per_step
,
2314 x
->nmvjointcost
, x
->mvcost
,
2315 &dis
, &sse
, second_pred
,
2320 xd
->plane
[0].pre
[0] = scaled_first_yv12
;
2322 if (bestsme
< last_besterr
[id
]) {
2323 frame_mv
[refs
[id
]].as_mv
= tmp_mv
;
2324 last_besterr
[id
] = bestsme
;
2332 for (ref
= 0; ref
< 2; ++ref
) {
2333 if (scaled_ref_frame
[ref
]) {
2334 // restore the predictor
2336 for (i
= 0; i
< MAX_MB_PLANE
; i
++)
2337 xd
->plane
[i
].pre
[ref
] = backup_yv12
[ref
][i
];
2340 *rate_mv
+= vp9_mv_bit_cost(&frame_mv
[refs
[ref
]].as_mv
,
2341 &mbmi
->ref_mvs
[refs
[ref
]][0].as_mv
,
2342 x
->nmvjointcost
, x
->mvcost
, MV_COST_WEIGHT
);
2345 #if CONFIG_VP9_HIGHBITDEPTH
2346 vpx_free(second_pred_alloc
);
2348 vpx_free(second_pred
);
2349 #endif // CONFIG_VP9_HIGHBITDEPTH
2352 static INLINE
void restore_dst_buf(MACROBLOCKD
*xd
,
2353 uint8_t *orig_dst
[MAX_MB_PLANE
],
2354 int orig_dst_stride
[MAX_MB_PLANE
]) {
2356 for (i
= 0; i
< MAX_MB_PLANE
; i
++) {
2357 xd
->plane
[i
].dst
.buf
= orig_dst
[i
];
2358 xd
->plane
[i
].dst
.stride
= orig_dst_stride
[i
];
2362 // In some situations we want to discount tha pparent cost of a new motion
2363 // vector. Where there is a subtle motion field and especially where there is
2364 // low spatial complexity then it can be hard to cover the cost of a new motion
2365 // vector in a single block, even if that motion vector reduces distortion.
2366 // However, once established that vector may be usable through the nearest and
2367 // near mv modes to reduce distortion in subsequent blocks and also improve
2369 static int discount_newmv_test(const VP9_COMP
*cpi
,
2372 int_mv (*mode_mv
)[MAX_REF_FRAMES
],
2374 return (!cpi
->rc
.is_src_frame_alt_ref
&&
2375 (this_mode
== NEWMV
) &&
2376 (this_mv
.as_int
!= 0) &&
2377 ((mode_mv
[NEARESTMV
][ref_frame
].as_int
== 0) ||
2378 (mode_mv
[NEARESTMV
][ref_frame
].as_int
== INVALID_MV
)) &&
2379 ((mode_mv
[NEARMV
][ref_frame
].as_int
== 0) ||
2380 (mode_mv
[NEARMV
][ref_frame
].as_int
== INVALID_MV
)));
2383 static int64_t handle_inter_mode(VP9_COMP
*cpi
, MACROBLOCK
*x
,
2385 int64_t txfm_cache
[],
2386 int *rate2
, int64_t *distortion
,
2388 int *rate_y
, int *rate_uv
,
2390 int_mv (*mode_mv
)[MAX_REF_FRAMES
],
2391 int mi_row
, int mi_col
,
2392 int_mv single_newmv
[MAX_REF_FRAMES
],
2393 INTERP_FILTER (*single_filter
)[MAX_REF_FRAMES
],
2394 int (*single_skippable
)[MAX_REF_FRAMES
],
2396 const int64_t ref_best_rd
,
2397 int64_t *mask_filter
,
2398 int64_t filter_cache
[]) {
2399 VP9_COMMON
*cm
= &cpi
->common
;
2400 MACROBLOCKD
*xd
= &x
->e_mbd
;
2401 MB_MODE_INFO
*mbmi
= &xd
->mi
[0].src_mi
->mbmi
;
2402 const int is_comp_pred
= has_second_ref(mbmi
);
2403 const int this_mode
= mbmi
->mode
;
2404 int_mv
*frame_mv
= mode_mv
[this_mode
];
2406 int refs
[2] = { mbmi
->ref_frame
[0],
2407 (mbmi
->ref_frame
[1] < 0 ? 0 : mbmi
->ref_frame
[1]) };
2409 #if CONFIG_VP9_HIGHBITDEPTH
2410 DECLARE_ALIGNED_ARRAY(16, uint16_t, tmp_buf16
, MAX_MB_PLANE
* 64 * 64);
2411 DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf8
, MAX_MB_PLANE
* 64 * 64);
2414 DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf
, MAX_MB_PLANE
* 64 * 64);
2415 #endif // CONFIG_VP9_HIGHBITDEPTH
2416 int pred_exists
= 0;
2418 int64_t rd
, tmp_rd
, best_rd
= INT64_MAX
;
2419 int best_needs_copy
= 0;
2420 uint8_t *orig_dst
[MAX_MB_PLANE
];
2421 int orig_dst_stride
[MAX_MB_PLANE
];
2423 INTERP_FILTER best_filter
= SWITCHABLE
;
2424 uint8_t skip_txfm
[MAX_MB_PLANE
<< 2] = {0};
2425 int64_t bsse
[MAX_MB_PLANE
<< 2] = {0};
2427 int bsl
= mi_width_log2_lookup
[bsize
];
2428 int pred_filter_search
= cpi
->sf
.cb_pred_filter_search
?
2429 (((mi_row
+ mi_col
) >> bsl
) +
2430 get_chessboard_index(cm
->current_video_frame
)) & 0x1 : 0;
2432 int skip_txfm_sb
= 0;
2433 int64_t skip_sse_sb
= INT64_MAX
;
2434 int64_t distortion_y
= 0, distortion_uv
= 0;
2436 #if CONFIG_VP9_HIGHBITDEPTH
2437 if (xd
->cur_buf
->flags
& YV12_FLAG_HIGHBITDEPTH
) {
2438 tmp_buf
= CONVERT_TO_BYTEPTR(tmp_buf16
);
2442 #endif // CONFIG_VP9_HIGHBITDEPTH
2444 if (pred_filter_search
) {
2445 INTERP_FILTER af
= SWITCHABLE
, lf
= SWITCHABLE
;
2446 if (xd
->up_available
)
2447 af
= xd
->mi
[-xd
->mi_stride
].src_mi
->mbmi
.interp_filter
;
2448 if (xd
->left_available
)
2449 lf
= xd
->mi
[-1].src_mi
->mbmi
.interp_filter
;
2451 if ((this_mode
!= NEWMV
) || (af
== lf
))
2456 if (frame_mv
[refs
[0]].as_int
== INVALID_MV
||
2457 frame_mv
[refs
[1]].as_int
== INVALID_MV
)
2460 if (cpi
->sf
.adaptive_mode_search
) {
2461 if (single_filter
[this_mode
][refs
[0]] ==
2462 single_filter
[this_mode
][refs
[1]])
2463 best_filter
= single_filter
[this_mode
][refs
[0]];
2467 if (this_mode
== NEWMV
) {
2470 // Initialize mv using single prediction mode result.
2471 frame_mv
[refs
[0]].as_int
= single_newmv
[refs
[0]].as_int
;
2472 frame_mv
[refs
[1]].as_int
= single_newmv
[refs
[1]].as_int
;
2474 if (cpi
->sf
.comp_inter_joint_search_thresh
<= bsize
) {
2475 joint_motion_search(cpi
, x
, bsize
, frame_mv
,
2476 mi_row
, mi_col
, single_newmv
, &rate_mv
);
2478 rate_mv
= vp9_mv_bit_cost(&frame_mv
[refs
[0]].as_mv
,
2479 &mbmi
->ref_mvs
[refs
[0]][0].as_mv
,
2480 x
->nmvjointcost
, x
->mvcost
, MV_COST_WEIGHT
);
2481 rate_mv
+= vp9_mv_bit_cost(&frame_mv
[refs
[1]].as_mv
,
2482 &mbmi
->ref_mvs
[refs
[1]][0].as_mv
,
2483 x
->nmvjointcost
, x
->mvcost
, MV_COST_WEIGHT
);
2488 single_motion_search(cpi
, x
, bsize
, mi_row
, mi_col
,
2490 if (tmp_mv
.as_int
== INVALID_MV
)
2493 frame_mv
[refs
[0]].as_int
=
2494 xd
->mi
[0].src_mi
->bmi
[0].as_mv
[0].as_int
= tmp_mv
.as_int
;
2495 single_newmv
[refs
[0]].as_int
= tmp_mv
.as_int
;
2497 // Estimate the rate implications of a new mv but discount this
2498 // under certain circumstances where we want to help initiate a weak
2499 // motion field, where the distortion gain for a single block may not
2500 // be enough to overcome the cost of a new mv.
2501 if (discount_newmv_test(cpi
, this_mode
, tmp_mv
, mode_mv
, refs
[0])) {
2502 *rate2
+= MAX((rate_mv
/ NEW_MV_DISCOUNT_FACTOR
), 1);
2509 for (i
= 0; i
< is_comp_pred
+ 1; ++i
) {
2510 cur_mv
[i
] = frame_mv
[refs
[i
]];
2511 // Clip "next_nearest" so that it does not extend to far out of image
2512 if (this_mode
!= NEWMV
)
2513 clamp_mv2(&cur_mv
[i
].as_mv
, xd
);
2515 if (mv_check_bounds(x
, &cur_mv
[i
].as_mv
))
2517 mbmi
->mv
[i
].as_int
= cur_mv
[i
].as_int
;
2520 // do first prediction into the destination buffer. Do the next
2521 // prediction into a temporary buffer. Then keep track of which one
2522 // of these currently holds the best predictor, and use the other
2523 // one for future predictions. In the end, copy from tmp_buf to
2524 // dst if necessary.
2525 for (i
= 0; i
< MAX_MB_PLANE
; i
++) {
2526 orig_dst
[i
] = xd
->plane
[i
].dst
.buf
;
2527 orig_dst_stride
[i
] = xd
->plane
[i
].dst
.stride
;
2530 // We don't include the cost of the second reference here, because there
2531 // are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other
2532 // words if you present them in that order, the second one is always known
2533 // if the first is known.
2535 // Under some circumstances we discount the cost of new mv mode to encourage
2536 // initiation of a motion field.
2537 if (discount_newmv_test(cpi
, this_mode
, frame_mv
[refs
[0]],
2538 mode_mv
, refs
[0])) {
2539 *rate2
+= MIN(cost_mv_ref(cpi
, this_mode
, mbmi
->mode_context
[refs
[0]]),
2540 cost_mv_ref(cpi
, NEARESTMV
, mbmi
->mode_context
[refs
[0]]));
2542 *rate2
+= cost_mv_ref(cpi
, this_mode
, mbmi
->mode_context
[refs
[0]]);
2545 if (RDCOST(x
->rdmult
, x
->rddiv
, *rate2
, 0) > ref_best_rd
&&
2546 mbmi
->mode
!= NEARESTMV
)
2550 // Are all MVs integer pel for Y and UV
2551 intpel_mv
= !mv_has_subpel(&mbmi
->mv
[0].as_mv
);
2553 intpel_mv
&= !mv_has_subpel(&mbmi
->mv
[1].as_mv
);
2555 // Search for best switchable filter by checking the variance of
2556 // pred error irrespective of whether the filter will be used
2557 for (i
= 0; i
< SWITCHABLE_FILTER_CONTEXTS
; ++i
)
2558 filter_cache
[i
] = INT64_MAX
;
2560 if (cm
->interp_filter
!= BILINEAR
) {
2561 if (x
->source_variance
< cpi
->sf
.disable_filter_search_var_thresh
) {
2562 best_filter
= EIGHTTAP
;
2563 } else if (best_filter
== SWITCHABLE
) {
2565 int tmp_rate_sum
= 0;
2566 int64_t tmp_dist_sum
= 0;
2568 for (i
= 0; i
< SWITCHABLE_FILTERS
; ++i
) {
2571 int tmp_skip_sb
= 0;
2572 int64_t tmp_skip_sse
= INT64_MAX
;
2574 mbmi
->interp_filter
= i
;
2575 rs
= vp9_get_switchable_rate(cpi
, xd
);
2576 rs_rd
= RDCOST(x
->rdmult
, x
->rddiv
, rs
, 0);
2578 if (i
> 0 && intpel_mv
) {
2579 rd
= RDCOST(x
->rdmult
, x
->rddiv
, tmp_rate_sum
, tmp_dist_sum
);
2580 filter_cache
[i
] = rd
;
2581 filter_cache
[SWITCHABLE_FILTERS
] =
2582 MIN(filter_cache
[SWITCHABLE_FILTERS
], rd
+ rs_rd
);
2583 if (cm
->interp_filter
== SWITCHABLE
)
2585 *mask_filter
= MAX(*mask_filter
, rd
);
2588 int64_t dist_sum
= 0;
2589 if (i
> 0 && cpi
->sf
.adaptive_interp_filter_search
&&
2590 (cpi
->sf
.interp_filter_search_mask
& (1 << i
))) {
2592 dist_sum
= INT64_MAX
;
2596 if ((cm
->interp_filter
== SWITCHABLE
&&
2597 (!i
|| best_needs_copy
)) ||
2598 (cm
->interp_filter
!= SWITCHABLE
&&
2599 (cm
->interp_filter
== mbmi
->interp_filter
||
2600 (i
== 0 && intpel_mv
)))) {
2601 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2603 for (j
= 0; j
< MAX_MB_PLANE
; j
++) {
2604 xd
->plane
[j
].dst
.buf
= tmp_buf
+ j
* 64 * 64;
2605 xd
->plane
[j
].dst
.stride
= 64;
2608 vp9_build_inter_predictors_sb(xd
, mi_row
, mi_col
, bsize
);
2609 model_rd_for_sb(cpi
, bsize
, x
, xd
, &rate_sum
, &dist_sum
,
2610 &tmp_skip_sb
, &tmp_skip_sse
);
2612 rd
= RDCOST(x
->rdmult
, x
->rddiv
, rate_sum
, dist_sum
);
2613 filter_cache
[i
] = rd
;
2614 filter_cache
[SWITCHABLE_FILTERS
] =
2615 MIN(filter_cache
[SWITCHABLE_FILTERS
], rd
+ rs_rd
);
2616 if (cm
->interp_filter
== SWITCHABLE
)
2618 *mask_filter
= MAX(*mask_filter
, rd
);
2620 if (i
== 0 && intpel_mv
) {
2621 tmp_rate_sum
= rate_sum
;
2622 tmp_dist_sum
= dist_sum
;
2626 if (i
== 0 && cpi
->sf
.use_rd_breakout
&& ref_best_rd
< INT64_MAX
) {
2627 if (rd
/ 2 > ref_best_rd
) {
2628 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2632 newbest
= i
== 0 || rd
< best_rd
;
2636 best_filter
= mbmi
->interp_filter
;
2637 if (cm
->interp_filter
== SWITCHABLE
&& i
&& !intpel_mv
)
2638 best_needs_copy
= !best_needs_copy
;
2641 if ((cm
->interp_filter
== SWITCHABLE
&& newbest
) ||
2642 (cm
->interp_filter
!= SWITCHABLE
&&
2643 cm
->interp_filter
== mbmi
->interp_filter
)) {
2647 skip_txfm_sb
= tmp_skip_sb
;
2648 skip_sse_sb
= tmp_skip_sse
;
2649 vpx_memcpy(skip_txfm
, x
->skip_txfm
, sizeof(skip_txfm
));
2650 vpx_memcpy(bsse
, x
->bsse
, sizeof(bsse
));
2653 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2656 // Set the appropriate filter
2657 mbmi
->interp_filter
= cm
->interp_filter
!= SWITCHABLE
?
2658 cm
->interp_filter
: best_filter
;
2659 rs
= cm
->interp_filter
== SWITCHABLE
? vp9_get_switchable_rate(cpi
, xd
) : 0;
2662 if (best_needs_copy
) {
2663 // again temporarily set the buffers to local memory to prevent a memcpy
2664 for (i
= 0; i
< MAX_MB_PLANE
; i
++) {
2665 xd
->plane
[i
].dst
.buf
= tmp_buf
+ i
* 64 * 64;
2666 xd
->plane
[i
].dst
.stride
= 64;
2669 rd
= tmp_rd
+ RDCOST(x
->rdmult
, x
->rddiv
, rs
, 0);
2673 // Handles the special case when a filter that is not in the
2674 // switchable list (ex. bilinear) is indicated at the frame level, or
2675 // skip condition holds.
2676 vp9_build_inter_predictors_sb(xd
, mi_row
, mi_col
, bsize
);
2677 model_rd_for_sb(cpi
, bsize
, x
, xd
, &tmp_rate
, &tmp_dist
,
2678 &skip_txfm_sb
, &skip_sse_sb
);
2679 rd
= RDCOST(x
->rdmult
, x
->rddiv
, rs
+ tmp_rate
, tmp_dist
);
2680 vpx_memcpy(skip_txfm
, x
->skip_txfm
, sizeof(skip_txfm
));
2681 vpx_memcpy(bsse
, x
->bsse
, sizeof(bsse
));
2685 single_filter
[this_mode
][refs
[0]] = mbmi
->interp_filter
;
2687 if (cpi
->sf
.adaptive_mode_search
)
2689 if (single_skippable
[this_mode
][refs
[0]] &&
2690 single_skippable
[this_mode
][refs
[1]])
2691 vpx_memset(skip_txfm
, 1, sizeof(skip_txfm
));
2693 if (cpi
->sf
.use_rd_breakout
&& ref_best_rd
< INT64_MAX
) {
2694 // if current pred_error modeled rd is substantially more than the best
2695 // so far, do not bother doing full rd
2696 if (rd
/ 2 > ref_best_rd
) {
2697 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2702 if (cm
->interp_filter
== SWITCHABLE
)
2705 vpx_memcpy(x
->skip_txfm
, skip_txfm
, sizeof(skip_txfm
));
2706 vpx_memcpy(x
->bsse
, bsse
, sizeof(bsse
));
2708 if (!skip_txfm_sb
) {
2709 int skippable_y
, skippable_uv
;
2710 int64_t sseuv
= INT64_MAX
;
2711 int64_t rdcosty
= INT64_MAX
;
2713 // Y cost and distortion
2714 vp9_subtract_plane(x
, bsize
, 0);
2715 super_block_yrd(cpi
, x
, rate_y
, &distortion_y
, &skippable_y
, psse
,
2716 bsize
, txfm_cache
, ref_best_rd
);
2718 if (*rate_y
== INT_MAX
) {
2720 *distortion
= INT64_MAX
;
2721 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2726 *distortion
+= distortion_y
;
2728 rdcosty
= RDCOST(x
->rdmult
, x
->rddiv
, *rate2
, *distortion
);
2729 rdcosty
= MIN(rdcosty
, RDCOST(x
->rdmult
, x
->rddiv
, 0, *psse
));
2731 if (!super_block_uvrd(cpi
, x
, rate_uv
, &distortion_uv
, &skippable_uv
,
2732 &sseuv
, bsize
, ref_best_rd
- rdcosty
)) {
2734 *distortion
= INT64_MAX
;
2735 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2741 *distortion
+= distortion_uv
;
2742 *skippable
= skippable_y
&& skippable_uv
;
2747 // The cost of skip bit needs to be added.
2748 *rate2
+= vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 1);
2750 *distortion
= skip_sse_sb
;
2754 single_skippable
[this_mode
][refs
[0]] = *skippable
;
2756 restore_dst_buf(xd
, orig_dst
, orig_dst_stride
);
2757 return 0; // The rate-distortion cost will be re-calculated by caller.
2760 void vp9_rd_pick_intra_mode_sb(VP9_COMP
*cpi
, MACROBLOCK
*x
,
2761 RD_COST
*rd_cost
, BLOCK_SIZE bsize
,
2762 PICK_MODE_CONTEXT
*ctx
, int64_t best_rd
) {
2763 VP9_COMMON
*const cm
= &cpi
->common
;
2764 MACROBLOCKD
*const xd
= &x
->e_mbd
;
2765 struct macroblockd_plane
*const pd
= xd
->plane
;
2766 int rate_y
= 0, rate_uv
= 0, rate_y_tokenonly
= 0, rate_uv_tokenonly
= 0;
2767 int y_skip
= 0, uv_skip
= 0;
2768 int64_t dist_y
= 0, dist_uv
= 0, tx_cache
[TX_MODES
] = { 0 };
2769 TX_SIZE max_uv_tx_size
;
2772 xd
->mi
[0].src_mi
->mbmi
.ref_frame
[0] = INTRA_FRAME
;
2773 xd
->mi
[0].src_mi
->mbmi
.ref_frame
[1] = NONE
;
2775 if (bsize
>= BLOCK_8X8
) {
2776 if (rd_pick_intra_sby_mode(cpi
, x
, &rate_y
, &rate_y_tokenonly
,
2777 &dist_y
, &y_skip
, bsize
, tx_cache
,
2778 best_rd
) >= best_rd
) {
2779 rd_cost
->rate
= INT_MAX
;
2784 if (rd_pick_intra_sub_8x8_y_mode(cpi
, x
, &rate_y
, &rate_y_tokenonly
,
2785 &dist_y
, best_rd
) >= best_rd
) {
2786 rd_cost
->rate
= INT_MAX
;
2790 max_uv_tx_size
= get_uv_tx_size_impl(xd
->mi
[0].src_mi
->mbmi
.tx_size
, bsize
,
2791 pd
[1].subsampling_x
,
2792 pd
[1].subsampling_y
);
2793 rd_pick_intra_sbuv_mode(cpi
, x
, ctx
, &rate_uv
, &rate_uv_tokenonly
,
2794 &dist_uv
, &uv_skip
, MAX(BLOCK_8X8
, bsize
),
2797 if (y_skip
&& uv_skip
) {
2798 rd_cost
->rate
= rate_y
+ rate_uv
- rate_y_tokenonly
- rate_uv_tokenonly
+
2799 vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 1);
2800 rd_cost
->dist
= dist_y
+ dist_uv
;
2801 vp9_zero(ctx
->tx_rd_diff
);
2804 rd_cost
->rate
= rate_y
+ rate_uv
+
2805 vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 0);
2806 rd_cost
->dist
= dist_y
+ dist_uv
;
2807 if (cpi
->sf
.tx_size_search_method
== USE_FULL_RD
)
2808 for (i
= 0; i
< TX_MODES
; i
++) {
2809 if (tx_cache
[i
] < INT64_MAX
&& tx_cache
[cm
->tx_mode
] < INT64_MAX
)
2810 ctx
->tx_rd_diff
[i
] = tx_cache
[i
] - tx_cache
[cm
->tx_mode
];
2812 ctx
->tx_rd_diff
[i
] = 0;
2816 ctx
->mic
= *xd
->mi
[0].src_mi
;
2817 rd_cost
->rdcost
= RDCOST(x
->rdmult
, x
->rddiv
, rd_cost
->rate
, rd_cost
->dist
);
2820 void vp9_rd_pick_inter_mode_sb(VP9_COMP
*cpi
,
2821 TileDataEnc
*tile_data
,
2823 int mi_row
, int mi_col
,
2824 RD_COST
*rd_cost
, BLOCK_SIZE bsize
,
2825 PICK_MODE_CONTEXT
*ctx
,
2826 int64_t best_rd_so_far
) {
2827 VP9_COMMON
*const cm
= &cpi
->common
;
2828 TileInfo
*const tile_info
= &tile_data
->tile_info
;
2829 RD_OPT
*const rd_opt
= &cpi
->rd
;
2830 SPEED_FEATURES
*const sf
= &cpi
->sf
;
2831 MACROBLOCKD
*const xd
= &x
->e_mbd
;
2832 MB_MODE_INFO
*const mbmi
= &xd
->mi
[0].src_mi
->mbmi
;
2833 const struct segmentation
*const seg
= &cm
->seg
;
2834 PREDICTION_MODE this_mode
;
2835 MV_REFERENCE_FRAME ref_frame
, second_ref_frame
;
2836 unsigned char segment_id
= mbmi
->segment_id
;
2837 int comp_pred
, i
, k
;
2838 int_mv frame_mv
[MB_MODE_COUNT
][MAX_REF_FRAMES
];
2839 struct buf_2d yv12_mb
[4][MAX_MB_PLANE
];
2840 int_mv single_newmv
[MAX_REF_FRAMES
] = { { 0 } };
2841 INTERP_FILTER single_inter_filter
[MB_MODE_COUNT
][MAX_REF_FRAMES
];
2842 int single_skippable
[MB_MODE_COUNT
][MAX_REF_FRAMES
];
2843 static const int flag_list
[4] = { 0, VP9_LAST_FLAG
, VP9_GOLD_FLAG
,
2845 int64_t best_rd
= best_rd_so_far
;
2846 int64_t best_tx_rd
[TX_MODES
];
2847 int64_t best_tx_diff
[TX_MODES
];
2848 int64_t best_pred_diff
[REFERENCE_MODES
];
2849 int64_t best_pred_rd
[REFERENCE_MODES
];
2850 int64_t best_filter_rd
[SWITCHABLE_FILTER_CONTEXTS
];
2851 int64_t best_filter_diff
[SWITCHABLE_FILTER_CONTEXTS
];
2852 MB_MODE_INFO best_mbmode
;
2853 int best_mode_skippable
= 0;
2854 int midx
, best_mode_index
= -1;
2855 unsigned int ref_costs_single
[MAX_REF_FRAMES
], ref_costs_comp
[MAX_REF_FRAMES
];
2856 vp9_prob comp_mode_p
;
2857 int64_t best_intra_rd
= INT64_MAX
;
2858 unsigned int best_pred_sse
= UINT_MAX
;
2859 PREDICTION_MODE best_intra_mode
= DC_PRED
;
2860 int rate_uv_intra
[TX_SIZES
], rate_uv_tokenonly
[TX_SIZES
];
2861 int64_t dist_uv
[TX_SIZES
];
2862 int skip_uv
[TX_SIZES
];
2863 PREDICTION_MODE mode_uv
[TX_SIZES
];
2864 const int intra_cost_penalty
= vp9_get_intra_cost_penalty(
2865 cm
->base_qindex
, cm
->y_dc_delta_q
, cm
->bit_depth
);
2867 uint8_t ref_frame_skip_mask
[2] = { 0 };
2868 uint16_t mode_skip_mask
[MAX_REF_FRAMES
] = { 0 };
2869 int mode_skip_start
= sf
->mode_skip_start
+ 1;
2870 const int *const rd_threshes
= rd_opt
->threshes
[segment_id
][bsize
];
2871 const int *const rd_thresh_freq_fact
= tile_data
->thresh_freq_fact
[bsize
];
2872 int64_t mode_threshold
[MAX_MODES
];
2873 int *mode_map
= tile_data
->mode_map
[bsize
];
2874 const int mode_search_skip_flags
= sf
->mode_search_skip_flags
;
2875 int64_t mask_filter
= 0;
2876 int64_t filter_cache
[SWITCHABLE_FILTER_CONTEXTS
];
2878 vp9_zero(best_mbmode
);
2880 x
->skip_encode
= sf
->skip_encode_frame
&& x
->q_index
< QIDX_SKIP_THRESH
;
2882 for (i
= 0; i
< SWITCHABLE_FILTER_CONTEXTS
; ++i
)
2883 filter_cache
[i
] = INT64_MAX
;
2885 estimate_ref_frame_costs(cm
, xd
, segment_id
, ref_costs_single
, ref_costs_comp
,
2888 for (i
= 0; i
< REFERENCE_MODES
; ++i
)
2889 best_pred_rd
[i
] = INT64_MAX
;
2890 for (i
= 0; i
< TX_MODES
; i
++)
2891 best_tx_rd
[i
] = INT64_MAX
;
2892 for (i
= 0; i
< SWITCHABLE_FILTER_CONTEXTS
; i
++)
2893 best_filter_rd
[i
] = INT64_MAX
;
2894 for (i
= 0; i
< TX_SIZES
; i
++)
2895 rate_uv_intra
[i
] = INT_MAX
;
2896 for (i
= 0; i
< MAX_REF_FRAMES
; ++i
)
2897 x
->pred_sse
[i
] = INT_MAX
;
2898 for (i
= 0; i
< MB_MODE_COUNT
; ++i
) {
2899 for (k
= 0; k
< MAX_REF_FRAMES
; ++k
) {
2900 single_inter_filter
[i
][k
] = SWITCHABLE
;
2901 single_skippable
[i
][k
] = 0;
2905 rd_cost
->rate
= INT_MAX
;
2907 for (ref_frame
= LAST_FRAME
; ref_frame
<= ALTREF_FRAME
; ++ref_frame
) {
2908 x
->pred_mv_sad
[ref_frame
] = INT_MAX
;
2909 if (cpi
->ref_frame_flags
& flag_list
[ref_frame
]) {
2910 setup_buffer_inter(cpi
, x
, tile_info
, ref_frame
, bsize
, mi_row
, mi_col
,
2911 frame_mv
[NEARESTMV
], frame_mv
[NEARMV
], yv12_mb
);
2913 frame_mv
[NEWMV
][ref_frame
].as_int
= INVALID_MV
;
2914 frame_mv
[ZEROMV
][ref_frame
].as_int
= 0;
2917 for (ref_frame
= LAST_FRAME
; ref_frame
<= ALTREF_FRAME
; ++ref_frame
) {
2918 if (!(cpi
->ref_frame_flags
& flag_list
[ref_frame
])) {
2919 // Skip checking missing references in both single and compound reference
2920 // modes. Note that a mode will be skipped iff both reference frames
2922 ref_frame_skip_mask
[0] |= (1 << ref_frame
);
2923 ref_frame_skip_mask
[1] |= SECOND_REF_FRAME_MASK
;
2924 } else if (sf
->reference_masking
) {
2925 for (i
= LAST_FRAME
; i
<= ALTREF_FRAME
; ++i
) {
2926 // Skip fixed mv modes for poor references
2927 if ((x
->pred_mv_sad
[ref_frame
] >> 2) > x
->pred_mv_sad
[i
]) {
2928 mode_skip_mask
[ref_frame
] |= INTER_NEAREST_NEAR_ZERO
;
2933 // If the segment reference frame feature is enabled....
2934 // then do nothing if the current ref frame is not allowed..
2935 if (vp9_segfeature_active(seg
, segment_id
, SEG_LVL_REF_FRAME
) &&
2936 vp9_get_segdata(seg
, segment_id
, SEG_LVL_REF_FRAME
) != (int)ref_frame
) {
2937 ref_frame_skip_mask
[0] |= (1 << ref_frame
);
2938 ref_frame_skip_mask
[1] |= SECOND_REF_FRAME_MASK
;
2942 // Disable this drop out case if the ref frame
2943 // segment level feature is enabled for this segment. This is to
2944 // prevent the possibility that we end up unable to pick any mode.
2945 if (!vp9_segfeature_active(seg
, segment_id
, SEG_LVL_REF_FRAME
)) {
2946 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
2947 // unless ARNR filtering is enabled in which case we want
2948 // an unfiltered alternative. We allow near/nearest as well
2949 // because they may result in zero-zero MVs but be cheaper.
2950 if (cpi
->rc
.is_src_frame_alt_ref
&& (cpi
->oxcf
.arnr_max_frames
== 0)) {
2951 ref_frame_skip_mask
[0] = (1 << LAST_FRAME
) | (1 << GOLDEN_FRAME
);
2952 ref_frame_skip_mask
[1] = SECOND_REF_FRAME_MASK
;
2953 mode_skip_mask
[ALTREF_FRAME
] = ~INTER_NEAREST_NEAR_ZERO
;
2954 if (frame_mv
[NEARMV
][ALTREF_FRAME
].as_int
!= 0)
2955 mode_skip_mask
[ALTREF_FRAME
] |= (1 << NEARMV
);
2956 if (frame_mv
[NEARESTMV
][ALTREF_FRAME
].as_int
!= 0)
2957 mode_skip_mask
[ALTREF_FRAME
] |= (1 << NEARESTMV
);
2961 if (cpi
->rc
.is_src_frame_alt_ref
) {
2962 if (sf
->alt_ref_search_fp
) {
2963 mode_skip_mask
[ALTREF_FRAME
] = 0;
2964 ref_frame_skip_mask
[0] = ~(1 << ALTREF_FRAME
);
2965 ref_frame_skip_mask
[1] = SECOND_REF_FRAME_MASK
;
2969 if (sf
->alt_ref_search_fp
)
2970 if (!cm
->show_frame
&& x
->pred_mv_sad
[GOLDEN_FRAME
] < INT_MAX
)
2971 if (x
->pred_mv_sad
[ALTREF_FRAME
] > (x
->pred_mv_sad
[GOLDEN_FRAME
] << 1))
2972 mode_skip_mask
[ALTREF_FRAME
] |= INTER_ALL
;
2974 if (sf
->adaptive_mode_search
) {
2975 if (cm
->show_frame
&& !cpi
->rc
.is_src_frame_alt_ref
&&
2976 cpi
->rc
.frames_since_golden
>= 3)
2977 if (x
->pred_mv_sad
[GOLDEN_FRAME
] > (x
->pred_mv_sad
[LAST_FRAME
] << 1))
2978 mode_skip_mask
[GOLDEN_FRAME
] |= INTER_ALL
;
2981 if (bsize
> sf
->max_intra_bsize
) {
2982 ref_frame_skip_mask
[0] |= (1 << INTRA_FRAME
);
2983 ref_frame_skip_mask
[1] |= (1 << INTRA_FRAME
);
2986 mode_skip_mask
[INTRA_FRAME
] |=
2987 ~(sf
->intra_y_mode_mask
[max_txsize_lookup
[bsize
]]);
2989 for (i
= 0; i
<= LAST_NEW_MV_INDEX
; ++i
)
2990 mode_threshold
[i
] = 0;
2991 for (i
= LAST_NEW_MV_INDEX
+ 1; i
< MAX_MODES
; ++i
)
2992 mode_threshold
[i
] = ((int64_t)rd_threshes
[i
] * rd_thresh_freq_fact
[i
]) >> 5;
2994 midx
= sf
->schedule_mode_search
? mode_skip_start
: 0;
2996 uint8_t end_pos
= 0;
2997 for (i
= 5; i
< midx
; ++i
) {
2998 if (mode_threshold
[mode_map
[i
- 1]] > mode_threshold
[mode_map
[i
]]) {
2999 uint8_t tmp
= mode_map
[i
];
3000 mode_map
[i
] = mode_map
[i
- 1];
3001 mode_map
[i
- 1] = tmp
;
3008 for (midx
= 0; midx
< MAX_MODES
; ++midx
) {
3009 int mode_index
= mode_map
[midx
];
3010 int mode_excluded
= 0;
3011 int64_t this_rd
= INT64_MAX
;
3012 int disable_skip
= 0;
3013 int compmode_cost
= 0;
3014 int rate2
= 0, rate_y
= 0, rate_uv
= 0;
3015 int64_t distortion2
= 0, distortion_y
= 0, distortion_uv
= 0;
3017 int64_t tx_cache
[TX_MODES
];
3019 int64_t total_sse
= INT64_MAX
;
3022 this_mode
= vp9_mode_order
[mode_index
].mode
;
3023 ref_frame
= vp9_mode_order
[mode_index
].ref_frame
[0];
3024 second_ref_frame
= vp9_mode_order
[mode_index
].ref_frame
[1];
3026 // Look at the reference frame of the best mode so far and set the
3027 // skip mask to look at a subset of the remaining modes.
3028 if (midx
== mode_skip_start
&& best_mode_index
>= 0) {
3029 switch (best_mbmode
.ref_frame
[0]) {
3033 ref_frame_skip_mask
[0] |= LAST_FRAME_MODE_MASK
;
3034 ref_frame_skip_mask
[1] |= SECOND_REF_FRAME_MASK
;
3037 ref_frame_skip_mask
[0] |= GOLDEN_FRAME_MODE_MASK
;
3038 ref_frame_skip_mask
[1] |= SECOND_REF_FRAME_MASK
;
3041 ref_frame_skip_mask
[0] |= ALT_REF_MODE_MASK
;
3044 case MAX_REF_FRAMES
:
3045 assert(0 && "Invalid Reference frame");
3050 if (ref_frame_skip_mask
[0] & (1 << ref_frame
) &&
3051 ref_frame_skip_mask
[1] & (1 << MAX(0, second_ref_frame
)))
3054 if (mode_skip_mask
[ref_frame
] & (1 << this_mode
))
3057 // Test best rd so far against threshold for trying this mode.
3058 if (best_mode_skippable
&& sf
->schedule_mode_search
)
3059 mode_threshold
[mode_index
] <<= 1;
3061 if (best_rd
< mode_threshold
[mode_index
])
3064 if (sf
->motion_field_mode_search
) {
3065 const int mi_width
= MIN(num_8x8_blocks_wide_lookup
[bsize
],
3066 tile_info
->mi_col_end
- mi_col
);
3067 const int mi_height
= MIN(num_8x8_blocks_high_lookup
[bsize
],
3068 tile_info
->mi_row_end
- mi_row
);
3069 const int bsl
= mi_width_log2_lookup
[bsize
];
3070 int cb_partition_search_ctrl
= (((mi_row
+ mi_col
) >> bsl
)
3071 + get_chessboard_index(cm
->current_video_frame
)) & 0x1;
3072 MB_MODE_INFO
*ref_mbmi
;
3073 int const_motion
= 1;
3074 int skip_ref_frame
= !cb_partition_search_ctrl
;
3075 MV_REFERENCE_FRAME rf
= NONE
;
3077 ref_mv
.as_int
= INVALID_MV
;
3079 if ((mi_row
- 1) >= tile_info
->mi_row_start
) {
3080 ref_mv
= xd
->mi
[-xd
->mi_stride
].src_mi
->mbmi
.mv
[0];
3081 rf
= xd
->mi
[-xd
->mi_stride
].src_mi
->mbmi
.ref_frame
[0];
3082 for (i
= 0; i
< mi_width
; ++i
) {
3083 ref_mbmi
= &xd
->mi
[-xd
->mi_stride
+ i
].src_mi
->mbmi
;
3084 const_motion
&= (ref_mv
.as_int
== ref_mbmi
->mv
[0].as_int
) &&
3085 (ref_frame
== ref_mbmi
->ref_frame
[0]);
3086 skip_ref_frame
&= (rf
== ref_mbmi
->ref_frame
[0]);
3090 if ((mi_col
- 1) >= tile_info
->mi_col_start
) {
3091 if (ref_mv
.as_int
== INVALID_MV
)
3092 ref_mv
= xd
->mi
[-1].src_mi
->mbmi
.mv
[0];
3094 rf
= xd
->mi
[-1].src_mi
->mbmi
.ref_frame
[0];
3095 for (i
= 0; i
< mi_height
; ++i
) {
3096 ref_mbmi
= &xd
->mi
[i
* xd
->mi_stride
- 1].src_mi
->mbmi
;
3097 const_motion
&= (ref_mv
.as_int
== ref_mbmi
->mv
[0].as_int
) &&
3098 (ref_frame
== ref_mbmi
->ref_frame
[0]);
3099 skip_ref_frame
&= (rf
== ref_mbmi
->ref_frame
[0]);
3103 if (skip_ref_frame
&& this_mode
!= NEARESTMV
&& this_mode
!= NEWMV
)
3104 if (rf
> INTRA_FRAME
)
3105 if (ref_frame
!= rf
)
3109 if (this_mode
== NEARMV
|| this_mode
== ZEROMV
)
3113 comp_pred
= second_ref_frame
> INTRA_FRAME
;
3115 if (!cpi
->allow_comp_inter_inter
)
3118 // Skip compound inter modes if ARF is not available.
3119 if (!(cpi
->ref_frame_flags
& flag_list
[second_ref_frame
]))
3122 // Do not allow compound prediction if the segment level reference frame
3123 // feature is in use as in this case there can only be one reference.
3124 if (vp9_segfeature_active(seg
, segment_id
, SEG_LVL_REF_FRAME
))
3127 if ((mode_search_skip_flags
& FLAG_SKIP_COMP_BESTINTRA
) &&
3128 best_mode_index
>= 0 && best_mbmode
.ref_frame
[0] == INTRA_FRAME
)
3131 mode_excluded
= cm
->reference_mode
== SINGLE_REFERENCE
;
3133 if (ref_frame
!= INTRA_FRAME
)
3134 mode_excluded
= cm
->reference_mode
== COMPOUND_REFERENCE
;
3137 if (ref_frame
== INTRA_FRAME
) {
3138 if (sf
->adaptive_mode_search
)
3139 if ((x
->source_variance
<< num_pels_log2_lookup
[bsize
]) > best_pred_sse
)
3142 if (this_mode
!= DC_PRED
) {
3143 // Disable intra modes other than DC_PRED for blocks with low variance
3144 // Threshold for intra skipping based on source variance
3145 // TODO(debargha): Specialize the threshold for super block sizes
3146 const unsigned int skip_intra_var_thresh
= 64;
3147 if ((mode_search_skip_flags
& FLAG_SKIP_INTRA_LOWVAR
) &&
3148 x
->source_variance
< skip_intra_var_thresh
)
3150 // Only search the oblique modes if the best so far is
3151 // one of the neighboring directional modes
3152 if ((mode_search_skip_flags
& FLAG_SKIP_INTRA_BESTINTER
) &&
3153 (this_mode
>= D45_PRED
&& this_mode
<= TM_PRED
)) {
3154 if (best_mode_index
>= 0 &&
3155 best_mbmode
.ref_frame
[0] > INTRA_FRAME
)
3158 if (mode_search_skip_flags
& FLAG_SKIP_INTRA_DIRMISMATCH
) {
3159 if (conditional_skipintra(this_mode
, best_intra_mode
))
3164 const MV_REFERENCE_FRAME ref_frames
[2] = {ref_frame
, second_ref_frame
};
3165 if (!check_best_zero_mv(cpi
, mbmi
->mode_context
, frame_mv
,
3166 this_mode
, ref_frames
))
3170 mbmi
->mode
= this_mode
;
3171 mbmi
->uv_mode
= DC_PRED
;
3172 mbmi
->ref_frame
[0] = ref_frame
;
3173 mbmi
->ref_frame
[1] = second_ref_frame
;
3174 // Evaluate all sub-pel filters irrespective of whether we can use
3175 // them for this frame.
3176 mbmi
->interp_filter
= cm
->interp_filter
== SWITCHABLE
? EIGHTTAP
3177 : cm
->interp_filter
;
3178 mbmi
->mv
[0].as_int
= mbmi
->mv
[1].as_int
= 0;
3181 set_ref_ptrs(cm
, xd
, ref_frame
, second_ref_frame
);
3183 // Select prediction reference frames.
3184 for (i
= 0; i
< MAX_MB_PLANE
; i
++) {
3185 xd
->plane
[i
].pre
[0] = yv12_mb
[ref_frame
][i
];
3187 xd
->plane
[i
].pre
[1] = yv12_mb
[second_ref_frame
][i
];
3190 for (i
= 0; i
< TX_MODES
; ++i
)
3191 tx_cache
[i
] = INT64_MAX
;
3193 if (ref_frame
== INTRA_FRAME
) {
3195 struct macroblockd_plane
*const pd
= &xd
->plane
[1];
3196 vpx_memset(x
->skip_txfm
, 0, sizeof(x
->skip_txfm
));
3197 super_block_yrd(cpi
, x
, &rate_y
, &distortion_y
, &skippable
,
3198 NULL
, bsize
, tx_cache
, best_rd
);
3199 if (rate_y
== INT_MAX
)
3202 uv_tx
= get_uv_tx_size_impl(mbmi
->tx_size
, bsize
, pd
->subsampling_x
,
3204 if (rate_uv_intra
[uv_tx
] == INT_MAX
) {
3205 choose_intra_uv_mode(cpi
, x
, ctx
, bsize
, uv_tx
,
3206 &rate_uv_intra
[uv_tx
], &rate_uv_tokenonly
[uv_tx
],
3207 &dist_uv
[uv_tx
], &skip_uv
[uv_tx
], &mode_uv
[uv_tx
]);
3210 rate_uv
= rate_uv_tokenonly
[uv_tx
];
3211 distortion_uv
= dist_uv
[uv_tx
];
3212 skippable
= skippable
&& skip_uv
[uv_tx
];
3213 mbmi
->uv_mode
= mode_uv
[uv_tx
];
3215 rate2
= rate_y
+ cpi
->mbmode_cost
[mbmi
->mode
] + rate_uv_intra
[uv_tx
];
3216 if (this_mode
!= DC_PRED
&& this_mode
!= TM_PRED
)
3217 rate2
+= intra_cost_penalty
;
3218 distortion2
= distortion_y
+ distortion_uv
;
3220 this_rd
= handle_inter_mode(cpi
, x
, bsize
,
3222 &rate2
, &distortion2
, &skippable
,
3224 &disable_skip
, frame_mv
,
3226 single_newmv
, single_inter_filter
,
3227 single_skippable
, &total_sse
, best_rd
,
3228 &mask_filter
, filter_cache
);
3229 if (this_rd
== INT64_MAX
)
3232 compmode_cost
= vp9_cost_bit(comp_mode_p
, comp_pred
);
3234 if (cm
->reference_mode
== REFERENCE_MODE_SELECT
)
3235 rate2
+= compmode_cost
;
3238 // Estimate the reference frame signaling cost and add it
3239 // to the rolling cost variable.
3241 rate2
+= ref_costs_comp
[ref_frame
];
3243 rate2
+= ref_costs_single
[ref_frame
];
3246 if (!disable_skip
) {
3248 // Back out the coefficient coding costs
3249 rate2
-= (rate_y
+ rate_uv
);
3251 // Cost the skip mb case
3252 rate2
+= vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 1);
3253 } else if (ref_frame
!= INTRA_FRAME
&& !xd
->lossless
) {
3254 if (RDCOST(x
->rdmult
, x
->rddiv
, rate_y
+ rate_uv
, distortion2
) <
3255 RDCOST(x
->rdmult
, x
->rddiv
, 0, total_sse
)) {
3256 // Add in the cost of the no skip flag.
3257 rate2
+= vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 0);
3259 // FIXME(rbultje) make this work for splitmv also
3260 rate2
+= vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 1);
3261 distortion2
= total_sse
;
3262 assert(total_sse
>= 0);
3263 rate2
-= (rate_y
+ rate_uv
);
3267 // Add in the cost of the no skip flag.
3268 rate2
+= vp9_cost_bit(vp9_get_skip_prob(cm
, xd
), 0);
3271 // Calculate the final RD estimate for this mode.
3272 this_rd
= RDCOST(x
->rdmult
, x
->rddiv
, rate2
, distortion2
);
3275 if (ref_frame
== INTRA_FRAME
) {
3276 // Keep record of best intra rd
3277 if (this_rd
< best_intra_rd
) {
3278 best_intra_rd
= this_rd
;
3279 best_intra_mode
= mbmi
->mode
;
3283 if (!disable_skip
&& ref_frame
== INTRA_FRAME
) {
3284 for (i
= 0; i
< REFERENCE_MODES
; ++i
)
3285 best_pred_rd
[i
] = MIN(best_pred_rd
[i
], this_rd
);
3286 for (i
= 0; i
< SWITCHABLE_FILTER_CONTEXTS
; i
++)
3287 best_filter_rd
[i
] = MIN(best_filter_rd
[i
], this_rd
);
3290 // Did this mode help.. i.e. is it the new best mode
3291 if (this_rd
< best_rd
|| x
->skip
) {
3292 int max_plane
= MAX_MB_PLANE
;
3293 if (!mode_excluded
) {
3294 // Note index of best mode so far
3295 best_mode_index
= mode_index
;
3297 if (ref_frame
== INTRA_FRAME
) {
3298 /* required for left and above block mv */
3299 mbmi
->mv
[0].as_int
= 0;
3302 best_pred_sse
= x
->pred_sse
[ref_frame
];
3305 rd_cost
->rate
= rate2
;
3306 rd_cost
->dist
= distortion2
;
3307 rd_cost
->rdcost
= this_rd
;
3309 best_mbmode
= *mbmi
;
3310 best_skip2
= this_skip2
;
3311 best_mode_skippable
= skippable
;
3313 if (!x
->select_tx_size
)
3314 swap_block_ptr(x
, ctx
, 1, 0, 0, max_plane
);
3315 vpx_memcpy(ctx
->zcoeff_blk
, x
->zcoeff_blk
[mbmi
->tx_size
],
3316 sizeof(uint8_t) * ctx
->num_4x4_blk
);
3318 // TODO(debargha): enhance this test with a better distortion prediction
3319 // based on qp, activity mask and history
3320 if ((mode_search_skip_flags
& FLAG_EARLY_TERMINATE
) &&
3321 (mode_index
> MIN_EARLY_TERM_INDEX
)) {
3322 int qstep
= xd
->plane
[0].dequant
[1];
3323 // TODO(debargha): Enhance this by specializing for each mode_index
3325 #if CONFIG_VP9_HIGHBITDEPTH
3326 if (xd
->cur_buf
->flags
& YV12_FLAG_HIGHBITDEPTH
) {
3327 qstep
>>= (xd
->bd
- 8);
3329 #endif // CONFIG_VP9_HIGHBITDEPTH
3330 if (x
->source_variance
< UINT_MAX
) {
3331 const int var_adjust
= (x
->source_variance
< 16);
3332 scale
-= var_adjust
;
3334 if (ref_frame
> INTRA_FRAME
&&
3335 distortion2
* scale
< qstep
* qstep
) {
3342 /* keep record of best compound/single-only prediction */
3343 if (!disable_skip
&& ref_frame
!= INTRA_FRAME
) {
3344 int64_t single_rd
, hybrid_rd
, single_rate
, hybrid_rate
;
3346 if (cm
->reference_mode
== REFERENCE_MODE_SELECT
) {
3347 single_rate
= rate2
- compmode_cost
;
3348 hybrid_rate
= rate2
;
3350 single_rate
= rate2
;
3351 hybrid_rate
= rate2
+ compmode_cost
;
3354 single_rd
= RDCOST(x
->rdmult
, x
->rddiv
, single_rate
, distortion2
);
3355 hybrid_rd
= RDCOST(x
->rdmult
, x
->rddiv
, hybrid_rate
, distortion2
);
3358 if (single_rd
< best_pred_rd
[SINGLE_REFERENCE
])
3359 best_pred_rd
[SINGLE_REFERENCE
] = single_rd
;
3361 if (single_rd
< best_pred_rd
[COMPOUND_REFERENCE
])
3362 best_pred_rd
[COMPOUND_REFERENCE
] = single_rd
;
3364 if (hybrid_rd
< best_pred_rd
[REFERENCE_MODE_SELECT
])
3365 best_pred_rd
[REFERENCE_MODE_SELECT
] = hybrid_rd
;
3367 /* keep record of best filter type */
3368 if (!mode_excluded
&& cm
->interp_filter
!= BILINEAR
) {
3369 int64_t ref
= filter_cache
[cm
->interp_filter
== SWITCHABLE
?
3370 SWITCHABLE_FILTERS
: cm
->interp_filter
];
3372 for (i
= 0; i
< SWITCHABLE_FILTER_CONTEXTS
; i
++) {
3374 if (ref
== INT64_MAX
)
3376 else if (filter_cache
[i
] == INT64_MAX
)
3377 // when early termination is triggered, the encoder does not have
3378 // access to the rate-distortion cost. it only knows that the cost
3379 // should be above the maximum valid value. hence it takes the known
3380 // maximum plus an arbitrary constant as the rate-distortion cost.
3381 adj_rd
= mask_filter
- ref
+ 10;
3383 adj_rd
= filter_cache
[i
] - ref
;
3386 best_filter_rd
[i
] = MIN(best_filter_rd
[i
], adj_rd
);
3391 /* keep record of best txfm size */
3392 if (bsize
< BLOCK_32X32
) {
3393 if (bsize
< BLOCK_16X16
)
3394 tx_cache
[ALLOW_16X16
] = tx_cache
[ALLOW_8X8
];
3396 tx_cache
[ALLOW_32X32
] = tx_cache
[ALLOW_16X16
];
3398 if (!mode_excluded
&& this_rd
!= INT64_MAX
) {
3399 for (i
= 0; i
< TX_MODES
&& tx_cache
[i
] < INT64_MAX
; i
++) {
3400 int64_t adj_rd
= INT64_MAX
;
3401 adj_rd
= this_rd
+ tx_cache
[i
] - tx_cache
[cm
->tx_mode
];
3403 if (adj_rd
< best_tx_rd
[i
])
3404 best_tx_rd
[i
] = adj_rd
;
3411 if (x
->skip
&& !comp_pred
)
3415 // The inter modes' rate costs are not calculated precisely in some cases.
3416 // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
3417 // ZEROMV. Here, checks are added for those cases, and the mode decisions
3419 if (best_mbmode
.mode
== NEWMV
) {
3420 const MV_REFERENCE_FRAME refs
[2] = {best_mbmode
.ref_frame
[0],
3421 best_mbmode
.ref_frame
[1]};
3422 int comp_pred_mode
= refs
[1] > INTRA_FRAME
;
3424 if (frame_mv
[NEARESTMV
][refs
[0]].as_int
== best_mbmode
.mv
[0].as_int
&&
3425 ((comp_pred_mode
&& frame_mv
[NEARESTMV
][refs
[1]].as_int
==
3426 best_mbmode
.mv
[1].as_int
) || !comp_pred_mode
))
3427 best_mbmode
.mode
= NEARESTMV
;
3428 else if (frame_mv
[NEARMV
][refs
[0]].as_int
== best_mbmode
.mv
[0].as_int
&&
3429 ((comp_pred_mode
&& frame_mv
[NEARMV
][refs
[1]].as_int
==
3430 best_mbmode
.mv
[1].as_int
) || !comp_pred_mode
))
3431 best_mbmode
.mode
= NEARMV
;
3432 else if (best_mbmode
.mv
[0].as_int
== 0 &&
3433 ((comp_pred_mode
&& best_mbmode
.mv
[1].as_int
== 0) || !comp_pred_mode
))
3434 best_mbmode
.mode
= ZEROMV
;
3437 if (best_mode_index
< 0 || best_rd
>= best_rd_so_far
) {
3438 rd_cost
->rate
= INT_MAX
;
3439 rd_cost
->rdcost
= INT64_MAX
;
3443 // If we used an estimate for the uv intra rd in the loop above...
3444 if (sf
->use_uv_intra_rd_estimate
) {
3445 // Do Intra UV best rd mode selection if best mode choice above was intra.
3446 if (best_mbmode
.ref_frame
[0] == INTRA_FRAME
) {
3448 *mbmi
= best_mbmode
;
3449 uv_tx_size
= get_uv_tx_size(mbmi
, &xd
->plane
[1]);
3450 rd_pick_intra_sbuv_mode(cpi
, x
, ctx
, &rate_uv_intra
[uv_tx_size
],
3451 &rate_uv_tokenonly
[uv_tx_size
],
3452 &dist_uv
[uv_tx_size
],
3453 &skip_uv
[uv_tx_size
],
3454 bsize
< BLOCK_8X8
? BLOCK_8X8
: bsize
,
3459 assert((cm
->interp_filter
== SWITCHABLE
) ||
3460 (cm
->interp_filter
== best_mbmode
.interp_filter
) ||
3461 !is_inter_block(&best_mbmode
));
3463 if (!cpi
->rc
.is_src_frame_alt_ref
)
3464 vp9_update_rd_thresh_fact(tile_data
->thresh_freq_fact
,
3465 sf
->adaptive_rd_thresh
, bsize
, best_mode_index
);
3468 *mbmi
= best_mbmode
;
3469 x
->skip
|= best_skip2
;
3471 for (i
= 0; i
< REFERENCE_MODES
; ++i
) {
3472 if (best_pred_rd
[i
] == INT64_MAX
)
3473 best_pred_diff
[i
] = INT_MIN
;
3475 best_pred_diff
[i
] = best_rd
- best_pred_rd
[i
];
3479 for (i
= 0; i
< SWITCHABLE_FILTER_CONTEXTS
; i
++) {
3480 if (best_filter_rd
[i
] == INT64_MAX
)
3481 best_filter_diff
[i
] = 0;
3483 best_filter_diff
[i
] = best_rd
- best_filter_rd
[i
];
3485 if (cm
->interp_filter
== SWITCHABLE
)
3486 assert(best_filter_diff
[SWITCHABLE_FILTERS
] == 0);
3487 for (i
= 0; i
< TX_MODES
; i
++) {
3488 if (best_tx_rd
[i
] == INT64_MAX
)
3489 best_tx_diff
[i
] = 0;
3491 best_tx_diff
[i
] = best_rd
- best_tx_rd
[i
];
3494 vp9_zero(best_filter_diff
);
3495 vp9_zero(best_tx_diff
);
3498 // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
3499 // updating code causes PSNR loss. Need to figure out the confliction.
3500 x
->skip
|= best_mode_skippable
;
3502 if (!x
->skip
&& !x
->select_tx_size
) {
3503 int has_high_freq_coeff
= 0;
3505 int max_plane
= is_inter_block(&xd
->mi
[0].src_mi
->mbmi
)
3507 for (plane
= 0; plane
< max_plane
; ++plane
) {
3508 x
->plane
[plane
].eobs
= ctx
->eobs_pbuf
[plane
][1];
3509 has_high_freq_coeff
|= vp9_has_high_freq_in_plane(x
, bsize
, plane
);
3512 for (plane
= max_plane
; plane
< MAX_MB_PLANE
; ++plane
) {
3513 x
->plane
[plane
].eobs
= ctx
->eobs_pbuf
[plane
][2];
3514 has_high_freq_coeff
|= vp9_has_high_freq_in_plane(x
, bsize
, plane
);
3517 best_mode_skippable
|= !has_high_freq_coeff
;
3520 store_coding_context(x
, ctx
, best_mode_index
, best_pred_diff
,
3521 best_tx_diff
, best_filter_diff
, best_mode_skippable
);
3524 void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP
*cpi
,
3525 TileDataEnc
*tile_data
,
3529 PICK_MODE_CONTEXT
*ctx
,
3530 int64_t best_rd_so_far
) {
3531 VP9_COMMON
*const cm
= &cpi
->common
;
3532 MACROBLOCKD
*const xd
= &x
->e_mbd
;
3533 MB_MODE_INFO
*const mbmi
= &xd
->mi
[0].src_mi
->mbmi
;
3534 unsigned char segment_id
= mbmi
->segment_id
;
3535 const int comp_pred
= 0;
3537 int64_t best_tx_diff
[TX_MODES
];
3538 int64_t best_pred_diff
[REFERENCE_MODES
];
3539 int64_t best_filter_diff
[SWITCHABLE_FILTER_CONTEXTS
];
3540 unsigned int ref_costs_single
[MAX_REF_FRAMES
], ref_costs_comp
[MAX_REF_FRAMES
];
3541 vp9_prob comp_mode_p
;
3542 INTERP_FILTER best_filter
= SWITCHABLE
;
3543 int64_t this_rd
= INT64_MAX
;
3545 const int64_t distortion2
= 0;
3547 x
->skip_encode
= cpi
->sf
.skip_encode_frame
&& x
->q_index
< QIDX_SKIP_THRESH
;
3549 estimate_ref_frame_costs(cm
, xd
, segment_id
, ref_costs_single
, ref_costs_comp
,
3552 for (i
= 0; i
< MAX_REF_FRAMES
; ++i
)
3553 x
->pred_sse
[i
] = INT_MAX
;
3554 for (i
= LAST_FRAME
; i
< MAX_REF_FRAMES
; ++i
)
3555 x
->pred_mv_sad
[i
] = INT_MAX
;
  rd_cost->rate = INT_MAX;

  assert(vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
  mbmi->mode = ZEROMV;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;
  x->skip = 1;
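
  // With a zero (full-pel) motion vector there is no sub-pel interpolation,
  // so the filter cannot change the prediction; when the frame filter is
  // SWITCHABLE, the search below therefore only compares signaling cost.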
  if (cm->interp_filter != BILINEAR) {
    best_filter = EIGHTTAP;
    if (cm->interp_filter == SWITCHABLE &&
        x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
      int rs;
      int best_rs = INT_MAX;
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        mbmi->interp_filter = i;
        rs = vp9_get_switchable_rate(cpi, xd);
        if (rs < best_rs) {
          best_rs = rs;
          best_filter = mbmi->interp_filter;
        }
      }
    }
  }
  // Set the appropriate filter.
  if (cm->interp_filter == SWITCHABLE) {
    mbmi->interp_filter = best_filter;
    rate2 += vp9_get_switchable_rate(cpi, xd);
  } else {
    mbmi->interp_filter = cm->interp_filter;
  }
  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    rate2 += vp9_cost_bit(comp_mode_p, comp_pred);
  // Estimate the reference frame signaling cost and add it
  // to the rolling cost variable.
  rate2 += ref_costs_single[LAST_FRAME];
  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
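
  // RDCOST folds rate and distortion into a single Lagrangian-style cost (a
  // weighted sum controlled by rdmult/rddiv), so the comparison against
  // best_rd_so_far below trades bits directly against reconstruction error.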
  rd_cost->rate = rate2;
  rd_cost->dist = distortion2;
  rd_cost->rdcost = this_rd;
  if (this_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }
  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == mbmi->interp_filter));
  vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                            cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
  vp9_zero(best_pred_diff);
  vp9_zero(best_filter_diff);
  vp9_zero(best_tx_diff);
  if (!x->select_tx_size)
    swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
  store_coding_context(x, ctx, THR_ZEROMV,
                       best_pred_diff, best_tx_diff, best_filter_diff, 0);
}
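
// RD mode decision for partitions below 8x8. Each candidate in vp9_ref_order
// is evaluated with a per-4x4-sub-block search (rd_pick_best_sub8x8_mode)
// rather than the whole-block search used at larger block sizes.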
void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
                                   TileDataEnc *tile_data,
                                   MACROBLOCK *x,
                                   int mi_row, int mi_col,
                                   RD_COST *rd_cost,
                                   BLOCK_SIZE bsize,
                                   PICK_MODE_CONTEXT *ctx,
                                   int64_t best_rd_so_far) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  RD_OPT *const rd_opt = &cpi->rd;
  SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  const struct segmentation *const seg = &cm->seg;
  MV_REFERENCE_FRAME ref_frame, second_ref_frame;
  unsigned char segment_id = mbmi->segment_id;
  int comp_pred, i;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = best_rd_so_far;
  int64_t best_yrd = best_rd_so_far;  // FIXME(rbultje) more precise
  static const int64_t best_tx_diff[TX_MODES] = { 0 };
  int64_t best_pred_diff[REFERENCE_MODES];
  int64_t best_pred_rd[REFERENCE_MODES];
  int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
  int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
  MB_MODE_INFO best_mbmode;
  int ref_index, best_ref_index = 0;
  unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
  vp9_prob comp_mode_p;
  INTERP_FILTER tmp_best_filter = SWITCHABLE;
  int rate_uv_intra, rate_uv_tokenonly;
  int64_t dist_uv;
  int skip_uv;
  PREDICTION_MODE mode_uv = DC_PRED;
  const int intra_cost_penalty = vp9_get_intra_cost_penalty(
      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
  int_mv seg_mvs[4][MAX_REF_FRAMES];
  b_mode_info best_bmodes[4];
  int best_skip2 = 0;
  int ref_frame_skip_mask[2] = { 0 };
  int64_t mask_filter = 0;
  int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  vpx_memset(x->zcoeff_blk[TX_4X4], 0, 4);
  vp9_zero(best_mbmode);
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
    filter_cache[i] = INT64_MAX;

  for (i = 0; i < 4; i++) {
    int j;
    for (j = 0; j < MAX_REF_FRAMES; j++)
      seg_mvs[i][j].as_int = INVALID_MV;
  }
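
  // seg_mvs caches per-sub-block motion vectors for each reference frame so
  // the repeated sub8x8 searches below (one per switchable filter) can reuse
  // earlier motion-search results instead of starting from scratch.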
  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                           &comp_mode_p);

  for (i = 0; i < REFERENCE_MODES; ++i)
    best_pred_rd[i] = INT64_MAX;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    best_filter_rd[i] = INT64_MAX;
  rate_uv_intra = INT_MAX;
  rd_cost->rate = INT_MAX;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
    if (cpi->ref_frame_flags & flag_list[ref_frame]) {
      setup_buffer_inter(cpi, x, tile_info,
                         ref_frame, bsize, mi_row, mi_col,
                         frame_mv[NEARESTMV], frame_mv[NEARMV],
                         yv12_mb);
    } else {
      ref_frame_skip_mask[0] |= (1 << ref_frame);
      ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
    }
    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
    frame_mv[ZEROMV][ref_frame].as_int = 0;
  }
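
  // ref_frame_skip_mask[0] carries one bit per first reference frame and
  // ref_frame_skip_mask[1] one bit per second reference frame; bit 0 of the
  // second mask covers the MAX(0, second_ref_frame) index used by modes
  // that have no second reference.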
  for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
    int mode_excluded = 0;
    int64_t this_rd = INT64_MAX;
    int disable_skip = 0;
    int compmode_cost = 0;
    int rate2 = 0, rate_y = 0, rate_uv = 0;
    int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
    int skippable = 0;
    int this_skip2 = 0;
    int64_t total_sse = INT_MAX;
    int early_term = 0;

    ref_frame = vp9_ref_order[ref_index].ref_frame[0];
    second_ref_frame = vp9_ref_order[ref_index].ref_frame[1];
    // Look at the reference frame of the best mode so far and set the
    // skip mask to look at a subset of the remaining modes.
    if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
      if (ref_index == 3) {
        switch (best_mbmode.ref_frame[0]) {
          case INTRA_FRAME:
            break;
          case LAST_FRAME:
            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
            break;
          case GOLDEN_FRAME:
            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
            break;
          case ALTREF_FRAME:
            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
            break;
          case NONE:
          case MAX_REF_FRAMES:
            assert(0 && "Invalid Reference frame");
            break;
        }
      }
    }
    if (ref_frame_skip_mask[0] & (1 << ref_frame) &&
        ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))
      continue;

    // Test best rd so far against threshold for trying this mode.
    if (rd_less_than_thresh(best_rd,
                            rd_opt->threshes[segment_id][bsize][ref_index],
                            tile_data->thresh_freq_fact[bsize][ref_index]))
      continue;
    comp_pred = second_ref_frame > INTRA_FRAME;
    if (comp_pred) {
      if (!cpi->allow_comp_inter_inter)
        continue;
      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
        continue;
      // Do not allow compound prediction if the segment level reference frame
      // feature is in use as in this case there can only be one reference.
      if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
        continue;

      if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
          best_mbmode.ref_frame[0] == INTRA_FRAME)
        continue;
    }
    // TODO(jingning, jkoleszar): scaling reference frame not supported for
    // sub8x8 blocks.
    if (ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;

    if (second_ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
      continue;
    if (comp_pred)
      mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
    else if (ref_frame != INTRA_FRAME)
      mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;

    // If the segment reference frame feature is enabled,
    // then do nothing if the current ref frame is not allowed.
    if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
      continue;
    // Disable this drop out case if the ref frame
    // segment level feature is enabled for this segment. This is to
    // prevent the possibility that we end up unable to pick any mode.
    } else if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
      // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
      // unless ARNR filtering is enabled in which case we want
      // an unfiltered alternative. We allow near/nearest as well
      // because they may result in zero-zero MVs but be cheaper.
      if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
        continue;
    }
    mbmi->tx_size = TX_4X4;
    mbmi->uv_mode = DC_PRED;
    mbmi->ref_frame[0] = ref_frame;
    mbmi->ref_frame[1] = second_ref_frame;
    // Evaluate all sub-pel filters irrespective of whether we can use
    // them for this frame.
    mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
                                                          : cm->interp_filter;
    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
    // Select prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++) {
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
      if (comp_pred)
        xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
    }
    if (ref_frame == INTRA_FRAME) {
      int rate;
      if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
                                       &distortion_y, best_rd) >= best_rd)
        continue;
      rate2 += rate;
      rate2 += intra_cost_penalty;
      distortion2 += distortion_y;

      if (rate_uv_intra == INT_MAX) {
        choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,
                             &rate_uv_intra, &rate_uv_tokenonly,
                             &dist_uv, &skip_uv, &mode_uv);
      }
      rate2 += rate_uv_intra;
      rate_uv = rate_uv_tokenonly;
      distortion2 += dist_uv;
      distortion_uv = dist_uv;
      mbmi->uv_mode = mode_uv;
    } else {
      int rate;
      int64_t distortion;
      int64_t this_rd_thresh;
      int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
      int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
      int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
      int tmp_best_skippable = 0;
      int switchable_filter_index;
      int_mv *second_ref = comp_pred ?
          &mbmi->ref_mvs[second_ref_frame][0] : NULL;
      b_mode_info tmp_best_bmodes[16];
      MB_MODE_INFO tmp_best_mbmode;
      BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
      int pred_exists = 0;
      int uv_skippable;
      this_rd_thresh = (ref_frame == LAST_FRAME) ?
          rd_opt->threshes[segment_id][bsize][THR_LAST] :
          rd_opt->threshes[segment_id][bsize][THR_ALTR];
      this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
          rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
        filter_cache[i] = INT64_MAX;
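
      // Depending on the speed features, either take a predicted filter or
      // search every switchable filter, caching the per-filter RD cost in
      // filter_cache; the SWITCHABLE_FILTERS slot tracks the best cost with
      // the filter signaling rate included.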
      if (cm->interp_filter != BILINEAR) {
        tmp_best_filter = EIGHTTAP;
        if (x->source_variance < sf->disable_filter_search_var_thresh) {
          tmp_best_filter = EIGHTTAP;
        } else if (sf->adaptive_pred_interp_filter == 1 &&
                   ctx->pred_interp_filter < SWITCHABLE) {
          tmp_best_filter = ctx->pred_interp_filter;
        } else if (sf->adaptive_pred_interp_filter == 2) {
          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
              ctx->pred_interp_filter : 0;
        } else {
          for (switchable_filter_index = 0;
               switchable_filter_index < SWITCHABLE_FILTERS;
               ++switchable_filter_index) {
            int newbest, rs;
            int64_t rs_rd;
            mbmi->interp_filter = switchable_filter_index;
            tmp_rd = rd_pick_best_sub8x8_mode(cpi, x, tile_info,
                                              &mbmi->ref_mvs[ref_frame][0],
                                              second_ref, best_yrd, &rate,
                                              &rate_y, &distortion,
                                              &skippable, &total_sse,
                                              (int) this_rd_thresh, seg_mvs,
                                              bsi, switchable_filter_index,
                                              mi_row, mi_col);
            if (tmp_rd == INT64_MAX)
              continue;
            rs = vp9_get_switchable_rate(cpi, xd);
            rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
            filter_cache[switchable_filter_index] = tmp_rd;
            filter_cache[SWITCHABLE_FILTERS] =
                MIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
            if (cm->interp_filter == SWITCHABLE)
              tmp_rd += rs_rd;

            mask_filter = MAX(mask_filter, tmp_rd);
            newbest = (tmp_rd < tmp_best_rd);
            if (newbest) {
              tmp_best_filter = mbmi->interp_filter;
              tmp_best_rd = tmp_rd;
            }
            if ((newbest && cm->interp_filter == SWITCHABLE) ||
                (mbmi->interp_filter == cm->interp_filter &&
                 cm->interp_filter != SWITCHABLE)) {
              tmp_best_rdu = tmp_rd;
              tmp_best_rate = rate;
              tmp_best_ratey = rate_y;
              tmp_best_distortion = distortion;
              tmp_best_sse = total_sse;
              tmp_best_skippable = skippable;
              tmp_best_mbmode = *mbmi;
              for (i = 0; i < 4; i++) {
                tmp_best_bmodes[i] = xd->mi[0].src_mi->bmi[i];
                x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
              }
              pred_exists = 1;
              if (switchable_filter_index == 0 &&
                  sf->use_rd_breakout &&
                  best_rd < INT64_MAX) {
                if (tmp_best_rdu / 2 > best_rd) {
                  // skip searching the other filters if the first is
                  // already substantially larger than the best so far
                  tmp_best_filter = mbmi->interp_filter;
                  tmp_best_rdu = INT64_MAX;
                  break;
                }
              }
            }
          }  // switchable_filter_index loop
        }
      }
      if (tmp_best_rdu == INT64_MAX && pred_exists)
        continue;

      mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ?
                             tmp_best_filter : cm->interp_filter);
      if (!pred_exists) {
        // Handles the special case when a filter that is not in the
        // switchable list (bilinear) is indicated at the frame level.
        tmp_rd = rd_pick_best_sub8x8_mode(cpi, x, tile_info,
                                          &mbmi->ref_mvs[ref_frame][0],
                                          second_ref, best_yrd, &rate, &rate_y,
                                          &distortion, &skippable, &total_sse,
                                          (int) this_rd_thresh, seg_mvs, bsi, 0,
                                          mi_row, mi_col);
        if (tmp_rd == INT64_MAX)
          continue;
      } else {
        total_sse = tmp_best_sse;
        rate = tmp_best_rate;
        rate_y = tmp_best_ratey;
        distortion = tmp_best_distortion;
        skippable = tmp_best_skippable;
        *mbmi = tmp_best_mbmode;
        for (i = 0; i < 4; i++)
          xd->mi[0].src_mi->bmi[i] = tmp_best_bmodes[i];
      }
      rate2 += rate;
      distortion2 += distortion;

      if (cm->interp_filter == SWITCHABLE)
        rate2 += vp9_get_switchable_rate(cpi, xd);

      if (!mode_excluded)
        mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
                                  : cm->reference_mode == COMPOUND_REFERENCE;

      compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
      tmp_best_rdu = best_rd -
          MIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
              RDCOST(x->rdmult, x->rddiv, 0, total_sse));

      if (tmp_best_rdu > 0) {
        // If even the 'Y' rd value of split is higher than best so far
        // then don't bother looking at UV.
        vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
                                        BLOCK_8X8);
        vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
        if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                              &uv_sse, BLOCK_8X8, tmp_best_rdu))
          continue;

        rate2 += rate_uv;
        distortion2 += distortion_uv;
        skippable = skippable && uv_skippable;
        total_sse += uv_sse;
      }
    }
    if (cm->reference_mode == REFERENCE_MODE_SELECT)
      rate2 += compmode_cost;

    // Estimate the reference frame signaling cost and add it
    // to the rolling cost variable.
    if (second_ref_frame > INTRA_FRAME) {
      rate2 += ref_costs_comp[ref_frame];
    } else {
      rate2 += ref_costs_single[ref_frame];
    }
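
    // Skip-flag decision: if coding the Y+UV coefficients costs more in RD
    // terms than taking the full prediction error (total_sse) as distortion,
    // drop the coefficient rate and code the block as skipped instead.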
    if (!disable_skip) {
      // Skip is never coded at the segment level for sub8x8 blocks and instead
      // always coded in the bitstream at the mode info level.
      if (ref_frame != INTRA_FRAME && !xd->lossless) {
        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
            RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
          // Add in the cost of the no skip flag.
          rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
        } else {
          // FIXME(rbultje) make this work for splitmv also
          rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
          distortion2 = total_sse;
          assert(total_sse >= 0);
          rate2 -= (rate_y + rate_uv);
          rate_y = 0;
          rate_uv = 0;
          this_skip2 = 1;
        }
      } else {
        // Add in the cost of the no skip flag.
        rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
      }

      // Calculate the final RD estimate for this mode.
      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
    }
    if (!disable_skip && ref_frame == INTRA_FRAME) {
      for (i = 0; i < REFERENCE_MODES; ++i)
        best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
        best_filter_rd[i] = MIN(best_filter_rd[i], this_rd);
    }
    // Did this mode help, i.e. is it the new best mode?
    if (this_rd < best_rd || x->skip) {
      if (!mode_excluded) {
        int max_plane = MAX_MB_PLANE;
        // Note index of best mode so far.
        best_ref_index = ref_index;

        if (ref_frame == INTRA_FRAME) {
          /* required for left and above block mv */
          mbmi->mv[0].as_int = 0;
          max_plane = 1;
        }

        rd_cost->rate = rate2;
        rd_cost->dist = distortion2;
        rd_cost->rdcost = this_rd;
        best_rd = this_rd;
        best_yrd = best_rd -
                   RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
        best_mbmode = *mbmi;
        best_skip2 = this_skip2;
        if (!x->select_tx_size)
          swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
        vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
                   sizeof(uint8_t) * ctx->num_4x4_blk);

        for (i = 0; i < 4; i++)
          best_bmodes[i] = xd->mi[0].src_mi->bmi[i];
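
        // Early-termination heuristic: once an inter mode's distortion is
        // small relative to the squared quantizer step, later candidates are
        // unlikely to win, so the mode loop can be cut short.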
        // TODO(debargha): enhance this test with a better distortion prediction
        // based on qp, activity mask and history
        if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
            (ref_index > MIN_EARLY_TERM_INDEX)) {
          int qstep = xd->plane[0].dequant[1];
          // TODO(debargha): Enhance this by specializing for each mode_index
          int scale = 4;
#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            qstep >>= (xd->bd - 8);
          }
#endif  // CONFIG_VP9_HIGHBITDEPTH
          if (x->source_variance < UINT_MAX) {
            const int var_adjust = (x->source_variance < 16);
            scale -= var_adjust;
          }
          if (ref_frame > INTRA_FRAME &&
              distortion2 * scale < qstep * qstep) {
            early_term = 1;
          }
        }
      }
    }
    /* keep record of best compound/single-only prediction */
    if (!disable_skip && ref_frame != INTRA_FRAME) {
      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;

      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
        single_rate = rate2 - compmode_cost;
        hybrid_rate = rate2;
      } else {
        single_rate = rate2;
        hybrid_rate = rate2 + compmode_cost;
      }

      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);

      if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
        best_pred_rd[SINGLE_REFERENCE] = single_rd;
      else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
        best_pred_rd[COMPOUND_REFERENCE] = single_rd;

      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
    }
    /* keep record of best filter type */
    if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
        cm->interp_filter != BILINEAR) {
      int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
                                 SWITCHABLE_FILTERS : cm->interp_filter];
      int64_t adj_rd;
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        if (ref == INT64_MAX)
          adj_rd = 0;
        else if (filter_cache[i] == INT64_MAX)
          // when early termination is triggered, the encoder does not have
          // access to the rate-distortion cost. it only knows that the cost
          // should be above the maximum valid value. hence it takes the known
          // maximum plus an arbitrary constant as the rate-distortion cost.
          adj_rd = mask_filter - ref + 10;
        else
          adj_rd = filter_cache[i] - ref;

        adj_rd += this_rd;
        best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd);
      }
    }

    if (early_term)
      break;
    if (x->skip && !comp_pred)
      break;
  }

  if (best_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }
  // If we used an estimate for the uv intra rd in the loop above...
  if (sf->use_uv_intra_rd_estimate) {
    // Do Intra UV best rd mode selection if best mode choice above was intra.
    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
      *mbmi = best_mbmode;
      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,
                              &rate_uv_tokenonly,
                              &dist_uv, &skip_uv,
                              BLOCK_8X8, TX_4X4);
    }
  }
  if (best_rd == INT64_MAX) {
    rd_cost->rate = INT_MAX;
    rd_cost->dist = INT64_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }
  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == best_mbmode.interp_filter) ||
         !is_inter_block(&best_mbmode));
  vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                            sf->adaptive_rd_thresh, bsize, best_ref_index);
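
  // Restore the winning configuration. For sub8x8 blocks the per-sub-block
  // modes and MVs live in bmi[]; the block-level mv below is taken from
  // sub-block 3 (the bottom-right 4x4), which serves as the block's
  // representative MV in later prediction.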
  *mbmi = best_mbmode;
  x->skip |= best_skip2;
  if (!is_inter_block(&best_mbmode)) {
    for (i = 0; i < 4; i++)
      xd->mi[0].src_mi->bmi[i].as_mode = best_bmodes[i].as_mode;
  } else {
    for (i = 0; i < 4; ++i)
      vpx_memcpy(&xd->mi[0].src_mi->bmi[i], &best_bmodes[i],
                 sizeof(b_mode_info));

    mbmi->mv[0].as_int = xd->mi[0].src_mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = xd->mi[0].src_mi->bmi[3].as_mv[1].as_int;
  }
  for (i = 0; i < REFERENCE_MODES; ++i) {
    if (best_pred_rd[i] == INT64_MAX)
      best_pred_diff[i] = INT_MIN;
    else
      best_pred_diff[i] = best_rd - best_pred_rd[i];
  }
  if (!x->skip) {
    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
      if (best_filter_rd[i] == INT64_MAX)
        best_filter_diff[i] = 0;
      else
        best_filter_diff[i] = best_rd - best_filter_rd[i];
    }
    if (cm->interp_filter == SWITCHABLE)
      assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
  } else {
    vp9_zero(best_filter_diff);
  }
  store_coding_context(x, ctx, best_ref_index,
                       best_pred_diff, best_tx_diff, best_filter_diff, 0);
}