/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "./vpx_config.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/common/vp9_common.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/common/vp9_extend.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_invtrans.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "./vp9_rtcd.h"
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include "vpx_ports/vpx_timer.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_mvref_common.h"

#define DBG_PRNT_SEGMAP 0
void vp9_select_interp_filter_type(VP9_COMP *cpi);

static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE_TYPE bsize);

static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp9_activity_masking().
 */
#define VP9_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const uint8_t VP9_VAR_OFFS[16] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
  unsigned int act;
  unsigned int sse;
  /* TODO: This could also be done over smaller areas (8x8), but that would
   * require extensive changes elsewhere, as lambda is assumed to be fixed
   * over an entire MB in most of the code.
   * Another option is to compute four 8x8 variances, and pick a single
   * lambda using a non-linear combination (e.g., the smallest, or second
   * smallest, etc.).
   */
  act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                          VP9_VAR_OFFS, 0, &sse);
  act = act << 4;

  /* If the region is flat, lower the activity some more. */
  if (act < 8 << 12)
    act = act < 5 << 12 ? act : 5 << 12;

  return act;
}
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP9_COMP *cpi,
                                         MACROBLOCK *x, int use_dc_pred) {
  return vp9_encode_intra(cpi, x, use_dc_pred);
}
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
                                        int mb_row, int mb_col) {
  unsigned int mb_activity;

  if (ALT_ACT_MEASURE) {
    int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);

    // Or use an alternative.
    mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
  } else {
    // Original activity measure from Tim T's code.
    mb_activity = tt_activity_measure(cpi, x);
  }

  if (mb_activity < VP9_ACTIVITY_AVG_MIN)
    mb_activity = VP9_ACTIVITY_AVG_MIN;

  return mb_activity;
}
// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
  // Find median: Simple n^2 algorithm for experimentation
  {
    unsigned int median;
    unsigned int i, j;
    unsigned int *sortlist;
    unsigned int tmp;

    // Create a list to sort to
    CHECK_MEM_ERROR(sortlist,
                    vpx_calloc(sizeof(unsigned int),
                               cpi->common.MBs));

    // Copy map to sort list
    vpx_memcpy(sortlist, cpi->mb_activity_map,
               sizeof(unsigned int) * cpi->common.MBs);

    // Ripple each value down to its correct position
    for (i = 1; i < cpi->common.MBs; i++) {
      for (j = i; j > 0; j--) {
        if (sortlist[j] < sortlist[j - 1]) {
          // Swap values
          tmp = sortlist[j - 1];
          sortlist[j - 1] = sortlist[j];
          sortlist[j] = tmp;
        } else {
          break;
        }
      }
    }

    // Even number MBs so estimate median as mean of two either side.
    median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;

    cpi->activity_avg = median;

    vpx_free(sortlist);
  }
#else
  // Simple mean for now
  cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif  // ACT_MEDIAN

  if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
    cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;

  // Experimental code: return fixed value normalized for several clips
  if (0)
    cpi->activity_avg = 100000;
}
#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0
#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;

  int64_t act;
  int64_t a;
  int64_t b;

#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif

  // Reset pointers to start of activity map
  x->mb_activity_ptr = cpi->mb_activity_map;

  // Calculate normalized mb activity number.
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      // Read activity from the map
      act = *(x->mb_activity_ptr);
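
      // The index computed below is the rounded ratio b/a (or its negated
      // inverse): it is 0 when act == cpi->activity_avg, grows toward +3 as
      // act rises above the average, and falls toward -3 as act drops
      // below it.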
      // Calculate a normalized activity number
      a = act + 4 * cpi->activity_avg;
      b = 4 * act + cpi->activity_avg;

      if (b >= a)
        *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
      else
        *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);

#if OUTPUT_NORM_ACT_STATS
      fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
      // Increment activity map pointers
      x->mb_activity_ptr++;
    }

#if OUTPUT_NORM_ACT_STATS
    fprintf(f, "\n");
#endif
  }

#if OUTPUT_NORM_ACT_STATS
  fclose(f);
#endif
}
#endif  // USE_ACT_INDEX
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

#if ALT_ACT_MEASURE
  YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
  int recon_yoffset;
  int recon_y_stride = new_yv12->y_stride;
#endif

  int mb_row, mb_col;
  unsigned int mb_activity;
  int64_t activity_sum = 0;

  x->mb_activity_ptr = cpi->mb_activity_map;

  // for each macroblock row in image
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
#if ALT_ACT_MEASURE
    // reset above block coeffs
    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
#if ALT_ACT_MEASURE
      xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
      xd->left_available = (mb_col != 0);
      recon_yoffset += 16;
#endif

      // measure activity
      mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);

      // Keep frame sum
      activity_sum += mb_activity;

      // Store MB level activity details.
      *x->mb_activity_ptr = mb_activity;

      // Increment activity map pointer
      x->mb_activity_ptr++;

      // adjust to the next column of source macroblocks
      x->plane[0].src.buf += 16;
    }

    // adjust to the next row of mbs
    x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
  }

  // Calculate an "average" MB activity
  calc_av_activity(cpi, activity_sum);

#if USE_ACT_INDEX
  // Calculate an activity index for each mb
  calc_activity_index(cpi, x);
#endif
}
// Macroblock activity masking
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  a = act + (2 * cpi->activity_avg);
  b = (2 * act) + cpi->activity_avg;
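
  // The scale factor b/a is 1.0 when act == cpi->activity_avg and is bounded
  // by (0.5, 2.0) as act goes to 0 or infinity, so rdmult is at most halved
  // or doubled; "+ (a >> 1)" rounds the integer division to nearest.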
  x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif

  // Activity based Zbin adjustment
  adjust_act_zbin(cpi, x);
}
static void update_state(VP9_COMP *cpi,
                         PICK_MODE_CONTEXT *ctx,
                         BLOCK_SIZE_TYPE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  int mb_mode = mi->mbmi.mode;
  int mb_mode_index = ctx->best_mode_index;
  const int mis = cpi->common.mode_info_stride;
  const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
  assert(mb_mode < MB_MODE_COUNT);
  assert(mb_mode_index < MAX_MODES);
  assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);

  assert(mi->mbmi.sb_type == bsize);
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it
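  // mb_to_right_edge/mb_to_bottom_edge are distances in 1/8-pel units;
  // shifting by (3 + LOG2_MI_SIZE) converts them to mi units, so the test
  // below simply skips mi cells that lie outside the visible frame.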
  for (y = 0; y < bh; y++) {
    for (x_idx = 0; x_idx < bw; x_idx++) {
      if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > x_idx &&
          (xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > y) {
        MODE_INFO *mi_addr = xd->mode_info_context + x_idx + y * mis;

        vpx_memcpy(mi_addr, mi, sizeof(MODE_INFO));
      }
    }
  }

  if (bsize < BLOCK_SIZE_SB32X32) {
    if (bsize < BLOCK_SIZE_MB16X16)
      ctx->txfm_rd_diff[ALLOW_16X16] = ctx->txfm_rd_diff[ALLOW_8X8];
    ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
  }

  if (mb_mode == SPLITMV) {
    vpx_memcpy(x->partition_info, &ctx->partition_info,
               sizeof(PARTITION_INFO));

    mbmi->mv[0].as_int =
        x->partition_info->bmi[3].mv.as_int;
    mbmi->mv[1].as_int =
        x->partition_info->bmi[3].second_mv.as_int;
  }
  if (!output_enabled)
    return;

  {
    int segment_id = mbmi->segment_id, ref_pred_flag;
    if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
      for (i = 0; i < NB_TXFM_MODES; i++) {
        cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
      }
    }

    // Did the chosen reference frame match its predicted value.
    ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
                      vp9_get_pred_ref(cm, xd)));
    vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
    if (!xd->segmentation_enabled ||
        !vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) ||
        vp9_check_segref(xd, segment_id, INTRA_FRAME) +
        vp9_check_segref(xd, segment_id, LAST_FRAME) +
        vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
        vp9_check_segref(xd, segment_id, ALTREF_FRAME) > 1) {
      // Get the prediction context and status
      int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);

      // Count prediction success
      cpi->ref_pred_count[pred_context][ref_pred_flag]++;
    }
  }

  if (cpi->common.frame_type == KEY_FRAME) {
    // Restore the coding modes to that held in the coding context
    // if (mb_mode == I4X4_PRED)
    //   for (i = 0; i < 16; i++)
    //   {
    //     xd->block[i].bmi.as_mode =
    //     xd->mode_info_context->bmi[i].as_mode;
    //     assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
    //   }
#if CONFIG_INTERNAL_STATS
    static const int kf_mode_index[] = {
      THR_DC /*DC_PRED*/,
      THR_V_PRED /*V_PRED*/,
      THR_H_PRED /*H_PRED*/,
      THR_D45_PRED /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D27_PRED /*D27_PRED*/,
      THR_D63_PRED /*D63_PRED*/,
      THR_TM /*TM_PRED*/,
      THR_B_PRED /*I4X4_PRED*/,
    };
    cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
#endif
  } else {
/*
    // Reduce the activation RD thresholds for the best choice mode
    if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
        (cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2))) {
      int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);

      cpi->rd_thresh_mult[mb_mode_index] =
          (cpi->rd_thresh_mult[mb_mode_index]
           >= (MIN_THRESHMULT + best_adjustment)) ?
              cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
              MIN_THRESHMULT;
      cpi->rd_threshes[mb_mode_index] =
          (cpi->rd_baseline_thresh[mb_mode_index] >> 7)
          * cpi->rd_thresh_mult[mb_mode_index];
    }
*/
    // Note how often each mode chosen as best
    cpi->mode_chosen_counts[mb_mode_index]++;
    if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
      int_mv best_mv, best_second_mv;
      MV_REFERENCE_FRAME rf = mbmi->ref_frame;
      best_mv.as_int = ctx->best_ref_mv.as_int;
      best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
      if (mbmi->mode == NEWMV) {
        best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
        best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
      }
      mbmi->best_mv.as_int = best_mv.as_int;
      mbmi->best_second_mv.as_int = best_second_mv.as_int;
      vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
    }

    if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
      int i, j;
      for (j = 0; j < bh; ++j)
        for (i = 0; i < bw; ++i)
          xd->mode_info_context[mis * j + i].mbmi = *mbmi;
    }

    if (cpi->common.mcomp_filter_type == SWITCHABLE &&
        is_inter_mode(mbmi->mode)) {
      ++cpi->switchable_interp_count
          [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
          [vp9_switchable_interp_map[mbmi->interp_filter]];
    }

    cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
    cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
    cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
  }
}
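
// Returns the smallest segment id present over the block's area of the
// segment map. seg_id starts at -1, which wraps to UINT_MAX in the unsigned
// compare, so the first map value read always becomes the initial minimum.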
static unsigned find_seg_id(uint8_t *buf, BLOCK_SIZE_TYPE bsize,
                            int start_y, int height, int start_x, int width) {
  const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
  const int end_x = MIN(start_x + bw, width);
  const int end_y = MIN(start_y + bh, height);
  int x, y;
  unsigned seg_id = -1;

  buf += width * start_y;
  for (y = start_y; y < end_y; y++, buf += width) {
    for (x = start_x; x < end_x; x++) {
      seg_id = MIN(seg_id, buf[x]);
    }
  }

  return seg_id;
}
void vp9_setup_src_planes(MACROBLOCK *x,
                          const YV12_BUFFER_CONFIG *src,
                          int mb_row, int mb_col) {
  setup_pred_plane(&x->plane[0].src,
                   src->y_buffer, src->y_stride,
                   mb_row, mb_col, NULL,
                   x->e_mbd.plane[0].subsampling_x,
                   x->e_mbd.plane[0].subsampling_y);
  setup_pred_plane(&x->plane[1].src,
                   src->u_buffer, src->uv_stride,
                   mb_row, mb_col, NULL,
                   x->e_mbd.plane[1].subsampling_x,
                   x->e_mbd.plane[1].subsampling_y);
  setup_pred_plane(&x->plane[2].src,
                   src->v_buffer, src->uv_stride,
                   mb_row, mb_col, NULL,
                   x->e_mbd.plane[2].subsampling_x,
                   x->e_mbd.plane[2].subsampling_y);
}
static void set_offsets(VP9_COMP *cpi,
                        int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int dst_fb_idx = cm->new_fb_idx;
  const int idx_str = xd->mode_info_stride * mi_row + mi_col;
  const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  int i;

  // entropy context structures
  for (i = 0; i < MAX_MB_PLANE; i++) {
    xd->plane[i].above_context = cm->above_context[i] +
        (mi_col * 2 >> xd->plane[i].subsampling_x);
    xd->plane[i].left_context = cm->left_context[i] +
        (((mi_row * 2) & 15) >> xd->plane[i].subsampling_y);
  }

  // partition contexts
  set_partition_seg_context(cm, xd, mi_row, mi_col);

  // Activity map pointer
  x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
  x->active_ptr = cpi->active_map + idx_map;

  /* pointers to mode info contexts */
  x->partition_info = x->pi + idx_str;
  xd->mode_info_context = cm->mi + idx_str;
  mbmi = &xd->mode_info_context->mbmi;
  xd->prev_mode_info_context = cm->prev_mi + idx_str;

  // Set up destination pointers
  setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
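
  // The clamp values below are in pixels: VP9BORDERINPIXELS is the extended
  // border around the reconstructed frame, and VP9_INTERP_EXTEND is kept in
  // reserve so subpel interpolation taps never read outside that border.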
  /* Set up limit values for MV components to prevent them from
   * extending beyond the UMV borders assuming 16x16 block size */
  x->mv_row_min = -((mi_row * MI_SIZE) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
  x->mv_col_min = -((mi_col * MI_SIZE) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
  x->mv_row_max = ((cm->mi_rows - mi_row) * MI_SIZE +
                   (VP9BORDERINPIXELS - MI_SIZE * bh - VP9_INTERP_EXTEND));
  x->mv_col_max = ((cm->mi_cols - mi_col) * MI_SIZE +
                   (VP9BORDERINPIXELS - MI_SIZE * bw - VP9_INTERP_EXTEND));

  // Set up distance of MB to edge of frame in 1/8th pel units
  assert(!(mi_col & (bw - 1)) && !(mi_row & (bh - 1)));
  set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);

  /* set up source buffers */
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  /* R/D setup */
  x->rddiv = cpi->RDDIV;
  x->rdmult = cpi->RDMULT;

  /* segment ID */
  if (xd->segmentation_enabled) {
    uint8_t *map = xd->update_mb_segmentation_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    mbmi->segment_id = find_seg_id(map, bsize, mi_row,
                                   cm->mi_rows, mi_col, cm->mi_cols);

    assert(mbmi->segment_id <= (MAX_MB_SEGMENTS-1));
    vp9_mb_init_quantizer(cpi, x);

    if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
        !vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
        vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
        vp9_check_segref(xd, 1, INTRA_FRAME) +
        vp9_check_segref(xd, 1, LAST_FRAME) +
        vp9_check_segref(xd, 1, GOLDEN_FRAME) +
        vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
      cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
    } else {
      const int y = mb_row & ~3;
      const int x = mb_col & ~3;
      const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
      const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
      const int tile_progress =
          cm->cur_tile_mi_col_start * cm->mb_rows >> 1;
      const int mb_cols =
          (cm->cur_tile_mi_col_end - cm->cur_tile_mi_col_start) >> 1;

      cpi->seg0_progress =
          ((y * mb_cols + x * 4 + p32 + p16 + tile_progress) << 16) / cm->MBs;
    }
  } else {
    mbmi->segment_id = 0;
  }
}
static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
                          TOKENEXTRA **tp, int *totalrate, int *totaldist,
                          BLOCK_SIZE_TYPE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  if (bsize < BLOCK_SIZE_SB8X8)
    if (xd->ab_index != 0)
      return;

  set_offsets(cpi, mi_row, mi_col, bsize);
  xd->mode_info_context->mbmi.sb_type = bsize;
  if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    vp9_activity_masking(cpi, x);

  /* Find best coding mode & reconstruct the MB so it is available
   * as a predictor for MBs that follow in the SB */
  if (cm->frame_type == KEY_FRAME) {
    vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx);
  } else {
    vp9_rd_pick_inter_mode_sb(cpi, x, mi_row, mi_col, totalrate, totaldist,
                              bsize, ctx);
  }
}
static void update_stats(VP9_COMP *cpi, int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mode_info_context;
  MB_MODE_INFO *const mbmi = &mi->mbmi;

  if (cm->frame_type != KEY_FRAME) {
    int segment_id, seg_ref_active;

    if (mbmi->ref_frame) {
      int pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);

      if (mbmi->second_ref_frame <= INTRA_FRAME)
        cpi->single_pred_count[pred_context]++;
      else
        cpi->comp_pred_count[pred_context]++;
    }

    // If we have just a single reference frame coded for a segment then
    // exclude from the reference frame counts used to work out
    // probabilities. NOTE: At the moment we don't support custom trees
    // for the reference frame coding for each segment but this is a
    // possible future action.
    segment_id = mbmi->segment_id;
    seg_ref_active = vp9_segfeature_active(xd, segment_id,
                                           SEG_LVL_REF_FRAME);
    if (!seg_ref_active ||
        ((vp9_check_segref(xd, segment_id, INTRA_FRAME) +
          vp9_check_segref(xd, segment_id, LAST_FRAME) +
          vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
          vp9_check_segref(xd, segment_id, ALTREF_FRAME)) > 1)) {
      cpi->count_mb_ref_frame_usage[mbmi->ref_frame]++;
    }

    // Count of last ref frame 0,0 usage
    if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
      cpi->inter_zz_count++;
  }
}
// TODO(jingning): the variables used here are a little complicated. need
// further refactoring on organizing the temporary buffers, when recursive
// partition down to 4x4 block size is enabled.
static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
                                            BLOCK_SIZE_TYPE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;

  switch (bsize) {
    case BLOCK_SIZE_SB64X64:
      return &x->sb64_context;
    case BLOCK_SIZE_SB64X32:
      return &x->sb64x32_context[xd->sb_index];
    case BLOCK_SIZE_SB32X64:
      return &x->sb32x64_context[xd->sb_index];
    case BLOCK_SIZE_SB32X32:
      return &x->sb32_context[xd->sb_index];
    case BLOCK_SIZE_SB32X16:
      return &x->sb32x16_context[xd->sb_index][xd->mb_index];
    case BLOCK_SIZE_SB16X32:
      return &x->sb16x32_context[xd->sb_index][xd->mb_index];
    case BLOCK_SIZE_MB16X16:
      return &x->mb_context[xd->sb_index][xd->mb_index];
    case BLOCK_SIZE_SB16X8:
      return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_SIZE_SB8X16:
      return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_SIZE_SB8X8:
      return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
#if CONFIG_AB4X4
    case BLOCK_SIZE_SB8X4:
      return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_SIZE_SB4X8:
      return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_SIZE_AB4X4:
      return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
#endif
    default:
      assert(0);
      return NULL;
  }
}
static BLOCK_SIZE_TYPE *get_sb_partitioning(MACROBLOCK *x,
                                            BLOCK_SIZE_TYPE bsize) {
  MACROBLOCKD *xd = &x->e_mbd;
  switch (bsize) {
    case BLOCK_SIZE_SB64X64:
      return &x->sb64_partitioning;
    case BLOCK_SIZE_SB32X32:
      return &x->sb_partitioning[xd->sb_index];
    case BLOCK_SIZE_MB16X16:
      return &x->mb_partitioning[xd->sb_index][xd->mb_index];
#if CONFIG_AB4X4
    case BLOCK_SIZE_SB8X8:
      return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
#endif
    default:
      assert(0);
      return NULL;
  }
}
static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
                            ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                            ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                            PARTITION_CONTEXT sa[8],
                            PARTITION_CONTEXT sl[8],
                            BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  int bwl = b_width_log2(bsize), bw = 1 << bwl;
  int bhl = b_height_log2(bsize), bh = 1 << bhl;
  int mwl = mi_width_log2(bsize), mw = 1 << mwl;
  int mhl = mi_height_log2(bsize), mh = 1 << mhl;
  for (p = 0; p < MAX_MB_PLANE; p++) {
    vpx_memcpy(cm->above_context[p] +
               ((mi_col * 2) >> xd->plane[p].subsampling_x),
               a + bw * p,
               sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
    vpx_memcpy(cm->left_context[p] +
               ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
               l + bh * p,
               sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
  }
  vpx_memcpy(cm->above_seg_context + mi_col, sa,
             sizeof(PARTITION_CONTEXT) * mw);
  vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
             sizeof(PARTITION_CONTEXT) * mh);
}
static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp,
                     int mi_row, int mi_col, int output_enabled,
                     BLOCK_SIZE_TYPE bsize, int sub_index) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (sub_index != -1)
    *(get_sb_index(xd, bsize)) = sub_index;

  if (bsize < BLOCK_SIZE_SB8X8)
    if (xd->ab_index > 0)
      return;
  set_offsets(cpi, mi_row, mi_col, bsize);
  update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);

  if (output_enabled) {
    update_stats(cpi, mi_row, mi_col);

    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
                      int mi_row, int mi_col, int output_enabled,
                      BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
  const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
  int bwl, bhl;
  int UNINITIALIZED_IS_SAFE(pl);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

#if CONFIG_AB4X4
  c1 = BLOCK_SIZE_AB4X4;
  if (bsize >= BLOCK_SIZE_SB8X8)
#else
  if (bsize > BLOCK_SIZE_SB8X8)
#endif
  {
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
    c1 = *(get_sb_partitioning(x, bsize));
  }

  bwl = b_width_log2(c1), bhl = b_height_log2(c1);

  if (bsl == bwl && bsl == bhl) {
#if CONFIG_AB4X4
    if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
      cpi->partition_count[pl][PARTITION_NONE]++;
#else
    if (output_enabled && bsize > BLOCK_SIZE_SB8X8)
      cpi->partition_count[pl][PARTITION_NONE]++;
#endif
    encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
  } else if (bsl == bhl && bsl > bwl) {
    if (output_enabled)
      cpi->partition_count[pl][PARTITION_VERT]++;
    encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
    encode_b(cpi, tp, mi_row, mi_col + bs, output_enabled, c1, 1);
  } else if (bsl == bwl && bsl > bhl) {
    if (output_enabled)
      cpi->partition_count[pl][PARTITION_HORZ]++;
    encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
    encode_b(cpi, tp, mi_row + bs, mi_col, output_enabled, c1, 1);
  } else {
    BLOCK_SIZE_TYPE subsize;
    int i;

    assert(bwl < bsl && bhl < bsl);
    subsize = get_subsize(bsize, PARTITION_SPLIT);

    if (output_enabled)
      cpi->partition_count[pl][PARTITION_SPLIT]++;

    for (i = 0; i < 4; i++) {
      const int x_idx = i & 1, y_idx = i >> 1;

      *(get_sb_index(xd, subsize)) = i;
      encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
                output_enabled, subsize);
    }
  }

#if CONFIG_AB4X4
  if (bsize >= BLOCK_SIZE_SB8X8 &&
      (bsize == BLOCK_SIZE_SB8X8 || bsl == bwl || bsl == bhl)) {
#else
  if (bsize > BLOCK_SIZE_SB8X8 &&
      (bsize == BLOCK_SIZE_MB16X16 || bsl == bwl || bsl == bhl)) {
#endif
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    update_partition_context(xd, c1, bsize);
  }
}
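
// The partition search below proceeds recursively: for each block it buffers
// the above/left entropy and partition contexts, evaluates PARTITION_SPLIT
// (via recursion), then HORZ, VERT and NONE, restoring the buffered contexts
// between candidates, keeps the choice with the lowest RDCOST, and finally
// re-encodes the winning configuration through encode_sb().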
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
                              int mi_row, int mi_col,
                              BLOCK_SIZE_TYPE bsize,
                              int *rate, int *dist) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int bsl = b_width_log2(bsize), bs = 1 << bsl;
  int ms = bs / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  int i, p, pl;
  BLOCK_SIZE_TYPE subsize;
  int srate = INT_MAX, sdist = INT_MAX;

  if (bsize < BLOCK_SIZE_SB8X8)
    if (xd->ab_index != 0) {
      *rate = 0;
      *dist = 0;
      return;
    }

  assert(mi_height_log2(bsize) == mi_width_log2(bsize));

  // buffer the above/left context information of the block in search.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    vpx_memcpy(a + bs * p, cm->above_context[p] +
               (mi_col * 2 >> xd->plane[p].subsampling_x),
               sizeof(ENTROPY_CONTEXT) * bs >> xd->plane[p].subsampling_x);
    vpx_memcpy(l + bs * p, cm->left_context[p] +
               ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
               sizeof(ENTROPY_CONTEXT) * bs >> xd->plane[p].subsampling_y);
  }
  vpx_memcpy(sa, cm->above_seg_context + mi_col,
             sizeof(PARTITION_CONTEXT) * ms);
  vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
             sizeof(PARTITION_CONTEXT) * ms);
#if CONFIG_AB4X4
  if (bsize >= BLOCK_SIZE_SB8X8) {
#else
  if (bsize >= BLOCK_SIZE_MB16X16) {
#endif
    int r4 = 0, d4 = 0;

    subsize = get_subsize(bsize, PARTITION_SPLIT);
    *(get_sb_partitioning(x, bsize)) = subsize;

    for (i = 0; i < 4; ++i) {
      int x_idx = (i & 1) * (ms >> 1);
      int y_idx = (i >> 1) * (ms >> 1);
      int r, d;

      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
        continue;

      *(get_sb_index(xd, subsize)) = i;
      rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize,
                        &r, &d);

      r4 += r;
      d4 += d;
    }
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
#if CONFIG_AB4X4
    if (r4 < INT_MAX)
      r4 += x->partition_cost[pl][PARTITION_SPLIT];
#else
    r4 += x->partition_cost[pl][PARTITION_SPLIT];
#endif
    srate = r4;
    sdist = d4;
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  if ((mi_col + ms <= cm->mi_cols) && (mi_row + (ms >> 1) <= cm->mi_rows) &&
#if CONFIG_AB4X4
      (bsize >= BLOCK_SIZE_SB8X8)) {
#else
      (bsize >= BLOCK_SIZE_MB16X16)) {
#endif
    int r2, d2;
    int mb_skip = 0;
    subsize = get_subsize(bsize, PARTITION_HORZ);
    *(get_sb_index(xd, subsize)) = 0;
    pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
                  get_block_context(x, subsize));

    if (mi_row + ms <= cm->mi_rows) {
      int r, d;
      update_state(cpi, get_block_context(x, subsize), subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
      *(get_sb_index(xd, subsize)) = 1;
      pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &r, &d, subsize,
                    get_block_context(x, subsize));
      r2 += r;
      d2 += d;
    } else {
      if (mi_row + (ms >> 1) != cm->mi_rows)
        mb_skip = 1;
    }
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
#if CONFIG_AB4X4
    if (r2 < INT_MAX)
      r2 += x->partition_cost[pl][PARTITION_HORZ];
#else
    r2 += x->partition_cost[pl][PARTITION_HORZ];
#endif
    if ((RDCOST(x->rdmult, x->rddiv, r2, d2) <
         RDCOST(x->rdmult, x->rddiv, srate, sdist)) && !mb_skip) {
      srate = r2;
      sdist = d2;
      *(get_sb_partitioning(x, bsize)) = subsize;
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  if ((mi_row + ms <= cm->mi_rows) && (mi_col + (ms >> 1) <= cm->mi_cols) &&
#if CONFIG_AB4X4
      (bsize >= BLOCK_SIZE_SB8X8)) {
#else
      (bsize >= BLOCK_SIZE_MB16X16)) {
#endif
    int r2, d2;
    int mb_skip = 0;
    subsize = get_subsize(bsize, PARTITION_VERT);
    *(get_sb_index(xd, subsize)) = 0;
    pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
                  get_block_context(x, subsize));
    if (mi_col + ms <= cm->mi_cols) {
      int r, d;
      update_state(cpi, get_block_context(x, subsize), subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
      *(get_sb_index(xd, subsize)) = 1;
      pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &r, &d, subsize,
                    get_block_context(x, subsize));
      r2 += r;
      d2 += d;
    } else {
      if (mi_col + (ms >> 1) != cm->mi_cols)
        mb_skip = 1;
    }
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
#if CONFIG_AB4X4
    if (r2 < INT_MAX)
      r2 += x->partition_cost[pl][PARTITION_VERT];
#else
    r2 += x->partition_cost[pl][PARTITION_VERT];
#endif
    if ((RDCOST(x->rdmult, x->rddiv, r2, d2) <
         RDCOST(x->rdmult, x->rddiv, srate, sdist)) && !mb_skip) {
      srate = r2;
      sdist = d2;
      *(get_sb_partitioning(x, bsize)) = subsize;
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  if (mi_row + ms <= cm->mi_rows && mi_col + ms <= cm->mi_cols) {
    int r, d;
    pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
                  get_block_context(x, bsize));
#if CONFIG_AB4X4
    if (bsize >= BLOCK_SIZE_SB8X8) {
#else
    if (bsize >= BLOCK_SIZE_MB16X16) {
#endif
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      r += x->partition_cost[pl][PARTITION_NONE];
    }

    if (RDCOST(x->rdmult, x->rddiv, r, d) <
        RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
      srate = r;
      sdist = d;
#if CONFIG_AB4X4
      if (bsize >= BLOCK_SIZE_SB8X8)
#else
      if (bsize >= BLOCK_SIZE_MB16X16)
#endif
        *(get_sb_partitioning(x, bsize)) = bsize;
    }
  }

  *rate = srate;
  *dist = sdist;

  if (srate < INT_MAX && sdist < INT_MAX)
    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);

  if (bsize == BLOCK_SIZE_SB64X64) {
    assert(tp_orig < *tp);
    assert(srate < INT_MAX);
    assert(sdist < INT_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
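
// A 64x64 superblock spans 8 mi units (each mi unit is 8 pixels), hence the
// mi_col/mi_row strides of 8 used when walking the frame below.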
static void encode_sb_row(VP9_COMP *cpi, int mi_row,
                          TOKENEXTRA **tp, int *totalrate) {
  VP9_COMMON *const cm = &cpi->common;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
  vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));

  // Code each SB in the row
  for (mi_col = cm->cur_tile_mi_col_start;
       mi_col < cm->cur_tile_mi_col_end; mi_col += 8) {
    int dummy_rate, dummy_dist;
    rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                      &dummy_rate, &dummy_dist);
  }
}
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;

  x->act_zbin_adj = 0;
  cpi->seg0_idx = 0;
  vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));

  xd->mode_info_stride = cm->mode_info_stride;
  xd->frame_type = cm->frame_type;

  xd->frames_since_golden = cm->frames_since_golden;
  xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;

  // reset intra mode contexts
  if (cm->frame_type == KEY_FRAME)
    vp9_init_mbmode_probs(cm);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  // TODO(jkoleszar): are these initializations required?
  setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
                   0, 0, NULL, NULL);
  setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);

  vp9_build_block_offsets(x);

  vp9_setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  xd->mode_info_context->mbmi.mode = DC_PRED;
  xd->mode_info_context->mbmi.uv_mode = DC_PRED;

  vp9_zero(cpi->count_mb_ref_frame_usage)
  vp9_zero(cpi->bmode_count)
  vp9_zero(cpi->ymode_count)
  vp9_zero(cpi->y_uv_mode_count)
  vp9_zero(cpi->sub_mv_ref_count)
  vp9_zero(cpi->common.fc.mv_ref_ct)
  vp9_zero(cpi->sb_ymode_count)
  vp9_zero(cpi->partition_count);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(cm->above_context[0], 0, sizeof(ENTROPY_CONTEXT) * 2 *
             MAX_MB_PLANE * mi_cols_aligned_to_sb(cm));
  vpx_memset(cm->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
             mi_cols_aligned_to_sb(cm));
}
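
// In lossless mode the 4x4 Walsh-Hadamard transform pair (which is exactly
// invertible in integer arithmetic, unlike the DCT) is selected, the loop
// filter is turned off and only 4x4 transforms are allowed.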
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
  if (lossless) {
    cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
    cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
    cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
    cpi->mb.optimize = 0;
    cpi->common.filter_level = 0;
    cpi->zbin_mode_boost_enabled = 0;
    cpi->common.txfm_mode = ONLY_4X4;
  } else {
    cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
    cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
    cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
  }
}
static void encode_frame_internal(VP9_COMP *cpi) {
  int mi_row;
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  int totalrate;

//  fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
//          cpi->common.current_video_frame, cpi->common.show_frame,
//          cm->frame_type);

  // Compute a modified set of reference frame probabilities to use when
  // prediction fails. These are based on the current general estimates for
  // this frame which may be updated with each iteration of the recode loop.
  vp9_compute_mod_refprobs(cm);

#if DBG_PRNT_SEGMAP
  {
    FILE *statsfile;
    statsfile = fopen("segmap2.stt", "a");
    fprintf(statsfile, "\n");
    fclose(statsfile);
  }
#endif

  totalrate = 0;

  // Reset frame count of inter 0,0 motion vector usage.
  cpi->inter_zz_count = 0;

  cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
  cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;

  vp9_zero(cpi->switchable_interp_count);
  vp9_zero(cpi->best_switchable_interp_count);

  xd->mode_info_context = cm->mi;
  xd->prev_mode_info_context = cm->prev_mi;

  vp9_zero(cpi->NMVcount);
  vp9_zero(cpi->coef_counts_4x4);
  vp9_zero(cpi->coef_counts_8x8);
  vp9_zero(cpi->coef_counts_16x16);
  vp9_zero(cpi->coef_counts_32x32);
  vp9_zero(cm->fc.eob_branch_counts);

  cpi->mb.e_mbd.lossless = cm->base_qindex == 0 &&
                           cm->y_dc_delta_q == 0 &&
                           cm->uv_dc_delta_q == 0 &&
                           cm->uv_ac_delta_q == 0;
  switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
  vp9_initialize_me_consts(cpi, cm->base_qindex);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Initialize encode frame context.
    init_encode_frame_mb_context(cpi);

    // Build a frame level activity map
    build_activity_map(cpi);
  }

  // re-initialize encode frame context.
  init_encode_frame_mb_context(cpi);

  vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
  vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
  vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
  vpx_memset(cpi->txfm_count_32x32p, 0, sizeof(cpi->txfm_count_32x32p));
  vpx_memset(cpi->txfm_count_16x16p, 0, sizeof(cpi->txfm_count_16x16p));
  vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
  vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

    {
      // Take tiles into account and give start/end MB
      int tile_col, tile_row;
      TOKENEXTRA *tp = cpi->tok;

      for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
        vp9_get_tile_row_offsets(cm, tile_row);

        for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
          TOKENEXTRA *tp_old = tp;

          // For each row of SBs in the frame
          vp9_get_tile_col_offsets(cm, tile_col);
          for (mi_row = cm->cur_tile_mi_row_start;
               mi_row < cm->cur_tile_mi_row_end;
               mi_row += 8)
            encode_sb_row(cpi, mi_row, &tp, &totalrate);
          cpi->tok_count[tile_col] = (unsigned int)(tp - tp_old);
          assert(tp - cpi->tok <=
                 get_token_alloc(cm->mb_rows, cm->mb_cols));
        }
      }
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  // 256 rate units to the bit,
  // projected_frame_size in units of BYTES
  cpi->projected_frame_size = totalrate >> 8;

#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
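
// Returns 1 if more than one reference frame is usable for this frame,
// taking segment 1's reference-frame feature into account; vp9_encode_frame()
// uses the result to decide whether compound prediction is worth considering.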
static int check_dual_ref_flags(VP9_COMP *cpi) {
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
    if ((ref_flags & (VP9_LAST_FLAG | VP9_GOLD_FLAG)) == (VP9_LAST_FLAG | VP9_GOLD_FLAG) &&
        vp9_check_segref(xd, 1, LAST_FRAME))
      return 1;
    if ((ref_flags & (VP9_GOLD_FLAG | VP9_ALT_FLAG)) == (VP9_GOLD_FLAG | VP9_ALT_FLAG) &&
        vp9_check_segref(xd, 1, GOLDEN_FRAME))
      return 1;
    if ((ref_flags & (VP9_ALT_FLAG | VP9_LAST_FLAG)) == (VP9_ALT_FLAG | VP9_LAST_FLAG) &&
        vp9_check_segref(xd, 1, ALTREF_FRAME))
      return 1;
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) +
            !!(ref_flags & VP9_LAST_FLAG) +
            !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
  int x, y;

  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++) {
      if (!mi[y * mis + x].mbmi.mb_skip_coeff)
        return 0;
    }
  }

  return 1;
}

static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
                          TX_SIZE txfm_size) {
  int x, y;

  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++)
      mi[y * mis + x].mbmi.txfm_size = txfm_size;
  }
}
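
// When the frame-level transform mode is demoted after encoding (see
// vp9_encode_frame), skipped blocks may still carry transform sizes that can
// no longer be signalled; the helpers below walk the mode-info grid and clamp
// txfm_size to the new maximum for those blocks.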
static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi,
                                   int mis, TX_SIZE txfm_max,
                                   int bw, int bh, int mi_row, int mi_col,
                                   BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MB_MODE_INFO *const mbmi = &mi->mbmi;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (mbmi->txfm_size > txfm_max) {
    MACROBLOCK *const x = &cpi->mb;
    MACROBLOCKD *const xd = &x->e_mbd;
    const int segment_id = mbmi->segment_id;
    const int ymbs = MIN(bh, cm->mi_rows - mi_row);
    const int xmbs = MIN(bw, cm->mi_cols - mi_col);

    xd->mode_info_context = mi;
    assert(vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ||
           get_skip_flag(mi, mis, ymbs, xmbs));
    set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
  }
}
static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
                                    TX_SIZE txfm_max,
                                    int mi_row, int mi_col,
                                    BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
  int bwl, bhl;
  const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  bwl = mi_width_log2(mi->mbmi.sb_type);
  bhl = mi_height_log2(mi->mbmi.sb_type);

  if (bwl == bsl && bhl == bsl) {
    reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, 1 << bsl,
                           mi_row, mi_col, bsize);
  } else if (bwl == bsl && bhl < bsl) {
    reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, bs,
                           mi_row, mi_col, bsize);
    reset_skip_txfm_size_b(cpi, mi + bs * mis, mis, txfm_max, 1 << bsl, bs,
                           mi_row + bs, mi_col, bsize);
  } else if (bwl < bsl && bhl == bsl) {
    reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, bs, 1 << bsl,
                           mi_row, mi_col, bsize);
    reset_skip_txfm_size_b(cpi, mi + bs, mis, txfm_max, bs, 1 << bsl,
                           mi_row, mi_col + bs, bsize);
  } else {
    BLOCK_SIZE_TYPE subsize;
    int n;

    assert(bwl < bsl && bhl < bsl);
    if (bsize == BLOCK_SIZE_SB64X64) {
      subsize = BLOCK_SIZE_SB32X32;
    } else if (bsize == BLOCK_SIZE_SB32X32) {
      subsize = BLOCK_SIZE_MB16X16;
    } else {
      assert(bsize == BLOCK_SIZE_MB16X16);
      subsize = BLOCK_SIZE_SB8X8;
    }

    for (n = 0; n < 4; n++) {
      const int y_idx = n >> 1, x_idx = n & 0x01;

      reset_skip_txfm_size_sb(cpi, mi + y_idx * bs * mis + x_idx * bs,
                              txfm_max, mi_row + y_idx * bs,
                              mi_col + x_idx * bs, subsize);
    }
  }
}
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
  VP9_COMMON *const cm = &cpi->common;
  int mi_row, mi_col;
  const int mis = cm->mode_info_stride;
  MODE_INFO *mi, *mi_ptr = cm->mi;

  for (mi_row = 0; mi_row < cm->mi_rows;
       mi_row += 8, mi_ptr += 8 * mis) {
    mi = mi_ptr;
    for (mi_col = 0; mi_col < cm->mi_cols;
         mi_col += 8, mi += 8) {
      reset_skip_txfm_size_sb(cpi, mi, txfm_max,
                              mi_row, mi_col, BLOCK_SIZE_SB64X64);
    }
  }
}
void vp9_encode_frame(VP9_COMP *cpi) {
  if (cpi->sf.RD) {
    int i, frame_type, pred_type;
    TXFM_MODE txfm_type;

    /*
     * This code does a single RD pass over the whole frame assuming
     * either compound, single or hybrid prediction as per whatever has
     * worked best for that type of frame in the past.
     * It also predicts whether another coding mode would have worked
     * better than this coding mode. If that is the case, it remembers
     * that for subsequent frames.
     * It does the same analysis for transform size selection also.
     */
    if (cpi->common.frame_type == KEY_FRAME)
      frame_type = 0;
    else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
      frame_type = 3;
    else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
      frame_type = 1;
    else
      frame_type = 2;

    /* prediction (compound, single or hybrid) mode selection */
    if (frame_type == 3)
      pred_type = SINGLE_PREDICTION_ONLY;
    else if (cpi->rd_prediction_type_threshes[frame_type][1] >
             cpi->rd_prediction_type_threshes[frame_type][0] &&
             cpi->rd_prediction_type_threshes[frame_type][1] >
             cpi->rd_prediction_type_threshes[frame_type][2] &&
             check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
      pred_type = COMP_PREDICTION_ONLY;
    else if (cpi->rd_prediction_type_threshes[frame_type][0] >
             cpi->rd_prediction_type_threshes[frame_type][2])
      pred_type = SINGLE_PREDICTION_ONLY;
    else
      pred_type = HYBRID_PREDICTION;

    /* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */

    cpi->mb.e_mbd.lossless = 0;
    if (cpi->oxcf.lossless) {
      txfm_type = ONLY_4X4;
      cpi->mb.e_mbd.lossless = 1;
    } else
#if 0
    /* FIXME (rbultje): this code is disabled until we support cost updates
     * while a frame is being encoded; the problem is that each time we
     * "revert" to 4x4 only (or even 8x8 only), the coefficient probabilities
     * for 16x16 (and 8x8) start lagging behind, thus leading to them lagging
     * further behind and not being chosen for subsequent frames either. This
     * is essentially a local minimum problem that we can probably fix by
     * estimating real costs more closely within a frame, perhaps by re-
     * calculating costs on-the-fly as frame encoding progresses. */
    if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
        cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
        cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
        cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
        cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
        cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
      txfm_type = TX_MODE_SELECT;
    } else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
               cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
               && cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
               cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
               ) {
      txfm_type = ONLY_4X4;
    } else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
               cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
      txfm_type = ALLOW_16X16;
    } else
      txfm_type = ALLOW_8X8;
#else
    txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
                cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
                ALLOW_32X32 : TX_MODE_SELECT;
#endif
    cpi->common.txfm_mode = txfm_type;
    if (txfm_type != TX_MODE_SELECT) {
      cpi->common.prob_tx[0] = 128;
      cpi->common.prob_tx[1] = 128;
    }
    cpi->common.comp_pred_mode = pred_type;
    encode_frame_internal(cpi);

    for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
      const int diff = (int)(cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
      cpi->rd_prediction_type_threshes[frame_type][i] += diff;
      cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
    }

    for (i = 0; i < NB_TXFM_MODES; ++i) {
      int64_t pd = cpi->rd_tx_select_diff[i];
      int diff;
      if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
                     2048 * (TX_SIZE_MAX_SB - 1), 0);
      diff = (int)(pd / cpi->common.MBs);
      cpi->rd_tx_select_threshes[frame_type][i] += diff;
      cpi->rd_tx_select_threshes[frame_type][i] /= 2;
    }

    if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
        single_count_zero += cpi->single_pred_count[i];
        comp_count_zero += cpi->comp_pred_count[i];
      }

      if (comp_count_zero == 0) {
        cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
      } else if (single_count_zero == 0) {
        cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
      }
    }
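
    // After encoding with per-block transform-size selection, inspect the
    // observed counts: if some sizes were never chosen, the frame header can
    // signal a single fixed transform mode instead and drop the per-block
    // size flags.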
    if (cpi->common.txfm_mode == TX_MODE_SELECT) {
      const int count4x4 = cpi->txfm_count_16x16p[TX_4X4] +
                           cpi->txfm_count_32x32p[TX_4X4] +
                           cpi->txfm_count_8x8p[TX_4X4];
      const int count8x8_lp = cpi->txfm_count_32x32p[TX_8X8] +
                              cpi->txfm_count_16x16p[TX_8X8];
      const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
      const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
      const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
      const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cpi->common.txfm_mode = ALLOW_8X8;
        reset_skip_txfm_size(cpi, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cpi->common.txfm_mode = ONLY_4X4;
        reset_skip_txfm_size(cpi, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cpi->common.txfm_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cpi->common.txfm_mode = ALLOW_16X16;
        reset_skip_txfm_size(cpi, TX_16X16);
      }
    }

    // Update interpolation filter strategy for next frame.
    if ((cpi->common.frame_type != KEY_FRAME) && (cpi->sf.search_best_filter))
      vp9_select_interp_filter_type(cpi);
  } else {
    encode_frame_internal(cpi);
  }
}
*x
) {
1601 static void sum_intra_stats(VP9_COMP
*cpi
, MACROBLOCK
*x
) {
1602 const MACROBLOCKD
*xd
= &x
->e_mbd
;
1603 const MB_PREDICTION_MODE m
= xd
->mode_info_context
->mbmi
.mode
;
1604 const MB_PREDICTION_MODE uvm
= xd
->mode_info_context
->mbmi
.uv_mode
;
1607 if (xd
->mode_info_context
->mbmi
.sb_type
>= BLOCK_SIZE_SB8X8
) {
1609 if (xd
->mode_info_context
->mbmi
.sb_type
> BLOCK_SIZE_SB8X8
) {
1611 ++cpi
->sb_ymode_count
[m
];
1613 ++cpi
->ymode_count
[m
];
1615 ++cpi
->y_uv_mode_count
[m
][uvm
];
1616 if (m
== I4X4_PRED
) {
1619 int m
= xd
->mode_info_context
->bmi
[b
].as_mode
.first
;
1620 ++cpi
->bmode_count
[m
];
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->act_zbin_adj = *(x->mb_activity_ptr);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  a = act + 4 * cpi->activity_avg;
  b = 4 * act + cpi->activity_avg;

  if (act > cpi->activity_avg)
    x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
  else
    x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
#endif
}
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int n;
  MODE_INFO *mi = xd->mode_info_context;
  MB_MODE_INFO *mbmi = &mi->mbmi;
  unsigned int segment_id = mbmi->segment_id;
  const int mis = cm->mode_info_stride;
  const int bwl = mi_width_log2(bsize);
  const int bw = 1 << bwl, bh = 1 << mi_height_log2(bsize);

  if (cm->frame_type == KEY_FRAME) {
    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      adjust_act_zbin(cpi, x);
      vp9_update_zbin_extra(cpi, x);
    }
  } else {
    vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      // Adjust the zbin based on this MB rate.
      adjust_act_zbin(cpi, x);
    }

    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = 0;
    if (cpi->zbin_mode_boost_enabled) {
      if (mbmi->ref_frame != INTRA_FRAME) {
        if (mbmi->mode == ZEROMV) {
          if (mbmi->ref_frame != LAST_FRAME)
            cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          else
            cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
        } else if (mbmi->mode == SPLITMV) {
          cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
        } else {
          cpi->zbin_mode_boost = MV_ZBIN_BOOST;
        }
      } else {
        cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
      }
    }

    vp9_update_zbin_extra(cpi, x);
  }

#if CONFIG_AB4X4
  if (mbmi->ref_frame == INTRA_FRAME &&
      bsize < BLOCK_SIZE_SB8X8) {
#else
  if (mbmi->mode == I4X4_PRED) {
    assert(bsize == BLOCK_SIZE_SB8X8 && mbmi->txfm_size == TX_4X4);
#endif
    vp9_encode_intra4x4mby(x, BLOCK_SIZE_SB8X8);
    vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_SB8X8);
    vp9_encode_sbuv(cm, x, BLOCK_SIZE_SB8X8);

    if (output_enabled)
      sum_intra_stats(cpi, x);
  } else if (mbmi->ref_frame == INTRA_FRAME) {
    vp9_build_intra_predictors_sby_s(xd, bsize);
    vp9_build_intra_predictors_sbuv_s(xd, bsize);
    if (output_enabled)
      sum_intra_stats(cpi, x);
  } else {
    int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame)];
    YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
    YV12_BUFFER_CONFIG *second_ref_fb = NULL;
    if (mbmi->second_ref_frame > 0) {
      idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->second_ref_frame)];
      second_ref_fb = &cm->yv12_fb[idx];
    }

    assert(cm->frame_type != KEY_FRAME);

    setup_pre_planes(xd, ref_fb, second_ref_fb,
                     mi_row, mi_col, xd->scale_factor, xd->scale_factor_uv);

    vp9_build_inter_predictors_sb(xd, mi_row, mi_col,
                                  bsize < BLOCK_SIZE_SB8X8 ? BLOCK_SIZE_SB8X8
                                                           : bsize);
  }

#if CONFIG_AB4X4
  if (mbmi->ref_frame == INTRA_FRAME &&
      bsize < BLOCK_SIZE_SB8X8) {
#else
  if (mbmi->mode == I4X4_PRED) {
    assert(bsize == BLOCK_SIZE_SB8X8);
#endif
    vp9_tokenize_sb(cpi, xd, t, !output_enabled, BLOCK_SIZE_SB8X8);
  } else if (!x->skip) {
    vp9_encode_sb(cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
    vp9_tokenize_sb(cpi, xd, t, !output_enabled,
                    (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
  } else {
    // FIXME(rbultje): not tile-aware (mi - 1)
    int mb_skip_context =
        (mi - 1)->mbmi.mb_skip_coeff + (mi - mis)->mbmi.mb_skip_coeff;

    mbmi->mb_skip_coeff = 1;
    if (output_enabled)
      cpi->skip_true_count[mb_skip_context]++;
    vp9_reset_sb_tokens_context(xd,
                                (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
                                                           : bsize);
  }
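
  // bw is a power of two, so in the loop below "n & (bw - 1)" and "n >> bwl"
  // decompose the linear index n into its column and row within the block.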
  // copy skip flag on all mb_mode_info contexts in this SB
  // if this was a skip at this txfm size
  for (n = 1; n < bw * bh; n++) {
    const int x_idx = n & (bw - 1), y_idx = n >> bwl;
    if (mi_col + x_idx < cm->mi_cols && mi_row + y_idx < cm->mi_rows)
      mi[x_idx + y_idx * mis].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
  }

  if (output_enabled) {
    if (cm->txfm_mode == TX_MODE_SELECT &&
        !(mbmi->mb_skip_coeff ||
          vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
      if (bsize >= BLOCK_SIZE_SB32X32) {
        cpi->txfm_count_32x32p[mbmi->txfm_size]++;
      } else if (bsize >= BLOCK_SIZE_MB16X16) {
        cpi->txfm_count_16x16p[mbmi->txfm_size]++;
      } else {
        cpi->txfm_count_8x8p[mbmi->txfm_size]++;
      }
    } else {
      int x, y;
      TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_32X32 : cm->txfm_mode;

      if (sz == TX_32X32 && bsize < BLOCK_SIZE_SB32X32)
        sz = TX_16X16;
      if (sz == TX_16X16 && bsize < BLOCK_SIZE_MB16X16)
        sz = TX_8X8;
#if CONFIG_AB4X4
      if (sz == TX_8X8 && bsize < BLOCK_SIZE_SB8X8)
#else
      if (sz == TX_8X8 && (mbmi->mode == SPLITMV ||
                           mbmi->mode == I4X4_PRED))
#endif
        sz = TX_4X4;

      for (y = 0; y < bh; y++) {
        for (x = 0; x < bw; x++) {
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) {
            mi[mis * y + x].mbmi.txfm_size = sz;
          }
        }
      }
    }
  }
}