Refactor intra block prediction and reconstruction process
vp9/encoder/vp9_bitstream.c
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
  {62, 6}, {2, 2}};
static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
    {{0, 1}, {2, 2}, {3, 2}};
static const struct vp9_token partition_encodings[PARTITION_TYPES] =
    {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp9_token inter_mode_encodings[INTER_MODES] =
    {{2, 2}, {6, 3}, {0, 1}, {7, 3}};

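/*
 * Each vp9_token pairs a codeword value with its length in bits; the
 * codewords follow the corresponding vp9_*_tree tables in the common code
 * and are emitted most-significant bit first.  Illustrative example: entry 0
 * of intra_mode_encodings, {0, 1}, is the single bit 0 (DC_PRED), while
 * {126, 7} is the seven bits 1111110.
 */
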
static void write_intra_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                                int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}

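/*
 * get_unsigned_bits(max) returns the number of bits needed to represent any
 * value in [0, max].  For example, a segment feature whose data_max is 63 is
 * written as a 6-bit literal.
 */
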
static void prob_diff_update(const vp9_tree_index *tree,
                             vp9_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vp9_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

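/*
 * Note: vp9_cond_prob_diff_update() writes a one-bit "update" flag per
 * internal tree node and only codes a subexponential probability delta when
 * doing so is expected to save more bits than it costs.
 */
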
static void write_selected_tx_size(const VP9_COMMON *cm,
                                   const MACROBLOCKD *xd, vp9_writer *w) {
  TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc->tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

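/*
 * The transform size above is a truncated unary code bounded by max_tx_size:
 * one bit separates TX_4X4 from the rest, and further bits are sent only
 * while larger sizes remain possible.  For example, TX_32X32 inside a 64x64
 * block costs three bits, while TX_4X4 always costs one.
 */
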
static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vp9_writer *w) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vp9_write(w, skip, vp9_get_skip_prob(cm, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w,
                              FRAME_COUNTS *counts) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w,
                                           FRAME_COUNTS *counts) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc->switchable_interp_prob[j],
                     counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}

static void pack_mb_tokens(vp9_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
                           vpx_bit_depth_t bit_depth) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    int i = 0;
    int v = a->value;
    int n = a->len;
#if CONFIG_VP9_HIGHBITDEPTH
    const vp9_extra_bit *b;
    if (bit_depth == VPX_BITS_12)
      b = &vp9_extra_bits_high12[t];
    else if (bit_depth == VPX_BITS_10)
      b = &vp9_extra_bits_high10[t];
    else
      b = &vp9_extra_bits[t];
#else
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    (void) bit_depth;
#endif  // CONFIG_VP9_HIGHBITDEPTH

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains.  It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes.  The first treed write takes care of the
    // unconstrained nodes.  The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

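/*
 * For tokens with a base value, p->extra packs the residual magnitude in the
 * high bits and the sign in bit 0; the magnitude is walked MSB-first down
 * b->tree with per-node probabilities and the sign is written as a raw bit.
 */
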
static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

// This function encodes the reference frame
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                             vp9_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
               get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}

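/*
 * In the single-reference case at most two bits are spent: bit0 separates
 * LAST_FRAME from {GOLDEN_FRAME, ALTREF_FRAME}, and bit1 resolves the latter
 * pair.  The compound case needs only one bit because the second reference
 * is fixed at the frame level.
 */
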
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter && skip)) {
    write_selected_tx_size(cm, xd, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
    const vp9_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

    // If segment skip is not enabled, code the mode.
    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}

static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO **mi_8x8, vp9_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cm, xd, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base +
      (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
}

static void write_partition(const VP9_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = xd->partition_probs[ctx];
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}

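/*
 * Blocks straddling the right or bottom frame edge have a restricted set of
 * legal partitions, so a full partition token is unnecessary: e.g. when the
 * block runs past the bottom edge (!has_rows), only HORZ and SPLIT are
 * possible and a single bit on probs[1] distinguishes them; when both edges
 * are exceeded, SPLIT is implied and nothing is written.
 */
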
static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile, vp9_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

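/*
 * PARTITION_SPLIT recurses over the four quadrants in raster order
 * (top-left, top-right, bottom-left, bottom-right); quadrants starting
 * outside the frame are skipped by the mi_row/mi_col bounds check above.
 */
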
static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile, vp9_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  int mi_row, mi_col;

  set_partition_probs(cm, xd);

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}

static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
              coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                coef_branch_ct[i][j][k][l][m][0],
                coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}

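/*
 * The end-of-block decision is counted separately during tokenization, so
 * the "not EOB" count at node 0 is reconstructed above by subtracting the
 * EOB hits from the total recorded in eob_branch_ct.
 */
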
static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  int stepsize = cpi->sf.coeff_prob_appx_step;

  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP_REDUCED: {
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;

                if (t == PIVOT_NODE) {
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                } else {
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                }

                if (s > 0 && newp != *oldp)
                  u = 1;
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }
    default:
      assert(0);
  }
}

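/*
 * TWO_LOOP does a dry run to price the full update and writes a single skip
 * bit when updating would not pay for itself; ONE_LOOP_REDUCED decides in a
 * single pass, back-filling the per-probability "no update" flags once the
 * first beneficial update is encountered.
 */
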
static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
    vp9_coeff_stats frame_branch_ct[PLANE_TYPES];
    vp9_coeff_probs_model frame_coef_probs[PLANE_TYPES];
    if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
        (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
      vp9_write_bit(w, 0);
    } else {
      build_tree_distribution(cpi, tx_size, frame_branch_ct,
                              frame_coef_probs);
      update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
                               frame_coef_probs);
    }
  }
}

static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}

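/*
 * Each delta is resent only when it changes, as a 6-bit magnitude plus a
 * sign bit; the 0x3F mask means the coded range is effectively [-63, 63].
 */
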
static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(const VP9_COMMON *const cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}

static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}

static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w,
                              FRAME_COUNTS *counts) {
  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vp9_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vp9_wb_write_literal(wb, filter_to_literal[filter], 2);
}

static void fix_interp_filter(VP9_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(const VP9_COMMON *const cm,
                            struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

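/*
 * Tile columns are coded in unary relative to the minimum permitted log2
 * count.  Illustrative example: with min_log2_tile_cols == 0, four tile
 * columns (log2 == 2) are the bits 1, 1, followed by a terminating 0 when
 * still more columns would be allowed.
 */
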
static int get_refresh_mask(VP9_COMP *cpi) {
  if (vp9_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

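/*
 * The refresh mask has one bit per reference slot.  Illustrative example:
 * assuming lst_fb_idx == 0, gld_fb_idx == 1 and alt_fb_idx == 2, refreshing
 * only the last frame yields the mask 0x01, and refreshing all three yields
 * 0x07.
 */
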
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;
  int tile_row, tile_col;
  TOKENEXTRA *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_idx = tile_row * tile_cols + tile_col;
      TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];

      tok_end = cpi->tile_tok[tile_row][tile_col] +
          cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
                  &residual_bc, &tok, tok_end);
      assert(tok == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_display_size(const VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cm, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);

    // Set "found" to 0 for temporal svc and for spatial svc key frame
    if (cpi->use_svc &&
        ((cpi->svc.number_temporal_layers > 1 &&
          cpi->oxcf.rc_mode == VPX_CBR) ||
         (cpi->svc.number_spatial_layers > 1 &&
          cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
         (is_two_pass_svc(cpi) &&
          cpi->svc.encode_empty_frame_state == ENCODING &&
          cpi->svc.layer_context[0].frames_from_key_frame <
              cpi->svc.number_temporal_layers + 1))) {
      found = 0;
    } else if (cfg != NULL) {
      found = cm->width == cfg->y_crop_width &&
              cm->height == cfg->y_crop_height;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cm, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_profile(BITSTREAM_PROFILE profile,
                          struct vp9_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vp9_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vp9_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vp9_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vp9_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}

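/*
 * The profile is transmitted low bit first, which is why the literal values
 * look permuted: PROFILE_1 is written as binary 10 (value 2), PROFILE_2 as
 * 01 (value 1), and PROFILE_3 as 110, whose trailing 0 is a reserved bit.
 */
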
static void write_bitdepth_colorspace_sampling(
    VP9_COMMON *const cm, struct vp9_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > VPX_BITS_8);
    vp9_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
  }
  vp9_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vp9_wb_write_bit(wb, cm->subsampling_x);
      vp9_wb_write_bit(wb, cm->subsampling_y);
      vp9_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vp9_wb_write_bit(wb, 0);  // unused
  }
}

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vp9_wb_write_bit(wb, 0);  // show_existing_frame
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
  } else {
    // In spatial svc, if it's not error_resilient_mode, we need to code all
    // visible frames as invisible, but we keep the show_frame flag so the
    // publisher knows whether the frame is supposed to be visible. So we
    // code the show_frame flag as-is and then code the intra_only bit here.
    // This makes the bitstream incompatible. In the player we will change
    // the show_frame flag to 0, then add a one-byte frame with the
    // show_existing_frame flag set, which tells the decoder which frame to
    // show.
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
        vp9_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm, cpi->td.counts);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, xd, wb);

  write_tile_info(cm, wb);
}

static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc, counts);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      counts->comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  counts->single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  counts->single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                        &counts->mv);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

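/*
 * vp9_pack_bitstream() below assembles the frame as: uncompressed header |
 * 16-bit first-partition size (patched in through saved_wb) | compressed
 * header | tile data, where every tile except the last is preceded by its
 * 4-byte size.
 */
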
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // placeholder; first part size unknown

  uncompressed_hdr_size = vp9_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vp9_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}