/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_dsp/bitwriter_buffer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_ports/system_state.h"

#include "vp10/common/entropy.h"
#include "vp10/common/entropymode.h"
#include "vp10/common/entropymv.h"
#include "vp10/common/mvref_common.h"
#include "vp10/common/pred_common.h"
#include "vp10/common/seg_common.h"
#include "vp10/common/tile_common.h"

#include "vp10/encoder/cost.h"
#include "vp10/encoder/bitstream.h"
#include "vp10/encoder/encodemv.h"
#include "vp10/encoder/mcomp.h"
#include "vp10/encoder/segmentation.h"
#include "vp10/encoder/subexp.h"
#include "vp10/encoder/tokenize.h"
static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
  {62, 6}, {2, 2}};
static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
  {{0, 1}, {2, 2}, {3, 2}};
static const struct vp10_token partition_encodings[PARTITION_TYPES] =
  {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp10_token inter_mode_encodings[INTER_MODES] =
  {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
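
// write_uniform() below emits a quasi-uniform code for a value v in [0, n):
// with l = get_unsigned_bits(n) and m = (1 << l) - n, the first m values
// take l - 1 bits and the remaining n - m values take l bits. For example,
// n == 5 gives l == 3, m == 3: values 0..2 are coded in 2 bits (00, 01, 10)
// and values 3..4 in 3 bits (110, 111).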
static INLINE void write_uniform(vpx_writer *w, int n, int v) {
  int l = get_unsigned_bits(n);
  int m = (1 << l) - n;
  if (l == 0)
    return;
  if (v < m) {
    vpx_write_literal(w, v, l - 1);
  } else {
    vpx_write_literal(w, m + ((v - m) >> 1), l - 1);
    vpx_write_literal(w, (v - m) & 1, 1);
  }
}
static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  assert(is_inter_mode(mode));
  vp10_write_token(w, vp10_inter_mode_tree, probs,
                   &inter_mode_encodings[INTER_OFFSET(mode)]);
}
static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
                                int data, int max) {
  vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
}
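
// prob_diff_update() converts the per-symbol counts for a tree into
// per-node branch counts, then conditionally signals a subexponentially
// coded difference for each of the n - 1 node probabilities
// (vp10_cond_prob_diff_update(), declared in vp10/encoder/subexp.h).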
static void prob_diff_update(const vpx_tree_index *tree,
                             vpx_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vpx_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
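
// The transform size is coded as a truncated unary sequence of binary
// decisions ("larger than 4x4?", "larger than 8x8?", ...), capped at the
// largest transform allowed for the block size, so smaller blocks spend
// fewer bits.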
static void write_selected_tx_size(const VP10_COMMON *cm,
                                   const MACROBLOCKD *xd, vpx_writer *w) {
  TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc->tx_probs);
  vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vpx_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}
static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vpx_writer *w) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vpx_write(w, skip, vp10_get_skip_prob(cm, xd));
    return skip;
  }
}
static void update_skip_probs(VP10_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}
static void update_switchable_interp_probs(VP10_COMMON *cm, vpx_writer *w,
                                           FRAME_COUNTS *counts) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp10_switchable_interp_tree,
                     cm->fc->switchable_interp_prob[j],
                     counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
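
// Each coefficient token is written in up to three parts: the token itself
// (tree coded, with the first one or two nodes skippable via
// skip_eob_node), any extra magnitude bits for category tokens (using the
// probabilities from the vp10_extra_bit table), and finally a raw sign bit.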
static void pack_mb_tokens(vpx_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
                           vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
  TOKENEXTRA *p = *tp;
#if !CONFIG_MISC_FIXES
  (void) tx;
#endif

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp10_token *const a = &vp10_coef_encodings[t];
    int i = 0;
    int v = a->value;
    int n = a->len;
#if CONFIG_VP9_HIGHBITDEPTH
    const vp10_extra_bit *b;
    if (bit_depth == VPX_BITS_12)
      b = &vp10_extra_bits_high12[t];
    else if (bit_depth == VPX_BITS_10)
      b = &vp10_extra_bits_high10[t];
    else
      b = &vp10_extra_bits[t];
#else
    const vp10_extra_bit *const b = &vp10_extra_bits[t];
    (void) bit_depth;
#endif  // CONFIG_VP9_HIGHBITDEPTH

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains.  It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes.  The first treed write takes care of the
    // unconstrained nodes.  The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i);
      vp10_write_tree(w, vp10_coef_con_tree,
                      vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                      v, n - len, 0);
    } else {
      vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;
#if CONFIG_MISC_FIXES
      int skip_bits =
          (b->base_val == CAT6_MIN_VAL) ? TX_SIZES - 1 - tx : 0;
#else
      int skip_bits = 0;
#endif

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;              /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          if (skip_bits) {
            skip_bits--;
            assert(!bb);
          } else {
            vpx_write(w, bb, pb[i >> 1]);
          }
          i = b->tree[i + bb];
        } while (n);
      }

      vpx_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p;
}
static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
                             const struct segmentation_probs *segp,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
}
// This function encodes the reference frame
static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                             vpx_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
               get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vpx_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vpx_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp10_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vpx_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vpx_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}
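
// Writes the full mode info for one inter-frame block, in bitstream order:
// segment id, skip flag, intra/inter flag, transform size, then (for inter
// blocks) reference frames, prediction mode, interpolation filter and, for
// NEWMV, the motion vector differences.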
static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
                                vpx_writer *w) {
  VP10_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
#if CONFIG_MISC_FIXES
  const struct segmentation_probs *const segp = &cm->fc->seg;
#else
  const struct segmentation_probs *const segp = &cm->segp;
#endif
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
      vpx_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segp, segment_id);
    } else {
      write_segment_id(w, seg, segp, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vpx_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter && skip)) {
    write_selected_tx_size(cm, xd, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
    const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

    // If segment skip is not enabled code the mode.
    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp10_get_pred_context_switchable_interp(xd);
      vp10_write_token(w, vp10_switchable_interp_tree,
                       cm->fc->switchable_interp_prob[ctx],
                       &switchable_interp_encodings[mbmi->interp_filter]);
      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                             &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                             nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                         &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                         nmvc, allow_hp);
      }
    }
  }
}
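
// Key-frame counterpart of pack_inter_mode_mvs(): segment id, skip and
// transform size are written the same way, but luma intra modes are coded
// with probabilities conditioned on the above and left neighbors' modes
// (get_y_mode_probs()).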
static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO **mi_8x8, vpx_writer *w) {
  const struct segmentation *const seg = &cm->seg;
#if CONFIG_MISC_FIXES
  const struct segmentation_probs *const segp = &cm->fc->seg;
#else
  const struct segmentation_probs *const segp = &cm->segp;
#endif
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, segp, mbmi->segment_id);

  write_skip(cm, xd, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode,
                     get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(cm, mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]);
}
static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
                          vpx_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;
  int plane;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
                       : m->mbmi.tx_size;
    pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
    assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
    (*tok)++;
  }
}
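
// A partition symbol is only fully coded when all four children fit in the
// frame; at the bottom or right edge, where one dimension is known to
// overflow, a single bit distinguishes the forced split from the forced
// HORZ/VERT choice, and at the corner the split is implied.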
static void write_partition(const VP10_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vpx_prob *const probs = cm->fc->partition_prob[ctx];
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vpx_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vpx_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}
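
// Recursively writes one superblock: the partition type for this block,
// then either the leaf mode info or, for PARTITION_SPLIT, the four
// quadrants at the next smaller block size.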
static void write_modes_sb(VP10_COMP *cpi,
                           const TileInfo *const tile, vpx_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
static void write_modes(VP10_COMP *cpi,
                        const TileInfo *const tile, vpx_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp10_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}
static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
                                    vp10_coeff_stats *coef_branch_ct,
                                    vp10_coeff_probs_model *coef_probs) {
  vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp10_tree_probs_from_distribution(vp10_coef_tree,
                                            coef_branch_ct[i][j][k][l],
                                            coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}
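
// TWO_LOOP mode below does a dry-run pass first, summing the rate savings
// of every candidate probability update against the cost of the per-node
// update flags; the whole update set is skipped with a single bit when the
// net savings are not positive. ONE_LOOP_REDUCED commits in one pass
// instead, back-filling the zero flags once the first update is found.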
static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp10_coeff_stats *frame_branch_ct,
                                     vp10_coeff_probs_model *new_coef_probs) {
  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
  const vpx_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  int stepsize = cpi->sf.coeff_prob_appx_step;

  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp10_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp10_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp10_cost_zero(upd));
                else
                  savings -= (int)(vp10_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vpx_write_bit(bc, 0);
        return;
      }
      vpx_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vpx_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp10_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp10_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp10_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP_REDUCED: {
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;

                if (t == PIVOT_NODE) {
                  s = vp10_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                } else {
                  s = vp10_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                }

                if (s > 0 && newp != *oldp)
                  u = 1;
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vpx_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vpx_write(bc, 0, upd);
                }
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp10_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vpx_write_bit(bc, 0);  // no updates
      }
      return;
    }
    default:
      assert(0);
  }
}
*cpi
, vpx_writer
* w
) {
722 const TX_MODE tx_mode
= cpi
->common
.tx_mode
;
723 const TX_SIZE max_tx_size
= tx_mode_to_biggest_tx_size
[tx_mode
];
725 for (tx_size
= TX_4X4
; tx_size
<= max_tx_size
; ++tx_size
) {
726 vp10_coeff_stats frame_branch_ct
[PLANE_TYPES
];
727 vp10_coeff_probs_model frame_coef_probs
[PLANE_TYPES
];
728 if (cpi
->td
.counts
->tx
.tx_totals
[tx_size
] <= 20 ||
729 (tx_size
>= TX_16X16
&& cpi
->sf
.tx_size_search_method
== USE_TX_8X8
)) {
732 build_tree_distribution(cpi
, tx_size
, frame_branch_ct
,
734 update_coef_probs_common(w
, cpi
, tx_size
, frame_branch_ct
,
static void encode_loopfilter(struct loopfilter *lf,
                              struct vpx_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vpx_wb_write_literal(wb, lf->filter_level, 6);
  vpx_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_FRAMES; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vpx_wb_write_inv_signed_literal(wb, delta, 6);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vpx_wb_write_inv_signed_literal(wb, delta, 6);
        }
      }
    }
  }
}
static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vpx_wb_write_bit(wb, 1);
    vpx_wb_write_inv_signed_literal(wb, delta_q, CONFIG_MISC_FIXES ? 6 : 4);
  } else {
    vpx_wb_write_bit(wb, 0);
  }
}
static void encode_quantization(const VP10_COMMON *const cm,
                                struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}
static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
                                struct vpx_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;
#if !CONFIG_MISC_FIXES
  const struct segmentation_probs *segp = &cm->segp;
#endif

  vpx_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
    vpx_wb_write_bit(wb, seg->update_map);
  } else {
    assert(seg->update_map == 1);
  }
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp10_choose_segmap_coding_method(cm, xd);
#if !CONFIG_MISC_FIXES
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = segp->tree_probs[i];
      const int update = prob != MAX_PROB;
      vpx_wb_write_bit(wb, update);
      if (update)
        vpx_wb_write_literal(wb, prob, 8);
    }
#endif

    // Write out the chosen coding method.
    if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
      vpx_wb_write_bit(wb, seg->temporal_update);
    } else {
      assert(seg->temporal_update == 0);
    }

#if !CONFIG_MISC_FIXES
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = segp->pred_probs[i];
        const int update = prob != MAX_PROB;
        vpx_wb_write_bit(wb, update);
        if (update)
          vpx_wb_write_literal(wb, prob, 8);
      }
    }
#endif
  }

  // Segmentation data
  vpx_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vpx_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = segfeature_active(seg, i, j);
        vpx_wb_write_bit(wb, active);
        if (active) {
          const int data = get_segdata(seg, i, j);
          const int data_max = vp10_seg_feature_data_max(j);

          if (vp10_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vpx_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}
#if CONFIG_MISC_FIXES
static void update_seg_probs(VP10_COMP *cpi, vpx_writer *w) {
  VP10_COMMON *cm = &cpi->common;

  if (!cpi->common.seg.enabled)
    return;

  if (cpi->common.seg.temporal_update) {
    int i;

    for (i = 0; i < PREDICTION_PROBS; i++)
      vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
                                 cm->counts.seg.pred[i]);

    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
                     cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
  } else {
    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
                     cm->counts.seg.tree_total, MAX_SEGMENTS, w);
  }
}
static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
  if (mode != TX_MODE_SELECT)
    vpx_wb_write_literal(wb, mode, 2);
}
#endif
static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
#if !CONFIG_MISC_FIXES
  // Mode
  vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
#endif

  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                   ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                   ct_32x32p[j]);
    }
  }
}
static void write_interp_filter(INTERP_FILTER filter,
                                struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vpx_wb_write_literal(wb, filter, 2);
}
static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}
static void write_tile_info(const VP10_COMMON *const cm,
                            struct vpx_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vpx_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vpx_wb_write_bit(wb, 0);

  // rows
  vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
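
// The refresh mask has one bit per reference slot (REF_FRAMES of them); a
// set bit tells the decoder to overwrite that slot with the current frame.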
static int get_refresh_mask(VP10_COMP *cpi) {
  if (vp10_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}
static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
                           unsigned int *max_tile_sz) {
  VP10_COMMON *const cm = &cpi->common;
  vpx_writer residual_bc;
  int tile_row, tile_col;
  TOKENEXTRA *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  unsigned int max_tile = 0;

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_idx = tile_row * tile_cols + tile_col;
      TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];

      tok_end = cpi->tile_tok[tile_row][tile_col] +
          cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vpx_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
                  &residual_bc, &tok, tok_end);
      assert(tok == tok_end);
      vpx_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        unsigned int tile_sz;

        // size of this tile
        assert(residual_bc.pos > 0);
        tile_sz = residual_bc.pos - CONFIG_MISC_FIXES;
        mem_put_le32(data_ptr + total_size, tile_sz);
        max_tile = max_tile > tile_sz ? max_tile : tile_sz;
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }
  *max_tile_sz = max_tile;

  return total_size;
}
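
// Note on the tile layout produced above: every tile except the last is
// prefixed with its size as a 4-byte little-endian value so tiles can be
// located independently; with CONFIG_MISC_FIXES these markers are later
// shrunk in place by remux_tiles().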
static void write_render_size(const VP10_COMMON *cm,
                              struct vpx_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->render_width ||
                             cm->height != cm->render_height;
  vpx_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vpx_wb_write_literal(wb, cm->render_width - 1, 16);
    vpx_wb_write_literal(wb, cm->render_height - 1, 16);
  }
}
static void write_frame_size(const VP10_COMMON *cm,
                             struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->width - 1, 16);
  vpx_wb_write_literal(wb, cm->height - 1, 16);

  write_render_size(cm, wb);
}
static void write_frame_size_with_refs(VP10_COMP *cpi,
                                       struct vpx_write_bit_buffer *wb) {
  VP10_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);

    if (cfg != NULL) {
      found = cm->width == cfg->y_crop_width &&
              cm->height == cfg->y_crop_height;
#if CONFIG_MISC_FIXES
      found &= cm->render_width == cfg->render_width &&
               cm->render_height == cfg->render_height;
#endif
    }
    vpx_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vpx_wb_write_literal(wb, cm->width - 1, 16);
    vpx_wb_write_literal(wb, cm->height - 1, 16);

#if CONFIG_MISC_FIXES
    write_render_size(cm, wb);
#endif
  }

#if !CONFIG_MISC_FIXES
  write_render_size(cm, wb);
#endif
}
static void write_sync_code(struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
  vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
  vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
}
static void write_profile(BITSTREAM_PROFILE profile,
                          struct vpx_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vpx_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vpx_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vpx_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vpx_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}
static void write_bitdepth_colorspace_sampling(
    VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > VPX_BITS_8);
    vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
  }
  vpx_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    vpx_wb_write_bit(wb, cm->color_range);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vpx_wb_write_bit(wb, cm->subsampling_x);
      vpx_wb_write_bit(wb, cm->subsampling_y);
      vpx_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vpx_wb_write_bit(wb, 0);  // unused
  }
}
static void write_uncompressed_header(VP10_COMP *cpi,
                                      struct vpx_write_bit_buffer *wb) {
  VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  vpx_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vpx_wb_write_bit(wb, 0);  // show_existing_frame
  vpx_wb_write_bit(wb, cm->frame_type);
  vpx_wb_write_bit(wb, cm->show_frame);
  vpx_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
  } else {
    if (!cm->show_frame)
      vpx_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode) {
#if CONFIG_MISC_FIXES
      if (cm->intra_only) {
        vpx_wb_write_bit(wb,
                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
      } else {
        vpx_wb_write_bit(wb,
                         cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
        if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
          vpx_wb_write_bit(wb,
                           cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
      }
#else
      static const int reset_frame_context_conv_tbl[3] = { 0, 2, 3 };

      vpx_wb_write_literal(wb,
          reset_frame_context_conv_tbl[cm->reset_frame_context], 2);
#endif
    }

    if (cm->intra_only) {
      write_sync_code(wb);

#if CONFIG_MISC_FIXES
      write_bitdepth_colorspace_sampling(cm, wb);
#else
      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }
#endif

      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
        vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vpx_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm, cpi->td.counts);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vpx_wb_write_bit(wb,
                     cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF);
#if CONFIG_MISC_FIXES
    if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF)
#endif
      vpx_wb_write_bit(wb, cm->refresh_frame_context !=
                               REFRESH_FRAME_CONTEXT_BACKWARD);
  }

  vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, xd, wb);
#if CONFIG_MISC_FIXES
  if (!cm->seg.enabled && xd->lossless[0])
    cm->tx_mode = TX_4X4;
  else
    write_txfm_mode(cm->tx_mode, wb);
  if (cpi->allow_comp_inter_inter) {
    const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
    const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;

    vpx_wb_write_bit(wb, use_hybrid_pred);
    if (!use_hybrid_pred)
      vpx_wb_write_bit(wb, use_compound_pred);
  }
#endif

  write_tile_info(cm, wb);
}
static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
  VP10_COMMON *const cm = &cpi->common;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vpx_writer header_bc;
  int i;
#if CONFIG_MISC_FIXES
  int j;
#endif

  vpx_start_encode(&header_bc, data);

#if !CONFIG_MISC_FIXES
  if (cpi->td.mb.e_mbd.lossless[0])
    cm->tx_mode = TX_4X4;
  else
    update_txfm_probs(cm, &header_bc, counts);
#else
  update_txfm_probs(cm, &header_bc, counts);
#endif
  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);
#if CONFIG_MISC_FIXES
  update_seg_probs(cpi, &header_bc);

  for (i = 0; i < INTRA_MODES; ++i)
    prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
                     counts->uv_mode[i], INTRA_MODES, &header_bc);

  for (i = 0; i < PARTITION_CONTEXTS; ++i)
    prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
                     counts->partition[i], PARTITION_TYPES, &header_bc);
#endif

  if (frame_is_intra_only(cm)) {
    vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
#if CONFIG_MISC_FIXES
    for (i = 0; i < INTRA_MODES; ++i)
      for (j = 0; j < INTRA_MODES; ++j)
        prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
                         counts->kf_y_mode[i][j], INTRA_MODES, &header_bc);
#endif
  } else {
    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                 counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
#if !CONFIG_MISC_FIXES
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;

      vpx_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vpx_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                       counts->comp_inter[i]);
      }
#else
      if (use_hybrid_pred)
        for (i = 0; i < COMP_INTER_CONTEXTS; i++)
          vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                     counts->comp_inter[i]);
#endif
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                   counts->single_ref[i][0]);
        vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                   counts->single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                   counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

#if !CONFIG_MISC_FIXES
    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);
#endif

    vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                         &counts->mv);
  }

  vpx_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}
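
// encode_tiles() wrote a fixed 4-byte size marker before every non-final
// tile. Once the largest tile size is known, remux_tiles() rewrites the
// markers with the minimal byte width (mag + 1 bytes) and compacts the
// tile data in place.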
#if CONFIG_MISC_FIXES
static int remux_tiles(uint8_t *dest, const int sz,
                       const int n_tiles, const int mag) {
  int rpos = 0, wpos = 0, n;

  for (n = 0; n < n_tiles; n++) {
    int tile_sz;

    if (n == n_tiles - 1) {
      tile_sz = sz - rpos;
    } else {
      tile_sz = mem_get_le32(&dest[rpos]) + 1;
      rpos += 4;
      switch (mag) {
        case 0:
          dest[wpos] = tile_sz - 1;
          break;
        case 1:
          mem_put_le16(&dest[wpos], tile_sz - 1);
          break;
        case 2:
          mem_put_le24(&dest[wpos], tile_sz - 1);
          break;
        case 3:  // remuxing should only happen if mag < 3
        default:
          assert("Invalid value for tile size magnitude" && 0);
      }
      wpos += mag + 1;
    }

    memmove(&dest[wpos], &dest[rpos], tile_sz);
    wpos += tile_sz;
    rpos += tile_sz;
  }

  assert(rpos > wpos);

  return wpos;
}
#endif
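
// Top-level packing: the frame is laid out as the uncompressed header, a
// 2-bit tile-size magnitude (when tiles are present under
// CONFIG_MISC_FIXES) followed by the 16-bit first-partition (compressed
// header) size, then the compressed header and the tile data.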
void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size, data_sz;
  struct vpx_write_bit_buffer wb = {data, 0};
  struct vpx_write_bit_buffer saved_wb;
  unsigned int max_tile;
#if CONFIG_MISC_FIXES
  VP10_COMMON *const cm = &cpi->common;
  const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
  const int have_tiles = n_log2_tiles > 0;
#else
  const int have_tiles = 0;  // we have tiles, but we don't want to write a
                             // tile size marker in the header
#endif

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  // don't know in advance first part. size
  vpx_wb_write_literal(&wb, 0, 16 + have_tiles * 2);

  uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vpx_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;

  data_sz = encode_tiles(cpi, data, &max_tile);
#if CONFIG_MISC_FIXES
  if (max_tile > 0) {
    int mag;
    unsigned int mask;

    // Choose the (tile size) magnitude
    for (mag = 0, mask = 0xff; mag < 4; mag++) {
      if (max_tile <= mask)
        break;
      mask <<= 8;
      mask |= 0xff;
    }
    assert(n_log2_tiles > 0);
    vpx_wb_write_literal(&saved_wb, mag, 2);
    if (mag < 3)
      data_sz = (int)remux_tiles(data, data_sz, 1 << n_log2_tiles, mag);
  } else {
    assert(n_log2_tiles == 0);
  }
#endif
  data += data_sz;

  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  *size = data - dest;
}