/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
14 #include "av1/common/common.h"
15 #include "av1/common/entropy.h"
16 #include "av1/common/entropymode.h"
17 #include "av1/common/entropymv.h"
18 #include "av1/common/mvref_common.h"
19 #include "av1/common/pred_common.h"
20 #include "av1/common/reconinter.h"
22 #include "av1/common/reconintra.h"
23 #endif // CONFIG_EXT_INTRA
24 #include "av1/common/seg_common.h"
25 #include "av1/common/warped_motion.h"
27 #include "av1/decoder/decodeframe.h"
28 #include "av1/decoder/decodemv.h"
30 #include "aom_dsp/aom_dsp_common.h"
33 #include "av1/common/cfl.h"
36 #define ACCT_STR __func__
38 #define DEC_MISMATCH_DEBUG 0
40 static PREDICTION_MODE
read_intra_mode(aom_reader
*r
, aom_cdf_prob
*cdf
) {
41 return (PREDICTION_MODE
)aom_read_symbol(r
, cdf
, INTRA_MODES
, ACCT_STR
);
44 static void read_cdef(AV1_COMMON
*cm
, aom_reader
*r
, MB_MODE_INFO
*const mbmi
,
45 int mi_col
, int mi_row
) {
46 if (cm
->all_lossless
) return;
48 const int m
= ~((1 << (6 - MI_SIZE_LOG2
)) - 1);
49 if (!(mi_col
& (cm
->mib_size
- 1)) &&
50 !(mi_row
& (cm
->mib_size
- 1))) { // Top left?
51 #if CONFIG_EXT_PARTITION
52 cm
->cdef_preset
[0] = cm
->cdef_preset
[1] = cm
->cdef_preset
[2] =
53 cm
->cdef_preset
[3] = -1;
58 // Read CDEF param at first a non-skip coding block
59 #if CONFIG_EXT_PARTITION
60 const int mask
= 1 << (6 - MI_SIZE_LOG2
);
61 const int index
= cm
->sb_size
== BLOCK_128X128
62 ? !!(mi_col
& mask
) + 2 * !!(mi_row
& mask
)
64 cm
->mi_grid_visible
[(mi_row
& m
) * cm
->mi_stride
+ (mi_col
& m
)]
65 ->mbmi
.cdef_strength
= cm
->cdef_preset
[index
] =
66 cm
->cdef_preset
[index
] == -1 && !mbmi
->skip
67 ? aom_read_literal(r
, cm
->cdef_bits
, ACCT_STR
)
68 : cm
->cdef_preset
[index
];
70 cm
->mi_grid_visible
[(mi_row
& m
) * cm
->mi_stride
+ (mi_col
& m
)]
71 ->mbmi
.cdef_strength
= cm
->cdef_preset
=
72 cm
->cdef_preset
== -1 && !mbmi
->skip
73 ? aom_read_literal(r
, cm
->cdef_bits
, ACCT_STR
)
78 static int read_delta_qindex(AV1_COMMON
*cm
, MACROBLOCKD
*xd
, aom_reader
*r
,
79 MB_MODE_INFO
*const mbmi
, int mi_col
, int mi_row
) {
80 FRAME_COUNTS
*counts
= xd
->counts
;
81 int sign
, abs
, reduced_delta_qindex
= 0;
82 BLOCK_SIZE bsize
= mbmi
->sb_type
;
83 const int b_col
= mi_col
& (cm
->mib_size
- 1);
84 const int b_row
= mi_row
& (cm
->mib_size
- 1);
85 const int read_delta_q_flag
= (b_col
== 0 && b_row
== 0);
86 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
89 if ((bsize
!= cm
->sb_size
|| mbmi
->skip
== 0) && read_delta_q_flag
) {
90 abs
= aom_read_symbol(r
, ec_ctx
->delta_q_cdf
, DELTA_Q_PROBS
+ 1, ACCT_STR
);
91 const int smallval
= (abs
< DELTA_Q_SMALL
);
93 for (int i
= 0; i
< abs
; ++i
) counts
->delta_q
[i
][1]++;
94 if (smallval
) counts
->delta_q
[abs
][0]++;
98 const int rem_bits
= aom_read_literal(r
, 3, ACCT_STR
) + 1;
99 const int thr
= (1 << rem_bits
) + 1;
100 abs
= aom_read_literal(r
, rem_bits
, ACCT_STR
) + thr
;
104 sign
= aom_read_bit(r
, ACCT_STR
);
109 reduced_delta_qindex
= sign
? -abs
: abs
;
111 return reduced_delta_qindex
;
113 #if CONFIG_EXT_DELTA_Q
114 static int read_delta_lflevel(AV1_COMMON
*cm
, MACROBLOCKD
*xd
, aom_reader
*r
,
115 #if CONFIG_LOOPFILTER_LEVEL
118 MB_MODE_INFO
*const mbmi
, int mi_col
,
120 FRAME_COUNTS
*counts
= xd
->counts
;
121 int sign
, abs
, reduced_delta_lflevel
= 0;
122 BLOCK_SIZE bsize
= mbmi
->sb_type
;
123 const int b_col
= mi_col
& (cm
->mib_size
- 1);
124 const int b_row
= mi_row
& (cm
->mib_size
- 1);
125 const int read_delta_lf_flag
= (b_col
== 0 && b_row
== 0);
126 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
129 if ((bsize
!= cm
->sb_size
|| mbmi
->skip
== 0) && read_delta_lf_flag
) {
130 #if CONFIG_LOOPFILTER_LEVEL
131 if (cm
->delta_lf_multi
) {
132 assert(lf_id
>= 0 && lf_id
< FRAME_LF_COUNT
);
133 abs
= aom_read_symbol(r
, ec_ctx
->delta_lf_multi_cdf
[lf_id
],
134 DELTA_LF_PROBS
+ 1, ACCT_STR
);
136 abs
= aom_read_symbol(r
, ec_ctx
->delta_lf_cdf
, DELTA_LF_PROBS
+ 1,
141 aom_read_symbol(r
, ec_ctx
->delta_lf_cdf
, DELTA_LF_PROBS
+ 1, ACCT_STR
);
142 #endif // CONFIG_LOOPFILTER_LEVEL
143 const int smallval
= (abs
< DELTA_LF_SMALL
);
145 #if CONFIG_LOOPFILTER_LEVEL
146 if (cm
->delta_lf_multi
) {
147 for (int i
= 0; i
< abs
; ++i
) counts
->delta_lf_multi
[lf_id
][i
][1]++;
148 if (smallval
) counts
->delta_lf_multi
[lf_id
][abs
][0]++;
150 for (int i
= 0; i
< abs
; ++i
) counts
->delta_lf
[i
][1]++;
151 if (smallval
) counts
->delta_lf
[abs
][0]++;
154 for (int i
= 0; i
< abs
; ++i
) counts
->delta_lf
[i
][1]++;
155 if (smallval
) counts
->delta_lf
[abs
][0]++;
156 #endif // CONFIG_LOOPFILTER_LEVEL
159 const int rem_bits
= aom_read_literal(r
, 3, ACCT_STR
) + 1;
160 const int thr
= (1 << rem_bits
) + 1;
161 abs
= aom_read_literal(r
, rem_bits
, ACCT_STR
) + thr
;
165 sign
= aom_read_bit(r
, ACCT_STR
);
170 reduced_delta_lflevel
= sign
? -abs
: abs
;
172 return reduced_delta_lflevel
;
176 static UV_PREDICTION_MODE
read_intra_mode_uv(FRAME_CONTEXT
*ec_ctx
,
178 PREDICTION_MODE y_mode
) {
179 const UV_PREDICTION_MODE uv_mode
=
181 aom_read_symbol(r
, ec_ctx
->uv_mode_cdf
[y_mode
], UV_INTRA_MODES
, ACCT_STR
);
183 read_intra_mode(r
, ec_ctx
->uv_mode_cdf
[y_mode
]);
189 static int read_cfl_alphas(FRAME_CONTEXT
*const ec_ctx
, aom_reader
*r
,
191 const int joint_sign
=
192 aom_read_symbol(r
, ec_ctx
->cfl_sign_cdf
, CFL_JOINT_SIGNS
, "cfl:signs");
194 // Magnitudes are only coded for nonzero values
195 if (CFL_SIGN_U(joint_sign
) != CFL_SIGN_ZERO
) {
196 aom_cdf_prob
*cdf_u
= ec_ctx
->cfl_alpha_cdf
[CFL_CONTEXT_U(joint_sign
)];
197 idx
= aom_read_symbol(r
, cdf_u
, CFL_ALPHABET_SIZE
, "cfl:alpha_u")
198 << CFL_ALPHABET_SIZE_LOG2
;
200 if (CFL_SIGN_V(joint_sign
) != CFL_SIGN_ZERO
) {
201 aom_cdf_prob
*cdf_v
= ec_ctx
->cfl_alpha_cdf
[CFL_CONTEXT_V(joint_sign
)];
202 idx
+= aom_read_symbol(r
, cdf_v
, CFL_ALPHABET_SIZE
, "cfl:alpha_v");
204 *signs_out
= joint_sign
;
209 static INTERINTRA_MODE
read_interintra_mode(MACROBLOCKD
*xd
, aom_reader
*r
,
211 const INTERINTRA_MODE ii_mode
= (INTERINTRA_MODE
)aom_read_symbol(
212 r
, xd
->tile_ctx
->interintra_mode_cdf
[size_group
], INTERINTRA_MODES
,
214 FRAME_COUNTS
*counts
= xd
->counts
;
215 if (counts
) ++counts
->interintra_mode
[size_group
][ii_mode
];
219 static PREDICTION_MODE
read_inter_mode(FRAME_CONTEXT
*ec_ctx
, MACROBLOCKD
*xd
,
220 aom_reader
*r
, int16_t ctx
) {
221 FRAME_COUNTS
*counts
= xd
->counts
;
222 int16_t mode_ctx
= ctx
& NEWMV_CTX_MASK
;
223 int is_newmv
, is_zeromv
, is_refmv
;
224 is_newmv
= aom_read_symbol(r
, ec_ctx
->newmv_cdf
[mode_ctx
], 2, ACCT_STR
) == 0;
226 if (counts
) ++counts
->newmv_mode
[mode_ctx
][0];
229 if (counts
) ++counts
->newmv_mode
[mode_ctx
][1];
230 mode_ctx
= (ctx
>> GLOBALMV_OFFSET
) & GLOBALMV_CTX_MASK
;
232 aom_read_symbol(r
, ec_ctx
->zeromv_cdf
[mode_ctx
], 2, ACCT_STR
) == 0;
234 if (counts
) ++counts
->zeromv_mode
[mode_ctx
][0];
237 if (counts
) ++counts
->zeromv_mode
[mode_ctx
][1];
238 mode_ctx
= (ctx
>> REFMV_OFFSET
) & REFMV_CTX_MASK
;
239 if (ctx
& (1 << SKIP_NEARESTMV_OFFSET
)) mode_ctx
= 6;
240 if (ctx
& (1 << SKIP_NEARMV_OFFSET
)) mode_ctx
= 7;
241 if (ctx
& (1 << SKIP_NEARESTMV_SUB8X8_OFFSET
)) mode_ctx
= 8;
242 is_refmv
= aom_read_symbol(r
, ec_ctx
->refmv_cdf
[mode_ctx
], 2, ACCT_STR
) == 0;
244 if (counts
) ++counts
->refmv_mode
[mode_ctx
][0];
247 if (counts
) ++counts
->refmv_mode
[mode_ctx
][1];
252 static void read_drl_idx(FRAME_CONTEXT
*ec_ctx
, MACROBLOCKD
*xd
,
253 MB_MODE_INFO
*mbmi
, aom_reader
*r
) {
254 uint8_t ref_frame_type
= av1_ref_frame_type(mbmi
->ref_frame
);
255 mbmi
->ref_mv_idx
= 0;
256 if (mbmi
->mode
== NEWMV
|| mbmi
->mode
== NEW_NEWMV
) {
257 for (int idx
= 0; idx
< 2; ++idx
) {
258 if (xd
->ref_mv_count
[ref_frame_type
] > idx
+ 1) {
259 uint8_t drl_ctx
= av1_drl_ctx(xd
->ref_mv_stack
[ref_frame_type
], idx
);
260 int drl_idx
= aom_read_symbol(r
, ec_ctx
->drl_cdf
[drl_ctx
], 2, ACCT_STR
);
261 mbmi
->ref_mv_idx
= idx
+ drl_idx
;
262 if (xd
->counts
) ++xd
->counts
->drl_mode
[drl_ctx
][drl_idx
];
263 if (!drl_idx
) return;
267 if (have_nearmv_in_inter_mode(mbmi
->mode
)) {
268 // Offset the NEARESTMV mode.
269 // TODO(jingning): Unify the two syntax decoding loops after the NEARESTMV
270 // mode is factored in.
271 for (int idx
= 1; idx
< 3; ++idx
) {
272 if (xd
->ref_mv_count
[ref_frame_type
] > idx
+ 1) {
273 uint8_t drl_ctx
= av1_drl_ctx(xd
->ref_mv_stack
[ref_frame_type
], idx
);
274 int drl_idx
= aom_read_symbol(r
, ec_ctx
->drl_cdf
[drl_ctx
], 2, ACCT_STR
);
275 mbmi
->ref_mv_idx
= idx
+ drl_idx
- 1;
276 if (xd
->counts
) ++xd
->counts
->drl_mode
[drl_ctx
][drl_idx
];
277 if (!drl_idx
) return;
283 static MOTION_MODE
read_motion_mode(MACROBLOCKD
*xd
, MODE_INFO
*mi
,
285 MB_MODE_INFO
*mbmi
= &mi
->mbmi
;
288 if (mbmi
->skip_mode
) return SIMPLE_TRANSLATION
;
289 #endif // CONFIG_EXT_SKIP
291 const MOTION_MODE last_motion_mode_allowed
=
292 motion_mode_allowed(xd
->global_motion
, xd
, mi
);
294 FRAME_COUNTS
*counts
= xd
->counts
;
296 if (last_motion_mode_allowed
== SIMPLE_TRANSLATION
) return SIMPLE_TRANSLATION
;
298 if (last_motion_mode_allowed
== OBMC_CAUSAL
) {
300 aom_read_symbol(r
, xd
->tile_ctx
->obmc_cdf
[mbmi
->sb_type
], 2, ACCT_STR
);
301 if (counts
) ++counts
->obmc
[mbmi
->sb_type
][motion_mode
];
302 return (MOTION_MODE
)(SIMPLE_TRANSLATION
+ motion_mode
);
305 aom_read_symbol(r
, xd
->tile_ctx
->motion_mode_cdf
[mbmi
->sb_type
],
306 MOTION_MODES
, ACCT_STR
);
307 if (counts
) ++counts
->motion_mode
[mbmi
->sb_type
][motion_mode
];
308 return (MOTION_MODE
)(SIMPLE_TRANSLATION
+ motion_mode
);
312 static PREDICTION_MODE
read_inter_compound_mode(AV1_COMMON
*cm
, MACROBLOCKD
*xd
,
313 aom_reader
*r
, int16_t ctx
) {
316 aom_read_symbol(r
, xd
->tile_ctx
->inter_compound_mode_cdf
[ctx
],
317 INTER_COMPOUND_MODES
, ACCT_STR
);
318 FRAME_COUNTS
*counts
= xd
->counts
;
319 if (counts
) ++counts
->inter_compound_mode
[ctx
][mode
];
320 assert(is_inter_compound_mode(NEAREST_NEARESTMV
+ mode
));
321 return NEAREST_NEARESTMV
+ mode
;
324 #if CONFIG_SPATIAL_SEGMENTATION
// Inverse of the encoder's signed-difference interleaving: maps the coded
// value `diff` back to a segment id in [0, max), given the predicted id
// `ref`. Small distances from the prediction receive small codes; odd codes
// are offsets above the prediction, even codes below.
static int neg_deinterleave(int diff, int ref, int max) {
  if (!ref) return diff;
  if (ref >= (max - 1)) return max - diff - 1;
  if (2 * ref < max) {
    if (diff <= 2 * ref) {
      if (diff & 1)
        return ref + ((diff + 1) >> 1);
      else
        return ref - (diff >> 1);
    }
    // Beyond the symmetric window the code is the value itself.
    return diff;
  } else {
    if (diff <= 2 * (max - ref - 1)) {
      if (diff & 1)
        return ref + ((diff + 1) >> 1);
      else
        return ref - (diff >> 1);
    }
    return max - (diff + 1);
  }
}
347 static int read_segment_id(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
348 int mi_row
, int mi_col
, aom_reader
*r
, int skip
) {
349 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
350 struct segmentation_probs
*const segp
= &ec_ctx
->seg
;
351 int prev_ul
= 0; /* Top left segment_id */
352 int prev_l
= 0; /* Current left segment_id */
353 int prev_u
= 0; /* Current top segment_id */
355 MODE_INFO
*const mi
= cm
->mi
+ mi_row
* cm
->mi_stride
+ mi_col
;
356 int tinfo
= mi
->mbmi
.boundary_info
;
357 int above
= (!(tinfo
& TILE_ABOVE_BOUNDARY
)) && ((mi_row
- 1) >= 0);
358 int left
= (!(tinfo
& TILE_LEFT_BOUNDARY
)) && ((mi_col
- 1) >= 0);
361 prev_ul
= get_segment_id(cm
, cm
->current_frame_seg_map
, BLOCK_4X4
,
362 mi_row
- 1, mi_col
- 1);
365 prev_u
= get_segment_id(cm
, cm
->current_frame_seg_map
, BLOCK_4X4
,
366 mi_row
- 1, mi_col
- 0);
369 prev_l
= get_segment_id(cm
, cm
->current_frame_seg_map
, BLOCK_4X4
,
370 mi_row
- 0, mi_col
- 1);
372 int cdf_num
= pick_spatial_seg_cdf(prev_ul
, prev_u
, prev_l
);
373 int pred
= pick_spatial_seg_pred(prev_ul
, prev_u
, prev_l
);
375 if (skip
) return pred
;
377 aom_cdf_prob
*pred_cdf
= segp
->spatial_pred_seg_cdf
[cdf_num
];
378 int coded_id
= aom_read_symbol(r
, pred_cdf
, 8, ACCT_STR
);
380 int segment_id
= neg_deinterleave(coded_id
, pred
, cm
->last_active_segid
+ 1);
382 assert(segment_id
>= 0 && segment_id
<= cm
->last_active_segid
);
387 static int read_segment_id(aom_reader
*r
, struct segmentation_probs
*segp
) {
388 return aom_read_symbol(r
, segp
->tree_cdf
, MAX_SEGMENTS
, ACCT_STR
);
392 static void read_tx_size_vartx(AV1_COMMON
*cm
, MACROBLOCKD
*xd
,
393 MB_MODE_INFO
*mbmi
, FRAME_COUNTS
*counts
,
394 TX_SIZE tx_size
, int depth
, int blk_row
,
395 int blk_col
, aom_reader
*r
) {
396 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
399 const int tx_row
= blk_row
>> 1;
400 const int tx_col
= blk_col
>> 1;
401 const int max_blocks_high
= max_block_high(xd
, mbmi
->sb_type
, 0);
402 const int max_blocks_wide
= max_block_wide(xd
, mbmi
->sb_type
, 0);
403 int ctx
= txfm_partition_context(xd
->above_txfm_context
+ blk_col
,
404 xd
->left_txfm_context
+ blk_row
,
405 mbmi
->sb_type
, tx_size
);
406 TX_SIZE(*const inter_tx_size
)
408 (TX_SIZE(*)[MAX_MIB_SIZE
]) & mbmi
->inter_tx_size
[tx_row
][tx_col
];
409 if (blk_row
>= max_blocks_high
|| blk_col
>= max_blocks_wide
) return;
410 assert(tx_size
> TX_4X4
);
412 if (depth
== MAX_VARTX_DEPTH
) {
414 inter_tx_size
[0][0] = tx_size
;
415 for (idy
= 0; idy
< AOMMAX(1, tx_size_high_unit
[tx_size
] / 2); ++idy
)
416 for (idx
= 0; idx
< AOMMAX(1, tx_size_wide_unit
[tx_size
] / 2); ++idx
)
417 inter_tx_size
[idy
][idx
] = tx_size
;
418 mbmi
->tx_size
= tx_size
;
419 mbmi
->min_tx_size
= TXSIZEMIN(mbmi
->min_tx_size
, tx_size
);
420 txfm_partition_update(xd
->above_txfm_context
+ blk_col
,
421 xd
->left_txfm_context
+ blk_row
, tx_size
, tx_size
);
425 is_split
= aom_read_symbol(r
, ec_ctx
->txfm_partition_cdf
[ctx
], 2, ACCT_STR
);
428 const TX_SIZE sub_txs
= sub_tx_size_map
[1][tx_size
];
429 const int bsw
= tx_size_wide_unit
[sub_txs
];
430 const int bsh
= tx_size_high_unit
[sub_txs
];
432 if (counts
) ++counts
->txfm_partition
[ctx
][1];
434 if (sub_txs
== TX_4X4
) {
436 inter_tx_size
[0][0] = sub_txs
;
437 for (idy
= 0; idy
< AOMMAX(1, tx_size_high_unit
[tx_size
] / 2); ++idy
)
438 for (idx
= 0; idx
< AOMMAX(1, tx_size_wide_unit
[tx_size
] / 2); ++idx
)
439 inter_tx_size
[idy
][idx
] = inter_tx_size
[0][0];
440 mbmi
->tx_size
= sub_txs
;
441 mbmi
->min_tx_size
= mbmi
->tx_size
;
442 txfm_partition_update(xd
->above_txfm_context
+ blk_col
,
443 xd
->left_txfm_context
+ blk_row
, sub_txs
, tx_size
);
447 assert(bsw
> 0 && bsh
> 0);
448 for (int row
= 0; row
< tx_size_high_unit
[tx_size
]; row
+= bsh
) {
449 for (int col
= 0; col
< tx_size_wide_unit
[tx_size
]; col
+= bsw
) {
450 int offsetr
= blk_row
+ row
;
451 int offsetc
= blk_col
+ col
;
452 read_tx_size_vartx(cm
, xd
, mbmi
, counts
, sub_txs
, depth
+ 1, offsetr
,
458 inter_tx_size
[0][0] = tx_size
;
459 for (idy
= 0; idy
< AOMMAX(1, tx_size_high_unit
[tx_size
] / 2); ++idy
)
460 for (idx
= 0; idx
< AOMMAX(1, tx_size_wide_unit
[tx_size
] / 2); ++idx
)
461 inter_tx_size
[idy
][idx
] = tx_size
;
462 mbmi
->tx_size
= tx_size
;
463 mbmi
->min_tx_size
= TXSIZEMIN(mbmi
->min_tx_size
, tx_size
);
464 if (counts
) ++counts
->txfm_partition
[ctx
][0];
465 txfm_partition_update(xd
->above_txfm_context
+ blk_col
,
466 xd
->left_txfm_context
+ blk_row
, tx_size
, tx_size
);
470 static TX_SIZE
read_selected_tx_size(AV1_COMMON
*cm
, MACROBLOCKD
*xd
,
471 int is_inter
, aom_reader
*r
) {
472 // TODO(debargha): Clean up the logic here. This function should only
473 // be called for intra.
474 const BLOCK_SIZE bsize
= xd
->mi
[0]->mbmi
.sb_type
;
475 const int32_t tx_size_cat
= bsize_to_tx_size_cat(bsize
, is_inter
);
476 const int max_depths
= bsize_to_max_depth(bsize
, 0);
477 const int ctx
= get_tx_size_context(xd
);
478 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
481 const int depth
= aom_read_symbol(r
, ec_ctx
->tx_size_cdf
[tx_size_cat
][ctx
],
482 max_depths
+ 1, ACCT_STR
);
483 assert(depth
>= 0 && depth
<= max_depths
);
484 const TX_SIZE tx_size
= depth_to_tx_size(depth
, bsize
, 0);
488 static TX_SIZE
read_tx_size(AV1_COMMON
*cm
, MACROBLOCKD
*xd
, int is_inter
,
489 int allow_select_inter
, aom_reader
*r
) {
490 const TX_MODE tx_mode
= cm
->tx_mode
;
491 const BLOCK_SIZE bsize
= xd
->mi
[0]->mbmi
.sb_type
;
492 if (xd
->lossless
[xd
->mi
[0]->mbmi
.segment_id
]) return TX_4X4
;
494 if (block_signals_txsize(bsize
)) {
495 if ((!is_inter
|| allow_select_inter
) && tx_mode
== TX_MODE_SELECT
) {
496 const TX_SIZE coded_tx_size
= read_selected_tx_size(cm
, xd
, is_inter
, r
);
497 return coded_tx_size
;
499 return tx_size_from_tx_mode(bsize
, tx_mode
, is_inter
);
502 assert(IMPLIES(tx_mode
== ONLY_4X4
, bsize
== BLOCK_4X4
));
503 return get_max_rect_tx_size(bsize
, is_inter
);
507 static int dec_get_segment_id(const AV1_COMMON
*cm
, const uint8_t *segment_ids
,
508 int mi_offset
, int x_mis
, int y_mis
) {
509 int segment_id
= INT_MAX
;
511 for (int y
= 0; y
< y_mis
; y
++)
512 for (int x
= 0; x
< x_mis
; x
++)
514 AOMMIN(segment_id
, segment_ids
[mi_offset
+ y
* cm
->mi_cols
+ x
]);
516 assert(segment_id
>= 0 && segment_id
< MAX_SEGMENTS
);
520 static void set_segment_id(AV1_COMMON
*cm
, int mi_offset
, int x_mis
, int y_mis
,
522 assert(segment_id
>= 0 && segment_id
< MAX_SEGMENTS
);
524 for (int y
= 0; y
< y_mis
; y
++)
525 for (int x
= 0; x
< x_mis
; x
++)
526 cm
->current_frame_seg_map
[mi_offset
+ y
* cm
->mi_cols
+ x
] = segment_id
;
529 static int read_intra_segment_id(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
530 MB_MODE_INFO
*const mbmi
, int mi_row
,
531 int mi_col
, int bsize
, int preskip
,
533 struct segmentation
*const seg
= &cm
->seg
;
534 const int mi_offset
= mi_row
* cm
->mi_cols
+ mi_col
;
535 const int bw
= mi_size_wide
[bsize
];
536 const int bh
= mi_size_high
[bsize
];
537 const int x_mis
= AOMMIN(cm
->mi_cols
- mi_col
, bw
);
538 const int y_mis
= AOMMIN(cm
->mi_rows
- mi_row
, bh
);
540 if (!seg
->enabled
) return 0; // Default for disabled segmentation
542 assert(seg
->update_map
&& !seg
->temporal_update
);
544 #if CONFIG_SPATIAL_SEGMENTATION
546 if (!cm
->preskip_segid
) return 0;
548 if (cm
->preskip_segid
) return mbmi
->segment_id
;
550 const int segment_id
=
551 read_segment_id(cm
, xd
, mi_row
, mi_col
, r
, preskip
? 0 : mbmi
->skip
);
553 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
556 const int segment_id
= read_segment_id(r
, &ec_ctx
->seg
);
558 FRAME_COUNTS
*counts
= xd
->counts
;
559 if (counts
) ++counts
->seg
.tree_total
[segment_id
];
560 set_segment_id(cm
, mi_offset
, x_mis
, y_mis
, segment_id
);
564 static void copy_segment_id(const AV1_COMMON
*cm
,
565 const uint8_t *last_segment_ids
,
566 uint8_t *current_segment_ids
, int mi_offset
,
567 int x_mis
, int y_mis
) {
568 for (int y
= 0; y
< y_mis
; y
++)
569 for (int x
= 0; x
< x_mis
; x
++)
570 current_segment_ids
[mi_offset
+ y
* cm
->mi_cols
+ x
] =
571 last_segment_ids
? last_segment_ids
[mi_offset
+ y
* cm
->mi_cols
+ x
]
575 static int read_inter_segment_id(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
576 int mi_row
, int mi_col
, int preskip
,
578 struct segmentation
*const seg
= &cm
->seg
;
579 FRAME_COUNTS
*counts
= xd
->counts
;
580 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
581 struct segmentation_probs
*const segp
= &ec_ctx
->seg
;
583 MB_MODE_INFO
*const mbmi
= &xd
->mi
[0]->mbmi
;
584 int predicted_segment_id
, segment_id
;
585 const int mi_offset
= mi_row
* cm
->mi_cols
+ mi_col
;
586 const int bw
= mi_size_wide
[mbmi
->sb_type
];
587 const int bh
= mi_size_high
[mbmi
->sb_type
];
589 // TODO(slavarnway): move x_mis, y_mis into xd ?????
590 const int x_mis
= AOMMIN(cm
->mi_cols
- mi_col
, bw
);
591 const int y_mis
= AOMMIN(cm
->mi_rows
- mi_row
, bh
);
593 if (!seg
->enabled
) return 0; // Default for disabled segmentation
595 predicted_segment_id
= cm
->last_frame_seg_map
596 ? dec_get_segment_id(cm
, cm
->last_frame_seg_map
,
597 mi_offset
, x_mis
, y_mis
)
600 if (!seg
->update_map
) {
601 copy_segment_id(cm
, cm
->last_frame_seg_map
, cm
->current_frame_seg_map
,
602 mi_offset
, x_mis
, y_mis
);
603 return predicted_segment_id
;
606 #if CONFIG_SPATIAL_SEGMENTATION
608 if (!cm
->preskip_segid
) return 0;
610 if (cm
->preskip_segid
) return mbmi
->segment_id
;
612 if (seg
->temporal_update
) {
613 const int ctx
= av1_get_pred_context_seg_id(xd
);
614 mbmi
->seg_id_predicted
= 0;
615 if (counts
) ++counts
->seg
.pred
[ctx
][mbmi
->seg_id_predicted
];
617 segment_id
= read_segment_id(cm
, xd
, mi_row
, mi_col
, r
, 0);
618 if (counts
) ++counts
->seg
.tree_total
[segment_id
];
619 set_segment_id(cm
, mi_offset
, x_mis
, y_mis
, segment_id
);
625 if (seg
->temporal_update
) {
626 const int ctx
= av1_get_pred_context_seg_id(xd
);
627 aom_cdf_prob
*pred_cdf
= segp
->pred_cdf
[ctx
];
628 mbmi
->seg_id_predicted
= aom_read_symbol(r
, pred_cdf
, 2, ACCT_STR
);
629 if (counts
) ++counts
->seg
.pred
[ctx
][mbmi
->seg_id_predicted
];
630 if (mbmi
->seg_id_predicted
) {
631 segment_id
= predicted_segment_id
;
633 #if CONFIG_SPATIAL_SEGMENTATION
634 segment_id
= read_segment_id(cm
, xd
, mi_row
, mi_col
, r
, 0);
636 segment_id
= read_segment_id(r
, segp
);
638 if (counts
) ++counts
->seg
.tree_mispred
[segment_id
];
641 #if CONFIG_SPATIAL_SEGMENTATION
642 segment_id
= read_segment_id(cm
, xd
, mi_row
, mi_col
, r
, 0);
644 segment_id
= read_segment_id(r
, segp
);
646 if (counts
) ++counts
->seg
.tree_total
[segment_id
];
648 set_segment_id(cm
, mi_offset
, x_mis
, y_mis
, segment_id
);
653 static int read_skip_mode(AV1_COMMON
*cm
, const MACROBLOCKD
*xd
, int segment_id
,
655 if (!cm
->skip_mode_flag
) return 0;
657 if (segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_SKIP
)) {
658 // TODO(zoeliu): To revisit the handling of this scenario.
662 if (!is_comp_ref_allowed(xd
->mi
[0]->mbmi
.sb_type
)) return 0;
664 const int ctx
= av1_get_skip_mode_context(xd
);
665 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
666 const int skip_mode
=
667 aom_read_symbol(r
, ec_ctx
->skip_mode_cdfs
[ctx
], 2, ACCT_STR
);
668 FRAME_COUNTS
*counts
= xd
->counts
;
669 if (counts
) ++counts
->skip_mode
[ctx
][skip_mode
];
673 #endif // CONFIG_EXT_SKIP
675 static int read_skip(AV1_COMMON
*cm
, const MACROBLOCKD
*xd
, int segment_id
,
677 if (segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_SKIP
)) {
680 const int ctx
= av1_get_skip_context(xd
);
681 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
682 const int skip
= aom_read_symbol(r
, ec_ctx
->skip_cdfs
[ctx
], 2, ACCT_STR
);
683 FRAME_COUNTS
*counts
= xd
->counts
;
684 if (counts
) ++counts
->skip
[ctx
][skip
];
689 // Merge the sorted list of cached colors(cached_colors[0...n_cached_colors-1])
690 // and the sorted list of transmitted colors(colors[n_cached_colors...n-1]) into
691 // one single sorted list(colors[...]).
// Merges the sorted list of cached colors (cached_colors[0..n_cached-1]) and
// the sorted list of transmitted colors (colors[n_cached..n_colors-1]) into
// a single sorted list in colors[0..n_colors-1], in place. The in-place
// write is safe because the write index never passes the transmitted read
// index (trans_idx - i == n_cached_colors - cache_idx >= 0).
static void merge_colors(uint16_t *colors, uint16_t *cached_colors,
                         int n_colors, int n_cached_colors) {
  if (n_cached_colors == 0) return;
  int cache_idx = 0, trans_idx = n_cached_colors;
  for (int i = 0; i < n_colors; ++i) {
    if (cache_idx < n_cached_colors &&
        (trans_idx >= n_colors ||
         cached_colors[cache_idx] <= colors[trans_idx])) {
      colors[i] = cached_colors[cache_idx++];
    } else {
      assert(trans_idx < n_colors);
      colors[i] = colors[trans_idx++];
    }
  }
}
708 static void read_palette_colors_y(MACROBLOCKD
*const xd
, int bit_depth
,
709 PALETTE_MODE_INFO
*const pmi
, aom_reader
*r
) {
710 uint16_t color_cache
[2 * PALETTE_MAX_SIZE
];
711 uint16_t cached_colors
[PALETTE_MAX_SIZE
];
712 const int n_cache
= av1_get_palette_cache(xd
, 0, color_cache
);
713 const int n
= pmi
->palette_size
[0];
715 for (int i
= 0; i
< n_cache
&& idx
< n
; ++i
)
716 if (aom_read_bit(r
, ACCT_STR
)) cached_colors
[idx
++] = color_cache
[i
];
718 const int n_cached_colors
= idx
;
719 pmi
->palette_colors
[idx
++] = aom_read_literal(r
, bit_depth
, ACCT_STR
);
721 const int min_bits
= bit_depth
- 3;
722 int bits
= min_bits
+ aom_read_literal(r
, 2, ACCT_STR
);
723 int range
= (1 << bit_depth
) - pmi
->palette_colors
[idx
- 1] - 1;
724 for (; idx
< n
; ++idx
) {
726 const int delta
= aom_read_literal(r
, bits
, ACCT_STR
) + 1;
727 pmi
->palette_colors
[idx
] = clamp(pmi
->palette_colors
[idx
- 1] + delta
,
728 0, (1 << bit_depth
) - 1);
729 range
-= (pmi
->palette_colors
[idx
] - pmi
->palette_colors
[idx
- 1]);
730 bits
= AOMMIN(bits
, av1_ceil_log2(range
));
733 merge_colors(pmi
->palette_colors
, cached_colors
, n
, n_cached_colors
);
735 memcpy(pmi
->palette_colors
, cached_colors
, n
* sizeof(cached_colors
[0]));
739 static void read_palette_colors_uv(MACROBLOCKD
*const xd
, int bit_depth
,
740 PALETTE_MODE_INFO
*const pmi
,
742 const int n
= pmi
->palette_size
[1];
744 uint16_t color_cache
[2 * PALETTE_MAX_SIZE
];
745 uint16_t cached_colors
[PALETTE_MAX_SIZE
];
746 const int n_cache
= av1_get_palette_cache(xd
, 1, color_cache
);
748 for (int i
= 0; i
< n_cache
&& idx
< n
; ++i
)
749 if (aom_read_bit(r
, ACCT_STR
)) cached_colors
[idx
++] = color_cache
[i
];
751 const int n_cached_colors
= idx
;
752 idx
+= PALETTE_MAX_SIZE
;
753 pmi
->palette_colors
[idx
++] = aom_read_literal(r
, bit_depth
, ACCT_STR
);
754 if (idx
< PALETTE_MAX_SIZE
+ n
) {
755 const int min_bits
= bit_depth
- 3;
756 int bits
= min_bits
+ aom_read_literal(r
, 2, ACCT_STR
);
757 int range
= (1 << bit_depth
) - pmi
->palette_colors
[idx
- 1];
758 for (; idx
< PALETTE_MAX_SIZE
+ n
; ++idx
) {
760 const int delta
= aom_read_literal(r
, bits
, ACCT_STR
);
761 pmi
->palette_colors
[idx
] = clamp(pmi
->palette_colors
[idx
- 1] + delta
,
762 0, (1 << bit_depth
) - 1);
763 range
-= (pmi
->palette_colors
[idx
] - pmi
->palette_colors
[idx
- 1]);
764 bits
= AOMMIN(bits
, av1_ceil_log2(range
));
767 merge_colors(pmi
->palette_colors
+ PALETTE_MAX_SIZE
, cached_colors
, n
,
770 memcpy(pmi
->palette_colors
+ PALETTE_MAX_SIZE
, cached_colors
,
771 n
* sizeof(cached_colors
[0]));
775 if (aom_read_bit(r
, ACCT_STR
)) { // Delta encoding.
776 const int min_bits_v
= bit_depth
- 4;
777 const int max_val
= 1 << bit_depth
;
778 int bits
= min_bits_v
+ aom_read_literal(r
, 2, ACCT_STR
);
779 pmi
->palette_colors
[2 * PALETTE_MAX_SIZE
] =
780 aom_read_literal(r
, bit_depth
, ACCT_STR
);
781 for (int i
= 1; i
< n
; ++i
) {
782 int delta
= aom_read_literal(r
, bits
, ACCT_STR
);
783 if (delta
&& aom_read_bit(r
, ACCT_STR
)) delta
= -delta
;
784 int val
= (int)pmi
->palette_colors
[2 * PALETTE_MAX_SIZE
+ i
- 1] + delta
;
785 if (val
< 0) val
+= max_val
;
786 if (val
>= max_val
) val
-= max_val
;
787 pmi
->palette_colors
[2 * PALETTE_MAX_SIZE
+ i
] = val
;
790 for (int i
= 0; i
< n
; ++i
) {
791 pmi
->palette_colors
[2 * PALETTE_MAX_SIZE
+ i
] =
792 aom_read_literal(r
, bit_depth
, ACCT_STR
);
797 static void read_palette_mode_info(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
798 int mi_row
, int mi_col
, aom_reader
*r
) {
799 MODE_INFO
*const mi
= xd
->mi
[0];
800 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
801 const MODE_INFO
*const above_mi
= xd
->above_mi
;
802 const MODE_INFO
*const left_mi
= xd
->left_mi
;
803 const BLOCK_SIZE bsize
= mbmi
->sb_type
;
804 assert(av1_allow_palette(cm
->allow_screen_content_tools
, bsize
));
805 PALETTE_MODE_INFO
*const pmi
= &mbmi
->palette_mode_info
;
806 const int bsize_ctx
= av1_get_palette_bsize_ctx(bsize
);
808 if (mbmi
->mode
== DC_PRED
) {
809 int palette_y_mode_ctx
= 0;
811 palette_y_mode_ctx
+=
812 (above_mi
->mbmi
.palette_mode_info
.palette_size
[0] > 0);
815 palette_y_mode_ctx
+=
816 (left_mi
->mbmi
.palette_mode_info
.palette_size
[0] > 0);
818 const int modev
= aom_read_symbol(
819 r
, xd
->tile_ctx
->palette_y_mode_cdf
[bsize_ctx
][palette_y_mode_ctx
], 2,
822 pmi
->palette_size
[0] =
823 aom_read_symbol(r
, xd
->tile_ctx
->palette_y_size_cdf
[bsize_ctx
],
824 PALETTE_SIZES
, ACCT_STR
) +
826 read_palette_colors_y(xd
, cm
->bit_depth
, pmi
, r
);
829 if (mbmi
->uv_mode
== UV_DC_PRED
&&
830 is_chroma_reference(mi_row
, mi_col
, bsize
, xd
->plane
[1].subsampling_x
,
831 xd
->plane
[1].subsampling_y
)) {
832 const int palette_uv_mode_ctx
= (pmi
->palette_size
[0] > 0);
833 const int modev
= aom_read_symbol(
834 r
, xd
->tile_ctx
->palette_uv_mode_cdf
[palette_uv_mode_ctx
], 2, ACCT_STR
);
836 pmi
->palette_size
[1] =
837 aom_read_symbol(r
, xd
->tile_ctx
->palette_uv_size_cdf
[bsize_ctx
],
838 PALETTE_SIZES
, ACCT_STR
) +
840 read_palette_colors_uv(xd
, cm
->bit_depth
, pmi
, r
);
845 #if CONFIG_FILTER_INTRA
846 static void read_filter_intra_mode_info(MACROBLOCKD
*const xd
, aom_reader
*r
) {
847 MODE_INFO
*const mi
= xd
->mi
[0];
848 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
849 FILTER_INTRA_MODE_INFO
*filter_intra_mode_info
=
850 &mbmi
->filter_intra_mode_info
;
852 if (mbmi
->mode
== DC_PRED
&& mbmi
->palette_mode_info
.palette_size
[0] == 0 &&
853 av1_filter_intra_allowed_txsize(mbmi
->tx_size
)) {
854 filter_intra_mode_info
->use_filter_intra
= aom_read_symbol(
855 r
, xd
->tile_ctx
->filter_intra_cdfs
[mbmi
->tx_size
], 2, ACCT_STR
);
856 if (filter_intra_mode_info
->use_filter_intra
) {
857 filter_intra_mode_info
->filter_intra_mode
= aom_read_symbol(
858 r
, xd
->tile_ctx
->filter_intra_mode_cdf
, FILTER_INTRA_MODES
, ACCT_STR
);
862 #endif // CONFIG_FILTER_INTRA
865 #if CONFIG_EXT_INTRA_MOD
866 static int read_angle_delta(aom_reader
*r
, aom_cdf_prob
*cdf
) {
867 const int sym
= aom_read_symbol(r
, cdf
, 2 * MAX_ANGLE_DELTA
+ 1, ACCT_STR
);
868 return sym
- MAX_ANGLE_DELTA
;
870 #endif // CONFIG_EXT_INTRA_MOD
872 static void read_intra_angle_info(MACROBLOCKD
*const xd
, aom_reader
*r
) {
873 MB_MODE_INFO
*const mbmi
= &xd
->mi
[0]->mbmi
;
874 const BLOCK_SIZE bsize
= mbmi
->sb_type
;
875 #if CONFIG_EXT_INTRA_MOD
876 FRAME_CONTEXT
*const ec_ctx
= xd
->tile_ctx
;
877 #endif // CONFIG_EXT_INTRA_MOD
879 mbmi
->angle_delta
[0] = 0;
880 mbmi
->angle_delta
[1] = 0;
881 if (!av1_use_angle_delta(bsize
)) return;
883 if (av1_is_directional_mode(mbmi
->mode
, bsize
)) {
884 #if CONFIG_EXT_INTRA_MOD
885 mbmi
->angle_delta
[0] =
886 read_angle_delta(r
, ec_ctx
->angle_delta_cdf
[mbmi
->mode
- V_PRED
]);
888 mbmi
->angle_delta
[0] =
889 av1_read_uniform(r
, 2 * MAX_ANGLE_DELTA
+ 1) - MAX_ANGLE_DELTA
;
890 #endif // CONFIG_EXT_INTRA_MOD
893 if (av1_is_directional_mode(get_uv_mode(mbmi
->uv_mode
), bsize
)) {
894 #if CONFIG_EXT_INTRA_MOD
895 mbmi
->angle_delta
[1] =
896 read_angle_delta(r
, ec_ctx
->angle_delta_cdf
[mbmi
->uv_mode
- V_PRED
]);
898 mbmi
->angle_delta
[1] =
899 av1_read_uniform(r
, 2 * MAX_ANGLE_DELTA
+ 1) - MAX_ANGLE_DELTA
;
900 #endif // CONFIG_EXT_INTRA_MOD
903 #endif // CONFIG_EXT_INTRA
905 void av1_read_tx_type(const AV1_COMMON
*const cm
, MACROBLOCKD
*xd
,
907 int blk_row
, int blk_col
, int plane
, TX_SIZE tx_size
,
910 MB_MODE_INFO
*mbmi
= &xd
->mi
[0]->mbmi
;
911 const int inter_block
= is_inter_block(mbmi
);
913 const TX_SIZE mtx_size
=
914 get_max_rect_tx_size(xd
->mi
[0]->mbmi
.sb_type
, inter_block
);
915 const TX_SIZE tx_size
=
916 inter_block
? AOMMAX(sub_tx_size_map
[1][mtx_size
], mbmi
->min_tx_size
)
918 #endif // !CONFIG_TXK_SEL
919 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
922 TX_TYPE
*tx_type
= &mbmi
->tx_type
;
924 // only y plane's tx_type is transmitted
925 if (plane
> 0) return;
926 TX_TYPE
*tx_type
= &mbmi
->txk_type
[(blk_row
<< MAX_MIB_SIZE_LOG2
) + blk_col
];
929 if (!FIXED_TX_TYPE
) {
930 const TX_SIZE square_tx_size
= txsize_sqr_map
[tx_size
];
931 if (get_ext_tx_types(tx_size
, mbmi
->sb_type
, inter_block
,
932 cm
->reduced_tx_set_used
) > 1 &&
933 ((!cm
->seg
.enabled
&& cm
->base_qindex
> 0) ||
934 (cm
->seg
.enabled
&& xd
->qindex
[mbmi
->segment_id
] > 0)) &&
936 !segfeature_active(&cm
->seg
, mbmi
->segment_id
, SEG_LVL_SKIP
)) {
937 const TxSetType tx_set_type
= get_ext_tx_set_type(
938 tx_size
, mbmi
->sb_type
, inter_block
, cm
->reduced_tx_set_used
);
939 const int eset
= get_ext_tx_set(tx_size
, mbmi
->sb_type
, inter_block
,
940 cm
->reduced_tx_set_used
);
941 // eset == 0 should correspond to a set with only DCT_DCT and
942 // there is no need to read the tx_type
946 *tx_type
= av1_ext_tx_inv
[tx_set_type
][aom_read_symbol(
947 r
, ec_ctx
->inter_ext_tx_cdf
[eset
][square_tx_size
],
948 av1_num_ext_tx_set
[tx_set_type
], ACCT_STR
)];
949 } else if (ALLOW_INTRA_EXT_TX
) {
950 #if CONFIG_FILTER_INTRA
951 PREDICTION_MODE intra_dir
;
952 if (mbmi
->filter_intra_mode_info
.use_filter_intra
)
953 intra_dir
= fimode_to_intradir
[mbmi
->filter_intra_mode_info
956 intra_dir
= mbmi
->mode
;
957 *tx_type
= av1_ext_tx_inv
[tx_set_type
][aom_read_symbol(
958 r
, ec_ctx
->intra_ext_tx_cdf
[eset
][square_tx_size
][intra_dir
],
959 av1_num_ext_tx_set
[tx_set_type
], ACCT_STR
)];
961 *tx_type
= av1_ext_tx_inv
[tx_set_type
][aom_read_symbol(
962 r
, ec_ctx
->intra_ext_tx_cdf
[eset
][square_tx_size
][mbmi
->mode
],
963 av1_num_ext_tx_set
[tx_set_type
], ACCT_STR
)];
971 assert(mbmi
->tx_type
== DCT_DCT
);
976 static INLINE
void read_mv(aom_reader
*r
, MV
*mv
, const MV
*ref
,
977 nmv_context
*ctx
, MvSubpelPrecision precision
);
979 static INLINE
int is_mv_valid(const MV
*mv
);
// Decodes a displacement vector (DV) for intra block copy.
// The DV is read at integer-pel precision (MV_SUBPEL_NONE) via read_mv,
// then snapped to full-pel alignment; validity combines the generic MV
// range check (is_mv_valid) with IntraBC-specific constraints
// (av1_is_dv_valid).
// NOTE(review): this extraction is missing several original source lines
// (993-995), so the tail of this function (return/closing) is not visible.
981 static INLINE
int assign_dv(AV1_COMMON
*cm
, MACROBLOCKD
*xd
, int_mv
*mv
,
982 const int_mv
*ref_mv
, int mi_row
, int mi_col
,
983 BLOCK_SIZE bsize
, aom_reader
*r
) {
984 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
// Decode the DV against the reference DV using the dedicated DV context
// (ndvc) with no sub-pel precision.
985 read_mv(r
, &mv
->as_mv
, &ref_mv
->as_mv
, &ec_ctx
->ndvc
, MV_SUBPEL_NONE
);
986 // DV should not have sub-pel.
987 assert((mv
->as_mv
.col
& 7) == 0);
988 assert((mv
->as_mv
.row
& 7) == 0);
// Force full-pel alignment (units of 8 in 1/8-pel resolution).
989 mv
->as_mv
.col
= (mv
->as_mv
.col
>> 3) * 8;
990 mv
->as_mv
.row
= (mv
->as_mv
.row
>> 3) * 8;
991 int valid
= is_mv_valid(&mv
->as_mv
) &&
992 av1_is_dv_valid(mv
->as_mv
, &xd
->tile
, mi_row
, mi_col
, bsize
,
996 #endif // CONFIG_INTRABC
999 static void read_intrabc_info(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
1000 int mi_row
, int mi_col
, aom_reader
*r
) {
1001 MODE_INFO
*const mi
= xd
->mi
[0];
1002 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
1003 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
1004 mbmi
->use_intrabc
= aom_read_symbol(r
, ec_ctx
->intrabc_cdf
, 2, ACCT_STR
);
1005 if (mbmi
->use_intrabc
) {
1006 const BLOCK_SIZE bsize
= mbmi
->sb_type
;
1007 const int width
= block_size_wide
[bsize
] >> tx_size_wide_log2
[0];
1008 const int height
= block_size_high
[bsize
] >> tx_size_high_log2
[0];
1009 if ((cm
->tx_mode
== TX_MODE_SELECT
&& block_signals_txsize(bsize
) &&
1010 !xd
->lossless
[mbmi
->segment_id
] && !mbmi
->skip
)) {
1011 const TX_SIZE max_tx_size
= get_max_rect_tx_size(bsize
, 0);
1012 const int bh
= tx_size_high_unit
[max_tx_size
];
1013 const int bw
= tx_size_wide_unit
[max_tx_size
];
1014 mbmi
->min_tx_size
= TX_SIZES_ALL
;
1015 for (int idy
= 0; idy
< height
; idy
+= bh
) {
1016 for (int idx
= 0; idx
< width
; idx
+= bw
) {
1017 read_tx_size_vartx(cm
, xd
, mbmi
, xd
->counts
, max_tx_size
, 0, idy
, idx
,
1022 mbmi
->tx_size
= read_tx_size(cm
, xd
, 1, !mbmi
->skip
, r
);
1023 for (int idy
= 0; idy
< height
; ++idy
)
1024 for (int idx
= 0; idx
< width
; ++idx
)
1025 mbmi
->inter_tx_size
[idy
>> 1][idx
>> 1] = mbmi
->tx_size
;
1026 mbmi
->min_tx_size
= mbmi
->tx_size
;
1027 set_txfm_ctxs(mbmi
->tx_size
, xd
->n8_w
, xd
->n8_h
, mbmi
->skip
, xd
);
1029 mbmi
->mode
= mbmi
->uv_mode
= UV_DC_PRED
;
1030 mbmi
->interp_filters
= av1_broadcast_interp_filter(BILINEAR
);
1032 int16_t inter_mode_ctx
[MODE_CTX_REF_FRAMES
];
1033 int_mv ref_mvs
[MAX_MV_REF_CANDIDATES
];
1035 av1_find_mv_refs(cm
, xd
, mi
, INTRA_FRAME
, &xd
->ref_mv_count
[INTRA_FRAME
],
1036 xd
->ref_mv_stack
[INTRA_FRAME
], NULL
, ref_mvs
, mi_row
,
1037 mi_col
, NULL
, NULL
, inter_mode_ctx
);
1039 int_mv nearestmv
, nearmv
;
1042 av1_find_best_ref_mvs(0, ref_mvs
, &nearestmv
, &nearmv
, 0);
1044 av1_find_best_ref_mvs(0, ref_mvs
, &nearestmv
, &nearmv
);
1046 int_mv dv_ref
= nearestmv
.as_int
== 0 ? nearmv
: nearestmv
;
1047 if (dv_ref
.as_int
== 0)
1048 av1_find_ref_dv(&dv_ref
, &xd
->tile
, cm
->mib_size
, mi_row
, mi_col
);
1049 // Ref DV should not have sub-pel.
1050 assert((dv_ref
.as_mv
.col
& 7) == 0);
1051 assert((dv_ref
.as_mv
.row
& 7) == 0);
1052 dv_ref
.as_mv
.col
= (dv_ref
.as_mv
.col
>> 3) * 8;
1053 dv_ref
.as_mv
.row
= (dv_ref
.as_mv
.row
>> 3) * 8;
1055 !assign_dv(cm
, xd
, &mbmi
->mv
[0], &dv_ref
, mi_row
, mi_col
, bsize
, r
);
1057 av1_read_tx_type(cm
, xd
, r
);
1058 #endif // !CONFIG_TXK_SEL
1061 #endif // CONFIG_INTRABC
1063 static void read_intra_frame_mode_info(AV1_COMMON
*const cm
,
1064 MACROBLOCKD
*const xd
, int mi_row
,
1065 int mi_col
, aom_reader
*r
) {
1066 MODE_INFO
*const mi
= xd
->mi
[0];
1067 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
1068 const MODE_INFO
*above_mi
= xd
->above_mi
;
1069 const MODE_INFO
*left_mi
= xd
->left_mi
;
1070 const BLOCK_SIZE bsize
= mbmi
->sb_type
;
1072 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
1075 read_intra_segment_id(cm
, xd
, mbmi
, mi_row
, mi_col
, bsize
, 1, r
);
1076 mbmi
->skip
= read_skip(cm
, xd
, mbmi
->segment_id
, r
);
1078 #if CONFIG_SPATIAL_SEGMENTATION
1080 read_intra_segment_id(cm
, xd
, mbmi
, mi_row
, mi_col
, bsize
, 0, r
);
1083 read_cdef(cm
, r
, mbmi
, mi_col
, mi_row
);
1085 if (cm
->delta_q_present_flag
) {
1086 xd
->current_qindex
=
1088 read_delta_qindex(cm
, xd
, r
, mbmi
, mi_col
, mi_row
) * cm
->delta_q_res
;
1089 /* Normative: Clamp to [1,MAXQ] to not interfere with lossless mode */
1090 xd
->current_qindex
= clamp(xd
->current_qindex
, 1, MAXQ
);
1091 xd
->prev_qindex
= xd
->current_qindex
;
1092 #if CONFIG_EXT_DELTA_Q
1093 if (cm
->delta_lf_present_flag
) {
1094 #if CONFIG_LOOPFILTER_LEVEL
1095 if (cm
->delta_lf_multi
) {
1096 for (int lf_id
= 0; lf_id
< FRAME_LF_COUNT
; ++lf_id
) {
1098 xd
->prev_delta_lf
[lf_id
] +
1099 read_delta_lflevel(cm
, xd
, r
, lf_id
, mbmi
, mi_col
, mi_row
) *
1101 mbmi
->curr_delta_lf
[lf_id
] = xd
->curr_delta_lf
[lf_id
] =
1102 clamp(tmp_lvl
, -MAX_LOOP_FILTER
, MAX_LOOP_FILTER
);
1103 xd
->prev_delta_lf
[lf_id
] = xd
->curr_delta_lf
[lf_id
];
1107 xd
->prev_delta_lf_from_base
+
1108 read_delta_lflevel(cm
, xd
, r
, -1, mbmi
, mi_col
, mi_row
) *
1110 mbmi
->current_delta_lf_from_base
= xd
->current_delta_lf_from_base
=
1111 clamp(tmp_lvl
, -MAX_LOOP_FILTER
, MAX_LOOP_FILTER
);
1112 xd
->prev_delta_lf_from_base
= xd
->current_delta_lf_from_base
;
1115 const int current_delta_lf_from_base
=
1116 xd
->prev_delta_lf_from_base
+
1117 read_delta_lflevel(cm
, xd
, r
, mbmi
, mi_col
, mi_row
) *
1119 mbmi
->current_delta_lf_from_base
= xd
->current_delta_lf_from_base
=
1120 clamp(current_delta_lf_from_base
, -MAX_LOOP_FILTER
, MAX_LOOP_FILTER
);
1121 xd
->prev_delta_lf_from_base
= xd
->current_delta_lf_from_base
;
1122 #endif // CONFIG_LOOPFILTER_LEVEL
1127 mbmi
->current_q_index
= xd
->current_qindex
;
1129 mbmi
->ref_frame
[0] = INTRA_FRAME
;
1130 mbmi
->ref_frame
[1] = NONE_FRAME
;
1133 if (cm
->allow_screen_content_tools
) {
1134 xd
->above_txfm_context
=
1135 cm
->above_txfm_context
+ (mi_col
<< TX_UNIT_WIDE_LOG2
);
1136 xd
->left_txfm_context
= xd
->left_txfm_context_buffer
+
1137 ((mi_row
& MAX_MIB_MASK
) << TX_UNIT_HIGH_LOG2
);
1139 if (av1_allow_intrabc(cm
)) {
1140 read_intrabc_info(cm
, xd
, mi_row
, mi_col
, r
);
1141 if (is_intrabc_block(mbmi
)) return;
1143 #endif // CONFIG_INTRABC
1145 mbmi
->tx_size
= read_tx_size(cm
, xd
, 0, 1, r
);
1147 if (cm
->allow_screen_content_tools
)
1148 set_txfm_ctxs(mbmi
->tx_size
, xd
->n8_w
, xd
->n8_h
, mbmi
->skip
, xd
);
1149 #endif // CONFIG_INTRABC
1151 mbmi
->mode
= read_intra_mode(r
, get_y_mode_cdf(ec_ctx
, above_mi
, left_mi
));
1153 if (is_chroma_reference(mi_row
, mi_col
, bsize
, xd
->plane
[1].subsampling_x
,
1154 xd
->plane
[1].subsampling_y
)) {
1156 xd
->cfl
.is_chroma_reference
= 1;
1157 #endif // CONFIG_CFL
1158 mbmi
->uv_mode
= read_intra_mode_uv(ec_ctx
, r
, mbmi
->mode
);
1161 if (mbmi
->uv_mode
== UV_CFL_PRED
) {
1162 if (!is_cfl_allowed(mbmi
)) {
1164 &cm
->error
, AOM_CODEC_UNSUP_BITSTREAM
,
1165 "Chroma from Luma (CfL) cannot be signaled for a %dx%d block.",
1166 block_size_wide
[bsize
], block_size_high
[bsize
]);
1168 mbmi
->cfl_alpha_idx
= read_cfl_alphas(ec_ctx
, r
, &mbmi
->cfl_alpha_signs
);
1169 xd
->cfl
.store_y
= 1;
1171 xd
->cfl
.store_y
= 0;
1173 #endif // CONFIG_CFL
1176 // Avoid decoding angle_info if there is is no chroma prediction
1177 mbmi
->uv_mode
= UV_DC_PRED
;
1179 xd
->cfl
.is_chroma_reference
= 0;
1180 xd
->cfl
.store_y
= 1;
1184 #if CONFIG_EXT_INTRA
1185 read_intra_angle_info(xd
, r
);
1186 #endif // CONFIG_EXT_INTRA
1187 mbmi
->palette_mode_info
.palette_size
[0] = 0;
1188 mbmi
->palette_mode_info
.palette_size
[1] = 0;
1189 if (av1_allow_palette(cm
->allow_screen_content_tools
, bsize
))
1190 read_palette_mode_info(cm
, xd
, mi_row
, mi_col
, r
);
1191 #if CONFIG_FILTER_INTRA
1192 mbmi
->filter_intra_mode_info
.use_filter_intra
= 0;
1193 read_filter_intra_mode_info(xd
, r
);
1194 #endif // CONFIG_FILTER_INTRA
1197 av1_read_tx_type(cm
, xd
, r
);
1198 #endif // !CONFIG_TXK_SEL
// Decodes a single motion-vector component (row or col) from the
// bitstream: sign, magnitude class, integer offset bits, fractional
// (1/2, 1/4 pel) part, and optional high-precision (1/8 pel) bit.
// Returns the signed component value in 1/8-pel units.
// NOTE(review): several original lines are elided in this extraction
// (1203-1206, 1211-1213, 1215-1216, 1218, 1222-1227, 1230, 1234-1243),
// including the declarations of d/mag/fr/hp and the usehp parameter, so
// the visible body is incomplete.
1201 static int read_mv_component(aom_reader
*r
, nmv_component
*mvcomp
,
1202 #if CONFIG_INTRABC || CONFIG_AMVR
1204 #endif // CONFIG_INTRABC || CONFIG_AMVR
// Sign bit, then the magnitude class symbol.
1207 const int sign
= aom_read_symbol(r
, mvcomp
->sign_cdf
, 2, ACCT_STR
);
1208 const int mv_class
=
1209 aom_read_symbol(r
, mvcomp
->classes_cdf
, MV_CLASSES
, ACCT_STR
);
1210 const int class0
= mv_class
== MV_CLASS_0
;
// Class 0: the integer offset is a single small symbol.
1214 d
= aom_read_symbol(r
, mvcomp
->class0_cdf
, CLASS0_SIZE
, ACCT_STR
);
// Higher classes: read the integer offset bit by bit.
1217 const int n
= mv_class
+ CLASS0_BITS
- 1; // number of bits
1219 for (int i
= 0; i
< n
; ++i
)
1220 d
|= aom_read_symbol(r
, mvcomp
->bits_cdf
[i
], 2, ACCT_STR
) << i
;
// Base magnitude for this class.
1221 mag
= CLASS0_SIZE
<< (mv_class
+ 2);
1224 #if CONFIG_INTRABC || CONFIG_AMVR
1226 #endif // CONFIG_INTRABC || CONFIG_AMVR
// Fractional-pel part; class 0 uses a per-offset CDF.
1228 fr
= aom_read_symbol(r
, class0
? mvcomp
->class0_fp_cdf
[d
] : mvcomp
->fp_cdf
,
1229 MV_FP_SIZE
, ACCT_STR
);
1231 // High precision part (if hp is not used, the default value of the hp is 1)
1232 hp
= usehp
? aom_read_symbol(
1233 r
, class0
? mvcomp
->class0_hp_cdf
: mvcomp
->hp_cdf
, 2,
1236 #if CONFIG_INTRABC || CONFIG_AMVR
1241 #endif // CONFIG_INTRABC || CONFIG_AMVR
// Assemble the 1/8-pel magnitude: integer offset, fractional, hp bit.
1244 mag
+= ((d
<< 3) | (fr
<< 1) | hp
) + 1;
1245 return sign
? -mag
: mag
;
// Decodes a motion vector as a difference from the reference MV:
// a joint type symbol selects which components (row/col) are coded,
// then each coded component is read via read_mv_component with the
// requested sub-pel precision, and the difference is added to *ref.
// NOTE(review): original lines 1250 and 1253 are elided here (likely the
// declaration/zero-initialization of `diff`) - confirm against upstream.
1248 static INLINE
void read_mv(aom_reader
*r
, MV
*mv
, const MV
*ref
,
1249 nmv_context
*ctx
, MvSubpelPrecision precision
) {
1251 const MV_JOINT_TYPE joint_type
=
1252 (MV_JOINT_TYPE
)aom_read_symbol(r
, ctx
->joints_cdf
, MV_JOINTS
, ACCT_STR
);
// Row component, only when the joint type says it is non-zero.
1254 if (mv_joint_vertical(joint_type
))
1255 diff
.row
= read_mv_component(r
, &ctx
->comps
[0],
1256 #if CONFIG_INTRABC || CONFIG_AMVR
1257 precision
> MV_SUBPEL_NONE
,
1258 #endif // CONFIG_INTRABC || CONFIG_AMVR
1259 precision
> MV_SUBPEL_LOW_PRECISION
);
// Column component, same scheme.
1261 if (mv_joint_horizontal(joint_type
))
1262 diff
.col
= read_mv_component(r
, &ctx
->comps
[1],
1263 #if CONFIG_INTRABC || CONFIG_AMVR
1264 precision
> MV_SUBPEL_NONE
,
1265 #endif // CONFIG_INTRABC || CONFIG_AMVR
1266 precision
> MV_SUBPEL_LOW_PRECISION
);
// Reconstruct the MV by adding the decoded difference to the reference.
1268 mv
->row
= ref
->row
+ diff
.row
;
1269 mv
->col
= ref
->col
+ diff
.col
;
// Decides whether this block uses single or compound reference prediction.
// If compound references are not allowed for the block size, returns
// SINGLE_REFERENCE without reading anything; if the frame-level mode is
// REFERENCE_MODE_SELECT, a per-block symbol is decoded (and counted);
// otherwise the frame-level reference mode is used directly.
// NOTE(review): original lines 1274 (end of the signature, aom_reader *r)
// and 1283/1288-1290 (closing braces) are elided in this extraction.
1272 static REFERENCE_MODE
read_block_reference_mode(AV1_COMMON
*cm
,
1273 const MACROBLOCKD
*xd
,
1275 if (!is_comp_ref_allowed(xd
->mi
[0]->mbmi
.sb_type
)) return SINGLE_REFERENCE
;
1276 if (cm
->reference_mode
== REFERENCE_MODE_SELECT
) {
1277 const int ctx
= av1_get_reference_mode_context(cm
, xd
);
1278 const REFERENCE_MODE mode
= (REFERENCE_MODE
)aom_read_symbol(
1279 r
, xd
->tile_ctx
->comp_inter_cdf
[ctx
], 2, ACCT_STR
);
// Update decode-side counts when stats collection is enabled.
1280 FRAME_COUNTS
*counts
= xd
->counts
;
1281 if (counts
) ++counts
->comp_inter
[ctx
][mode
];
1282 return mode
; // SINGLE_REFERENCE or COMPOUND_REFERENCE
1284 #if CONFIG_REF_ADAPT
1285 assert(cm
->reference_mode
== SINGLE_REFERENCE
);
1286 #endif // CONFIG_REF_ADAPT
1287 return cm
->reference_mode
;
1291 #define READ_REF_BIT(pname) \
1292 aom_read_symbol(r, av1_get_pred_cdf_##pname(cm, xd), 2, ACCT_STR)
1293 #define READ_REF_BIT2(pname) \
1294 aom_read_symbol(r, av1_get_pred_cdf_##pname(xd), 2, ACCT_STR)
1296 #if CONFIG_EXT_COMP_REFS
// Decodes whether a compound-reference block is unidirectional (both
// references on the same side) or bidirectional, using a context derived
// from the neighbors; updates decode-side counts when enabled.
// NOTE(review): original lines 1298 (end of signature, aom_reader *r) and
// 1306 (closing brace) are elided in this extraction.
1297 static COMP_REFERENCE_TYPE
read_comp_reference_type(const MACROBLOCKD
*xd
,
1299 const int ctx
= av1_get_comp_reference_type_context(xd
);
1300 const COMP_REFERENCE_TYPE comp_ref_type
=
1301 (COMP_REFERENCE_TYPE
)aom_read_symbol(
1302 r
, xd
->tile_ctx
->comp_ref_type_cdf
[ctx
], 2, ACCT_STR
);
1303 FRAME_COUNTS
*counts
= xd
->counts
;
1304 if (counts
) ++counts
->comp_ref_type
[ctx
][comp_ref_type
];
1305 return comp_ref_type
; // UNIDIR_COMP_REFERENCE or BIDIR_COMP_REFERENCE
1307 #endif // CONFIG_EXT_COMP_REFS
// For skip-mode blocks the reference pair is not signaled; it is derived
// from the frame-level skip-mode reference indices (ref_frame_idx_0/1),
// offset from LAST_FRAME.
// NOTE(review): the closing brace (original line 1314) is elided here.
1310 static void set_ref_frames_for_skip_mode(AV1_COMMON
*const cm
,
1311 MV_REFERENCE_FRAME ref_frame
[2]) {
1312 ref_frame
[0] = LAST_FRAME
+ cm
->ref_frame_idx_0
;
1313 ref_frame
[1] = LAST_FRAME
+ cm
->ref_frame_idx_1
;
1315 #endif // CONFIG_EXT_SKIP
1317 // Read the referncence frame
1318 static void read_ref_frames(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
1319 aom_reader
*r
, int segment_id
,
1320 MV_REFERENCE_FRAME ref_frame
[2]) {
1321 FRAME_COUNTS
*counts
= xd
->counts
;
1323 if (segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_REF_FRAME
)) {
1324 ref_frame
[0] = (MV_REFERENCE_FRAME
)get_segdata(&cm
->seg
, segment_id
,
1326 ref_frame
[1] = NONE_FRAME
;
1328 #if CONFIG_SEGMENT_GLOBALMV
1329 else if (segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_SKIP
) ||
1330 segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_GLOBALMV
))
1332 else if (segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_SKIP
))
1335 ref_frame
[0] = LAST_FRAME
;
1336 ref_frame
[1] = NONE_FRAME
;
1339 if (xd
->mi
[0]->mbmi
.skip_mode
) {
1340 set_ref_frames_for_skip_mode(cm
, ref_frame
);
1343 #endif // CONFIG_EXT_SKIP
1345 const REFERENCE_MODE mode
= read_block_reference_mode(cm
, xd
, r
);
1347 // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
1348 if (mode
== COMPOUND_REFERENCE
) {
1349 #if CONFIG_EXT_COMP_REFS
1350 const COMP_REFERENCE_TYPE comp_ref_type
= read_comp_reference_type(xd
, r
);
1352 if (comp_ref_type
== UNIDIR_COMP_REFERENCE
) {
1353 const int ctx
= av1_get_pred_context_uni_comp_ref_p(xd
);
1354 const int bit
= READ_REF_BIT2(uni_comp_ref_p
);
1355 if (counts
) ++counts
->uni_comp_ref
[ctx
][0][bit
];
1358 ref_frame
[0] = BWDREF_FRAME
;
1359 ref_frame
[1] = ALTREF_FRAME
;
1361 const int ctx1
= av1_get_pred_context_uni_comp_ref_p1(xd
);
1362 const int bit1
= READ_REF_BIT2(uni_comp_ref_p1
);
1363 if (counts
) ++counts
->uni_comp_ref
[ctx1
][1][bit1
];
1366 const int ctx2
= av1_get_pred_context_uni_comp_ref_p2(xd
);
1367 const int bit2
= READ_REF_BIT2(uni_comp_ref_p2
);
1368 if (counts
) ++counts
->uni_comp_ref
[ctx2
][2][bit2
];
1371 ref_frame
[0] = LAST_FRAME
;
1372 ref_frame
[1] = GOLDEN_FRAME
;
1374 ref_frame
[0] = LAST_FRAME
;
1375 ref_frame
[1] = LAST3_FRAME
;
1378 ref_frame
[0] = LAST_FRAME
;
1379 ref_frame
[1] = LAST2_FRAME
;
1386 assert(comp_ref_type
== BIDIR_COMP_REFERENCE
);
1387 #endif // CONFIG_EXT_COMP_REFS
1391 const int ctx
= av1_get_pred_context_comp_ref_p(cm
, xd
);
1392 const int bit
= READ_REF_BIT(comp_ref_p
);
1393 if (counts
) ++counts
->comp_ref
[ctx
][0][bit
];
1395 // Decode forward references.
1397 const int ctx1
= av1_get_pred_context_comp_ref_p1(cm
, xd
);
1398 const int bit1
= READ_REF_BIT(comp_ref_p1
);
1399 if (counts
) ++counts
->comp_ref
[ctx1
][1][bit1
];
1400 ref_frame
[!idx
] = cm
->comp_fwd_ref
[bit1
? 1 : 0];
1402 const int ctx2
= av1_get_pred_context_comp_ref_p2(cm
, xd
);
1403 const int bit2
= READ_REF_BIT(comp_ref_p2
);
1404 if (counts
) ++counts
->comp_ref
[ctx2
][2][bit2
];
1405 ref_frame
[!idx
] = cm
->comp_fwd_ref
[bit2
? 3 : 2];
1408 // Decode backward references.
1409 const int ctx_bwd
= av1_get_pred_context_comp_bwdref_p(cm
, xd
);
1410 const int bit_bwd
= READ_REF_BIT(comp_bwdref_p
);
1411 if (counts
) ++counts
->comp_bwdref
[ctx_bwd
][0][bit_bwd
];
1413 const int ctx1_bwd
= av1_get_pred_context_comp_bwdref_p1(cm
, xd
);
1414 const int bit1_bwd
= READ_REF_BIT(comp_bwdref_p1
);
1415 if (counts
) ++counts
->comp_bwdref
[ctx1_bwd
][1][bit1_bwd
];
1416 ref_frame
[idx
] = cm
->comp_bwd_ref
[bit1_bwd
];
1418 ref_frame
[idx
] = cm
->comp_bwd_ref
[2];
1420 } else if (mode
== SINGLE_REFERENCE
) {
1421 const int ctx0
= av1_get_pred_context_single_ref_p1(xd
);
1422 const int bit0
= READ_REF_BIT(single_ref_p1
);
1423 if (counts
) ++counts
->single_ref
[ctx0
][0][bit0
];
1426 const int ctx1
= av1_get_pred_context_single_ref_p2(xd
);
1427 const int bit1
= READ_REF_BIT(single_ref_p2
);
1428 if (counts
) ++counts
->single_ref
[ctx1
][1][bit1
];
1430 const int ctx5
= av1_get_pred_context_single_ref_p6(xd
);
1431 const int bit5
= READ_REF_BIT(single_ref_p6
);
1432 if (counts
) ++counts
->single_ref
[ctx5
][5][bit5
];
1433 ref_frame
[0] = bit5
? ALTREF2_FRAME
: BWDREF_FRAME
;
1435 ref_frame
[0] = ALTREF_FRAME
;
1438 const int ctx2
= av1_get_pred_context_single_ref_p3(xd
);
1439 const int bit2
= READ_REF_BIT(single_ref_p3
);
1440 if (counts
) ++counts
->single_ref
[ctx2
][2][bit2
];
1442 const int ctx4
= av1_get_pred_context_single_ref_p5(xd
);
1443 const int bit4
= READ_REF_BIT(single_ref_p5
);
1444 if (counts
) ++counts
->single_ref
[ctx4
][4][bit4
];
1445 ref_frame
[0] = bit4
? GOLDEN_FRAME
: LAST3_FRAME
;
1447 const int ctx3
= av1_get_pred_context_single_ref_p4(xd
);
1448 const int bit3
= READ_REF_BIT(single_ref_p4
);
1449 if (counts
) ++counts
->single_ref
[ctx3
][3][bit3
];
1450 ref_frame
[0] = bit3
? LAST2_FRAME
: LAST_FRAME
;
1454 ref_frame
[1] = NONE_FRAME
;
1456 assert(0 && "Invalid prediction mode.");
// Decodes the interpolation filter(s) for an inter block. If no sub-pel
// interpolation is needed, defaults are used; if the frame-level filter
// is not SWITCHABLE it is broadcast to the block; otherwise per-block
// (and, with CONFIG_DUAL_FILTER, per-direction) filter symbols are read.
// NOTE(review): this extraction elides several original lines (1464,
// 1467, 1470-1472, 1475, 1483, 1487-1488, and the closing braces), so
// the visible body is incomplete.
1461 static INLINE
void read_mb_interp_filter(AV1_COMMON
*const cm
,
1462 MACROBLOCKD
*const xd
,
1463 MB_MODE_INFO
*const mbmi
,
1465 FRAME_COUNTS
*counts
= xd
->counts
;
1466 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
// No sub-pel motion anywhere in the block: nothing is signaled.
1468 if (!av1_is_interp_needed(xd
)) {
1469 set_default_interp_filters(mbmi
, cm
->interp_filter
);
// Frame-level filter fixed: broadcast it, no per-block symbol.
1473 if (cm
->interp_filter
!= SWITCHABLE
) {
1474 mbmi
->interp_filters
= av1_broadcast_interp_filter(cm
->interp_filter
);
1476 #if CONFIG_DUAL_FILTER
// Dual-filter: one filter per direction (0 = vertical, 1 = horizontal),
// read only for directions that actually have a sub-pel MV component.
1477 InterpFilter ref0_filter
[2] = { EIGHTTAP_REGULAR
, EIGHTTAP_REGULAR
};
1478 for (int dir
= 0; dir
< 2; ++dir
) {
1479 if (has_subpel_mv_component(xd
->mi
[0], xd
, dir
) ||
1480 (mbmi
->ref_frame
[1] > INTRA_FRAME
&&
1481 has_subpel_mv_component(xd
->mi
[0], xd
, dir
+ 2))) {
1482 const int ctx
= av1_get_pred_context_switchable_interp(xd
, dir
);
1484 (InterpFilter
)aom_read_symbol(r
, ec_ctx
->switchable_interp_cdf
[ctx
],
1485 SWITCHABLE_FILTERS
, ACCT_STR
);
1486 if (counts
) ++counts
->switchable_interp
[ctx
][ref0_filter
[dir
]];
1489 // The index system works as: (0, 1) -> (vertical, horizontal) filter types
1490 mbmi
->interp_filters
=
1491 av1_make_interp_filters(ref0_filter
[0], ref0_filter
[1]);
1492 #else // CONFIG_DUAL_FILTER
// Single switchable filter for the whole block.
1493 const int ctx
= av1_get_pred_context_switchable_interp(xd
);
1494 InterpFilter filter
= (InterpFilter
)aom_read_symbol(
1495 r
, ec_ctx
->switchable_interp_cdf
[ctx
], SWITCHABLE_FILTERS
, ACCT_STR
);
1496 mbmi
->interp_filters
= av1_broadcast_interp_filter(filter
);
1497 if (counts
) ++counts
->switchable_interp
[ctx
][filter
];
1498 #endif // CONFIG_DUAL_FILTER
1502 static void read_intra_block_mode_info(AV1_COMMON
*const cm
, const int mi_row
,
1503 const int mi_col
, MACROBLOCKD
*const xd
,
1504 MODE_INFO
*mi
, aom_reader
*r
) {
1505 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
1506 const BLOCK_SIZE bsize
= mi
->mbmi
.sb_type
;
1508 mbmi
->ref_frame
[0] = INTRA_FRAME
;
1509 mbmi
->ref_frame
[1] = NONE_FRAME
;
1511 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
1513 mbmi
->mode
= read_intra_mode(r
, ec_ctx
->y_mode_cdf
[size_group_lookup
[bsize
]]);
1515 if (is_chroma_reference(mi_row
, mi_col
, bsize
, xd
->plane
[1].subsampling_x
,
1516 xd
->plane
[1].subsampling_y
)) {
1517 mbmi
->uv_mode
= read_intra_mode_uv(ec_ctx
, r
, mbmi
->mode
);
1520 if (mbmi
->uv_mode
== UV_CFL_PRED
) {
1521 if (!is_cfl_allowed(mbmi
)) {
1523 &cm
->error
, AOM_CODEC_UNSUP_BITSTREAM
,
1524 "Chroma from Luma (CfL) cannot be signaled for a %dx%d block.",
1525 block_size_wide
[bsize
], block_size_high
[bsize
]);
1527 mbmi
->cfl_alpha_idx
=
1528 read_cfl_alphas(xd
->tile_ctx
, r
, &mbmi
->cfl_alpha_signs
);
1529 xd
->cfl
.store_y
= 1;
1531 xd
->cfl
.store_y
= 0;
1533 #endif // CONFIG_CFL
1536 // Avoid decoding angle_info if there is is no chroma prediction
1537 mbmi
->uv_mode
= UV_DC_PRED
;
1539 xd
->cfl
.is_chroma_reference
= 0;
1540 xd
->cfl
.store_y
= 1;
1544 // Explicitly ignore cm here to avoid a compile warning if none of
1545 // ext-intra, palette and filter-intra are enabled.
1548 #if CONFIG_EXT_INTRA
1549 read_intra_angle_info(xd
, r
);
1550 #endif // CONFIG_EXT_INTRA
1551 mbmi
->palette_mode_info
.palette_size
[0] = 0;
1552 mbmi
->palette_mode_info
.palette_size
[1] = 0;
1553 if (av1_allow_palette(cm
->allow_screen_content_tools
, bsize
))
1554 read_palette_mode_info(cm
, xd
, mi_row
, mi_col
, r
);
1555 #if CONFIG_FILTER_INTRA
1556 mbmi
->filter_intra_mode_info
.use_filter_intra
= 0;
1557 read_filter_intra_mode_info(xd
, r
);
1558 #endif // CONFIG_FILTER_INTRA
// Range check for a decoded motion vector: both components must lie
// strictly inside (MV_LOW, MV_UPP).
// NOTE(review): the remainder of the return expression and the closing
// brace (original lines 1563-1564) are elided in this extraction;
// presumably the missing tail is `mv->col < MV_UPP;` - confirm upstream.
1561 static INLINE
int is_mv_valid(const MV
*mv
) {
1562 return mv
->row
> MV_LOW
&& mv
->row
< MV_UPP
&& mv
->col
> MV_LOW
&&
1566 static INLINE
int assign_mv(AV1_COMMON
*cm
, MACROBLOCKD
*xd
,
1567 PREDICTION_MODE mode
,
1568 MV_REFERENCE_FRAME ref_frame
[2], int_mv mv
[2],
1569 int_mv ref_mv
[2], int_mv nearest_mv
[2],
1570 int_mv near_mv
[2], int mi_row
, int mi_col
,
1571 int is_compound
, int allow_hp
, aom_reader
*r
) {
1573 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
1574 BLOCK_SIZE bsize
= xd
->mi
[0]->mbmi
.sb_type
;
1575 MB_MODE_INFO
*mbmi
= &xd
->mi
[0]->mbmi
;
1576 int_mv
*pred_mv
= mbmi
->pred_mv
;
1583 if (cm
->cur_frame_force_integer_mv
) {
1584 allow_hp
= MV_SUBPEL_NONE
;
1589 for (int i
= 0; i
< 1 + is_compound
; ++i
) {
1590 int8_t rf_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1592 av1_nmv_ctx(xd
->ref_mv_count
[rf_type
], xd
->ref_mv_stack
[rf_type
], i
,
1594 nmv_context
*const nmvc
= &ec_ctx
->nmvc
[nmv_ctx
];
1595 read_mv(r
, &mv
[i
].as_mv
, &ref_mv
[i
].as_mv
, nmvc
, allow_hp
);
1596 ret
= ret
&& is_mv_valid(&mv
[i
].as_mv
);
1598 pred_mv
[i
].as_int
= ref_mv
[i
].as_int
;
1603 mv
[0].as_int
= nearest_mv
[0].as_int
;
1604 if (is_compound
) mv
[1].as_int
= nearest_mv
[1].as_int
;
1606 pred_mv
[0].as_int
= nearest_mv
[0].as_int
;
1607 if (is_compound
) pred_mv
[1].as_int
= nearest_mv
[1].as_int
;
1611 mv
[0].as_int
= near_mv
[0].as_int
;
1612 if (is_compound
) mv
[1].as_int
= near_mv
[1].as_int
;
1614 pred_mv
[0].as_int
= near_mv
[0].as_int
;
1615 if (is_compound
) pred_mv
[1].as_int
= near_mv
[1].as_int
;
1619 mv
[0].as_int
= gm_get_motion_vector(&cm
->global_motion
[ref_frame
[0]],
1620 cm
->allow_high_precision_mv
, bsize
,
1624 cm
->cur_frame_force_integer_mv
1629 mv
[1].as_int
= gm_get_motion_vector(&cm
->global_motion
[ref_frame
[1]],
1630 cm
->allow_high_precision_mv
, bsize
,
1634 cm
->cur_frame_force_integer_mv
1639 pred_mv
[0].as_int
= mv
[0].as_int
;
1640 if (is_compound
) pred_mv
[1].as_int
= mv
[1].as_int
;
1644 assert(is_compound
);
1645 for (int i
= 0; i
< 2; ++i
) {
1646 int8_t rf_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1648 av1_nmv_ctx(xd
->ref_mv_count
[rf_type
], xd
->ref_mv_stack
[rf_type
], i
,
1650 nmv_context
*const nmvc
= &ec_ctx
->nmvc
[nmv_ctx
];
1651 read_mv(r
, &mv
[i
].as_mv
, &ref_mv
[i
].as_mv
, nmvc
, allow_hp
);
1652 ret
= ret
&& is_mv_valid(&mv
[i
].as_mv
);
1656 case NEAREST_NEARESTMV
: {
1657 assert(is_compound
);
1658 mv
[0].as_int
= nearest_mv
[0].as_int
;
1659 mv
[1].as_int
= nearest_mv
[1].as_int
;
1663 assert(is_compound
);
1664 mv
[0].as_int
= near_mv
[0].as_int
;
1665 mv
[1].as_int
= near_mv
[1].as_int
;
1668 case NEW_NEARESTMV
: {
1669 int8_t rf_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1670 int nmv_ctx
= av1_nmv_ctx(xd
->ref_mv_count
[rf_type
],
1671 xd
->ref_mv_stack
[rf_type
], 0, mbmi
->ref_mv_idx
);
1672 nmv_context
*const nmvc
= &ec_ctx
->nmvc
[nmv_ctx
];
1673 read_mv(r
, &mv
[0].as_mv
, &ref_mv
[0].as_mv
, nmvc
, allow_hp
);
1674 assert(is_compound
);
1675 ret
= ret
&& is_mv_valid(&mv
[0].as_mv
);
1676 mv
[1].as_int
= nearest_mv
[1].as_int
;
1679 case NEAREST_NEWMV
: {
1680 int8_t rf_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1681 int nmv_ctx
= av1_nmv_ctx(xd
->ref_mv_count
[rf_type
],
1682 xd
->ref_mv_stack
[rf_type
], 1, mbmi
->ref_mv_idx
);
1683 nmv_context
*const nmvc
= &ec_ctx
->nmvc
[nmv_ctx
];
1684 mv
[0].as_int
= nearest_mv
[0].as_int
;
1685 read_mv(r
, &mv
[1].as_mv
, &ref_mv
[1].as_mv
, nmvc
, allow_hp
);
1686 assert(is_compound
);
1687 ret
= ret
&& is_mv_valid(&mv
[1].as_mv
);
1691 int8_t rf_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1693 av1_nmv_ctx(xd
->ref_mv_count
[rf_type
], xd
->ref_mv_stack
[rf_type
], 1,
1694 mbmi
->ref_mv_idx
+ 1);
1695 nmv_context
*const nmvc
= &ec_ctx
->nmvc
[nmv_ctx
];
1696 mv
[0].as_int
= near_mv
[0].as_int
;
1697 read_mv(r
, &mv
[1].as_mv
, &ref_mv
[1].as_mv
, nmvc
, allow_hp
);
1698 assert(is_compound
);
1700 ret
= ret
&& is_mv_valid(&mv
[1].as_mv
);
1704 int8_t rf_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1706 av1_nmv_ctx(xd
->ref_mv_count
[rf_type
], xd
->ref_mv_stack
[rf_type
], 0,
1707 mbmi
->ref_mv_idx
+ 1);
1708 nmv_context
*const nmvc
= &ec_ctx
->nmvc
[nmv_ctx
];
1709 read_mv(r
, &mv
[0].as_mv
, &ref_mv
[0].as_mv
, nmvc
, allow_hp
);
1710 assert(is_compound
);
1711 ret
= ret
&& is_mv_valid(&mv
[0].as_mv
);
1712 mv
[1].as_int
= near_mv
[1].as_int
;
1715 case GLOBAL_GLOBALMV
: {
1716 assert(is_compound
);
1717 mv
[0].as_int
= gm_get_motion_vector(&cm
->global_motion
[ref_frame
[0]],
1718 cm
->allow_high_precision_mv
, bsize
,
1722 cm
->cur_frame_force_integer_mv
1726 mv
[1].as_int
= gm_get_motion_vector(&cm
->global_motion
[ref_frame
[1]],
1727 cm
->allow_high_precision_mv
, bsize
,
1731 cm
->cur_frame_force_integer_mv
1737 default: { return 0; }
// Decodes whether the current block is inter-coded. If the segment pins
// the reference frame, the answer is derived from the segment data with
// no symbol read; otherwise an intra/inter symbol is decoded using the
// neighbor-derived context, and counts are updated when enabled.
// NOTE(review): original lines 1746 and 1753-1755 (closing brace of the
// segment branch, `return is_inter;`, final brace) are elided here.
1742 static int read_is_inter_block(AV1_COMMON
*const cm
, MACROBLOCKD
*const xd
,
1743 int segment_id
, aom_reader
*r
) {
1744 if (segfeature_active(&cm
->seg
, segment_id
, SEG_LVL_REF_FRAME
)) {
// Segment dictates the reference frame: inter iff it is not INTRA_FRAME.
1745 return get_segdata(&cm
->seg
, segment_id
, SEG_LVL_REF_FRAME
) != INTRA_FRAME
;
1747 const int ctx
= av1_get_intra_inter_context(xd
);
1748 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
1749 const int is_inter
=
1750 aom_read_symbol(r
, ec_ctx
->intra_inter_cdf
[ctx
], 2, ACCT_STR
);
1751 FRAME_COUNTS
*counts
= xd
->counts
;
1752 if (counts
) ++counts
->intra_inter
[ctx
][is_inter
];
// Frame-parallel-mode sync callback: blocks until the previous frame's
// worker has decoded up to the given superblock row (mi_row converted to
// pixel rows via the superblock size log2), so MV references into the
// previous frame are safe to read.
// NOTE(review): the closing brace (original line 1760) is elided here.
1756 static void fpm_sync(void *const data
, int mi_row
) {
1757 AV1Decoder
*const pbi
= (AV1Decoder
*)data
;
1758 av1_frameworker_wait(pbi
->frame_worker_owner
, pbi
->common
.prev_frame
,
1759 mi_row
<< pbi
->common
.mib_size_log2
);
1762 #if DEC_MISMATCH_DEBUG
1763 static void dec_dump_logs(AV1_COMMON
*cm
, MODE_INFO
*const mi
, int mi_row
,
1764 int mi_col
, int16_t mode_ctx
) {
1765 int_mv mv
[2] = { { 0 } };
1766 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
1767 for (int ref
= 0; ref
< 1 + has_second_ref(mbmi
); ++ref
)
1768 mv
[ref
].as_mv
= mbmi
->mv
[ref
].as_mv
;
1770 const int16_t newmv_ctx
= mode_ctx
& NEWMV_CTX_MASK
;
1771 int16_t zeromv_ctx
= -1;
1772 int16_t refmv_ctx
= -1;
1773 if (mbmi
->mode
!= NEWMV
) {
1774 if (mode_ctx
& (1 << ALL_ZERO_FLAG_OFFSET
)) assert(mbmi
->mode
== GLOBALMV
);
1775 zeromv_ctx
= (mode_ctx
>> GLOBALMV_OFFSET
) & GLOBALMV_CTX_MASK
;
1776 if (mbmi
->mode
!= GLOBALMV
) {
1777 refmv_ctx
= (mode_ctx
>> REFMV_OFFSET
) & REFMV_CTX_MASK
;
1778 if (mode_ctx
& (1 << SKIP_NEARESTMV_OFFSET
)) refmv_ctx
= 6;
1779 if (mode_ctx
& (1 << SKIP_NEARMV_OFFSET
)) refmv_ctx
= 7;
1780 if (mode_ctx
& (1 << SKIP_NEARESTMV_SUB8X8_OFFSET
)) refmv_ctx
= 8;
1784 #define FRAME_TO_CHECK 11
1786 if (cm
->current_video_frame
== FRAME_TO_CHECK
&& cm
->show_frame
== 1) {
1789 "Frame=%d, (mi_row,mi_col)=(%d,%d), skip_mode=%d, mode=%d, bsize=%d, "
1790 "show_frame=%d, mv[0]=(%d,%d), mv[1]=(%d,%d), ref[0]=%d, "
1791 "ref[1]=%d, motion_mode=%d, mode_ctx=%d, "
1792 "newmv_ctx=%d, zeromv_ctx=%d, refmv_ctx=%d, tx_size=%d\n",
1793 cm
->current_video_frame
, mi_row
, mi_col
, mbmi
->skip_mode
, mbmi
->mode
,
1794 mbmi
->sb_type
, cm
->show_frame
, mv
[0].as_mv
.row
, mv
[0].as_mv
.col
,
1795 mv
[1].as_mv
.row
, mv
[1].as_mv
.col
, mbmi
->ref_frame
[0],
1796 mbmi
->ref_frame
[1], mbmi
->motion_mode
, mode_ctx
, newmv_ctx
, zeromv_ctx
,
1797 refmv_ctx
, mbmi
->tx_size
);
1799 if (cm
->current_video_frame
== FRAME_TO_CHECK
&& cm
->show_frame
== 1) {
1802 "Frame=%d, (mi_row,mi_col)=(%d,%d), mode=%d, bsize=%d, "
1803 "show_frame=%d, mv[0]=(%d,%d), mv[1]=(%d,%d), ref[0]=%d, "
1804 "ref[1]=%d, motion_mode=%d, mode_ctx=%d, "
1805 "newmv_ctx=%d, zeromv_ctx=%d, refmv_ctx=%d, tx_size=%d\n",
1806 cm
->current_video_frame
, mi_row
, mi_col
, mbmi
->mode
, mbmi
->sb_type
,
1807 cm
->show_frame
, mv
[0].as_mv
.row
, mv
[0].as_mv
.col
, mv
[1].as_mv
.row
,
1808 mv
[1].as_mv
.col
, mbmi
->ref_frame
[0], mbmi
->ref_frame
[1],
1809 mbmi
->motion_mode
, mode_ctx
, newmv_ctx
, zeromv_ctx
, refmv_ctx
,
1811 #endif // CONFIG_EXT_SKIP
1814 #endif // DEC_MISMATCH_DEBUG
1816 static void read_inter_block_mode_info(AV1Decoder
*const pbi
,
1817 MACROBLOCKD
*const xd
,
1818 MODE_INFO
*const mi
, int mi_row
,
1819 int mi_col
, aom_reader
*r
) {
1820 AV1_COMMON
*const cm
= &pbi
->common
;
1821 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
1822 const BLOCK_SIZE bsize
= mbmi
->sb_type
;
1823 const int allow_hp
= cm
->allow_high_precision_mv
;
1824 int_mv nearestmv
[2], nearmv
[2];
1825 int_mv ref_mvs
[MODE_CTX_REF_FRAMES
][MAX_MV_REF_CANDIDATES
] = { { { 0 } } };
1826 int16_t inter_mode_ctx
[MODE_CTX_REF_FRAMES
];
1827 int16_t compound_inter_mode_ctx
[MODE_CTX_REF_FRAMES
];
1828 int pts
[SAMPLES_ARRAY_SIZE
], pts_inref
[SAMPLES_ARRAY_SIZE
];
1829 #if CONFIG_EXT_WARPED_MOTION
1830 int pts_mv
[SAMPLES_ARRAY_SIZE
];
1831 #endif // CONFIG_EXT_WARPED_MOTION
1832 FRAME_CONTEXT
*ec_ctx
= xd
->tile_ctx
;
1834 assert(NELEMENTS(mode_2_counter
) == MB_MODE_COUNT
);
1836 mbmi
->uv_mode
= UV_DC_PRED
;
1837 mbmi
->palette_mode_info
.palette_size
[0] = 0;
1838 mbmi
->palette_mode_info
.palette_size
[1] = 0;
1840 read_ref_frames(cm
, xd
, r
, mbmi
->segment_id
, mbmi
->ref_frame
);
1841 const int is_compound
= has_second_ref(mbmi
);
1843 for (int ref
= 0; ref
< 1 + is_compound
; ++ref
) {
1844 MV_REFERENCE_FRAME frame
= mbmi
->ref_frame
[ref
];
1846 av1_find_mv_refs(cm
, xd
, mi
, frame
, &xd
->ref_mv_count
[frame
],
1847 xd
->ref_mv_stack
[frame
], compound_inter_mode_ctx
,
1848 ref_mvs
[frame
], mi_row
, mi_col
, fpm_sync
, (void *)pbi
,
1853 MV_REFERENCE_FRAME ref_frame
= av1_ref_frame_type(mbmi
->ref_frame
);
1854 av1_find_mv_refs(cm
, xd
, mi
, ref_frame
, &xd
->ref_mv_count
[ref_frame
],
1855 xd
->ref_mv_stack
[ref_frame
], compound_inter_mode_ctx
,
1856 ref_mvs
[ref_frame
], mi_row
, mi_col
, fpm_sync
, (void *)pbi
,
1859 if (xd
->ref_mv_count
[ref_frame
] < 2) {
1860 MV_REFERENCE_FRAME rf
[2];
1862 av1_set_ref_frame(rf
, ref_frame
);
1863 zeromv
[0].as_int
= gm_get_motion_vector(&cm
->global_motion
[rf
[0]],
1864 cm
->allow_high_precision_mv
,
1865 bsize
, mi_col
, mi_row
1868 cm
->cur_frame_force_integer_mv
1873 (rf
[1] != NONE_FRAME
)
1874 ? gm_get_motion_vector(&cm
->global_motion
[rf
[1]],
1875 cm
->allow_high_precision_mv
, bsize
, mi_col
,
1879 cm
->cur_frame_force_integer_mv
1884 for (int ref
= 0; ref
< 2; ++ref
) {
1885 if (rf
[ref
] == NONE_FRAME
) continue;
1887 lower_mv_precision(&ref_mvs
[rf
[ref
]][0].as_mv
, allow_hp
,
1888 cm
->cur_frame_force_integer_mv
);
1889 lower_mv_precision(&ref_mvs
[rf
[ref
]][1].as_mv
, allow_hp
,
1890 cm
->cur_frame_force_integer_mv
);
1892 lower_mv_precision(&ref_mvs
[rf
[ref
]][0].as_mv
, allow_hp
);
1893 lower_mv_precision(&ref_mvs
[rf
[ref
]][1].as_mv
, allow_hp
);
1895 if (ref_mvs
[rf
[ref
]][0].as_int
!= zeromv
[ref
].as_int
||
1896 ref_mvs
[rf
[ref
]][1].as_int
!= zeromv
[ref
].as_int
)
1897 inter_mode_ctx
[ref_frame
] &= ~(1 << ALL_ZERO_FLAG_OFFSET
);
1905 mode_ctx
= compound_inter_mode_ctx
[mbmi
->ref_frame
[0]];
1907 mode_ctx
= av1_mode_context_analyzer(inter_mode_ctx
, mbmi
->ref_frame
);
1908 mbmi
->ref_mv_idx
= 0;
1910 #if CONFIG_SEGMENT_GLOBALMV
1911 if (segfeature_active(&cm
->seg
, mbmi
->segment_id
, SEG_LVL_SKIP
) ||
1912 segfeature_active(&cm
->seg
, mbmi
->segment_id
, SEG_LVL_GLOBALMV
))
1914 if (segfeature_active(&cm
->seg
, mbmi
->segment_id
, SEG_LVL_SKIP
))
1917 mbmi
->mode
= GLOBALMV
;
1919 } else if (mbmi
->skip_mode
) {
1920 assert(is_compound
);
1921 mbmi
->mode
= NEAREST_NEARESTMV
;
1922 #endif // CONFIG_EXT_SKIP
1925 mbmi
->mode
= read_inter_compound_mode(cm
, xd
, r
, mode_ctx
);
1927 mbmi
->mode
= read_inter_mode(ec_ctx
, xd
, r
, mode_ctx
);
1928 if (mbmi
->mode
== NEWMV
|| mbmi
->mode
== NEW_NEWMV
||
1929 have_nearmv_in_inter_mode(mbmi
->mode
))
1930 read_drl_idx(ec_ctx
, xd
, mbmi
, r
);
1933 if (is_compound
!= is_inter_compound_mode(mbmi
->mode
)) {
1934 aom_internal_error(&cm
->error
, AOM_CODEC_CORRUPT_FRAME
,
1935 "Prediction mode %d invalid with ref frame %d %d",
1936 mbmi
->mode
, mbmi
->ref_frame
[0], mbmi
->ref_frame
[1]);
1939 if (mbmi
->mode
!= GLOBALMV
&& mbmi
->mode
!= GLOBAL_GLOBALMV
) {
1940 for (int ref
= 0; ref
< 1 + is_compound
; ++ref
) {
1942 av1_find_best_ref_mvs(allow_hp
, ref_mvs
[mbmi
->ref_frame
[ref
]],
1943 &nearestmv
[ref
], &nearmv
[ref
],
1944 cm
->cur_frame_force_integer_mv
);
1946 av1_find_best_ref_mvs(allow_hp
, ref_mvs
[mbmi
->ref_frame
[ref
]],
1947 &nearestmv
[ref
], &nearmv
[ref
]);
1952 if (is_compound
&& mbmi
->mode
!= GLOBAL_GLOBALMV
) {
1953 uint8_t ref_frame_type
= av1_ref_frame_type(mbmi
->ref_frame
);
1955 if (xd
->ref_mv_count
[ref_frame_type
] > 0) {
1956 if (mbmi
->mode
== NEAREST_NEARESTMV
) {
1957 nearestmv
[0] = xd
->ref_mv_stack
[ref_frame_type
][0].this_mv
;
1958 nearestmv
[1] = xd
->ref_mv_stack
[ref_frame_type
][0].comp_mv
;
1960 lower_mv_precision(&nearestmv
[0].as_mv
, allow_hp
,
1961 cm
->cur_frame_force_integer_mv
);
1962 lower_mv_precision(&nearestmv
[1].as_mv
, allow_hp
,
1963 cm
->cur_frame_force_integer_mv
);
1965 lower_mv_precision(&nearestmv
[0].as_mv
, allow_hp
);
1966 lower_mv_precision(&nearestmv
[1].as_mv
, allow_hp
);
1968 } else if (mbmi
->mode
== NEAREST_NEWMV
) {
1969 nearestmv
[0] = xd
->ref_mv_stack
[ref_frame_type
][0].this_mv
;
1972 lower_mv_precision(&nearestmv
[0].as_mv
, allow_hp
,
1973 cm
->cur_frame_force_integer_mv
);
1975 lower_mv_precision(&nearestmv
[0].as_mv
, allow_hp
);
1977 } else if (mbmi
->mode
== NEW_NEARESTMV
) {
1978 nearestmv
[1] = xd
->ref_mv_stack
[ref_frame_type
][0].comp_mv
;
1980 lower_mv_precision(&nearestmv
[1].as_mv
, allow_hp
,
1981 cm
->cur_frame_force_integer_mv
);
1983 lower_mv_precision(&nearestmv
[1].as_mv
, allow_hp
);
1988 if (xd
->ref_mv_count
[ref_frame_type
] > 1) {
1989 int ref_mv_idx
= 1 + mbmi
->ref_mv_idx
;
1990 if (compound_ref0_mode(mbmi
->mode
) == NEARMV
) {
1991 nearmv
[0] = xd
->ref_mv_stack
[ref_frame_type
][ref_mv_idx
].this_mv
;
1993 lower_mv_precision(&nearmv
[0].as_mv
, allow_hp
,
1994 cm
->cur_frame_force_integer_mv
);
1996 lower_mv_precision(&nearmv
[0].as_mv
, allow_hp
);
2000 if (compound_ref1_mode(mbmi
->mode
) == NEARMV
) {
2001 nearmv
[1] = xd
->ref_mv_stack
[ref_frame_type
][ref_mv_idx
].comp_mv
;
2003 lower_mv_precision(&nearmv
[1].as_mv
, allow_hp
,
2004 cm
->cur_frame_force_integer_mv
);
2006 lower_mv_precision(&nearmv
[1].as_mv
, allow_hp
);
2010 } else if (mbmi
->ref_mv_idx
> 0 && mbmi
->mode
== NEARMV
) {
2012 xd
->ref_mv_stack
[mbmi
->ref_frame
[0]][1 + mbmi
->ref_mv_idx
].this_mv
;
2017 ref_mv
[0] = nearestmv
[0];
2018 ref_mv
[1] = nearestmv
[1];
2021 int ref_mv_idx
= mbmi
->ref_mv_idx
;
2022 // Special case: NEAR_NEWMV and NEW_NEARMV modes use
2023 // 1 + mbmi->ref_mv_idx (like NEARMV) instead of
2024 // mbmi->ref_mv_idx (like NEWMV)
2025 if (mbmi
->mode
== NEAR_NEWMV
|| mbmi
->mode
== NEW_NEARMV
)
2026 ref_mv_idx
= 1 + mbmi
->ref_mv_idx
;
2028 if (compound_ref0_mode(mbmi
->mode
) == NEWMV
) {
2029 uint8_t ref_frame_type
= av1_ref_frame_type(mbmi
->ref_frame
);
2030 if (xd
->ref_mv_count
[ref_frame_type
] > 1) {
2031 ref_mv
[0] = xd
->ref_mv_stack
[ref_frame_type
][ref_mv_idx
].this_mv
;
2032 clamp_mv_ref(&ref_mv
[0].as_mv
, xd
->n8_w
<< MI_SIZE_LOG2
,
2033 xd
->n8_h
<< MI_SIZE_LOG2
, xd
);
2035 nearestmv
[0] = ref_mv
[0];
2037 if (compound_ref1_mode(mbmi
->mode
) == NEWMV
) {
2038 uint8_t ref_frame_type
= av1_ref_frame_type(mbmi
->ref_frame
);
2039 if (xd
->ref_mv_count
[ref_frame_type
] > 1) {
2040 ref_mv
[1] = xd
->ref_mv_stack
[ref_frame_type
][ref_mv_idx
].comp_mv
;
2041 clamp_mv_ref(&ref_mv
[1].as_mv
, xd
->n8_w
<< MI_SIZE_LOG2
,
2042 xd
->n8_h
<< MI_SIZE_LOG2
, xd
);
2044 nearestmv
[1] = ref_mv
[1];
2047 if (mbmi
->mode
== NEWMV
) {
2048 uint8_t ref_frame_type
= av1_ref_frame_type(mbmi
->ref_frame
);
2049 if (xd
->ref_mv_count
[ref_frame_type
] > 1) {
2050 ref_mv
[0] = xd
->ref_mv_stack
[ref_frame_type
][mbmi
->ref_mv_idx
].this_mv
;
2051 clamp_mv_ref(&ref_mv
[0].as_mv
, xd
->n8_w
<< MI_SIZE_LOG2
,
2052 xd
->n8_h
<< MI_SIZE_LOG2
, xd
);
2054 nearestmv
[0] = ref_mv
[0];
2059 if (mbmi
->skip_mode
) {
2060 assert(mbmi
->mode
== NEAREST_NEARESTMV
);
2061 mbmi
->mv
[0].as_int
= nearestmv
[0].as_int
;
2062 mbmi
->mv
[1].as_int
= nearestmv
[1].as_int
;
2064 #endif // CONFIG_EXT_SKIP
2065 int mv_corrupted_flag
=
2066 !assign_mv(cm
, xd
, mbmi
->mode
, mbmi
->ref_frame
, mbmi
->mv
, ref_mv
,
2067 nearestmv
, nearmv
, mi_row
, mi_col
, is_compound
, allow_hp
, r
);
2068 aom_merge_corrupted_flag(&xd
->corrupted
, mv_corrupted_flag
);
2071 #endif // CONFIG_EXT_SKIP
2073 mbmi
->use_wedge_interintra
= 0;
2074 if (cm
->allow_interintra_compound
&&
2075 #if !CONFIG_REF_ADAPT
2076 cm
->reference_mode
!= COMPOUND_REFERENCE
&&
2077 #endif // !CONFIG_REF_ADAPT
2080 #endif // CONFIG_EXT_SKIP
2081 is_interintra_allowed(mbmi
)) {
2082 const int bsize_group
= size_group_lookup
[bsize
];
2083 const int interintra
=
2084 aom_read_symbol(r
, ec_ctx
->interintra_cdf
[bsize_group
], 2, ACCT_STR
);
2085 if (xd
->counts
) xd
->counts
->interintra
[bsize_group
][interintra
]++;
2086 assert(mbmi
->ref_frame
[1] == NONE_FRAME
);
2088 const INTERINTRA_MODE interintra_mode
=
2089 read_interintra_mode(xd
, r
, bsize_group
);
2090 mbmi
->ref_frame
[1] = INTRA_FRAME
;
2091 mbmi
->interintra_mode
= interintra_mode
;
2092 #if CONFIG_EXT_INTRA
2093 mbmi
->angle_delta
[0] = 0;
2094 mbmi
->angle_delta
[1] = 0;
2095 #endif // CONFIG_EXT_INTRA
2096 #if CONFIG_FILTER_INTRA
2097 mbmi
->filter_intra_mode_info
.use_filter_intra
= 0;
2098 #endif // CONFIG_FILTER_INTRA
2099 if (is_interintra_wedge_used(bsize
)) {
2100 mbmi
->use_wedge_interintra
= aom_read_symbol(
2101 r
, ec_ctx
->wedge_interintra_cdf
[bsize
], 2, ACCT_STR
);
2103 xd
->counts
->wedge_interintra
[bsize
][mbmi
->use_wedge_interintra
]++;
2104 if (mbmi
->use_wedge_interintra
) {
2105 mbmi
->interintra_wedge_index
=
2106 aom_read_literal(r
, get_wedge_bits_lookup(bsize
), ACCT_STR
);
2107 mbmi
->interintra_wedge_sign
= 0;
2113 for (int ref
= 0; ref
< 1 + has_second_ref(mbmi
); ++ref
) {
2114 const MV_REFERENCE_FRAME frame
= mbmi
->ref_frame
[ref
];
2115 RefBuffer
*ref_buf
= &cm
->frame_refs
[frame
- LAST_FRAME
];
2117 xd
->block_refs
[ref
] = ref_buf
;
2120 mbmi
->motion_mode
= SIMPLE_TRANSLATION
;
2121 if (mbmi
->sb_type
>= BLOCK_8X8
&&
2124 #endif // CONFIG_EXT_SKIP
2125 !has_second_ref(mbmi
))
2126 #if CONFIG_EXT_WARPED_MOTION
2127 mbmi
->num_proj_ref
[0] =
2128 findSamples(cm
, xd
, mi_row
, mi_col
, pts
, pts_inref
, pts_mv
);
2130 mbmi
->num_proj_ref
[0] = findSamples(cm
, xd
, mi_row
, mi_col
, pts
, pts_inref
);
2131 #endif // CONFIG_EXT_WARPED_MOTION
2132 av1_count_overlappable_neighbors(cm
, xd
, mi_row
, mi_col
);
2134 if (mbmi
->ref_frame
[1] != INTRA_FRAME
)
2135 mbmi
->motion_mode
= read_motion_mode(xd
, mi
, r
);
2139 mbmi
->comp_group_idx
= 0;
2140 mbmi
->compound_idx
= 1;
2141 mbmi
->interinter_compound_type
= COMPOUND_AVERAGE
;
2143 if (has_second_ref(mbmi
)
2146 #endif // CONFIG_EXT_SKIP
2148 // Read idx to indicate current compound inter prediction mode group
2149 const int masked_compound_used
=
2150 is_any_masked_compound_used(bsize
) && cm
->allow_masked_compound
;
2152 if (masked_compound_used
) {
2153 const int ctx_comp_group_idx
= get_comp_group_idx_context(xd
);
2154 mbmi
->comp_group_idx
= aom_read_symbol(
2155 r
, ec_ctx
->comp_group_idx_cdf
[ctx_comp_group_idx
], 2, ACCT_STR
);
2157 ++xd
->counts
->comp_group_idx
[ctx_comp_group_idx
][mbmi
->comp_group_idx
];
2160 if (mbmi
->comp_group_idx
== 0) {
2161 const int comp_index_ctx
= get_comp_index_context(cm
, xd
);
2162 mbmi
->compound_idx
= aom_read_symbol(
2163 r
, ec_ctx
->compound_index_cdf
[comp_index_ctx
], 2, ACCT_STR
);
2166 ++xd
->counts
->compound_index
[comp_index_ctx
][mbmi
->compound_idx
];
2168 assert(cm
->reference_mode
!= SINGLE_REFERENCE
&&
2169 is_inter_compound_mode(mbmi
->mode
) &&
2170 mbmi
->motion_mode
== SIMPLE_TRANSLATION
);
2171 assert(masked_compound_used
);
2173 // compound_segment, wedge
2174 if (is_interinter_compound_used(COMPOUND_WEDGE
, bsize
))
2175 mbmi
->interinter_compound_type
=
2176 1 + aom_read_symbol(r
, ec_ctx
->compound_type_cdf
[bsize
],
2177 COMPOUND_TYPES
- 1, ACCT_STR
);
2179 mbmi
->interinter_compound_type
= COMPOUND_SEG
;
2181 if (mbmi
->interinter_compound_type
== COMPOUND_WEDGE
) {
2182 assert(is_interinter_compound_used(COMPOUND_WEDGE
, bsize
));
2184 aom_read_literal(r
, get_wedge_bits_lookup(bsize
), ACCT_STR
);
2185 mbmi
->wedge_sign
= aom_read_bit(r
, ACCT_STR
);
2187 assert(mbmi
->interinter_compound_type
== COMPOUND_SEG
);
2188 mbmi
->mask_type
= aom_read_literal(r
, MAX_SEG_MASK_BITS
, ACCT_STR
);
2193 ->compound_interinter
[bsize
][mbmi
->interinter_compound_type
- 1]++;
2196 #else // CONFIG_JNT_COMP
2197 mbmi
->interinter_compound_type
= COMPOUND_AVERAGE
;
2198 if (cm
->reference_mode
!= SINGLE_REFERENCE
&&
2199 is_inter_compound_mode(mbmi
->mode
) &&
2200 mbmi
->motion_mode
== SIMPLE_TRANSLATION
2203 #endif // CONFIG_EXT_SKIP
2205 if (is_any_masked_compound_used(bsize
)) {
2206 if (cm
->allow_masked_compound
) {
2207 if (!is_interinter_compound_used(COMPOUND_WEDGE
, bsize
))
2208 mbmi
->interinter_compound_type
=
2209 aom_read_bit(r
, ACCT_STR
) ? COMPOUND_AVERAGE
: COMPOUND_SEG
;
2211 mbmi
->interinter_compound_type
= aom_read_symbol(
2212 r
, ec_ctx
->compound_type_cdf
[bsize
], COMPOUND_TYPES
, ACCT_STR
);
2213 if (mbmi
->interinter_compound_type
== COMPOUND_WEDGE
) {
2214 assert(is_interinter_compound_used(COMPOUND_WEDGE
, bsize
));
2216 aom_read_literal(r
, get_wedge_bits_lookup(bsize
), ACCT_STR
);
2217 mbmi
->wedge_sign
= aom_read_bit(r
, ACCT_STR
);
2219 if (mbmi
->interinter_compound_type
== COMPOUND_SEG
) {
2220 mbmi
->mask_type
= aom_read_literal(r
, MAX_SEG_MASK_BITS
, ACCT_STR
);
2224 mbmi
->interinter_compound_type
= COMPOUND_AVERAGE
;
2227 xd
->counts
->compound_interinter
[bsize
][mbmi
->interinter_compound_type
]++;
2229 #endif // CONFIG_JNT_COMP
2231 read_mb_interp_filter(cm
, xd
, mbmi
, r
);
2233 if (mbmi
->motion_mode
== WARPED_CAUSAL
) {
2234 mbmi
->wm_params
[0].wmtype
= DEFAULT_WMTYPE
;
2236 #if CONFIG_EXT_WARPED_MOTION
2237 if (mbmi
->num_proj_ref
[0] > 1)
2238 mbmi
->num_proj_ref
[0] = sortSamples(pts_mv
, &mbmi
->mv
[0].as_mv
, pts
,
2239 pts_inref
, mbmi
->num_proj_ref
[0]);
2240 #endif // CONFIG_EXT_WARPED_MOTION
2242 if (find_projection(mbmi
->num_proj_ref
[0], pts
, pts_inref
, bsize
,
2243 mbmi
->mv
[0].as_mv
.row
, mbmi
->mv
[0].as_mv
.col
,
2244 &mbmi
->wm_params
[0], mi_row
, mi_col
)) {
2245 #if WARPED_MOTION_DEBUG
2246 printf("Warning: unexpected warped model from aomenc\n");
2248 mbmi
->wm_params
[0].invalid
= 1;
2252 #if DEC_MISMATCH_DEBUG
2253 dec_dump_logs(cm
, mi
, mi_row
, mi_col
, mode_ctx
);
2254 #endif // DEC_MISMATCH_DEBUG
2257 static void read_inter_frame_mode_info(AV1Decoder
*const pbi
,
2258 MACROBLOCKD
*const xd
, int mi_row
,
2259 int mi_col
, aom_reader
*r
) {
2260 AV1_COMMON
*const cm
= &pbi
->common
;
2261 MODE_INFO
*const mi
= xd
->mi
[0];
2262 MB_MODE_INFO
*const mbmi
= &mi
->mbmi
;
2263 int inter_block
= 1;
2264 BLOCK_SIZE bsize
= mbmi
->sb_type
;
2266 mbmi
->mv
[0].as_int
= 0;
2267 mbmi
->mv
[1].as_int
= 0;
2268 mbmi
->segment_id
= read_inter_segment_id(cm
, xd
, mi_row
, mi_col
, 1, r
);
2271 mbmi
->skip_mode
= read_skip_mode(cm
, xd
, mbmi
->segment_id
, r
);
2273 if (mbmi
->skip_mode
)
2276 #endif // CONFIG_EXT_SKIP
2277 mbmi
->skip
= read_skip(cm
, xd
, mbmi
->segment_id
, r
);
2279 #if CONFIG_SPATIAL_SEGMENTATION
2280 mbmi
->segment_id
= read_inter_segment_id(cm
, xd
, mi_row
, mi_col
, 0, r
);
2283 read_cdef(cm
, r
, mbmi
, mi_col
, mi_row
);
2285 if (cm
->delta_q_present_flag
) {
2286 xd
->current_qindex
=
2288 read_delta_qindex(cm
, xd
, r
, mbmi
, mi_col
, mi_row
) * cm
->delta_q_res
;
2289 /* Normative: Clamp to [1,MAXQ] to not interfere with lossless mode */
2290 xd
->current_qindex
= clamp(xd
->current_qindex
, 1, MAXQ
);
2291 xd
->prev_qindex
= xd
->current_qindex
;
2292 #if CONFIG_EXT_DELTA_Q
2293 if (cm
->delta_lf_present_flag
) {
2294 #if CONFIG_LOOPFILTER_LEVEL
2295 if (cm
->delta_lf_multi
) {
2296 for (int lf_id
= 0; lf_id
< FRAME_LF_COUNT
; ++lf_id
) {
2298 xd
->prev_delta_lf
[lf_id
] +
2299 read_delta_lflevel(cm
, xd
, r
, lf_id
, mbmi
, mi_col
, mi_row
) *
2301 mbmi
->curr_delta_lf
[lf_id
] = xd
->curr_delta_lf
[lf_id
] =
2302 clamp(tmp_lvl
, -MAX_LOOP_FILTER
, MAX_LOOP_FILTER
);
2303 xd
->prev_delta_lf
[lf_id
] = xd
->curr_delta_lf
[lf_id
];
2307 xd
->prev_delta_lf_from_base
+
2308 read_delta_lflevel(cm
, xd
, r
, -1, mbmi
, mi_col
, mi_row
) *
2310 mbmi
->current_delta_lf_from_base
= xd
->current_delta_lf_from_base
=
2311 clamp(tmp_lvl
, -MAX_LOOP_FILTER
, MAX_LOOP_FILTER
);
2312 xd
->prev_delta_lf_from_base
= xd
->current_delta_lf_from_base
;
2315 const int current_delta_lf_from_base
=
2316 xd
->prev_delta_lf_from_base
+
2317 read_delta_lflevel(cm
, xd
, r
, mbmi
, mi_col
, mi_row
) *
2319 mbmi
->current_delta_lf_from_base
= xd
->current_delta_lf_from_base
=
2320 clamp(current_delta_lf_from_base
, -MAX_LOOP_FILTER
, MAX_LOOP_FILTER
);
2321 xd
->prev_delta_lf_from_base
= xd
->current_delta_lf_from_base
;
2322 #endif // CONFIG_LOOPFILTER_LEVEL
2324 #endif // CONFIG_EXT_DELTA_Q
2328 if (!mbmi
->skip_mode
)
2329 #endif // CONFIG_EXT_SKIP
2330 inter_block
= read_is_inter_block(cm
, xd
, mbmi
->segment_id
, r
);
2332 mbmi
->current_q_index
= xd
->current_qindex
;
2334 xd
->above_txfm_context
=
2335 cm
->above_txfm_context
+ (mi_col
<< TX_UNIT_WIDE_LOG2
);
2336 xd
->left_txfm_context
= xd
->left_txfm_context_buffer
+
2337 ((mi_row
& MAX_MIB_MASK
) << TX_UNIT_HIGH_LOG2
);
2339 if (cm
->tx_mode
== TX_MODE_SELECT
&& block_signals_txsize(bsize
) &&
2340 !mbmi
->skip
&& inter_block
&& !xd
->lossless
[mbmi
->segment_id
]) {
2341 const TX_SIZE max_tx_size
= get_max_rect_tx_size(bsize
, inter_block
);
2342 const int bh
= tx_size_high_unit
[max_tx_size
];
2343 const int bw
= tx_size_wide_unit
[max_tx_size
];
2344 const int width
= block_size_wide
[bsize
] >> tx_size_wide_log2
[0];
2345 const int height
= block_size_high
[bsize
] >> tx_size_wide_log2
[0];
2347 mbmi
->min_tx_size
= TX_SIZES_ALL
;
2348 for (int idy
= 0; idy
< height
; idy
+= bh
)
2349 for (int idx
= 0; idx
< width
; idx
+= bw
)
2350 read_tx_size_vartx(cm
, xd
, mbmi
, xd
->counts
, max_tx_size
, 0, idy
, idx
,
2353 mbmi
->tx_size
= read_tx_size(cm
, xd
, inter_block
, !mbmi
->skip
, r
);
2356 const int width
= block_size_wide
[bsize
] >> tx_size_wide_log2
[0];
2357 const int height
= block_size_high
[bsize
] >> tx_size_high_log2
[0];
2358 for (int idy
= 0; idy
< height
; ++idy
)
2359 for (int idx
= 0; idx
< width
; ++idx
)
2360 mbmi
->inter_tx_size
[idy
>> 1][idx
>> 1] = mbmi
->tx_size
;
2362 mbmi
->min_tx_size
= mbmi
->tx_size
;
2363 set_txfm_ctxs(mbmi
->tx_size
, xd
->n8_w
, xd
->n8_h
, mbmi
->skip
, xd
);
2367 read_inter_block_mode_info(pbi
, xd
, mi
, mi_row
, mi_col
, r
);
2369 read_intra_block_mode_info(cm
, mi_row
, mi_col
, xd
, mi
, r
);
2373 if (!mbmi
->skip_mode
)
2374 #endif // CONFIG_EXT_SKIP
2375 av1_read_tx_type(cm
, xd
, r
);
2376 #endif // !CONFIG_TXK_SEL
2379 static void av1_intra_copy_frame_mvs(AV1_COMMON
*const cm
, int mi_row
,
2380 int mi_col
, int x_mis
, int y_mis
) {
2381 #if CONFIG_TMV || CONFIG_MFMV
2382 const int frame_mvs_stride
= ROUND_POWER_OF_TWO(cm
->mi_cols
, 1);
2384 cm
->cur_frame
->mvs
+ (mi_row
>> 1) * frame_mvs_stride
+ (mi_col
>> 1);
2385 x_mis
= ROUND_POWER_OF_TWO(x_mis
, 1);
2386 y_mis
= ROUND_POWER_OF_TWO(y_mis
, 1);
2388 const int frame_mvs_stride
= cm
->mi_cols
;
2389 MV_REF
*frame_mvs
= cm
->cur_frame
->mvs
+
2390 (mi_row
& 0xfffe) * frame_mvs_stride
+ (mi_col
& 0xfffe);
2391 x_mis
= AOMMAX(x_mis
, 2);
2392 y_mis
= AOMMAX(y_mis
, 2);
2393 #endif // CONFIG_TMV
2395 for (int h
= 0; h
< y_mis
; h
++) {
2396 MV_REF
*mv
= frame_mvs
;
2397 for (int w
= 0; w
< x_mis
; w
++) {
2398 mv
->ref_frame
[0] = NONE_FRAME
;
2399 mv
->ref_frame
[1] = NONE_FRAME
;
2402 frame_mvs
+= frame_mvs_stride
;
2406 void av1_read_mode_info(AV1Decoder
*const pbi
, MACROBLOCKD
*xd
, int mi_row
,
2407 int mi_col
, aom_reader
*r
, int x_mis
, int y_mis
) {
2408 AV1_COMMON
*const cm
= &pbi
->common
;
2409 MODE_INFO
*const mi
= xd
->mi
[0];
2411 mi
->mbmi
.use_intrabc
= 0;
2412 #endif // CONFIG_INTRABC
2414 if (frame_is_intra_only(cm
)) {
2415 read_intra_frame_mode_info(cm
, xd
, mi_row
, mi_col
, r
);
2416 av1_intra_copy_frame_mvs(cm
, mi_row
, mi_col
, x_mis
, y_mis
);
2418 read_inter_frame_mode_info(pbi
, xd
, mi_row
, mi_col
, r
);
2419 av1_copy_frame_mvs(cm
, mi
, mi_row
, mi_col
, x_mis
, y_mis
);