Moved dequantization into the token decoder
[aom.git] / vp9 / common / vp9_blockd.h
blob f5ef3c50758427eb923f6f08d96ff088e7c33a9f
/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VP9_COMMON_VP9_BLOCKD_H_
#define VP9_COMMON_VP9_BLOCKD_H_

#include "./vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "vp9/common/vp9_convolve.h"
#include "vp9/common/vp9_mv.h"
#include "vp9/common/vp9_treecoder.h"
#include "vpx_ports/mem.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_enums.h"
// #define MODE_STATS

#define MB_FEATURE_TREE_PROBS 3
#define PREDICTION_PROBS 3

#define DEFAULT_PRED_PROB_0 120
#define DEFAULT_PRED_PROB_1 80
#define DEFAULT_PRED_PROB_2 40

#define MBSKIP_CONTEXTS 3

#define MAX_MB_SEGMENTS 4

#define MAX_REF_LF_DELTAS 4
#define MAX_MODE_LF_DELTAS 4

/* Segment Feature Masks */
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
#define MAX_MV_REFS 9
#define MAX_MV_REF_CANDIDATES 2
typedef enum {
  PLANE_TYPE_Y_WITH_DC,
  PLANE_TYPE_UV,
} PLANE_TYPE;

typedef char ENTROPY_CONTEXT;
typedef struct {
  ENTROPY_CONTEXT y1[4];
  ENTROPY_CONTEXT u[2];
  ENTROPY_CONTEXT v[2];
} ENTROPY_CONTEXT_PLANES;

static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a,
                                           ENTROPY_CONTEXT b) {
  return (a != 0) + (b != 0);
}
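/* Example: the combined context simply counts how many of the two neighbouring
 * entropy contexts are non-zero, so the result is always 0, 1 or 2:
 *   combine_entropy_contexts(0, 0) == 0
 *   combine_entropy_contexts(0, 3) == 1
 *   combine_entropy_contexts(2, 1) == 2
 */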
typedef enum {
  KEY_FRAME = 0,
  INTER_FRAME = 1
} FRAME_TYPE;

typedef enum {
#if CONFIG_ENABLE_6TAP
  SIXTAP,
#endif
  EIGHTTAP_SMOOTH,
  EIGHTTAP,
  EIGHTTAP_SHARP,
  BILINEAR,
  SWITCHABLE  /* should be the last one */
} INTERPOLATIONFILTERTYPE;

typedef enum {
  DC_PRED,    /* average of above and left pixels */
  V_PRED,     /* vertical prediction */
  H_PRED,     /* horizontal prediction */
  D45_PRED,   /* Directional 45 deg prediction  [anti-clockwise from 0 deg hor] */
  D135_PRED,  /* Directional 135 deg prediction [anti-clockwise from 0 deg hor] */
  D117_PRED,  /* Directional 112 deg prediction [anti-clockwise from 0 deg hor] */
  D153_PRED,  /* Directional 157 deg prediction [anti-clockwise from 0 deg hor] */
  D27_PRED,   /* Directional 22 deg prediction  [anti-clockwise from 0 deg hor] */
  D63_PRED,   /* Directional 67 deg prediction  [anti-clockwise from 0 deg hor] */
  TM_PRED,    /* Truemotion prediction */
  I8X8_PRED,  /* 8x8 based prediction, each 8x8 has its own mode */
  I4X4_PRED,  /* 4x4 based prediction, each 4x4 has its own mode */
  NEARESTMV,
  NEARMV,
  ZEROMV,
  NEWMV,
  SPLITMV,
  MB_MODE_COUNT
} MB_PREDICTION_MODE;
// Segment level features.
typedef enum {
  SEG_LVL_ALT_Q = 0,      // Use alternate quantizer
  SEG_LVL_ALT_LF = 1,     // Use alternate loop filter value
  SEG_LVL_REF_FRAME = 2,  // Optional segment reference frame
  SEG_LVL_SKIP = 3,       // Optional segment (0,0) + skip mode
  SEG_LVL_MAX = 4         // Number of MB level features supported
} SEG_LVL_FEATURES;

// Transform sizes.
typedef enum {
  TX_4X4 = 0,                 // 4x4 dct transform
  TX_8X8 = 1,                 // 8x8 dct transform
  TX_16X16 = 2,               // 16x16 dct transform
  TX_SIZE_MAX_MB = 3,         // Number of different transforms available to MBs
  TX_32X32 = TX_SIZE_MAX_MB,  // 32x32 dct transform
  TX_SIZE_MAX_SB,             // Number of transforms available to SBs
} TX_SIZE;
typedef enum {
  DCT_DCT = 0,    // DCT in both horizontal and vertical
  ADST_DCT = 1,   // ADST in vertical, DCT in horizontal
  DCT_ADST = 2,   // DCT in vertical, ADST in horizontal
  ADST_ADST = 3   // ADST in both directions
} TX_TYPE;

#define VP9_YMODES (I4X4_PRED + 1)
#define VP9_UV_MODES (TM_PRED + 1)
#define VP9_I8X8_MODES (TM_PRED + 1)
#define VP9_I32X32_MODES (TM_PRED + 1)

#define VP9_MVREFS (1 + SPLITMV - NEARESTMV)

#define WHT_UPSCALE_FACTOR 2
typedef enum {
  B_DC_PRED,   /* average of above and left pixels */
  B_V_PRED,    /* vertical prediction */
  B_H_PRED,    /* horizontal prediction */
  B_D45_PRED,
  B_D135_PRED,
  B_D117_PRED,
  B_D153_PRED,
  B_D27_PRED,
  B_D63_PRED,
  B_TM_PRED,
#if CONFIG_NEWBINTRAMODES
  B_CONTEXT_PRED,
#endif

  LEFT4X4,
  ABOVE4X4,
  ZERO4X4,
  NEW4X4,

  B_MODE_COUNT
} B_PREDICTION_MODE;

#define VP9_BINTRAMODES (LEFT4X4)
#define VP9_SUBMVREFS (1 + NEW4X4 - LEFT4X4)

#if CONFIG_NEWBINTRAMODES
/* The number of I4X4_PRED intra modes that are replaced by B_CONTEXT_PRED */
#define CONTEXT_PRED_REPLACEMENTS 0
#define VP9_KF_BINTRAMODES (VP9_BINTRAMODES - 1)
#define VP9_NKF_BINTRAMODES (VP9_BINTRAMODES - CONTEXT_PRED_REPLACEMENTS)
#else
#define VP9_KF_BINTRAMODES (VP9_BINTRAMODES)   /* 10 */
#define VP9_NKF_BINTRAMODES (VP9_BINTRAMODES)  /* 10 */
#endif
typedef enum {
  PARTITIONING_16X8 = 0,
  PARTITIONING_8X16,
  PARTITIONING_8X8,
  PARTITIONING_4X4,
  NB_PARTITIONINGS,
} SPLITMV_PARTITIONING_TYPE;

/* For keyframes, intra block modes are predicted by the (already decoded)
   modes for the Y blocks to the left and above us; for interframes, there
   is a single probability table. */
union b_mode_info {
  struct {
    B_PREDICTION_MODE first;
#if CONFIG_NEWBINTRAMODES
    B_PREDICTION_MODE context;
#endif
  } as_mode;
  int_mv as_mv[2];  // first, second inter predictor motion vectors
};

typedef enum {
  NONE = -1,
  INTRA_FRAME = 0,
  LAST_FRAME = 1,
  GOLDEN_FRAME = 2,
  ALTREF_FRAME = 3,
  MAX_REF_FRAMES = 4
} MV_REFERENCE_FRAME;
static INLINE int mb_width_log2(BLOCK_SIZE_TYPE sb_type) {
  switch (sb_type) {
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB16X32:
#endif
    case BLOCK_SIZE_MB16X16: return 0;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X16:
    case BLOCK_SIZE_SB32X64:
#endif
    case BLOCK_SIZE_SB32X32: return 1;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB64X32:
#endif
    case BLOCK_SIZE_SB64X64: return 2;
    default: assert(0);
  }
}

static INLINE int mb_height_log2(BLOCK_SIZE_TYPE sb_type) {
  switch (sb_type) {
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X16:
#endif
    case BLOCK_SIZE_MB16X16: return 0;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB16X32:
    case BLOCK_SIZE_SB64X32:
#endif
    case BLOCK_SIZE_SB32X32: return 1;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X64:
#endif
    case BLOCK_SIZE_SB64X64: return 2;
    default: assert(0);
  }
}

// Block dimensions in units of 4x4 blocks.
static INLINE int b_width_log2(BLOCK_SIZE_TYPE sb_type) {
  return mb_width_log2(sb_type) + 2;
}

static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
  return mb_height_log2(sb_type) + 2;
}

static INLINE int partition_plane(BLOCK_SIZE_TYPE sb_type) {
  assert(mb_width_log2(sb_type) == mb_height_log2(sb_type));
  return (mb_width_log2(sb_type) - 1);
}
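/* Example of the size mappings above: a 16x16 macroblock has
 * mb_width_log2 == 0 and b_width_log2 == 2 (4 columns of 4x4 blocks), while a
 * 64x64 superblock has mb_width_log2 == 2 and b_width_log2 == 4 (16 columns).
 * partition_plane() therefore yields 0 for BLOCK_SIZE_SB32X32 and 1 for
 * BLOCK_SIZE_SB64X64.
 */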
typedef struct {
  MB_PREDICTION_MODE mode, uv_mode;
#if CONFIG_COMP_INTERINTRA_PRED
  MB_PREDICTION_MODE interintra_mode, interintra_uv_mode;
#endif
  MV_REFERENCE_FRAME ref_frame, second_ref_frame;
  TX_SIZE txfm_size;
  int_mv mv[2];  // for each reference frame used
  int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
  int_mv best_mv, best_second_mv;

  int mb_mode_context[MAX_REF_FRAMES];

  SPLITMV_PARTITIONING_TYPE partitioning;
  unsigned char mb_skip_coeff;  /* does this mb have coefficients at all: 1 = no coefficients, 0 = tokens need to be decoded */
  unsigned char need_to_clamp_mvs;
  unsigned char need_to_clamp_secondmv;
  unsigned char segment_id;  /* Which set of segmentation parameters should be used for this MB */

  // Flags used for prediction status of various bitstream signals
  unsigned char seg_id_predicted;
  unsigned char ref_predicted;

  // Indicates if the mb is part of the image (1) vs border (0)
  // This can be useful in determining whether the MB provides
  // a valid predictor
  unsigned char mb_in_image;

  INTERPOLATIONFILTERTYPE interp_filter;

  BLOCK_SIZE_TYPE sb_type;
#if CONFIG_CODE_NONZEROCOUNT
  uint16_t nzcs[256+64*2];
#endif
} MB_MODE_INFO;
typedef struct {
  MB_MODE_INFO mbmi;
  union b_mode_info bmi[16];
} MODE_INFO;

typedef struct blockd {
  int16_t *diff;
  int16_t *dequant;

  /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
  uint8_t **base_pre;
  uint8_t **base_second_pre;
  int pre;
  int pre_stride;

  uint8_t **base_dst;
  int dst;
  int dst_stride;

  union b_mode_info bmi;
} BLOCKD;
struct scale_factors {
  int x_num;
  int x_den;
  int x_offset_q4;
  int x_step_q4;
  int y_num;
  int y_den;
  int y_offset_q4;
  int y_step_q4;

  int (*scale_value_x)(int val, const struct scale_factors *scale);
  int (*scale_value_y)(int val, const struct scale_factors *scale);
  void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
  int_mv32 (*scale_motion_vector_q3_to_q4)(const int_mv *src_mv,
                                           const struct scale_factors *scale);
  int32_t (*scale_motion_vector_component_q4)(int mv_q4,
                                              int num,
                                              int den,
                                              int offset_q4);

#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
  convolve_fn_t predict[2][2][8];  // horiz, vert, weight (0 - 7)
#else
  convolve_fn_t predict[2][2][2];  // horiz, vert, avg
#endif
};

enum { MAX_MB_PLANE = 3 };
struct buf_2d {
  uint8_t *buf;
  int stride;
};

struct mb_plane {
  DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
  DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
  DECLARE_ALIGNED(16, uint16_t, eobs[256]);
  DECLARE_ALIGNED(16, int16_t, diff[64 * 64]);
  PLANE_TYPE plane_type;
  int subsampling_x;
  int subsampling_y;
  struct buf_2d dst;
  struct buf_2d pre[2];
};
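/* Note on the sizing above: the per-plane coefficient and residual buffers are
 * dimensioned for the largest (64x64) superblock, i.e. 64 * 64 = 4096 int16_t
 * entries per plane, with one end-of-block entry per 4x4 block
 * (4096 / 16 = 256 eobs entries).
 */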
#define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n))

#define MB_SUBBLOCK_FIELD(x, field, i) (\
    ((i) < 16) ? BLOCK_OFFSET((x)->plane[0].field, (i), 16) : \
    ((i) < 20) ? BLOCK_OFFSET((x)->plane[1].field, ((i) - 16), 16) : \
                 BLOCK_OFFSET((x)->plane[2].field, ((i) - 20), 16))
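/* Example: with the legacy 24-block indexing (16 Y, 4 U, 4 V), block 18 is the
 * third U block, so MB_SUBBLOCK_FIELD(xd, qcoeff, 18) expands to
 * BLOCK_OFFSET(xd->plane[1].qcoeff, 2, 16), i.e. xd->plane[1].qcoeff + 32
 * (xd here is assumed to be a MACROBLOCKD pointer).
 */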
typedef struct macroblockd {
#if CONFIG_CODE_NONZEROCOUNT
  DECLARE_ALIGNED(16, uint16_t, nzcs[256+64*2]);
#endif
  struct mb_plane plane[MAX_MB_PLANE];

  /* 16 Y blocks, 4 U, 4 V, each with 16 entries. */
  BLOCKD block[24];

  YV12_BUFFER_CONFIG pre;  /* Filtered copy of previous frame reconstruction */
  YV12_BUFFER_CONFIG second_pre;
  struct scale_factors scale_factor[2];
  struct scale_factors scale_factor_uv[2];

  MODE_INFO *prev_mode_info_context;
  MODE_INFO *mode_info_context;
  int mode_info_stride;

  FRAME_TYPE frame_type;

  int up_available;
  int left_available;
  int right_available;

  /* Y,U,V */
  ENTROPY_CONTEXT_PLANES *above_context;
  ENTROPY_CONTEXT_PLANES *left_context;

  /* 0 indicates segmentation at MB level is not enabled. Otherwise the
     individual bits indicate which features are active. */
  unsigned char segmentation_enabled;

  /* 0 (do not update) or 1 (update) the macroblock segmentation map. */
  unsigned char update_mb_segmentation_map;

  /* 0 (do not update) or 1 (update) the macroblock segmentation feature data. */
  unsigned char update_mb_segmentation_data;

  /* 0 (SEGMENT_DELTADATA) or 1 (SEGMENT_ABSDATA): whether segment feature data
     is coded relative to the frame value or as an absolute value. */
  unsigned char mb_segment_abs_delta;

  /* Per-frame flags that define which MB level features (such as quantizer or
     loop filter level) are enabled and, when enabled, the probabilities used
     to decode the per-MB flags in MB_MODE_INFO */

  // Probability tree used to code the segment number
  vp9_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
  vp9_prob mb_segment_mispred_tree_probs[MAX_MB_SEGMENTS];

  // Segment features
  signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
  unsigned int segment_feature_mask[MAX_MB_SEGMENTS];

  /* Mode-based loop filter adjustment */
  unsigned char mode_ref_lf_delta_enabled;
  unsigned char mode_ref_lf_delta_update;

  /* Delta values have the range +/- MAX_LOOP_FILTER */
  /* 0 = Intra, Last, GF, ARF */
  signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
  /* 0 = Intra, Last, GF, ARF */
  signed char ref_lf_deltas[MAX_REF_LF_DELTAS];
  /* 0 = I4X4_PRED, ZERO_MV, MV, SPLIT */
  signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
  /* 0 = I4X4_PRED, ZERO_MV, MV, SPLIT */
  signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];

  /* Distance of MB away from frame edges */
  int mb_to_left_edge;
  int mb_to_right_edge;
  int mb_to_top_edge;
  int mb_to_bottom_edge;

  unsigned int frames_since_golden;
  unsigned int frames_till_alt_ref_frame;

  int lossless;
  /* Inverse transform function pointers. */
  void (*inv_txm4x4_1)(int16_t *input, int16_t *output, int pitch);
  void (*inv_txm4x4)(int16_t *input, int16_t *output, int pitch);
  void (*itxm_add)(int16_t *input, uint8_t *dest, int stride, int eob);
  void (*itxm_add_y_block)(int16_t *q, uint8_t *dst, int stride,
                           struct macroblockd *xd);
  void (*itxm_add_uv_block)(int16_t *q, uint8_t *dst, int stride,
                            uint16_t *eobs);

  struct subpix_fn_table subpix;

  int allow_high_precision_mv;

  int corrupted;

  int sb_index;
  int mb_index;  // Index of the MB in the SB (0..3)
  int q_index;

} MACROBLOCKD;
#define ACTIVE_HT 110   // quantization stepsize threshold

#define ACTIVE_HT8 300

#define ACTIVE_HT16 300

// convert MB_PREDICTION_MODE to B_PREDICTION_MODE
static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
  switch (mode) {
    case DC_PRED: return B_DC_PRED;
    case V_PRED: return B_V_PRED;
    case H_PRED: return B_H_PRED;
    case TM_PRED: return B_TM_PRED;
    case D45_PRED: return B_D45_PRED;
    case D135_PRED: return B_D135_PRED;
    case D117_PRED: return B_D117_PRED;
    case D153_PRED: return B_D153_PRED;
    case D27_PRED: return B_D27_PRED;
    case D63_PRED: return B_D63_PRED;
    default:
      assert(0);
      return B_MODE_COUNT;  // Dummy value
  }
}
// transform mapping
static TX_TYPE txfm_map(B_PREDICTION_MODE bmode) {
  switch (bmode) {
    case B_TM_PRED:
    case B_D135_PRED:
      return ADST_ADST;

    case B_V_PRED:
    case B_D117_PRED:
      return ADST_DCT;

    case B_H_PRED:
    case B_D153_PRED:
    case B_D27_PRED:
      return DCT_ADST;

#if CONFIG_NEWBINTRAMODES
    case B_CONTEXT_PRED:
      assert(0);
      break;
#endif

    default:
      return DCT_DCT;
  }
}
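/* Example: the mapping pairs the ADST with the prediction direction --
 * txfm_map(B_V_PRED) == ADST_DCT (ADST vertically, DCT horizontally),
 * txfm_map(B_H_PRED) == DCT_ADST, txfm_map(B_TM_PRED) == ADST_ADST, while
 * modes not listed in the switch (e.g. B_DC_PRED, B_D45_PRED) fall through
 * to DCT_DCT.
 */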
extern const uint8_t vp9_block2left[TX_SIZE_MAX_MB][24];
extern const uint8_t vp9_block2above[TX_SIZE_MAX_MB][24];
extern const uint8_t vp9_block2left_sb[TX_SIZE_MAX_SB][96];
extern const uint8_t vp9_block2above_sb[TX_SIZE_MAX_SB][96];
extern const uint8_t vp9_block2left_sb64[TX_SIZE_MAX_SB][384];
extern const uint8_t vp9_block2above_sb64[TX_SIZE_MAX_SB][384];
#if CONFIG_SBSEGMENT
extern const uint8_t vp9_block2left_sb16x32[TX_SIZE_MAX_MB][48];
extern const uint8_t vp9_block2above_sb16x32[TX_SIZE_MAX_MB][48];
extern const uint8_t vp9_block2left_sb32x16[TX_SIZE_MAX_MB][48];
extern const uint8_t vp9_block2above_sb32x16[TX_SIZE_MAX_MB][48];
extern const uint8_t vp9_block2left_sb32x64[TX_SIZE_MAX_SB][192];
extern const uint8_t vp9_block2above_sb32x64[TX_SIZE_MAX_SB][192];
extern const uint8_t vp9_block2left_sb64x32[TX_SIZE_MAX_SB][192];
extern const uint8_t vp9_block2above_sb64x32[TX_SIZE_MAX_SB][192];
#endif

#define USE_ADST_FOR_I16X16_8X8 1
#define USE_ADST_FOR_I16X16_4X4 1
#define USE_ADST_FOR_I8X8_4X4 1
#define USE_ADST_PERIPHERY_ONLY 1
#define USE_ADST_FOR_SB 1
#define USE_ADST_FOR_REMOTE_EDGE 0
static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
  // TODO(debargha): explore different patterns for ADST usage when blocksize
  // is smaller than the prediction size
  TX_TYPE tx_type = DCT_DCT;
  const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
  const int wb = mb_width_log2(sb_type), hb = mb_height_log2(sb_type);
#if !USE_ADST_FOR_SB
  if (sb_type > BLOCK_SIZE_MB16X16)
    return tx_type;
#endif
  if (ib >= (16 << (wb + hb)))  // no chroma adst
    return tx_type;
  if (xd->lossless)
    return DCT_DCT;
  if (xd->mode_info_context->mbmi.mode == I4X4_PRED &&
      xd->q_index < ACTIVE_HT) {
    const BLOCKD *b = &xd->block[ib];
    tx_type = txfm_map(
#if CONFIG_NEWBINTRAMODES
        b->bmi.as_mode.first == B_CONTEXT_PRED ? b->bmi.as_mode.context :
#endif
        b->bmi.as_mode.first);
  } else if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
             xd->q_index < ACTIVE_HT) {
    const BLOCKD *b = &xd->block[ib];
    const int ic = (ib & 10);
#if USE_ADST_FOR_I8X8_4X4
#if USE_ADST_PERIPHERY_ONLY
    // Use ADST for periphery blocks only
    const int inner = ib & 5;
    b += ic - ib;
    tx_type = txfm_map(pred_mode_conv(
        (MB_PREDICTION_MODE)b->bmi.as_mode.first));
#if USE_ADST_FOR_REMOTE_EDGE
    if (inner == 5)
      tx_type = DCT_DCT;
#else
    if (inner == 1) {
      if (tx_type == ADST_ADST) tx_type = ADST_DCT;
      else if (tx_type == DCT_ADST) tx_type = DCT_DCT;
    } else if (inner == 4) {
      if (tx_type == ADST_ADST) tx_type = DCT_ADST;
      else if (tx_type == ADST_DCT) tx_type = DCT_DCT;
    } else if (inner == 5) {
      tx_type = DCT_DCT;
    }
#endif
#else
    // Use ADST
    b += ic - ib;
    tx_type = txfm_map(pred_mode_conv(
        (MB_PREDICTION_MODE)b->bmi.as_mode.first));
#endif
#else
    // Use 2D DCT
    tx_type = DCT_DCT;
#endif
  } else if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
             xd->q_index < ACTIVE_HT) {
#if USE_ADST_FOR_I16X16_4X4
#if USE_ADST_PERIPHERY_ONLY
    const int hmax = 4 << wb;
    tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_FOR_REMOTE_EDGE
    if ((ib & (hmax - 1)) != 0 && ib >= hmax)
      tx_type = DCT_DCT;
#else
    if (ib >= 1 && ib < hmax) {
      if (tx_type == ADST_ADST) tx_type = ADST_DCT;
      else if (tx_type == DCT_ADST) tx_type = DCT_DCT;
    } else if (ib >= 1 && (ib & (hmax - 1)) == 0) {
      if (tx_type == ADST_ADST) tx_type = DCT_ADST;
      else if (tx_type == ADST_DCT) tx_type = DCT_DCT;
    } else if (ib != 0) {
      tx_type = DCT_DCT;
    }
#endif
#else
    // Use ADST
    tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#endif
#else
    // Use 2D DCT
    tx_type = DCT_DCT;
#endif
  }
  return tx_type;
}
static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
  // TODO(debargha): explore different patterns for ADST usage when blocksize
  // is smaller than the prediction size
  TX_TYPE tx_type = DCT_DCT;
  const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
  const int wb = mb_width_log2(sb_type), hb = mb_height_log2(sb_type);
#if !USE_ADST_FOR_SB
  if (sb_type > BLOCK_SIZE_MB16X16)
    return tx_type;
#endif
  if (ib >= (16 << (wb + hb)))  // no chroma adst
    return tx_type;
  if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
      xd->q_index < ACTIVE_HT8) {
    const BLOCKD *b = &xd->block[ib];
    // TODO(rbultje): MB_PREDICTION_MODE / B_PREDICTION_MODE should be merged
    // or the relationship otherwise modified to address this type conversion.
    tx_type = txfm_map(pred_mode_conv(
        (MB_PREDICTION_MODE)b->bmi.as_mode.first));
  } else if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
             xd->q_index < ACTIVE_HT8) {
#if USE_ADST_FOR_I16X16_8X8
#if USE_ADST_PERIPHERY_ONLY
    const int hmax = 4 << wb;
    tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_FOR_REMOTE_EDGE
    if ((ib & (hmax - 1)) != 0 && ib >= hmax)
      tx_type = DCT_DCT;
#else
    if (ib >= 1 && ib < hmax) {
      if (tx_type == ADST_ADST) tx_type = ADST_DCT;
      else if (tx_type == DCT_ADST) tx_type = DCT_DCT;
    } else if (ib >= 1 && (ib & (hmax - 1)) == 0) {
      if (tx_type == ADST_ADST) tx_type = DCT_ADST;
      else if (tx_type == ADST_DCT) tx_type = DCT_DCT;
    } else if (ib != 0) {
      tx_type = DCT_DCT;
    }
#endif
#else
    // Use ADST
    tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#endif
#else
    // Use 2D DCT
    tx_type = DCT_DCT;
#endif
  }
  return tx_type;
}
static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) {
  TX_TYPE tx_type = DCT_DCT;
  const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
  const int wb = mb_width_log2(sb_type), hb = mb_height_log2(sb_type);
#if !USE_ADST_FOR_SB
  if (sb_type > BLOCK_SIZE_MB16X16)
    return tx_type;
#endif
  if (ib >= (16 << (wb + hb)))
    return tx_type;
  if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
      xd->q_index < ACTIVE_HT16) {
    tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_PERIPHERY_ONLY
    if (sb_type > BLOCK_SIZE_MB16X16) {
      const int hmax = 4 << wb;
#if USE_ADST_FOR_REMOTE_EDGE
      if ((ib & (hmax - 1)) != 0 && ib >= hmax)
        tx_type = DCT_DCT;
#else
      if (ib >= 1 && ib < hmax) {
        if (tx_type == ADST_ADST) tx_type = ADST_DCT;
        else if (tx_type == DCT_ADST) tx_type = DCT_DCT;
      } else if (ib >= 1 && (ib & (hmax - 1)) == 0) {
        if (tx_type == ADST_ADST) tx_type = DCT_ADST;
        else if (tx_type == ADST_DCT) tx_type = DCT_DCT;
      } else if (ib != 0) {
        tx_type = DCT_DCT;
      }
#endif
    }
#endif
  }
  return tx_type;
}
void vp9_build_block_doffsets(MACROBLOCKD *xd);
void vp9_setup_block_dptrs(MACROBLOCKD *xd);

static void update_blockd_bmi(MACROBLOCKD *xd) {
  const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;

  if (mode == SPLITMV || mode == I8X8_PRED || mode == I4X4_PRED) {
    int i;
    for (i = 0; i < 16; i++)
      xd->block[i].bmi = xd->mode_info_context->bmi[i];
  }
}
static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
  MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
  const TX_SIZE size = mbmi->txfm_size;
  const MB_PREDICTION_MODE mode = mbmi->mode;

  switch (mbmi->sb_type) {
    case BLOCK_SIZE_SB64X64:
      return size;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB64X32:
    case BLOCK_SIZE_SB32X64:
#endif
    case BLOCK_SIZE_SB32X32:
      if (size == TX_32X32)
        return TX_16X16;
      else
        return size;
    default:
      if (size == TX_16X16)
        return TX_8X8;
      else if (size == TX_8X8 && (mode == I8X8_PRED || mode == SPLITMV))
        return TX_4X4;
      else
        return size;
  }

  return size;
}
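/* Example: a 16x16 macroblock coded with txfm_size TX_16X16 uses TX_8X8 for
 * its 8x8 chroma planes, and a 32x32 superblock coded with TX_32X32 uses
 * TX_16X16 for chroma; otherwise the luma transform size is reused for UV.
 */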
#if CONFIG_CODE_NONZEROCOUNT
static int get_nzc_used(TX_SIZE tx_size) {
  return (tx_size >= TX_16X16);
}
#endif

struct plane_block_idx {
  int plane;
  int block;
};
// TODO(jkoleszar): returning a struct so it can be used in a const context,
// expect to refactor this further later.
static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
                                                     int b_idx) {
  const int v_offset = y_blocks * 5 / 4;
  struct plane_block_idx res;

  if (b_idx < y_blocks) {
    res.plane = 0;
    res.block = b_idx;
  } else if (b_idx < v_offset) {
    res.plane = 1;
    res.block = b_idx - y_blocks;
  } else {
    assert(b_idx < y_blocks * 3 / 2);
    res.plane = 2;
    res.block = b_idx - v_offset;
  }
  return res;
}
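/* Example: for a 16x16 macroblock, y_blocks == 16 and v_offset == 20, so
 * plane_block_idx(16, 18) returns {plane = 1, block = 2} (a U block) and
 * plane_block_idx(16, 21) returns {plane = 2, block = 1} (a V block).
 */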
/* TODO(jkoleszar): Probably best to remove instances that require this,
 * as the data likely becomes per-plane and stored in the per-plane structures.
 * This is a stub to work with the existing code.
 */
static INLINE int old_block_idx_4x4(MACROBLOCKD* const xd, int block_size_b,
                                    int plane, int i) {
  const int luma_blocks = 1 << block_size_b;
  assert(xd->plane[0].subsampling_x == 0);
  assert(xd->plane[0].subsampling_y == 0);
  assert(xd->plane[1].subsampling_x == 1);
  assert(xd->plane[1].subsampling_y == 1);
  assert(xd->plane[2].subsampling_x == 1);
  assert(xd->plane[2].subsampling_y == 1);
  return plane == 0 ? i :
         plane == 1 ? luma_blocks + i :
                      luma_blocks * 5 / 4 + i;
}
typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                  BLOCK_SIZE_TYPE bsize,
                                                  int ss_txfrm_size,
                                                  void *arg);
static INLINE void foreach_transformed_block_in_plane(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
    int is_split, foreach_transformed_block_visitor visit, void *arg) {
  const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);

  // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
  const TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
  const int block_size_b = bw + bh;
  const int txfrm_size_b = tx_size * 2;

  // subsampled size of the block
  const int ss_sum = xd->plane[plane].subsampling_x +
                     xd->plane[plane].subsampling_y;
  const int ss_block_size = block_size_b - ss_sum;

  // size of the transform to use. scale the transform down if it's larger
  // than the size of the subsampled data, or forced externally by the mb mode.
  const int ss_max = MAX(xd->plane[plane].subsampling_x,
                         xd->plane[plane].subsampling_y);
  const int ss_txfrm_size = txfrm_size_b > ss_block_size || is_split
                                ? txfrm_size_b - ss_max * 2
                                : txfrm_size_b;
  const int step = 1 << ss_txfrm_size;

  int i;

  assert(txfrm_size_b <= block_size_b);
  assert(ss_txfrm_size <= ss_block_size);
  for (i = 0; i < (1 << ss_block_size); i += step) {
    visit(plane, i, bsize, ss_txfrm_size, arg);
  }
}
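/* A minimal sketch of a visitor for the iterators here (the function name and
 * counter struct below are illustrative only, not part of this header): the
 * visitor receives the plane index, the block offset within that plane in 4x4
 * units, and the transform size actually used, already adjusted for
 * subsampling.
 *
 *   struct tx_block_count { int n; };
 *
 *   static void count_tx_blocks(int plane, int block, BLOCK_SIZE_TYPE bsize,
 *                               int ss_txfrm_size, void *arg) {
 *     struct tx_block_count *c = (struct tx_block_count *)arg;
 *     (void)plane; (void)block; (void)bsize; (void)ss_txfrm_size;
 *     c->n++;  // one call per transform block visited
 *   }
 *
 * which might be invoked as:
 *   struct tx_block_count c = { 0 };
 *   foreach_transformed_block(xd, bsize, count_tx_blocks, &c);
 */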
static INLINE void foreach_transformed_block(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_transformed_block_visitor visit, void *arg) {
  const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
  const int is_split =
      xd->mode_info_context->mbmi.txfm_size == TX_8X8 &&
      (mode == I8X8_PRED || mode == SPLITMV);
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
    const int is_split_chroma = is_split &&
        xd->plane[plane].plane_type == PLANE_TYPE_UV;

    foreach_transformed_block_in_plane(xd, bsize, plane, is_split_chroma,
                                       visit, arg);
  }
}

static INLINE void foreach_transformed_block_uv(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_transformed_block_visitor visit, void *arg) {
  const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
  const int is_split =
      xd->mode_info_context->mbmi.txfm_size == TX_8X8 &&
      (mode == I8X8_PRED || mode == SPLITMV);
  int plane;

  for (plane = 1; plane < MAX_MB_PLANE; plane++) {
    foreach_transformed_block_in_plane(xd, bsize, plane, is_split,
                                       visit, arg);
  }
}
// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
// calculate the subsampled BLOCK_SIZE_TYPE, but that type isn't defined for
// sizes smaller than 16x16 yet.
typedef void (*foreach_predicted_block_visitor)(int plane, int block,
                                                BLOCK_SIZE_TYPE bsize,
                                                int pred_w, int pred_h,
                                                void *arg);
static INLINE void foreach_predicted_block_in_plane(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
    foreach_predicted_block_visitor visit, void *arg) {
  int i, x, y;
  const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;

  // block sizes in number of 4x4 blocks log 2 ("*_b")
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
  // subsampled size of the block
  const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int bh = b_height_log2(bsize) - xd->plane[plane].subsampling_y;

  // size of the predictor to use.
  int pred_w, pred_h;

  if (mode == SPLITMV) {
    // 4x4 or 8x8
    const int is_4x4 =
        (xd->mode_info_context->mbmi.partitioning == PARTITIONING_4X4);
    pred_w = is_4x4 ? 0 : 1 >> xd->plane[plane].subsampling_x;
    pred_h = is_4x4 ? 0 : 1 >> xd->plane[plane].subsampling_y;
  } else {
    pred_w = bw;
    pred_h = bh;
  }
  assert(pred_w <= bw);
  assert(pred_h <= bh);

  // visit each subblock in raster order
  i = 0;
  for (y = 0; y < 1 << bh; y += 1 << pred_h) {
    for (x = 0; x < 1 << bw; x += 1 << pred_w) {
      visit(plane, i, bsize, pred_w, pred_h, arg);
      i += 1 << pred_w;
    }
    i -= 1 << bw;
    i += 1 << (bw + pred_h);
  }
}
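/* Worked example of the raster iteration above: a 16x16 luma block has
 * bw == bh == 2 (a 4x4 grid of 4x4 blocks). For SPLITMV with
 * PARTITIONING_8X8, pred_w == pred_h == 1, so the visitor is called with
 * block offsets 0, 2, 8 and 10 -- the top-left 4x4 block of each 8x8
 * predictor in raster order. For non-SPLITMV modes a single call covers the
 * whole block.
 */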
static INLINE void foreach_predicted_block(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_predicted_block_visitor visit, void *arg) {
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
    foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg);
  }
}

static INLINE void foreach_predicted_block_uv(
    const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
    foreach_predicted_block_visitor visit, void *arg) {
  int plane;

  for (plane = 1; plane < MAX_MB_PLANE; plane++) {
    foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg);
  }
}

#endif  // VP9_COMMON_VP9_BLOCKD_H_