Normalize triplets in gain_val_tab[][] so gain_exp_tab[] can be just an
[ffmpeg-lucabe.git] / libavcodec / vp3.c
blobff0b5b6a0090239fee43aa8d14486f204f9abc9b
1 /*
2 * Copyright (C) 2003-2004 the ffmpeg project
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 /**
22 * @file vp3.c
23 * On2 VP3 Video Decoder
25 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
26 * For more information about the VP3 coding process, visit:
27 * http://wiki.multimedia.cx/index.php?title=On2_VP3
29 * Theora decoder by Alex Beregszaszi
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <unistd.h>
37 #include "avcodec.h"
38 #include "dsputil.h"
39 #include "bitstream.h"
41 #include "vp3data.h"
42 #include "xiph.h"
#define FRAGMENT_PIXELS 8

/*
 * Debugging Variables
 *
 * Define one or more of the following compile-time variables to 1 to obtain
 * elaborate information about certain aspects of the decoding process.
 *
 * KEYFRAMES_ONLY: set this to 1 to only see keyframes (VP3 slideshow mode)
 * DEBUG_VP3: high-level decoding flow
 * DEBUG_INIT: initialization parameters
 * DEBUG_DEQUANTIZERS: display how the dequanization tables are built
 * DEBUG_BLOCK_CODING: unpacking the superblock/macroblock/fragment coding
 * DEBUG_MODES: unpacking the coding modes for individual fragments
 * DEBUG_VECTORS: display the motion vectors
 * DEBUG_TOKEN: display exhaustive information about each DCT token
 * DEBUG_VLC: display the VLCs as they are extracted from the stream
 * DEBUG_DC_PRED: display the process of reversing DC prediction
 * DEBUG_IDCT: show every detail of the IDCT process
 */

#define KEYFRAMES_ONLY 0

#define DEBUG_VP3 0
#define DEBUG_INIT 0
#define DEBUG_DEQUANTIZERS 0
#define DEBUG_BLOCK_CODING 0
#define DEBUG_MODES 0
#define DEBUG_VECTORS 0
#define DEBUG_TOKEN 0
#define DEBUG_VLC 0
#define DEBUG_DC_PRED 0
#define DEBUG_IDCT 0

/* Each debug category expands to a real av_log() call when its flag is 1,
 * and to an empty static inline stub (optimized away) when it is 0. */
#if DEBUG_VP3
#define debug_vp3(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_vp3(const char *format, ...) { }
#endif

#if DEBUG_INIT
#define debug_init(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_init(const char *format, ...) { }
#endif

#if DEBUG_DEQUANTIZERS
#define debug_dequantizers(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_dequantizers(const char *format, ...) { }
#endif

#if DEBUG_BLOCK_CODING
#define debug_block_coding(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_block_coding(const char *format, ...) { }
#endif

#if DEBUG_MODES
#define debug_modes(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_modes(const char *format, ...) { }
#endif

#if DEBUG_VECTORS
#define debug_vectors(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_vectors(const char *format, ...) { }
#endif

#if DEBUG_TOKEN
#define debug_token(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_token(const char *format, ...) { }
#endif

#if DEBUG_VLC
#define debug_vlc(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_vlc(const char *format, ...) { }
#endif

#if DEBUG_DC_PRED
#define debug_dc_pred(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_dc_pred(const char *format, ...) { }
#endif

#if DEBUG_IDCT
#define debug_idct(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_idct(const char *format, ...) { }
#endif
138 typedef struct Coeff {
139 struct Coeff *next;
140 DCTELEM coeff;
141 uint8_t index;
142 } Coeff;
144 //FIXME split things out into their own arrays
145 typedef struct Vp3Fragment {
146 Coeff *next_coeff;
147 /* address of first pixel taking into account which plane the fragment
148 * lives on as well as the plane stride */
149 int first_pixel;
150 /* this is the macroblock that the fragment belongs to */
151 uint16_t macroblock;
152 uint8_t coding_method;
153 int8_t motion_x;
154 int8_t motion_y;
155 } Vp3Fragment;
/* superblock coding states */
#define SB_NOT_CODED        0
#define SB_PARTIALLY_CODED  1
#define SB_FULLY_CODED      2

/* macroblock / fragment coding modes */
#define MODE_INTER_NO_MV      0
#define MODE_INTRA            1
#define MODE_INTER_PLUS_MV    2
#define MODE_INTER_LAST_MV    3
#define MODE_INTER_PRIOR_LAST 4
#define MODE_USING_GOLDEN     5
#define MODE_GOLDEN_MV        6
#define MODE_INTER_FOURMV     7
#define CODING_MODE_COUNT     8

/* special internal mode */
#define MODE_COPY             8

/* There are 6 preset schemes, plus a free-form scheme */
static const int ModeAlphabet[6][CODING_MODE_COUNT] =
{
    /* scheme 1: Last motion vector dominates */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
         MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
         MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
    {    MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
         MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
    {    MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
         MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_PLUS_MV,    MODE_INTRA,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },
};

#define MIN_DEQUANT_VAL 2
217 typedef struct Vp3DecodeContext {
218 AVCodecContext *avctx;
219 int theora, theora_tables;
220 int version;
221 int width, height;
222 AVFrame golden_frame;
223 AVFrame last_frame;
224 AVFrame current_frame;
225 int keyframe;
226 DSPContext dsp;
227 int flipped_image;
229 int qis[3];
230 int nqis;
231 int quality_index;
232 int last_quality_index;
234 int superblock_count;
235 int superblock_width;
236 int superblock_height;
237 int y_superblock_width;
238 int y_superblock_height;
239 int c_superblock_width;
240 int c_superblock_height;
241 int u_superblock_start;
242 int v_superblock_start;
243 unsigned char *superblock_coding;
245 int macroblock_count;
246 int macroblock_width;
247 int macroblock_height;
249 int fragment_count;
250 int fragment_width;
251 int fragment_height;
253 Vp3Fragment *all_fragments;
254 uint8_t *coeff_counts;
255 Coeff *coeffs;
256 Coeff *next_coeff;
257 int fragment_start[3];
259 ScanTable scantable;
261 /* tables */
262 uint16_t coded_dc_scale_factor[64];
263 uint32_t coded_ac_scale_factor[64];
264 uint8_t base_matrix[384][64];
265 uint8_t qr_count[2][3];
266 uint8_t qr_size [2][3][64];
267 uint16_t qr_base[2][3][64];
269 /* this is a list of indexes into the all_fragments array indicating
270 * which of the fragments are coded */
271 int *coded_fragment_list;
272 int coded_fragment_list_index;
273 int pixel_addresses_initialized;
275 VLC dc_vlc[16];
276 VLC ac_vlc_1[16];
277 VLC ac_vlc_2[16];
278 VLC ac_vlc_3[16];
279 VLC ac_vlc_4[16];
281 VLC superblock_run_length_vlc;
282 VLC fragment_run_length_vlc;
283 VLC mode_code_vlc;
284 VLC motion_vector_vlc;
286 /* these arrays need to be on 16-byte boundaries since SSE2 operations
287 * index into them */
288 DECLARE_ALIGNED_16(int16_t, qmat[2][4][64]); //<qmat[is_inter][plane]
290 /* This table contains superblock_count * 16 entries. Each set of 16
291 * numbers corresponds to the fragment indexes 0..15 of the superblock.
292 * An entry will be -1 to indicate that no entry corresponds to that
293 * index. */
294 int *superblock_fragments;
296 /* This table contains superblock_count * 4 entries. Each set of 4
297 * numbers corresponds to the macroblock indexes 0..3 of the superblock.
298 * An entry will be -1 to indicate that no entry corresponds to that
299 * index. */
300 int *superblock_macroblocks;
302 /* This table contains macroblock_count * 6 entries. Each set of 6
303 * numbers corresponds to the fragment indexes 0..5 which comprise
304 * the macroblock (4 Y fragments and 2 C fragments). */
305 int *macroblock_fragments;
306 /* This is an array that indicates how a particular macroblock
307 * is coded. */
308 unsigned char *macroblock_coding;
310 int first_coded_y_fragment;
311 int first_coded_c_fragment;
312 int last_coded_y_fragment;
313 int last_coded_c_fragment;
315 uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
316 int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
318 /* Huffman decode */
319 int hti;
320 unsigned int hbits;
321 int entries;
322 int huff_code_size;
323 uint16_t huffman_table[80][32][2];
325 uint32_t filter_limit_values[64];
326 int bounding_values_array[256];
327 } Vp3DecodeContext;
329 /************************************************************************
330 * VP3 specific functions
331 ************************************************************************/
334 * This function sets up all of the various blocks mappings:
335 * superblocks <-> fragments, macroblocks <-> fragments,
336 * superblocks <-> macroblocks
338 * Returns 0 is successful; returns 1 if *anything* went wrong.
340 static int init_block_mapping(Vp3DecodeContext *s)
342 int i, j;
343 signed int hilbert_walk_mb[4];
345 int current_fragment = 0;
346 int current_width = 0;
347 int current_height = 0;
348 int right_edge = 0;
349 int bottom_edge = 0;
350 int superblock_row_inc = 0;
351 int *hilbert = NULL;
352 int mapping_index = 0;
354 int current_macroblock;
355 int c_fragment;
357 signed char travel_width[16] = {
358 1, 1, 0, -1,
359 0, 0, 1, 0,
360 1, 0, 1, 0,
361 0, -1, 0, 1
364 signed char travel_height[16] = {
365 0, 0, 1, 0,
366 1, 1, 0, -1,
367 0, 1, 0, -1,
368 -1, 0, -1, 0
371 signed char travel_width_mb[4] = {
372 1, 0, 1, 0
375 signed char travel_height_mb[4] = {
376 0, 1, 0, -1
379 debug_vp3(" vp3: initialize block mapping tables\n");
381 hilbert_walk_mb[0] = 1;
382 hilbert_walk_mb[1] = s->macroblock_width;
383 hilbert_walk_mb[2] = 1;
384 hilbert_walk_mb[3] = -s->macroblock_width;
386 /* iterate through each superblock (all planes) and map the fragments */
387 for (i = 0; i < s->superblock_count; i++) {
388 debug_init(" superblock %d (u starts @ %d, v starts @ %d)\n",
389 i, s->u_superblock_start, s->v_superblock_start);
391 /* time to re-assign the limits? */
392 if (i == 0) {
394 /* start of Y superblocks */
395 right_edge = s->fragment_width;
396 bottom_edge = s->fragment_height;
397 current_width = -1;
398 current_height = 0;
399 superblock_row_inc = 3 * s->fragment_width -
400 (s->y_superblock_width * 4 - s->fragment_width);
402 /* the first operation for this variable is to advance by 1 */
403 current_fragment = -1;
405 } else if (i == s->u_superblock_start) {
407 /* start of U superblocks */
408 right_edge = s->fragment_width / 2;
409 bottom_edge = s->fragment_height / 2;
410 current_width = -1;
411 current_height = 0;
412 superblock_row_inc = 3 * (s->fragment_width / 2) -
413 (s->c_superblock_width * 4 - s->fragment_width / 2);
415 /* the first operation for this variable is to advance by 1 */
416 current_fragment = s->fragment_start[1] - 1;
418 } else if (i == s->v_superblock_start) {
420 /* start of V superblocks */
421 right_edge = s->fragment_width / 2;
422 bottom_edge = s->fragment_height / 2;
423 current_width = -1;
424 current_height = 0;
425 superblock_row_inc = 3 * (s->fragment_width / 2) -
426 (s->c_superblock_width * 4 - s->fragment_width / 2);
428 /* the first operation for this variable is to advance by 1 */
429 current_fragment = s->fragment_start[2] - 1;
433 if (current_width >= right_edge - 1) {
434 /* reset width and move to next superblock row */
435 current_width = -1;
436 current_height += 4;
438 /* fragment is now at the start of a new superblock row */
439 current_fragment += superblock_row_inc;
442 /* iterate through all 16 fragments in a superblock */
443 for (j = 0; j < 16; j++) {
444 current_fragment += travel_width[j] + right_edge * travel_height[j];
445 current_width += travel_width[j];
446 current_height += travel_height[j];
448 /* check if the fragment is in bounds */
449 if ((current_width < right_edge) &&
450 (current_height < bottom_edge)) {
451 s->superblock_fragments[mapping_index] = current_fragment;
452 debug_init(" mapping fragment %d to superblock %d, position %d (%d/%d x %d/%d)\n",
453 s->superblock_fragments[mapping_index], i, j,
454 current_width, right_edge, current_height, bottom_edge);
455 } else {
456 s->superblock_fragments[mapping_index] = -1;
457 debug_init(" superblock %d, position %d has no fragment (%d/%d x %d/%d)\n",
458 i, j,
459 current_width, right_edge, current_height, bottom_edge);
462 mapping_index++;
466 /* initialize the superblock <-> macroblock mapping; iterate through
467 * all of the Y plane superblocks to build this mapping */
468 right_edge = s->macroblock_width;
469 bottom_edge = s->macroblock_height;
470 current_width = -1;
471 current_height = 0;
472 superblock_row_inc = s->macroblock_width -
473 (s->y_superblock_width * 2 - s->macroblock_width);
474 hilbert = hilbert_walk_mb;
475 mapping_index = 0;
476 current_macroblock = -1;
477 for (i = 0; i < s->u_superblock_start; i++) {
479 if (current_width >= right_edge - 1) {
480 /* reset width and move to next superblock row */
481 current_width = -1;
482 current_height += 2;
484 /* macroblock is now at the start of a new superblock row */
485 current_macroblock += superblock_row_inc;
488 /* iterate through each potential macroblock in the superblock */
489 for (j = 0; j < 4; j++) {
490 current_macroblock += hilbert_walk_mb[j];
491 current_width += travel_width_mb[j];
492 current_height += travel_height_mb[j];
494 /* check if the macroblock is in bounds */
495 if ((current_width < right_edge) &&
496 (current_height < bottom_edge)) {
497 s->superblock_macroblocks[mapping_index] = current_macroblock;
498 debug_init(" mapping macroblock %d to superblock %d, position %d (%d/%d x %d/%d)\n",
499 s->superblock_macroblocks[mapping_index], i, j,
500 current_width, right_edge, current_height, bottom_edge);
501 } else {
502 s->superblock_macroblocks[mapping_index] = -1;
503 debug_init(" superblock %d, position %d has no macroblock (%d/%d x %d/%d)\n",
504 i, j,
505 current_width, right_edge, current_height, bottom_edge);
508 mapping_index++;
512 /* initialize the macroblock <-> fragment mapping */
513 current_fragment = 0;
514 current_macroblock = 0;
515 mapping_index = 0;
516 for (i = 0; i < s->fragment_height; i += 2) {
518 for (j = 0; j < s->fragment_width; j += 2) {
520 debug_init(" macroblock %d contains fragments: ", current_macroblock);
521 s->all_fragments[current_fragment].macroblock = current_macroblock;
522 s->macroblock_fragments[mapping_index++] = current_fragment;
523 debug_init("%d ", current_fragment);
525 if (j + 1 < s->fragment_width) {
526 s->all_fragments[current_fragment + 1].macroblock = current_macroblock;
527 s->macroblock_fragments[mapping_index++] = current_fragment + 1;
528 debug_init("%d ", current_fragment + 1);
529 } else
530 s->macroblock_fragments[mapping_index++] = -1;
532 if (i + 1 < s->fragment_height) {
533 s->all_fragments[current_fragment + s->fragment_width].macroblock =
534 current_macroblock;
535 s->macroblock_fragments[mapping_index++] =
536 current_fragment + s->fragment_width;
537 debug_init("%d ", current_fragment + s->fragment_width);
538 } else
539 s->macroblock_fragments[mapping_index++] = -1;
541 if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) {
542 s->all_fragments[current_fragment + s->fragment_width + 1].macroblock =
543 current_macroblock;
544 s->macroblock_fragments[mapping_index++] =
545 current_fragment + s->fragment_width + 1;
546 debug_init("%d ", current_fragment + s->fragment_width + 1);
547 } else
548 s->macroblock_fragments[mapping_index++] = -1;
550 /* C planes */
551 c_fragment = s->fragment_start[1] +
552 (i * s->fragment_width / 4) + (j / 2);
553 s->all_fragments[c_fragment].macroblock = s->macroblock_count;
554 s->macroblock_fragments[mapping_index++] = c_fragment;
555 debug_init("%d ", c_fragment);
557 c_fragment = s->fragment_start[2] +
558 (i * s->fragment_width / 4) + (j / 2);
559 s->all_fragments[c_fragment].macroblock = s->macroblock_count;
560 s->macroblock_fragments[mapping_index++] = c_fragment;
561 debug_init("%d ", c_fragment);
563 debug_init("\n");
565 if (j + 2 <= s->fragment_width)
566 current_fragment += 2;
567 else
568 current_fragment++;
569 current_macroblock++;
572 current_fragment += s->fragment_width;
575 return 0; /* successful path out */
579 * This function wipes out all of the fragment data.
581 static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
583 int i;
585 /* zero out all of the fragment information */
586 s->coded_fragment_list_index = 0;
587 for (i = 0; i < s->fragment_count; i++) {
588 s->coeff_counts[i] = 0;
589 s->all_fragments[i].motion_x = 127;
590 s->all_fragments[i].motion_y = 127;
591 s->all_fragments[i].next_coeff= NULL;
592 s->coeffs[i].index=
593 s->coeffs[i].coeff=0;
594 s->coeffs[i].next= NULL;
599 * This function sets up the dequantization tables used for a particular
600 * frame.
602 static void init_dequantizer(Vp3DecodeContext *s)
604 int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index];
605 int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index];
606 int i, plane, inter, qri, bmi, bmj, qistart;
608 debug_vp3(" vp3: initializing dequantization tables\n");
610 for(inter=0; inter<2; inter++){
611 for(plane=0; plane<3; plane++){
612 int sum=0;
613 for(qri=0; qri<s->qr_count[inter][plane]; qri++){
614 sum+= s->qr_size[inter][plane][qri];
615 if(s->quality_index <= sum)
616 break;
618 qistart= sum - s->qr_size[inter][plane][qri];
619 bmi= s->qr_base[inter][plane][qri ];
620 bmj= s->qr_base[inter][plane][qri+1];
621 for(i=0; i<64; i++){
622 int coeff= ( 2*(sum -s->quality_index)*s->base_matrix[bmi][i]
623 - 2*(qistart-s->quality_index)*s->base_matrix[bmj][i]
624 + s->qr_size[inter][plane][qri])
625 / (2*s->qr_size[inter][plane][qri]);
627 int qmin= 8<<(inter + !i);
628 int qscale= i ? ac_scale_factor : dc_scale_factor;
630 s->qmat[inter][plane][i]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
635 memset(s->qscale_table, (FFMAX(s->qmat[0][0][1], s->qmat[0][1][1])+8)/16, 512); //FIXME finetune
639 * This function initializes the loop filter boundary limits if the frame's
640 * quality index is different from the previous frame's.
642 static void init_loop_filter(Vp3DecodeContext *s)
644 int *bounding_values= s->bounding_values_array+127;
645 int filter_limit;
646 int x;
648 filter_limit = s->filter_limit_values[s->quality_index];
650 /* set up the bounding values */
651 memset(s->bounding_values_array, 0, 256 * sizeof(int));
652 for (x = 0; x < filter_limit; x++) {
653 bounding_values[-x - filter_limit] = -filter_limit + x;
654 bounding_values[-x] = -x;
655 bounding_values[x] = x;
656 bounding_values[x + filter_limit] = filter_limit - x;
661 * This function unpacks all of the superblock/macroblock/fragment coding
662 * information from the bitstream.
664 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
666 int bit = 0;
667 int current_superblock = 0;
668 int current_run = 0;
669 int decode_fully_flags = 0;
670 int decode_partial_blocks = 0;
671 int first_c_fragment_seen;
673 int i, j;
674 int current_fragment;
676 debug_vp3(" vp3: unpacking superblock coding\n");
678 if (s->keyframe) {
680 debug_vp3(" keyframe-- all superblocks are fully coded\n");
681 memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
683 } else {
685 /* unpack the list of partially-coded superblocks */
686 bit = get_bits1(gb);
687 /* toggle the bit because as soon as the first run length is
688 * fetched the bit will be toggled again */
689 bit ^= 1;
690 while (current_superblock < s->superblock_count) {
691 if (current_run-- == 0) {
692 bit ^= 1;
693 current_run = get_vlc2(gb,
694 s->superblock_run_length_vlc.table, 6, 2);
695 if (current_run == 33)
696 current_run += get_bits(gb, 12);
697 debug_block_coding(" setting superblocks %d..%d to %s\n",
698 current_superblock,
699 current_superblock + current_run - 1,
700 (bit) ? "partially coded" : "not coded");
702 /* if any of the superblocks are not partially coded, flag
703 * a boolean to decode the list of fully-coded superblocks */
704 if (bit == 0) {
705 decode_fully_flags = 1;
706 } else {
708 /* make a note of the fact that there are partially coded
709 * superblocks */
710 decode_partial_blocks = 1;
713 s->superblock_coding[current_superblock++] = bit;
716 /* unpack the list of fully coded superblocks if any of the blocks were
717 * not marked as partially coded in the previous step */
718 if (decode_fully_flags) {
720 current_superblock = 0;
721 current_run = 0;
722 bit = get_bits1(gb);
723 /* toggle the bit because as soon as the first run length is
724 * fetched the bit will be toggled again */
725 bit ^= 1;
726 while (current_superblock < s->superblock_count) {
728 /* skip any superblocks already marked as partially coded */
729 if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
731 if (current_run-- == 0) {
732 bit ^= 1;
733 current_run = get_vlc2(gb,
734 s->superblock_run_length_vlc.table, 6, 2);
735 if (current_run == 33)
736 current_run += get_bits(gb, 12);
739 debug_block_coding(" setting superblock %d to %s\n",
740 current_superblock,
741 (bit) ? "fully coded" : "not coded");
742 s->superblock_coding[current_superblock] = 2*bit;
744 current_superblock++;
748 /* if there were partial blocks, initialize bitstream for
749 * unpacking fragment codings */
750 if (decode_partial_blocks) {
752 current_run = 0;
753 bit = get_bits1(gb);
754 /* toggle the bit because as soon as the first run length is
755 * fetched the bit will be toggled again */
756 bit ^= 1;
760 /* figure out which fragments are coded; iterate through each
761 * superblock (all planes) */
762 s->coded_fragment_list_index = 0;
763 s->next_coeff= s->coeffs + s->fragment_count;
764 s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
765 s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
766 first_c_fragment_seen = 0;
767 memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
768 for (i = 0; i < s->superblock_count; i++) {
770 /* iterate through all 16 fragments in a superblock */
771 for (j = 0; j < 16; j++) {
773 /* if the fragment is in bounds, check its coding status */
774 current_fragment = s->superblock_fragments[i * 16 + j];
775 if (current_fragment >= s->fragment_count) {
776 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
777 current_fragment, s->fragment_count);
778 return 1;
780 if (current_fragment != -1) {
781 if (s->superblock_coding[i] == SB_NOT_CODED) {
783 /* copy all the fragments from the prior frame */
784 s->all_fragments[current_fragment].coding_method =
785 MODE_COPY;
787 } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
789 /* fragment may or may not be coded; this is the case
790 * that cares about the fragment coding runs */
791 if (current_run-- == 0) {
792 bit ^= 1;
793 current_run = get_vlc2(gb,
794 s->fragment_run_length_vlc.table, 5, 2);
797 if (bit) {
798 /* default mode; actual mode will be decoded in
799 * the next phase */
800 s->all_fragments[current_fragment].coding_method =
801 MODE_INTER_NO_MV;
802 s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
803 s->coded_fragment_list[s->coded_fragment_list_index] =
804 current_fragment;
805 if ((current_fragment >= s->fragment_start[1]) &&
806 (s->last_coded_y_fragment == -1) &&
807 (!first_c_fragment_seen)) {
808 s->first_coded_c_fragment = s->coded_fragment_list_index;
809 s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
810 first_c_fragment_seen = 1;
812 s->coded_fragment_list_index++;
813 s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
814 debug_block_coding(" superblock %d is partially coded, fragment %d is coded\n",
815 i, current_fragment);
816 } else {
817 /* not coded; copy this fragment from the prior frame */
818 s->all_fragments[current_fragment].coding_method =
819 MODE_COPY;
820 debug_block_coding(" superblock %d is partially coded, fragment %d is not coded\n",
821 i, current_fragment);
824 } else {
826 /* fragments are fully coded in this superblock; actual
827 * coding will be determined in next step */
828 s->all_fragments[current_fragment].coding_method =
829 MODE_INTER_NO_MV;
830 s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
831 s->coded_fragment_list[s->coded_fragment_list_index] =
832 current_fragment;
833 if ((current_fragment >= s->fragment_start[1]) &&
834 (s->last_coded_y_fragment == -1) &&
835 (!first_c_fragment_seen)) {
836 s->first_coded_c_fragment = s->coded_fragment_list_index;
837 s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
838 first_c_fragment_seen = 1;
840 s->coded_fragment_list_index++;
841 s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
842 debug_block_coding(" superblock %d is fully coded, fragment %d is coded\n",
843 i, current_fragment);
849 if (!first_c_fragment_seen)
850 /* only Y fragments coded in this frame */
851 s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
852 else
853 /* end the list of coded C fragments */
854 s->last_coded_c_fragment = s->coded_fragment_list_index - 1;
856 debug_block_coding(" %d total coded fragments, y: %d -> %d, c: %d -> %d\n",
857 s->coded_fragment_list_index,
858 s->first_coded_y_fragment,
859 s->last_coded_y_fragment,
860 s->first_coded_c_fragment,
861 s->last_coded_c_fragment);
863 return 0;
867 * This function unpacks all the coding mode data for individual macroblocks
868 * from the bitstream.
870 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
872 int i, j, k;
873 int scheme;
874 int current_macroblock;
875 int current_fragment;
876 int coding_mode;
877 int custom_mode_alphabet[CODING_MODE_COUNT];
879 debug_vp3(" vp3: unpacking encoding modes\n");
881 if (s->keyframe) {
882 debug_vp3(" keyframe-- all blocks are coded as INTRA\n");
884 for (i = 0; i < s->fragment_count; i++)
885 s->all_fragments[i].coding_method = MODE_INTRA;
887 } else {
889 /* fetch the mode coding scheme for this frame */
890 scheme = get_bits(gb, 3);
891 debug_modes(" using mode alphabet %d\n", scheme);
893 /* is it a custom coding scheme? */
894 if (scheme == 0) {
895 debug_modes(" custom mode alphabet ahead:\n");
896 for (i = 0; i < 8; i++)
897 custom_mode_alphabet[get_bits(gb, 3)] = i;
900 for (i = 0; i < 8; i++) {
901 if(scheme)
902 debug_modes(" mode[%d][%d] = %d\n", scheme, i,
903 ModeAlphabet[scheme-1][i]);
904 else
905 debug_modes(" mode[0][%d] = %d\n", i,
906 custom_mode_alphabet[i]);
909 /* iterate through all of the macroblocks that contain 1 or more
910 * coded fragments */
911 for (i = 0; i < s->u_superblock_start; i++) {
913 for (j = 0; j < 4; j++) {
914 current_macroblock = s->superblock_macroblocks[i * 4 + j];
915 if ((current_macroblock == -1) ||
916 (s->macroblock_coding[current_macroblock] == MODE_COPY))
917 continue;
918 if (current_macroblock >= s->macroblock_count) {
919 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n",
920 current_macroblock, s->macroblock_count);
921 return 1;
924 /* mode 7 means get 3 bits for each coding mode */
925 if (scheme == 7)
926 coding_mode = get_bits(gb, 3);
927 else if(scheme == 0)
928 coding_mode = custom_mode_alphabet
929 [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
930 else
931 coding_mode = ModeAlphabet[scheme-1]
932 [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
934 s->macroblock_coding[current_macroblock] = coding_mode;
935 for (k = 0; k < 6; k++) {
936 current_fragment =
937 s->macroblock_fragments[current_macroblock * 6 + k];
938 if (current_fragment == -1)
939 continue;
940 if (current_fragment >= s->fragment_count) {
941 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n",
942 current_fragment, s->fragment_count);
943 return 1;
945 if (s->all_fragments[current_fragment].coding_method !=
946 MODE_COPY)
947 s->all_fragments[current_fragment].coding_method =
948 coding_mode;
951 debug_modes(" coding method for macroblock starting @ fragment %d = %d\n",
952 s->macroblock_fragments[current_macroblock * 6], coding_mode);
957 return 0;
961 * This function unpacks all the motion vectors for the individual
962 * macroblocks from the bitstream.
964 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
966 int i, j, k, l;
967 int coding_mode;
968 int motion_x[6];
969 int motion_y[6];
970 int last_motion_x = 0;
971 int last_motion_y = 0;
972 int prior_last_motion_x = 0;
973 int prior_last_motion_y = 0;
974 int current_macroblock;
975 int current_fragment;
977 debug_vp3(" vp3: unpacking motion vectors\n");
978 if (s->keyframe) {
980 debug_vp3(" keyframe-- there are no motion vectors\n");
982 } else {
984 memset(motion_x, 0, 6 * sizeof(int));
985 memset(motion_y, 0, 6 * sizeof(int));
987 /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
988 coding_mode = get_bits1(gb);
989 debug_vectors(" using %s scheme for unpacking motion vectors\n",
990 (coding_mode == 0) ? "VLC" : "fixed-length");
992 /* iterate through all of the macroblocks that contain 1 or more
993 * coded fragments */
994 for (i = 0; i < s->u_superblock_start; i++) {
996 for (j = 0; j < 4; j++) {
997 current_macroblock = s->superblock_macroblocks[i * 4 + j];
998 if ((current_macroblock == -1) ||
999 (s->macroblock_coding[current_macroblock] == MODE_COPY))
1000 continue;
1001 if (current_macroblock >= s->macroblock_count) {
1002 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n",
1003 current_macroblock, s->macroblock_count);
1004 return 1;
1007 current_fragment = s->macroblock_fragments[current_macroblock * 6];
1008 if (current_fragment >= s->fragment_count) {
1009 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d\n",
1010 current_fragment, s->fragment_count);
1011 return 1;
1013 switch (s->macroblock_coding[current_macroblock]) {
1015 case MODE_INTER_PLUS_MV:
1016 case MODE_GOLDEN_MV:
1017 /* all 6 fragments use the same motion vector */
1018 if (coding_mode == 0) {
1019 motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1020 motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1021 } else {
1022 motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
1023 motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
1026 for (k = 1; k < 6; k++) {
1027 motion_x[k] = motion_x[0];
1028 motion_y[k] = motion_y[0];
1031 /* vector maintenance, only on MODE_INTER_PLUS_MV */
1032 if (s->macroblock_coding[current_macroblock] ==
1033 MODE_INTER_PLUS_MV) {
1034 prior_last_motion_x = last_motion_x;
1035 prior_last_motion_y = last_motion_y;
1036 last_motion_x = motion_x[0];
1037 last_motion_y = motion_y[0];
1039 break;
1041 case MODE_INTER_FOURMV:
1042 /* vector maintenance */
1043 prior_last_motion_x = last_motion_x;
1044 prior_last_motion_y = last_motion_y;
1046 /* fetch 4 vectors from the bitstream, one for each
1047 * Y fragment, then average for the C fragment vectors */
1048 motion_x[4] = motion_y[4] = 0;
1049 for (k = 0; k < 4; k++) {
1050 for (l = 0; l < s->coded_fragment_list_index; l++)
1051 if (s->coded_fragment_list[l] == s->macroblock_fragments[6*current_macroblock + k])
1052 break;
1053 if (l < s->coded_fragment_list_index) {
1054 if (coding_mode == 0) {
1055 motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1056 motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1057 } else {
1058 motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
1059 motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
1061 last_motion_x = motion_x[k];
1062 last_motion_y = motion_y[k];
1063 } else {
1064 motion_x[k] = 0;
1065 motion_y[k] = 0;
1067 motion_x[4] += motion_x[k];
1068 motion_y[4] += motion_y[k];
1071 motion_x[5]=
1072 motion_x[4]= RSHIFT(motion_x[4], 2);
1073 motion_y[5]=
1074 motion_y[4]= RSHIFT(motion_y[4], 2);
1075 break;
1077 case MODE_INTER_LAST_MV:
1078 /* all 6 fragments use the last motion vector */
1079 motion_x[0] = last_motion_x;
1080 motion_y[0] = last_motion_y;
1081 for (k = 1; k < 6; k++) {
1082 motion_x[k] = motion_x[0];
1083 motion_y[k] = motion_y[0];
1086 /* no vector maintenance (last vector remains the
1087 * last vector) */
1088 break;
1090 case MODE_INTER_PRIOR_LAST:
1091 /* all 6 fragments use the motion vector prior to the
1092 * last motion vector */
1093 motion_x[0] = prior_last_motion_x;
1094 motion_y[0] = prior_last_motion_y;
1095 for (k = 1; k < 6; k++) {
1096 motion_x[k] = motion_x[0];
1097 motion_y[k] = motion_y[0];
1100 /* vector maintenance */
1101 prior_last_motion_x = last_motion_x;
1102 prior_last_motion_y = last_motion_y;
1103 last_motion_x = motion_x[0];
1104 last_motion_y = motion_y[0];
1105 break;
1107 default:
1108 /* covers intra, inter without MV, golden without MV */
1109 memset(motion_x, 0, 6 * sizeof(int));
1110 memset(motion_y, 0, 6 * sizeof(int));
1112 /* no vector maintenance */
1113 break;
1116 /* assign the motion vectors to the correct fragments */
1117 debug_vectors(" vectors for macroblock starting @ fragment %d (coding method %d):\n",
1118 current_fragment,
1119 s->macroblock_coding[current_macroblock]);
1120 for (k = 0; k < 6; k++) {
1121 current_fragment =
1122 s->macroblock_fragments[current_macroblock * 6 + k];
1123 if (current_fragment == -1)
1124 continue;
1125 if (current_fragment >= s->fragment_count) {
1126 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
1127 current_fragment, s->fragment_count);
1128 return 1;
1130 s->all_fragments[current_fragment].motion_x = motion_x[k];
1131 s->all_fragments[current_fragment].motion_y = motion_y[k];
1132 debug_vectors(" vector %d: fragment %d = (%d, %d)\n",
1133 k, current_fragment, motion_x[k], motion_y[k]);
1139 return 0;
1143 * This function is called by unpack_dct_coeffs() to extract the VLCs from
1144 * the bitstream. The VLCs encode tokens which are used to unpack DCT
1145 * data. This function unpacks all the VLCs for either the Y plane or both
1146 * C planes, and is called for DC coefficients or different AC coefficient
1147  * levels (since different coefficient types require different VLC tables).
1149  * This function returns a residual eob run. E.g., if a particular token gave
1150 * instructions to EOB the next 5 fragments and there were only 2 fragments
1151 * left in the current fragment range, 3 would be returned so that it could
1152 * be passed into the next call to this same function.
1154 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1155                        VLC *table, int coeff_index,
1156                        int first_fragment, int last_fragment,
1157                        int eob_run)
1159     int i;
1160     int token;
1161     int zero_run = 0;
1162     DCTELEM coeff = 0;
1163     Vp3Fragment *fragment;
     /* zigzag -> raster permutation from the IDCT's scantable; applied to the
      * coefficient index as it is stored (see FIXME below) */
1164     uint8_t *perm= s->scantable.permutated;
1165     int bits_to_get;
     /* range-check the fragment window before indexing coded_fragment_list */
1167     if ((first_fragment >= s->fragment_count) ||
1168         (last_fragment >= s->fragment_count)) {
1170         av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n",
1171             first_fragment, last_fragment);
1172         return 0;
1175     for (i = first_fragment; i <= last_fragment; i++) {
1176         int fragment_num = s->coded_fragment_list[i];
         /* skip fragments that already hold a coefficient at this index
          * (or were EOB'd: bit 7 of coeff_counts is set below) */
1178         if (s->coeff_counts[fragment_num] > coeff_index)
1179             continue;
1180         fragment = &s->all_fragments[fragment_num];
1182         if (!eob_run) {
1183             /* decode a VLC into a token */
1184             token = get_vlc2(gb, table->table, 5, 3);
1185             debug_vlc(" token = %2d, ", token);
1186             /* use the token to get a zero run, a coefficient, and an eob run */
             /* tokens 0..6 encode only an end-of-block run length */
1187             if (token <= 6) {
1188                 eob_run = eob_run_base[token];
1189                 if (eob_run_get_bits[token])
1190                     eob_run += get_bits(gb, eob_run_get_bits[token]);
1191                 coeff = zero_run = 0;
1192             } else {
                 /* tokens > 6 carry a coefficient value plus a preceding
                  * zero run; some read extra bits for the magnitude */
1193                 bits_to_get = coeff_get_bits[token];
1194                 if (!bits_to_get)
1195                     coeff = coeff_tables[token][0];
1196                 else
1197                     coeff = coeff_tables[token][get_bits(gb, bits_to_get)];
1199                 zero_run = zero_run_base[token];
1200                 if (zero_run_get_bits[token])
1201                     zero_run += get_bits(gb, zero_run_get_bits[token]);
1205         if (!eob_run) {
1206             s->coeff_counts[fragment_num] += zero_run;
             /* append the coefficient to this fragment's linked list,
              * drawing the node from the shared s->next_coeff pool */
1207             if (s->coeff_counts[fragment_num] < 64){
1208                 fragment->next_coeff->coeff= coeff;
1209                 fragment->next_coeff->index= perm[s->coeff_counts[fragment_num]++]; //FIXME perm here already?
1210                 fragment->next_coeff->next= s->next_coeff;
1211                 s->next_coeff->next=NULL;
1212                 fragment->next_coeff= s->next_coeff++;
1214             debug_vlc(" fragment %d coeff = %d\n",
1215                 s->coded_fragment_list[i], fragment->next_coeff[coeff_index]);
1216         } else {
             /* bit 7 marks this fragment as EOB'd so the coeff_index
              * comparison above skips it for all later levels */
1217             s->coeff_counts[fragment_num] |= 128;
1218             debug_vlc(" fragment %d eob with %d coefficients\n",
1219                 s->coded_fragment_list[i], s->coeff_counts[fragment_num]&127);
1220             eob_run--;
     /* leftover EOB run is carried into the next call (next coeff level) */
1224     return eob_run;
1228  * This function unpacks all of the DCT coefficient data from the
1229  * bitstream.
 *
 * Layout (as read from the bitstream): a 4-bit DC table index for Y and
 * one for C, the Y then C DC coefficients, then 4-bit AC table indexes
 * for Y and C followed by the AC coefficients in four level groups
 * (1-5, 6-14, 15-27, 28-63), each level unpacked for Y then C.
 * The residual EOB run is threaded through every unpack_vlcs() call.
1231 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1233     int i;
1234     int dc_y_table;
1235     int dc_c_table;
1236     int ac_y_table;
1237     int ac_c_table;
1238     int residual_eob_run = 0;
1240     /* fetch the DC table indexes */
1241     dc_y_table = get_bits(gb, 4);
1242     dc_c_table = get_bits(gb, 4);
1244     /* unpack the Y plane DC coefficients */
1245     debug_vp3(" vp3: unpacking Y plane DC coefficients using table %d\n",
1246         dc_y_table);
1247     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1248         s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1250     /* unpack the C plane DC coefficients */
1251     debug_vp3(" vp3: unpacking C plane DC coefficients using table %d\n",
1252         dc_c_table);
1253     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1254         s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1256     /* fetch the AC table indexes */
1257     ac_y_table = get_bits(gb, 4);
1258     ac_c_table = get_bits(gb, 4);
1260     /* unpack the group 1 AC coefficients (coeffs 1-5) */
1261     for (i = 1; i <= 5; i++) {
1263         debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1264             i, ac_y_table);
1265         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i,
1266             s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1268         debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1269             i, ac_c_table);
1270         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i,
1271             s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1274     /* unpack the group 2 AC coefficients (coeffs 6-14) */
1275     for (i = 6; i <= 14; i++) {
1277         debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1278             i, ac_y_table);
1279         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i,
1280             s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1282         debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1283             i, ac_c_table);
1284         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i,
1285             s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1288     /* unpack the group 3 AC coefficients (coeffs 15-27) */
1289     for (i = 15; i <= 27; i++) {
1291         debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1292             i, ac_y_table);
1293         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i,
1294             s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1296         debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1297             i, ac_c_table);
1298         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i,
1299             s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1302     /* unpack the group 4 AC coefficients (coeffs 28-63) */
1303     for (i = 28; i <= 63; i++) {
1305         debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1306             i, ac_y_table);
1307         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i,
1308             s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1310         debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1311             i, ac_c_table);
1312         residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i,
1313             s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1316     return 0;
1320  * This function reverses the DC prediction for each coded fragment in
1321  * the frame. Much of this function is adapted directly from the original
1322  * VP3 source code.
 /* true if fragment x was predicted from the same reference frame type
  * (intra / inter / golden) as the current fragment */
1324 #define COMPATIBLE_FRAME(x) \
1325   (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1326 #define FRAME_CODED(x) (s->all_fragments[x].coding_method != MODE_COPY)
 /* DC lives at list head only when index == 0 (DC not yet displaced) */
1327 #define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do something to simplify this
1329 static void reverse_dc_prediction(Vp3DecodeContext *s,
1330                                   int first_fragment,
1331                                   int fragment_width,
1332                                   int fragment_height)
 /* bit flags naming which neighbors are available as predictors */
1335 #define PUL 8
1336 #define PU 4
1337 #define PUR 2
1338 #define PL 1
1340     int x, y;
1341     int i = first_fragment;
1343     int predicted_dc;
1345     /* DC values for the left, up-left, up, and up-right fragments */
1346     int vl, vul, vu, vur;
1348     /* indexes for the left, up-left, up, and up-right fragments */
1349     int l, ul, u, ur;
     /* One row per neighbor-availability bitmask (PUL|PU|PUR|PL).
1352      * The 4 fields mean:
1353      *   0: up-left multiplier
1354      *   1: up multiplier
1355      *   2: up-right multiplier
1356      *   3: left multiplier
      * Weights sum to 128; the weighted sum is divided by 128 below. */
1358     int predictor_transform[16][4] = {
1359         {  0,  0,  0,  0},
1360         {  0,  0,  0,128},        // PL
1361         {  0,  0,128,  0},        // PUR
1362         {  0,  0, 53, 75},        // PUR|PL
1363         {  0,128,  0,  0},        // PU
1364         {  0, 64,  0, 64},        // PU|PL
1365         {  0,128,  0,  0},        // PU|PUR
1366         {  0,  0, 53, 75},        // PU|PUR|PL
1367         {128,  0,  0,  0},        // PUL
1368         {  0,  0,  0,128},        // PUL|PL
1369         { 64,  0, 64,  0},        // PUL|PUR
1370         {  0,  0, 53, 75},        // PUL|PUR|PL
1371         {  0,128,  0,  0},        // PUL|PU
1372         {-104,116,  0,116},       // PUL|PU|PL
1373         { 24, 80, 24,  0},        // PUL|PU|PUR
1374         {-104,116,  0,116}        // PUL|PU|PUR|PL
1377     /* This table shows which types of blocks can use other blocks for
1378      * prediction. For example, INTRA is the only mode in this table to
1379      * have a frame number of 0. That means INTRA blocks can only predict
1380      * from other INTRA blocks. There are 2 golden frame coding types;
1381      * blocks encoding in these modes can only predict from other blocks
1382      * that were encoded with 1 of these 2 modes. */
1383     unsigned char compatible_frame[8] = {
1384         1,    /* MODE_INTER_NO_MV */
1385         0,    /* MODE_INTRA */
1386         1,    /* MODE_INTER_PLUS_MV */
1387         1,    /* MODE_INTER_LAST_MV */
1388         1,    /* MODE_INTER_PRIOR_MV */
1389         2,    /* MODE_USING_GOLDEN */
1390         2,    /* MODE_GOLDEN_MV */
1391         1     /* MODE_INTER_FOUR_MV */
1393     int current_frame_type;
1395     /* there is a last DC predictor for each of the 3 frame types */
1396     short last_dc[3];
1398     int transform = 0;
1400     debug_vp3("  vp3: reversing DC prediction\n");
1402     vul = vu = vur = vl = 0;
1403     last_dc[0] = last_dc[1] = last_dc[2] = 0;
1405     /* for each fragment row... */
1406     for (y = 0; y < fragment_height; y++) {
1408         /* for each fragment in a row... */
1409         for (x = 0; x < fragment_width; x++, i++) {
1411             /* reverse prediction if this block was coded */
1412             if (s->all_fragments[i].coding_method != MODE_COPY) {
1414                 current_frame_type =
1415                     compatible_frame[s->all_fragments[i].coding_method];
1416                 debug_dc_pred(" frag %d: orig DC = %d, ",
1417                     i, DC_COEFF(i));
                 /* collect the DC of each available, coded, frame-compatible
                  * neighbor and record its flag in the transform bitmask */
1419                 transform= 0;
1420                 if(x){
1421                     l= i-1;
1422                     vl = DC_COEFF(l);
1423                     if(FRAME_CODED(l) && COMPATIBLE_FRAME(l))
1424                         transform |= PL;
1426                 if(y){
1427                     u= i-fragment_width;
1428                     vu = DC_COEFF(u);
1429                     if(FRAME_CODED(u) && COMPATIBLE_FRAME(u))
1430                         transform |= PU;
1431                     if(x){
1432                         ul= i-fragment_width-1;
1433                         vul = DC_COEFF(ul);
1434                         if(FRAME_CODED(ul) && COMPATIBLE_FRAME(ul))
1435                             transform |= PUL;
1437                     if(x + 1 < fragment_width){
1438                         ur= i-fragment_width+1;
1439                         vur = DC_COEFF(ur);
1440                         if(FRAME_CODED(ur) && COMPATIBLE_FRAME(ur))
1441                             transform |= PUR;
1445                 debug_dc_pred("transform = %d, ", transform);
1447                 if (transform == 0) {
1449                     /* if there were no fragments to predict from, use last
1450                      * DC saved */
1451                     predicted_dc = last_dc[current_frame_type];
1452                     debug_dc_pred("from last DC (%d) = %d\n",
1453                         current_frame_type, DC_COEFF(i));
1455                 } else {
1457                     /* apply the appropriate predictor transform */
1458                     predicted_dc =
1459                         (predictor_transform[transform][0] * vul) +
1460                         (predictor_transform[transform][1] * vu) +
1461                         (predictor_transform[transform][2] * vur) +
1462                         (predictor_transform[transform][3] * vl);
1464                     predicted_dc /= 128;
1466                     /* check for outranging on the [ul u l] and
1467                      * [ul u ur l] predictors */
1468                     if ((transform == 13) || (transform == 15)) {
1469                         if (FFABS(predicted_dc - vu) > 128)
1470                             predicted_dc = vu;
1471                         else if (FFABS(predicted_dc - vl) > 128)
1472                             predicted_dc = vl;
1473                         else if (FFABS(predicted_dc - vul) > 128)
1474                             predicted_dc = vul;
1477                     debug_dc_pred("from pred DC = %d\n",
1478                         DC_COEFF(i));
1481                 /* at long last, apply the predictor */
                 /* if the list head is not the DC slot, push it down the
                  * list and free the head for the reconstructed DC */
1482                 if(s->coeffs[i].index){
1483                     *s->next_coeff= s->coeffs[i];
1484                     s->coeffs[i].index=0;
1485                     s->coeffs[i].coeff=0;
1486                     s->coeffs[i].next= s->next_coeff++;
1488                 s->coeffs[i].coeff += predicted_dc;
1489                 /* save the DC */
1490                 last_dc[current_frame_type] = DC_COEFF(i);
                 /* fragment had no coded coefficients but now has a DC:
                  * mark one coefficient present (129 = EOB flag | count 1) */
1491                 if(DC_COEFF(i) && !(s->coeff_counts[i]&127)){
1492                     s->coeff_counts[i]= 129;
1493                     //            s->all_fragments[i].next_coeff= s->next_coeff;
1494                     s->coeffs[i].next= s->next_coeff;
1495                     (s->next_coeff++)->next=NULL;
1503 static void horizontal_filter(unsigned char *first_pixel, int stride,
1504 int *bounding_values);
1505 static void vertical_filter(unsigned char *first_pixel, int stride,
1506 int *bounding_values);
1509  * Perform the final rendering for a particular slice of data.
1510  * The slice number ranges from 0..(macroblock_height - 1).
 *
 * For each of the 3 planes this copies/motion-compensates the reference
 * block, dequantizes the fragment's coefficient list into a 64-entry
 * block, and runs idct_put (intra) or idct_add (inter) into the output.
1512 static void render_slice(Vp3DecodeContext *s, int slice)
1514     int x;
1515     int m, n;
1516     int16_t *dequantizer;
1517     DECLARE_ALIGNED_16(DCTELEM, block[64]);
     /* poison values: only read after being set in the MV branch below */
1518     int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
1519     int motion_halfpel_index;
1520     uint8_t *motion_source;
1521     int plane;
1522     int current_macroblock_entry = slice * s->macroblock_width * 6;
1524     if (slice >= s->macroblock_height)
1525         return;
1527     for (plane = 0; plane < 3; plane++) {
1528         uint8_t *output_plane = s->current_frame.data    [plane];
1529         uint8_t *  last_plane = s->   last_frame.data    [plane];
1530         uint8_t *golden_plane = s-> golden_frame.data    [plane];
1531         int stride            = s->current_frame.linesize[plane];
         /* chroma planes (plane != 0) are half width/height; a Y slice
          * covers two fragment rows, a chroma slice covers one */
1532         int plane_width       = s->width  >> !!plane;
1533         int plane_height      = s->height >> !!plane;
1534         int y =        slice *  FRAGMENT_PIXELS << !plane ;
1535         int slice_height = y + (FRAGMENT_PIXELS << !plane);
1536         int i = s->macroblock_fragments[current_macroblock_entry + plane + 3*!!plane];
1538         if (!s->flipped_image) stride = -stride;
1541         if(FFABS(stride) > 2048)
1542             return; //various tables are fixed size
1544         /* for each fragment row in the slice (both of them)... */
1545         for (; y < slice_height; y += 8) {
1547             /* for each fragment in a row... */
1548             for (x = 0; x < plane_width; x += 8, i++) {
1550                 if ((i < 0) || (i >= s->fragment_count)) {
1551                     av_log(s->avctx, AV_LOG_ERROR, "  vp3:render_slice(): bad fragment number (%d)\n", i);
1552                     return;
1555                 /* transform if this block was coded */
1556                 if ((s->all_fragments[i].coding_method != MODE_COPY) &&
1557                     !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) {
                     /* golden-frame modes reference the golden plane;
                      * everything else references the previous frame */
1559                     if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
1560                         (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
1561                         motion_source= golden_plane;
1562                     else
1563                         motion_source= last_plane;
1565                     motion_source += s->all_fragments[i].first_pixel;
1566                     motion_halfpel_index = 0;
1568                     /* sort out the motion vector if this fragment is coded
1569                      * using a motion vector method */
1570                     if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
1571                         (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
1572                         int src_x, src_y;
1573                         motion_x = s->all_fragments[i].motion_x;
1574                         motion_y = s->all_fragments[i].motion_y;
                         /* halve the vector for chroma, keeping the
                          * half-pel bit */
1575                         if(plane){
1576                             motion_x= (motion_x>>1) | (motion_x&1);
1577                             motion_y= (motion_y>>1) | (motion_y&1);
1580                         src_x= (motion_x>>1) + x;
1581                         src_y= (motion_y>>1) + y;
1582                         if ((motion_x == 127) || (motion_y == 127))
1583                             av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y);
                         /* bit 0 = horizontal half-pel, bit 1 = vertical */
1585                         motion_halfpel_index = motion_x & 0x01;
1586                         motion_source += (motion_x >> 1);
1588                         motion_halfpel_index |= (motion_y & 0x01) << 1;
1589                         motion_source += ((motion_y >> 1) * stride);
                         /* source block leaves the plane: synthesize the
                          * edge pixels into the emu buffer */
1591                         if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
1592                             uint8_t *temp= s->edge_emu_buffer;
1593                             if(stride<0) temp -= 9*stride;
1594                             else temp += 9*stride;
1596                             ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
1597                             motion_source= temp;
1602                     /* first, take care of copying a block from either the
1603                      * previous or the golden frame */
1604                     if (s->all_fragments[i].coding_method != MODE_INTRA) {
1605                         /* Note, it is possible to implement all MC cases with
1606                            put_no_rnd_pixels_l2 which would look more like the
1607                            VP3 source but this would be slower as
1608                            put_no_rnd_pixels_tab is better optimized */
1609                         if(motion_halfpel_index != 3){
1610                             s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
1611                                 output_plane + s->all_fragments[i].first_pixel,
1612                                 motion_source, stride, 8);
1613                         }else{
1614                             int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
1615                             s->dsp.put_no_rnd_pixels_l2[1](
1616                                 output_plane + s->all_fragments[i].first_pixel,
1617                                 motion_source - d,
1618                                 motion_source + stride + 1 + d,
1619                                 stride, 8);
1621                         dequantizer = s->qmat[1][plane];
1622                     }else{
1623                         dequantizer = s->qmat[0][plane];
1626                     /* dequantize the DCT coefficients */
1627                     debug_idct("fragment %d, coding mode %d, DC = %d, dequant = %d:\n",
1628                         i, s->all_fragments[i].coding_method,
1629                         DC_COEFF(i), dequantizer[0]);
                     /* the VP3 IDCT wants raw products; other IDCTs need the
                      * (x*q + 2) >> 2 scaling applied here */
1631                     if(s->avctx->idct_algo==FF_IDCT_VP3){
1632                         Coeff *coeff= s->coeffs + i;
1633                         memset(block, 0, sizeof(block));
1634                         while(coeff->next){
1635                             block[coeff->index]= coeff->coeff * dequantizer[coeff->index];
1636                             coeff= coeff->next;
1638                     }else{
1639                         Coeff *coeff= s->coeffs + i;
1640                         memset(block, 0, sizeof(block));
1641                         while(coeff->next){
1642                             block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2;
1643                             coeff= coeff->next;
1647                     /* invert DCT and place (or add) in final output */
1649                     if (s->all_fragments[i].coding_method == MODE_INTRA) {
1650                         if(s->avctx->idct_algo!=FF_IDCT_VP3)
1651                             block[0] += 128<<3;
1652                         s->dsp.idct_put(
1653                             output_plane + s->all_fragments[i].first_pixel,
1654                             stride,
1655                             block);
1656                     } else {
1657                         s->dsp.idct_add(
1658                             output_plane + s->all_fragments[i].first_pixel,
1659                             stride,
1660                             block);
1663                     debug_idct("block after idct_%s():\n",
1664                         (s->all_fragments[i].coding_method == MODE_INTRA)?
1665                         "put" : "add");
1666                     for (m = 0; m < 8; m++) {
1667                         for (n = 0; n < 8; n++) {
1668                             debug_idct(" %3d", *(output_plane +
1669                                 s->all_fragments[i].first_pixel + (m * stride + n)));
1671                         debug_idct("\n");
1673                     debug_idct("\n");
1675                 } else {
1677                     /* copy directly from the previous frame */
1678                     s->dsp.put_pixels_tab[1][0](
1679                         output_plane + s->all_fragments[i].first_pixel,
1680                         last_plane + s->all_fragments[i].first_pixel,
1681                         stride, 8);
1684 #if 0
1685                 /* perform the left edge filter if:
1686                  * - the fragment is not on the left column
1687                  * - the fragment is coded in this frame
1688                  * - the fragment is not coded in this frame but the left
1689                  *   fragment is coded in this frame (this is done instead
1690                  *   of a right edge filter when rendering the left fragment
1691                  *   since this fragment is not available yet) */
1692                 if ((x > 0) &&
1693                     ((s->all_fragments[i].coding_method != MODE_COPY) ||
1694                      ((s->all_fragments[i].coding_method == MODE_COPY) &&
1695                       (s->all_fragments[i - 1].coding_method != MODE_COPY)) )) {
1696                     horizontal_filter(
1697                         output_plane + s->all_fragments[i].first_pixel + 7*stride,
1698                         -stride, s->bounding_values_array + 127);
1701                 /* perform the top edge filter if:
1702                  * - the fragment is not on the top row
1703                  * - the fragment is coded in this frame
1704                  * - the fragment is not coded in this frame but the above
1705                  *   fragment is coded in this frame (this is done instead
1706                  *   of a bottom edge filter when rendering the above
1707                  *   fragment since this fragment is not available yet) */
1708                 if ((y > 0) &&
1709                     ((s->all_fragments[i].coding_method != MODE_COPY) ||
1710                      ((s->all_fragments[i].coding_method == MODE_COPY) &&
1711                       (s->all_fragments[i - fragment_width].coding_method != MODE_COPY)) )) {
1712                     vertical_filter(
1713                         output_plane + s->all_fragments[i].first_pixel - stride,
1714                         -stride, s->bounding_values_array + 127);
1716 #endif
1721     /* this looks like a good place for slice dispatch... */
1722     /* algorithm:
1723      *   if (slice == s->macroblock_height - 1)
1724      *     dispatch (both last slice & 2nd-to-last slice);
1725      *   else if (slice > 0)
1726      *     dispatch (slice - 1);
1729     emms_c();
/*
 * Apply the VP3 deblocking filter across a vertical block edge.
 * first_pixel points at the first pixel to the right of the edge; one
 * filter step runs on each of the 8 rows, adjusting the two pixels that
 * straddle the edge.  bounding_values is a lookup table (centered on
 * index 0) that limits the correction strength.
 */
static void horizontal_filter(unsigned char *first_pixel, int stride,
    int *bounding_values)
{
    int row;

    for (row = 0; row < 8; row++) {
        unsigned char *p = first_pixel + row * stride;
        int delta = bounding_values[((p[-2] - p[1]) + 3 * (p[0] - p[-1]) + 4) >> 3];
        int left  = p[-1] + delta;
        int right = p[0]  - delta;

        /* clamp to the valid 8-bit pixel range (inlined av_clip_uint8) */
        p[-1] = left  < 0 ? 0 : (left  > 255 ? 255 : left);
        p[0]  = right < 0 ? 0 : (right > 255 ? 255 : right);
    }
}
/*
 * Apply the VP3 deblocking filter across a horizontal block edge.
 * first_pixel points at the first pixel below the edge; one filter step
 * runs on each of the 8 columns, adjusting the two pixels that straddle
 * the edge.  bounding_values is a lookup table (centered on index 0)
 * that limits the correction strength.
 */
static void vertical_filter(unsigned char *first_pixel, int stride,
    int *bounding_values)
{
    int col;

    for (col = 0; col < 8; col++) {
        unsigned char *p = first_pixel + col;
        int delta = bounding_values[((p[-2 * stride] - p[stride])
                                     + 3 * (p[0] - p[-stride]) + 4) >> 3];
        int above = p[-stride] + delta;
        int below = p[0]       - delta;

        /* clamp to the valid 8-bit pixel range (inlined av_clip_uint8) */
        p[-stride] = above < 0 ? 0 : (above > 255 ? 255 : above);
        p[0]       = below < 0 ? 0 : (below > 255 ? 255 : below);
    }
}
/*
 * Run the deblocking loop filter over every coded fragment edge of the
 * current frame, plane by plane.  Edges shared with an uncoded (MODE_COPY)
 * neighbor are filtered from this fragment's side; edges between two
 * coded fragments are filtered when the neighbor is visited.
 */
1765 static void apply_loop_filter(Vp3DecodeContext *s)
1767     int plane;
1768     int x, y;
     /* center the table so negative filter values index below the pointer */
1769     int *bounding_values= s->bounding_values_array+127;
1771 #if 0
1772     int bounding_values_array[256];
1773     int filter_limit;
1775     /* find the right loop limit value */
1776     for (x = 63; x >= 0; x--) {
1777         if (vp31_ac_scale_factor[x] >= s->quality_index)
1778             break;
1780     filter_limit = vp31_filter_limit_values[s->quality_index];
1782     /* set up the bounding values */
1783     memset(bounding_values_array, 0, 256 * sizeof(int));
1784     for (x = 0; x < filter_limit; x++) {
1785         bounding_values[-x - filter_limit] = -filter_limit + x;
1786         bounding_values[-x] = -x;
1787         bounding_values[x] = x;
1788         bounding_values[x + filter_limit] = filter_limit - x;
1790 #endif
1792     for (plane = 0; plane < 3; plane++) {
1793         int width           = s->fragment_width  >> !!plane;
1794         int height          = s->fragment_height >> !!plane;
1795         int fragment        = s->fragment_start        [plane];
1796         int stride          = s->current_frame.linesize[plane];
1797         uint8_t *plane_data = s->current_frame.data    [plane];
1798         if (!s->flipped_image) stride = -stride;
1800         for (y = 0; y < height; y++) {
1802             for (x = 0; x < width; x++) {
1803                 /* do not perform left edge filter for left column fragments */
1804                 if ((x > 0) &&
1805                     (s->all_fragments[fragment].coding_method != MODE_COPY)) {
1806                     horizontal_filter(
1807                         plane_data + s->all_fragments[fragment].first_pixel,
1808                         stride, bounding_values);
1811                 /* do not perform top edge filter for top row fragments */
1812                 if ((y > 0) &&
1813                     (s->all_fragments[fragment].coding_method != MODE_COPY)) {
1814                     vertical_filter(
1815                         plane_data + s->all_fragments[fragment].first_pixel,
1816                         stride, bounding_values);
1819                 /* do not perform right edge filter for right column
1820                  * fragments or if right fragment neighbor is also coded
1821                  * in this frame (it will be filtered in next iteration) */
1822                 if ((x < width - 1) &&
1823                     (s->all_fragments[fragment].coding_method != MODE_COPY) &&
1824                     (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1825                     horizontal_filter(
1826                         plane_data + s->all_fragments[fragment + 1].first_pixel,
1827                         stride, bounding_values);
1830                 /* do not perform bottom edge filter for bottom row
1831                  * fragments or if bottom fragment neighbor is also coded
1832                  * in this frame (it will be filtered in the next row) */
1833                 if ((y < height - 1) &&
1834                     (s->all_fragments[fragment].coding_method != MODE_COPY) &&
1835                     (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1836                     vertical_filter(
1837                         plane_data + s->all_fragments[fragment + width].first_pixel,
1838                         stride, bounding_values);
1841                 fragment++;
1848  * This function computes the first pixel addresses for each fragment.
1849  * This function needs to be invoked after the first frame is allocated
1850  * so that it has access to the plane strides.
 *
 * VP3 orientation: rows are walked with y counting DOWN from
 * fragment_height, so fragment 0 maps to the bottom row of the plane
 * (the image is stored bottom-up unless s->flipped_image is set
 * elsewhere).  first_pixel = linesize*(y*8 - 1) + x*8 for each plane.
1852 static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s)
1855     int i, x, y;
1857     /* figure out the first pixel addresses for each of the fragments */
1858     /* Y plane */
1859     i = 0;
1860     for (y = s->fragment_height; y > 0; y--) {
1861         for (x = 0; x < s->fragment_width; x++) {
1862             s->all_fragments[i++].first_pixel =
1863                 s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
1864                     s->golden_frame.linesize[0] +
1865                     x * FRAGMENT_PIXELS;
1866             debug_init("  fragment %d, first pixel @ %d\n",
1867                 i-1, s->all_fragments[i-1].first_pixel);
1871     /* U plane */
1872     i = s->fragment_start[1];
1873     for (y = s->fragment_height / 2; y > 0; y--) {
1874         for (x = 0; x < s->fragment_width / 2; x++) {
1875             s->all_fragments[i++].first_pixel =
1876                 s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
1877                     s->golden_frame.linesize[1] +
1878                     x * FRAGMENT_PIXELS;
1879             debug_init("  fragment %d, first pixel @ %d\n",
1880                 i-1, s->all_fragments[i-1].first_pixel);
1884     /* V plane */
1885     i = s->fragment_start[2];
1886     for (y = s->fragment_height / 2; y > 0; y--) {
1887         for (x = 0; x < s->fragment_width / 2; x++) {
1888             s->all_fragments[i++].first_pixel =
1889                 s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
1890                     s->golden_frame.linesize[2] +
1891                     x * FRAGMENT_PIXELS;
1892             debug_init("  fragment %d, first pixel @ %d\n",
1893                 i-1, s->all_fragments[i-1].first_pixel);
1898 /* FIXME: this should be merged with the above! */
/*
 * Theora variant of vp3_calculate_pixel_addresses(): identical address
 * formula, but y walks UP from 1 to fragment_height, so fragment 0 maps
 * to the top row of each plane (opposite vertical order to the VP3
 * version above).
 */
1899 static void theora_calculate_pixel_addresses(Vp3DecodeContext *s)
1902     int i, x, y;
1904     /* figure out the first pixel addresses for each of the fragments */
1905     /* Y plane */
1906     i = 0;
1907     for (y = 1; y <= s->fragment_height; y++) {
1908         for (x = 0; x < s->fragment_width; x++) {
1909             s->all_fragments[i++].first_pixel =
1910                 s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
1911                     s->golden_frame.linesize[0] +
1912                     x * FRAGMENT_PIXELS;
1913             debug_init("  fragment %d, first pixel @ %d\n",
1914                 i-1, s->all_fragments[i-1].first_pixel);
1918     /* U plane */
1919     i = s->fragment_start[1];
1920     for (y = 1; y <= s->fragment_height / 2; y++) {
1921         for (x = 0; x < s->fragment_width / 2; x++) {
1922             s->all_fragments[i++].first_pixel =
1923                 s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
1924                     s->golden_frame.linesize[1] +
1925                     x * FRAGMENT_PIXELS;
1926             debug_init("  fragment %d, first pixel @ %d\n",
1927                 i-1, s->all_fragments[i-1].first_pixel);
1931     /* V plane */
1932     i = s->fragment_start[2];
1933     for (y = 1; y <= s->fragment_height / 2; y++) {
1934         for (x = 0; x < s->fragment_width / 2; x++) {
1935             s->all_fragments[i++].first_pixel =
1936                 s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
1937                     s->golden_frame.linesize[2] +
1938                     x * FRAGMENT_PIXELS;
1939             debug_init("  fragment %d, first pixel @ %d\n",
1940                 i-1, s->all_fragments[i-1].first_pixel);
1946 * This is the ffmpeg/libavcodec API init function.
1948 static av_cold int vp3_decode_init(AVCodecContext *avctx)
1950 Vp3DecodeContext *s = avctx->priv_data;
1951 int i, inter, plane;
1952 int c_width;
1953 int c_height;
1954 int y_superblock_count;
1955 int c_superblock_count;
1957 if (avctx->codec_tag == MKTAG('V','P','3','0'))
1958 s->version = 0;
1959 else
1960 s->version = 1;
1962 s->avctx = avctx;
1963 s->width = (avctx->width + 15) & 0xFFFFFFF0;
1964 s->height = (avctx->height + 15) & 0xFFFFFFF0;
1965 avctx->pix_fmt = PIX_FMT_YUV420P;
1966 if(avctx->idct_algo==FF_IDCT_AUTO)
1967 avctx->idct_algo=FF_IDCT_VP3;
1968 dsputil_init(&s->dsp, avctx);
1970 ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
1972 /* initialize to an impossible value which will force a recalculation
1973 * in the first frame decode */
1974 s->quality_index = -1;
1976 s->y_superblock_width = (s->width + 31) / 32;
1977 s->y_superblock_height = (s->height + 31) / 32;
1978 y_superblock_count = s->y_superblock_width * s->y_superblock_height;
1980 /* work out the dimensions for the C planes */
1981 c_width = s->width / 2;
1982 c_height = s->height / 2;
1983 s->c_superblock_width = (c_width + 31) / 32;
1984 s->c_superblock_height = (c_height + 31) / 32;
1985 c_superblock_count = s->c_superblock_width * s->c_superblock_height;
1987 s->superblock_count = y_superblock_count + (c_superblock_count * 2);
1988 s->u_superblock_start = y_superblock_count;
1989 s->v_superblock_start = s->u_superblock_start + c_superblock_count;
1990 s->superblock_coding = av_malloc(s->superblock_count);
1992 s->macroblock_width = (s->width + 15) / 16;
1993 s->macroblock_height = (s->height + 15) / 16;
1994 s->macroblock_count = s->macroblock_width * s->macroblock_height;
1996 s->fragment_width = s->width / FRAGMENT_PIXELS;
1997 s->fragment_height = s->height / FRAGMENT_PIXELS;
1999 /* fragment count covers all 8x8 blocks for all 3 planes */
2000 s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
2001 s->fragment_start[1] = s->fragment_width * s->fragment_height;
2002 s->fragment_start[2] = s->fragment_width * s->fragment_height * 5 / 4;
2004 debug_init(" Y plane: %d x %d\n", s->width, s->height);
2005 debug_init(" C plane: %d x %d\n", c_width, c_height);
2006 debug_init(" Y superblocks: %d x %d, %d total\n",
2007 s->y_superblock_width, s->y_superblock_height, y_superblock_count);
2008 debug_init(" C superblocks: %d x %d, %d total\n",
2009 s->c_superblock_width, s->c_superblock_height, c_superblock_count);
2010 debug_init(" total superblocks = %d, U starts @ %d, V starts @ %d\n",
2011 s->superblock_count, s->u_superblock_start, s->v_superblock_start);
2012 debug_init(" macroblocks: %d x %d, %d total\n",
2013 s->macroblock_width, s->macroblock_height, s->macroblock_count);
2014 debug_init(" %d fragments, %d x %d, u starts @ %d, v starts @ %d\n",
2015 s->fragment_count,
2016 s->fragment_width,
2017 s->fragment_height,
2018 s->fragment_start[1],
2019 s->fragment_start[2]);
2021 s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
2022 s->coeff_counts = av_malloc(s->fragment_count * sizeof(*s->coeff_counts));
2023 s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65);
2024 s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
2025 s->pixel_addresses_initialized = 0;
2027 if (!s->theora_tables)
2029 for (i = 0; i < 64; i++) {
2030 s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
2031 s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
2032 s->base_matrix[0][i] = vp31_intra_y_dequant[i];
2033 s->base_matrix[1][i] = vp31_intra_c_dequant[i];
2034 s->base_matrix[2][i] = vp31_inter_dequant[i];
2035 s->filter_limit_values[i] = vp31_filter_limit_values[i];
2038 for(inter=0; inter<2; inter++){
2039 for(plane=0; plane<3; plane++){
2040 s->qr_count[inter][plane]= 1;
2041 s->qr_size [inter][plane][0]= 63;
2042 s->qr_base [inter][plane][0]=
2043 s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
2047 /* init VLC tables */
2048 for (i = 0; i < 16; i++) {
2050 /* DC histograms */
2051 init_vlc(&s->dc_vlc[i], 5, 32,
2052 &dc_bias[i][0][1], 4, 2,
2053 &dc_bias[i][0][0], 4, 2, 0);
2055 /* group 1 AC histograms */
2056 init_vlc(&s->ac_vlc_1[i], 5, 32,
2057 &ac_bias_0[i][0][1], 4, 2,
2058 &ac_bias_0[i][0][0], 4, 2, 0);
2060 /* group 2 AC histograms */
2061 init_vlc(&s->ac_vlc_2[i], 5, 32,
2062 &ac_bias_1[i][0][1], 4, 2,
2063 &ac_bias_1[i][0][0], 4, 2, 0);
2065 /* group 3 AC histograms */
2066 init_vlc(&s->ac_vlc_3[i], 5, 32,
2067 &ac_bias_2[i][0][1], 4, 2,
2068 &ac_bias_2[i][0][0], 4, 2, 0);
2070 /* group 4 AC histograms */
2071 init_vlc(&s->ac_vlc_4[i], 5, 32,
2072 &ac_bias_3[i][0][1], 4, 2,
2073 &ac_bias_3[i][0][0], 4, 2, 0);
2075 } else {
2076 for (i = 0; i < 16; i++) {
2078 /* DC histograms */
2079 init_vlc(&s->dc_vlc[i], 5, 32,
2080 &s->huffman_table[i][0][1], 4, 2,
2081 &s->huffman_table[i][0][0], 4, 2, 0);
2083 /* group 1 AC histograms */
2084 init_vlc(&s->ac_vlc_1[i], 5, 32,
2085 &s->huffman_table[i+16][0][1], 4, 2,
2086 &s->huffman_table[i+16][0][0], 4, 2, 0);
2088 /* group 2 AC histograms */
2089 init_vlc(&s->ac_vlc_2[i], 5, 32,
2090 &s->huffman_table[i+16*2][0][1], 4, 2,
2091 &s->huffman_table[i+16*2][0][0], 4, 2, 0);
2093 /* group 3 AC histograms */
2094 init_vlc(&s->ac_vlc_3[i], 5, 32,
2095 &s->huffman_table[i+16*3][0][1], 4, 2,
2096 &s->huffman_table[i+16*3][0][0], 4, 2, 0);
2098 /* group 4 AC histograms */
2099 init_vlc(&s->ac_vlc_4[i], 5, 32,
2100 &s->huffman_table[i+16*4][0][1], 4, 2,
2101 &s->huffman_table[i+16*4][0][0], 4, 2, 0);
2105 init_vlc(&s->superblock_run_length_vlc, 6, 34,
2106 &superblock_run_length_vlc_table[0][1], 4, 2,
2107 &superblock_run_length_vlc_table[0][0], 4, 2, 0);
2109 init_vlc(&s->fragment_run_length_vlc, 5, 30,
2110 &fragment_run_length_vlc_table[0][1], 4, 2,
2111 &fragment_run_length_vlc_table[0][0], 4, 2, 0);
2113 init_vlc(&s->mode_code_vlc, 3, 8,
2114 &mode_code_vlc_table[0][1], 2, 1,
2115 &mode_code_vlc_table[0][0], 2, 1, 0);
2117 init_vlc(&s->motion_vector_vlc, 6, 63,
2118 &motion_vector_vlc_table[0][1], 2, 1,
2119 &motion_vector_vlc_table[0][0], 2, 1, 0);
2121 /* work out the block mapping tables */
2122 s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
2123 s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int));
2124 s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int));
2125 s->macroblock_coding = av_malloc(s->macroblock_count + 1);
2126 init_block_mapping(s);
2128 for (i = 0; i < 3; i++) {
2129 s->current_frame.data[i] = NULL;
2130 s->last_frame.data[i] = NULL;
2131 s->golden_frame.data[i] = NULL;
2134 return 0;
/*
 * vp3_decode_frame(): libavcodec frame-decoding entry point.
 *
 * Parses the frame header from buf, manages the golden/last/current
 * reference frames, then unpacks and renders the frame.  Returns
 * buf_size on success, -1 on any parse or allocation error.
 */
2138 * This is the ffmpeg/libavcodec API frame decode function.
2140 static int vp3_decode_frame(AVCodecContext *avctx,
2141 void *data, int *data_size,
2142 const uint8_t *buf, int buf_size)
2144 Vp3DecodeContext *s = avctx->priv_data;
2145 GetBitContext gb;
/* debug-only frame counter; static, so it is shared by every decoder
 * instance in the process (not thread-safe, used only for logging) */
2146 static int counter = 0;
2147 int i;
2149 init_get_bits(&gb, buf, buf_size * 8);
/* in Theora a set first bit marks a header packet, which must not be
 * fed to the frame decoder */
2151 if (s->theora && get_bits1(&gb))
2153 av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
2154 return -1;
/* a 0 bit means keyframe; raw VP3 carries one extra bit that is skipped */
2157 s->keyframe = !get_bits1(&gb);
2158 if (!s->theora)
2159 skip_bits(&gb, 1);
2160 s->last_quality_index = s->quality_index;
/* read 1..3 quality (qi) indices; Theora >= 3.2 may signal extras, but
 * only qis[0] is used below */
2162 s->nqis=0;
2164 s->qis[s->nqis++]= get_bits(&gb, 6);
2165 } while(s->theora >= 0x030200 && s->nqis<3 && get_bits1(&gb));
2167 s->quality_index= s->qis[0];
2169 if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2170 av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
2171 s->keyframe?"key":"", counter, s->quality_index);
2172 counter++;
/* dequant and loop-filter tables depend on the quality index, so only
 * rebuild them when it changes */
2174 if (s->quality_index != s->last_quality_index) {
2175 init_dequantizer(s);
2176 init_loop_filter(s);
2179 if (s->keyframe) {
2180 if (!s->theora)
2182 skip_bits(&gb, 4); /* width code */
2183 skip_bits(&gb, 4); /* height code */
2184 if (s->version)
2186 s->version = get_bits(&gb, 5);
2187 if (counter == 1)
2188 av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
2191 if (s->version || s->theora)
2193 if (get_bits1(&gb))
2194 av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
2195 skip_bits(&gb, 2); /* reserved? */
/* a keyframe replaces the golden frame: release the old golden frame
 * (and the last frame, unless it aliases the golden) before reallocating */
2198 if (s->last_frame.data[0] == s->golden_frame.data[0]) {
2199 if (s->golden_frame.data[0])
2200 avctx->release_buffer(avctx, &s->golden_frame);
2201 s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
2202 } else {
2203 if (s->golden_frame.data[0])
2204 avctx->release_buffer(avctx, &s->golden_frame);
2205 if (s->last_frame.data[0])
2206 avctx->release_buffer(avctx, &s->last_frame);
2209 s->golden_frame.reference = 3;
2210 if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
2211 av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
2212 return -1;
2215 /* golden frame is also the current frame */
2216 s->current_frame= s->golden_frame;
2218 /* time to figure out pixel addresses? */
2219 if (!s->pixel_addresses_initialized)
2221 if (!s->flipped_image)
2222 vp3_calculate_pixel_addresses(s);
2223 else
2224 theora_calculate_pixel_addresses(s);
2225 s->pixel_addresses_initialized = 1;
2227 } else {
2228 /* allocate a new current frame */
2229 s->current_frame.reference = 3;
/* pixel addresses are set up on the first keyframe, so an inter frame
 * arriving first cannot be decoded */
2230 if (!s->pixel_addresses_initialized) {
2231 av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
2232 return -1;
2234 if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
2235 av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
2236 return -1;
2240 s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
2241 s->current_frame.qstride= 0;
2243 init_frame(s, &gb);
2245 #if KEYFRAMES_ONLY
/* slideshow mode: non-keyframes become a copy of the golden frame */
2246 if (!s->keyframe) {
2248 memcpy(s->current_frame.data[0], s->golden_frame.data[0],
2249 s->current_frame.linesize[0] * s->height);
2250 memcpy(s->current_frame.data[1], s->golden_frame.data[1],
2251 s->current_frame.linesize[1] * s->height / 2);
2252 memcpy(s->current_frame.data[2], s->golden_frame.data[2],
2253 s->current_frame.linesize[2] * s->height / 2);
2255 } else {
2256 #endif
/* unpack the bitstream in layer order: superblock coding info, coding
 * modes, motion vectors, then DCT coefficients */
2258 if (unpack_superblocks(s, &gb)){
2259 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2260 return -1;
2262 if (unpack_modes(s, &gb)){
2263 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2264 return -1;
2266 if (unpack_vectors(s, &gb)){
2267 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2268 return -1;
2270 if (unpack_dct_coeffs(s, &gb)){
2271 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2272 return -1;
/* undo DC prediction for the Y plane and, unless decoding gray-only,
 * the half-resolution C planes */
2275 reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
2276 if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
2277 reverse_dc_prediction(s, s->fragment_start[1],
2278 s->fragment_width / 2, s->fragment_height / 2);
2279 reverse_dc_prediction(s, s->fragment_start[2],
2280 s->fragment_width / 2, s->fragment_height / 2);
/* render one macroblock row (slice) at a time, then loop-filter */
2283 for (i = 0; i < s->macroblock_height; i++)
2284 render_slice(s, i);
2286 apply_loop_filter(s);
2287 #if KEYFRAMES_ONLY
2289 #endif
2291 *data_size=sizeof(AVFrame);
2292 *(AVFrame*)data= s->current_frame;
2294 /* release the last frame, if it is allocated and if it is not the
2295 * golden frame */
2296 if ((s->last_frame.data[0]) &&
2297 (s->last_frame.data[0] != s->golden_frame.data[0]))
2298 avctx->release_buffer(avctx, &s->last_frame);
2300 /* shuffle frames (last = current) */
2301 s->last_frame= s->current_frame;
2302 s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
2304 return buf_size;
2308 * This is the ffmpeg/libavcodec API module cleanup function.
2310 static av_cold int vp3_decode_end(AVCodecContext *avctx)
2312 Vp3DecodeContext *s = avctx->priv_data;
2313 int i;
2315 av_free(s->superblock_coding);
2316 av_free(s->all_fragments);
2317 av_free(s->coeff_counts);
2318 av_free(s->coeffs);
2319 av_free(s->coded_fragment_list);
2320 av_free(s->superblock_fragments);
2321 av_free(s->superblock_macroblocks);
2322 av_free(s->macroblock_fragments);
2323 av_free(s->macroblock_coding);
2325 for (i = 0; i < 16; i++) {
2326 free_vlc(&s->dc_vlc[i]);
2327 free_vlc(&s->ac_vlc_1[i]);
2328 free_vlc(&s->ac_vlc_2[i]);
2329 free_vlc(&s->ac_vlc_3[i]);
2330 free_vlc(&s->ac_vlc_4[i]);
2333 free_vlc(&s->superblock_run_length_vlc);
2334 free_vlc(&s->fragment_run_length_vlc);
2335 free_vlc(&s->mode_code_vlc);
2336 free_vlc(&s->motion_vector_vlc);
2338 /* release all frames */
2339 if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
2340 avctx->release_buffer(avctx, &s->golden_frame);
2341 if (s->last_frame.data[0])
2342 avctx->release_buffer(avctx, &s->last_frame);
2343 /* no need to release the current_frame since it will always be pointing
2344 * to the same frame as either the golden or last frame */
2346 return 0;
2349 static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
2351 Vp3DecodeContext *s = avctx->priv_data;
2353 if (get_bits1(gb)) {
2354 int token;
2355 if (s->entries >= 32) { /* overflow */
2356 av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2357 return -1;
2359 token = get_bits(gb, 5);
2360 //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
2361 s->huffman_table[s->hti][token][0] = s->hbits;
2362 s->huffman_table[s->hti][token][1] = s->huff_code_size;
2363 s->entries++;
2365 else {
2366 if (s->huff_code_size >= 32) {/* overflow */
2367 av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2368 return -1;
2370 s->huff_code_size++;
2371 s->hbits <<= 1;
2372 read_huffman_tree(avctx, gb);
2373 s->hbits |= 1;
2374 read_huffman_tree(avctx, gb);
2375 s->hbits >>= 1;
2376 s->huff_code_size--;
2378 return 0;
2381 #ifdef CONFIG_THEORA_DECODER
/*
 * theora_decode_header(): parse the Theora identification header
 * (packet type 0x80) and configure the decoder dimensions.
 * Returns 0 on success, -1 if the coded dimensions are invalid.
 */
2382 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2384 Vp3DecodeContext *s = avctx->priv_data;
2385 int visible_width, visible_height;
/* 24-bit version number, e.g. 0x030200 for 3.2.0 */
2387 s->theora = get_bits_long(gb, 24);
2388 av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2390 /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
2391 /* but previous versions have the image flipped relative to vp3 */
2392 if (s->theora < 0x030200)
2394 s->flipped_image = 1;
2395 av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
/* coded dimensions are stored in units of 16x16 macroblocks */
2398 s->width = get_bits(gb, 16) << 4;
2399 s->height = get_bits(gb, 16) << 4;
2401 if(avcodec_check_dimensions(avctx, s->width, s->height)){
2402 av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
2403 s->width= s->height= 0;
2404 return -1;
/* newer (>= 3.4) streams carry explicit superblock/block/macroblock
 * counts, which this decoder skips */
2407 if (s->theora >= 0x030400)
2409 skip_bits(gb, 32); /* total number of superblocks in a frame */
2410 // fixme, the next field is 36bits long
2411 skip_bits(gb, 32); /* total number of blocks in a frame */
2412 skip_bits(gb, 4); /* total number of blocks in a frame */
2413 skip_bits(gb, 32); /* total number of macroblocks in a frame */
2416 visible_width = get_bits_long(gb, 24);
2417 visible_height = get_bits_long(gb, 24);
2419 if (s->theora >= 0x030200) {
2420 skip_bits(gb, 8); /* offset x */
2421 skip_bits(gb, 8); /* offset y */
2424 skip_bits(gb, 32); /* fps numerator */
2425 skip_bits(gb, 32); /* fps denumerator */
2426 skip_bits(gb, 24); /* aspect numerator */
2427 skip_bits(gb, 24); /* aspect denumerator */
2429 if (s->theora < 0x030200)
2430 skip_bits(gb, 5); /* keyframe frequency force */
2431 skip_bits(gb, 8); /* colorspace */
2432 if (s->theora >= 0x030400)
2433 skip_bits(gb, 2); /* pixel format: 420,res,422,444 */
2434 skip_bits(gb, 24); /* bitrate */
2436 skip_bits(gb, 6); /* quality hint */
2438 if (s->theora >= 0x030200)
2440 skip_bits(gb, 5); /* keyframe frequency force */
2442 if (s->theora < 0x030400)
2443 skip_bits(gb, 5); /* spare bits */
2446 // align_get_bits(gb);
/* prefer the exact visible dimensions when they are consistent with the
 * macroblock-aligned coded size (within one macroblock) */
2448 if ( visible_width <= s->width && visible_width > s->width-16
2449 && visible_height <= s->height && visible_height > s->height-16)
2450 avcodec_set_dimensions(avctx, visible_width, visible_height);
2451 else
2452 avcodec_set_dimensions(avctx, s->width, s->height);
2454 return 0;
2457 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2459 Vp3DecodeContext *s = avctx->priv_data;
2460 int i, n, matrices, inter, plane;
2462 if (s->theora >= 0x030200) {
2463 n = get_bits(gb, 3);
2464 /* loop filter limit values table */
2465 for (i = 0; i < 64; i++)
2466 s->filter_limit_values[i] = get_bits(gb, n);
2469 if (s->theora >= 0x030200)
2470 n = get_bits(gb, 4) + 1;
2471 else
2472 n = 16;
2473 /* quality threshold table */
2474 for (i = 0; i < 64; i++)
2475 s->coded_ac_scale_factor[i] = get_bits(gb, n);
2477 if (s->theora >= 0x030200)
2478 n = get_bits(gb, 4) + 1;
2479 else
2480 n = 16;
2481 /* dc scale factor table */
2482 for (i = 0; i < 64; i++)
2483 s->coded_dc_scale_factor[i] = get_bits(gb, n);
2485 if (s->theora >= 0x030200)
2486 matrices = get_bits(gb, 9) + 1;
2487 else
2488 matrices = 3;
2490 if(matrices > 384){
2491 av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
2492 return -1;
2495 for(n=0; n<matrices; n++){
2496 for (i = 0; i < 64; i++)
2497 s->base_matrix[n][i]= get_bits(gb, 8);
2500 for (inter = 0; inter <= 1; inter++) {
2501 for (plane = 0; plane <= 2; plane++) {
2502 int newqr= 1;
2503 if (inter || plane > 0)
2504 newqr = get_bits1(gb);
2505 if (!newqr) {
2506 int qtj, plj;
2507 if(inter && get_bits1(gb)){
2508 qtj = 0;
2509 plj = plane;
2510 }else{
2511 qtj= (3*inter + plane - 1) / 3;
2512 plj= (plane + 2) % 3;
2514 s->qr_count[inter][plane]= s->qr_count[qtj][plj];
2515 memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
2516 memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
2517 } else {
2518 int qri= 0;
2519 int qi = 0;
2521 for(;;){
2522 i= get_bits(gb, av_log2(matrices-1)+1);
2523 if(i>= matrices){
2524 av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
2525 return -1;
2527 s->qr_base[inter][plane][qri]= i;
2528 if(qi >= 63)
2529 break;
2530 i = get_bits(gb, av_log2(63-qi)+1) + 1;
2531 s->qr_size[inter][plane][qri++]= i;
2532 qi += i;
2535 if (qi > 63) {
2536 av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
2537 return -1;
2539 s->qr_count[inter][plane]= qri;
2544 /* Huffman tables */
2545 for (s->hti = 0; s->hti < 80; s->hti++) {
2546 s->entries = 0;
2547 s->huff_code_size = 1;
2548 if (!get_bits1(gb)) {
2549 s->hbits = 0;
2550 read_huffman_tree(avctx, gb);
2551 s->hbits = 1;
2552 read_huffman_tree(avctx, gb);
2556 s->theora_tables = 1;
2558 return 0;
2561 static int theora_decode_init(AVCodecContext *avctx)
2563 Vp3DecodeContext *s = avctx->priv_data;
2564 GetBitContext gb;
2565 int ptype;
2566 uint8_t *header_start[3];
2567 int header_len[3];
2568 int i;
2570 s->theora = 1;
2572 if (!avctx->extradata_size)
2574 av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
2575 return -1;
2578 if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
2579 42, header_start, header_len) < 0) {
2580 av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
2581 return -1;
2584 for(i=0;i<3;i++) {
2585 init_get_bits(&gb, header_start[i], header_len[i]);
2587 ptype = get_bits(&gb, 8);
2588 debug_vp3("Theora headerpacket type: %x\n", ptype);
2590 if (!(ptype & 0x80))
2592 av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
2593 // return -1;
2596 // FIXME: Check for this as well.
2597 skip_bits(&gb, 6*8); /* "theora" */
2599 switch(ptype)
2601 case 0x80:
2602 theora_decode_header(avctx, &gb);
2603 break;
2604 case 0x81:
2605 // FIXME: is this needed? it breaks sometimes
2606 // theora_decode_comments(avctx, gb);
2607 break;
2608 case 0x82:
2609 theora_decode_tables(avctx, &gb);
2610 break;
2611 default:
2612 av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
2613 break;
2615 if(8*header_len[i] != get_bits_count(&gb))
2616 av_log(avctx, AV_LOG_ERROR, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
2617 if (s->theora < 0x030200)
2618 break;
2621 vp3_decode_init(avctx);
2622 return 0;
/* Theora decoder registration.  The fields are positional AVCodec
 * members (name, type, id, priv_data_size, init, encode, close, decode);
 * the decoder shares all frame-level code with VP3. */
2625 AVCodec theora_decoder = {
2626 "theora",
2627 CODEC_TYPE_VIDEO,
2628 CODEC_ID_THEORA,
/* size of the per-instance decoder context */
2629 sizeof(Vp3DecodeContext),
2630 theora_decode_init,
/* no encode callback — decoder only */
2631 NULL,
2632 vp3_decode_end,
2633 vp3_decode_frame,
/* NOTE(review): the field between .decode and this NULL (presumably
 * .capabilities) is not visible in this view — confirm against the
 * full file */
2635 NULL,
2636 .long_name = NULL_IF_CONFIG_SMALL("Theora"),
2638 #endif
2640 AVCodec vp3_decoder = {
2641 "vp3",
2642 CODEC_TYPE_VIDEO,
2643 CODEC_ID_VP3,
2644 sizeof(Vp3DecodeContext),
2645 vp3_decode_init,
2646 NULL,
2647 vp3_decode_end,
2648 vp3_decode_frame,
2650 NULL,
2651 .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),