2 * Intel Indeo 3 (IV31, IV32, etc.) video decoder for FFmpeg
3 * written, produced, and directed by Alan Smithee
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "bytestream.h"
31 #include "indeo3data.h"
38 unsigned short y_w
, y_h
;
39 unsigned short uv_w
, uv_h
;
42 typedef struct Indeo3DecodeContext
{
43 AVCodecContext
*avctx
;
53 uint8_t *corrector_type
;
54 } Indeo3DecodeContext
;
56 static const uint8_t corrector_type_0
[24] = {
57 195, 159, 133, 115, 101, 93, 87, 77,
58 195, 159, 133, 115, 101, 93, 87, 77,
59 128, 79, 79, 79, 79, 79, 79, 79
62 static const uint8_t corrector_type_2
[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
64 static av_cold
int build_modpred(Indeo3DecodeContext
*s
)
68 if (!(s
->ModPred
= av_malloc(8 * 128)))
69 return AVERROR(ENOMEM
);
71 for (i
=0; i
< 128; ++i
) {
72 s
->ModPred
[i
+0*128] = i
> 126 ? 254 : 2*(i
+ 1 - ((i
+ 1) % 2));
73 s
->ModPred
[i
+1*128] = i
== 7 ? 20 :
75 i
== 120 ? 236 : 2*(i
+ 2 - ((i
+ 1) % 3));
76 s
->ModPred
[i
+2*128] = i
> 125 ? 248 : 2*(i
+ 2 - ((i
+ 2) % 4));
77 s
->ModPred
[i
+3*128] = 2*(i
+ 1 - ((i
- 3) % 5));
78 s
->ModPred
[i
+4*128] = i
== 8 ? 20 : 2*(i
+ 1 - ((i
- 3) % 6));
79 s
->ModPred
[i
+5*128] = 2*(i
+ 4 - ((i
+ 3) % 7));
80 s
->ModPred
[i
+6*128] = i
> 123 ? 240 : 2*(i
+ 4 - ((i
+ 4) % 8));
81 s
->ModPred
[i
+7*128] = 2*(i
+ 5 - ((i
+ 4) % 9));
84 if (!(s
->corrector_type
= av_malloc(24 * 256)))
85 return AVERROR(ENOMEM
);
87 for (i
=0; i
< 24; ++i
) {
88 for (j
=0; j
< 256; ++j
) {
89 s
->corrector_type
[i
*256+j
] = j
< corrector_type_0
[i
] ? 1 :
90 j
< 248 || (i
== 16 && j
== 248) ? 0 :
91 corrector_type_2
[j
- 248];
98 static av_cold
int iv_alloc_frames(Indeo3DecodeContext
*s
)
100 int luma_width
= (s
->width
+ 3) & ~3,
101 luma_height
= (s
->height
+ 3) & ~3,
102 chroma_width
= ((luma_width
>> 2) + 3) & ~3,
103 chroma_height
= ((luma_height
>> 2) + 3) & ~3,
104 luma_pixels
= luma_width
* luma_height
,
105 chroma_pixels
= chroma_width
* chroma_height
,
107 unsigned int bufsize
= luma_pixels
* 2 + luma_width
* 3 +
108 (chroma_pixels
+ chroma_width
) * 4;
110 if(!(s
->buf
= av_malloc(bufsize
)))
111 return AVERROR(ENOMEM
);
112 s
->iv_frame
[0].y_w
= s
->iv_frame
[1].y_w
= luma_width
;
113 s
->iv_frame
[0].y_h
= s
->iv_frame
[1].y_h
= luma_height
;
114 s
->iv_frame
[0].uv_w
= s
->iv_frame
[1].uv_w
= chroma_width
;
115 s
->iv_frame
[0].uv_h
= s
->iv_frame
[1].uv_h
= chroma_height
;
117 s
->iv_frame
[0].Ybuf
= s
->buf
+ luma_width
;
118 i
= luma_pixels
+ luma_width
* 2;
119 s
->iv_frame
[1].Ybuf
= s
->buf
+ i
;
120 i
+= (luma_pixels
+ luma_width
);
121 s
->iv_frame
[0].Ubuf
= s
->buf
+ i
;
122 i
+= (chroma_pixels
+ chroma_width
);
123 s
->iv_frame
[1].Ubuf
= s
->buf
+ i
;
124 i
+= (chroma_pixels
+ chroma_width
);
125 s
->iv_frame
[0].Vbuf
= s
->buf
+ i
;
126 i
+= (chroma_pixels
+ chroma_width
);
127 s
->iv_frame
[1].Vbuf
= s
->buf
+ i
;
129 for(i
= 1; i
<= luma_width
; i
++)
130 s
->iv_frame
[0].Ybuf
[-i
] = s
->iv_frame
[1].Ybuf
[-i
] =
131 s
->iv_frame
[0].Ubuf
[-i
] = 0x80;
133 for(i
= 1; i
<= chroma_width
; i
++) {
134 s
->iv_frame
[1].Ubuf
[-i
] = 0x80;
135 s
->iv_frame
[0].Vbuf
[-i
] = 0x80;
136 s
->iv_frame
[1].Vbuf
[-i
] = 0x80;
137 s
->iv_frame
[1].Vbuf
[chroma_pixels
+i
-1] = 0x80;
143 static av_cold
void iv_free_func(Indeo3DecodeContext
*s
)
147 av_free(s
->corrector_type
);
156 long split_direction
;
161 #define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
162 if((lv1 & 0x80) != 0) { \
173 #define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
186 #define LP2_CHECK(buf1,rle_v3,lp2) \
187 if(lp2 == 0 && rle_v3 != 0) \
195 #define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
203 static void iv_Decode_Chunk(Indeo3DecodeContext
*s
,
204 uint8_t *cur
, uint8_t *ref
, int width
, int height
,
205 const uint8_t *buf1
, long cb_offset
, const uint8_t *hdr
,
206 const uint8_t *buf2
, int min_width_160
)
209 unsigned long bit_pos
, lv
, lv1
, lv2
;
210 long *width_tbl
, width_tbl_arr
[10];
211 const signed char *ref_vectors
;
212 uint8_t *cur_frm_pos
, *ref_frm_pos
, *cp
, *cp2
;
213 uint32_t *cur_lp
, *ref_lp
;
214 const uint32_t *correction_lp
[2], *correctionloworder_lp
[2], *correctionhighorder_lp
[2];
215 uint8_t *correction_type_sp
[2];
216 ustr_t strip_tbl
[20], *strip
;
217 int i
, j
, k
, lp1
, lp2
, flag1
, cmd
, blks_width
, blks_height
, region_160_width
,
218 rle_v1
, rle_v2
, rle_v3
;
224 width_tbl
= width_tbl_arr
+ 1;
225 i
= (width
< 0 ? width
+ 3 : width
)/4;
226 for(j
= -1; j
< 8; j
++)
227 width_tbl
[j
] = i
* j
;
231 for(region_160_width
= 0; region_160_width
< (width
- min_width_160
); region_160_width
+= min_width_160
);
233 strip
->ypos
= strip
->xpos
= 0;
234 for(strip
->width
= min_width_160
; width
> strip
->width
; strip
->width
*= 2);
235 strip
->height
= height
;
236 strip
->split_direction
= 0;
237 strip
->split_flag
= 0;
242 rle_v1
= rle_v2
= rle_v3
= 0;
244 while(strip
>= strip_tbl
) {
251 cmd
= (bit_buf
>> bit_pos
) & 0x03;
255 memcpy(strip
, strip
-1, sizeof(ustr_t
));
256 strip
->split_flag
= 1;
257 strip
->split_direction
= 0;
258 strip
->height
= (strip
->height
> 8 ? ((strip
->height
+8)>>4)<<3 : 4);
260 } else if(cmd
== 1) {
262 memcpy(strip
, strip
-1, sizeof(ustr_t
));
263 strip
->split_flag
= 1;
264 strip
->split_direction
= 1;
265 strip
->width
= (strip
->width
> 8 ? ((strip
->width
+8)>>4)<<3 : 4);
267 } else if(cmd
== 2) {
268 if(strip
->usl7
== 0) {
273 } else if(cmd
== 3) {
274 if(strip
->usl7
== 0) {
276 ref_vectors
= (const signed char*)buf2
+ (*buf1
* 2);
282 cur_frm_pos
= cur
+ width
* strip
->ypos
+ strip
->xpos
;
284 if((blks_width
= strip
->width
) < 0)
287 blks_height
= strip
->height
;
289 if(ref_vectors
!= NULL
) {
290 ref_frm_pos
= ref
+ (ref_vectors
[0] + strip
->ypos
) * width
+
291 ref_vectors
[1] + strip
->xpos
;
293 ref_frm_pos
= cur_frm_pos
- width_tbl
[4];
302 cmd
= (bit_buf
>> bit_pos
) & 0x03;
304 if(cmd
== 0 || ref_vectors
!= NULL
) {
305 for(lp1
= 0; lp1
< blks_width
; lp1
++) {
306 for(i
= 0, j
= 0; i
< blks_height
; i
++, j
+= width_tbl
[1])
307 ((uint32_t *)cur_frm_pos
)[j
] = ((uint32_t *)ref_frm_pos
)[j
];
319 if((lv
- 8) <= 7 && (k
== 0 || k
== 3 || k
== 10)) {
320 cp2
= s
->ModPred
+ ((lv
- 8) << 7);
322 for(i
= 0; i
< blks_width
<< 2; i
++) {
328 if(k
== 1 || k
== 4) {
329 lv
= (hdr
[j
] & 0xf) + cb_offset
;
330 correction_type_sp
[0] = s
->corrector_type
+ (lv
<< 8);
331 correction_lp
[0] = correction
+ (lv
<< 8);
332 lv
= (hdr
[j
] >> 4) + cb_offset
;
333 correction_lp
[1] = correction
+ (lv
<< 8);
334 correction_type_sp
[1] = s
->corrector_type
+ (lv
<< 8);
336 correctionloworder_lp
[0] = correctionloworder_lp
[1] = correctionloworder
+ (lv
<< 8);
337 correctionhighorder_lp
[0] = correctionhighorder_lp
[1] = correctionhighorder
+ (lv
<< 8);
338 correction_type_sp
[0] = correction_type_sp
[1] = s
->corrector_type
+ (lv
<< 8);
339 correction_lp
[0] = correction_lp
[1] = correction
+ (lv
<< 8);
344 case 0: /********** CASE 0 **********/
345 for( ; blks_height
> 0; blks_height
-= 4) {
346 for(lp1
= 0; lp1
< blks_width
; lp1
++) {
347 for(lp2
= 0; lp2
< 4; ) {
349 cur_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[lp2
];
350 ref_lp
= ((uint32_t *)ref_frm_pos
) + width_tbl
[lp2
];
352 switch(correction_type_sp
[0][k
]) {
354 *cur_lp
= le2me_32(((le2me_32(*ref_lp
) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1);
358 res
= ((le2me_16(((unsigned short *)(ref_lp
))[0]) >> 1) + correction_lp
[lp2
& 0x01][*buf1
]) << 1;
359 ((unsigned short *)cur_lp
)[0] = le2me_16(res
);
360 res
= ((le2me_16(((unsigned short *)(ref_lp
))[1]) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1;
361 ((unsigned short *)cur_lp
)[1] = le2me_16(res
);
367 for(i
= 0, j
= 0; i
< 2; i
++, j
+= width_tbl
[1])
368 cur_lp
[j
] = ref_lp
[j
];
374 for(i
= 0, j
= 0; i
< (3 - lp2
); i
++, j
+= width_tbl
[1])
375 cur_lp
[j
] = ref_lp
[j
];
381 RLE_V3_CHECK(buf1
,rle_v1
,rle_v2
,rle_v3
)
383 if(rle_v1
== 1 || ref_vectors
!= NULL
) {
384 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
385 cur_lp
[j
] = ref_lp
[j
];
388 RLE_V2_CHECK(buf1
,rle_v2
, rle_v3
,lp2
)
395 LP2_CHECK(buf1
,rle_v3
,lp2
)
397 for(i
= 0, j
= 0; i
< (4 - lp2
); i
++, j
+= width_tbl
[1])
398 cur_lp
[j
] = ref_lp
[j
];
410 if(ref_vectors
!= NULL
) {
411 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
412 cur_lp
[j
] = ref_lp
[j
];
419 lv
= (lv1
& 0x7F) << 1;
422 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
425 LV1_CHECK(buf1
,rle_v3
,lv1
,lp2
)
436 cur_frm_pos
+= ((width
- blks_width
) * 4);
437 ref_frm_pos
+= ((width
- blks_width
) * 4);
442 case 3: /********** CASE 3 **********/
443 if(ref_vectors
!= NULL
)
447 for( ; blks_height
> 0; blks_height
-= 8) {
448 for(lp1
= 0; lp1
< blks_width
; lp1
++) {
449 for(lp2
= 0; lp2
< 4; ) {
452 cur_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[lp2
* 2];
453 ref_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[(lp2
* 2) - 1];
455 switch(correction_type_sp
[lp2
& 0x01][k
]) {
457 cur_lp
[width_tbl
[1]] = le2me_32(((le2me_32(*ref_lp
) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1);
458 if(lp2
> 0 || flag1
== 0 || strip
->ypos
!= 0)
459 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
461 cur_lp
[0] = le2me_32(((le2me_32(*ref_lp
) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1);
466 res
= ((le2me_16(((unsigned short *)ref_lp
)[0]) >> 1) + correction_lp
[lp2
& 0x01][*buf1
]) << 1;
467 ((unsigned short *)cur_lp
)[width_tbl
[2]] = le2me_16(res
);
468 res
= ((le2me_16(((unsigned short *)ref_lp
)[1]) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1;
469 ((unsigned short *)cur_lp
)[width_tbl
[2]+1] = le2me_16(res
);
471 if(lp2
> 0 || flag1
== 0 || strip
->ypos
!= 0)
472 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
474 cur_lp
[0] = cur_lp
[width_tbl
[1]];
481 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
489 for(i
= 0, j
= 0; i
< 6 - (lp2
* 2); i
++, j
+= width_tbl
[1])
511 RLE_V3_CHECK(buf1
,rle_v1
,rle_v2
,rle_v3
)
514 for(i
= 0, j
= 0; i
< 8; i
++, j
+= width_tbl
[1])
515 cur_lp
[j
] = ref_lp
[j
];
518 RLE_V2_CHECK(buf1
,rle_v2
, rle_v3
,lp2
)
521 rle_v2
= (*buf1
) - 1;
525 LP2_CHECK(buf1
,rle_v3
,lp2
)
527 for(i
= 0, j
= 0; i
< 8 - (lp2
* 2); i
++, j
+= width_tbl
[1])
533 av_log(s
->avctx
, AV_LOG_ERROR
, "UNTESTED.\n");
535 lv
= (lv1
& 0x7F) << 1;
539 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
542 LV1_CHECK(buf1
,rle_v3
,lv1
,lp2
)
553 cur_frm_pos
+= (((width
* 2) - blks_width
) * 4);
558 case 10: /********** CASE 10 **********/
559 if(ref_vectors
== NULL
) {
562 for( ; blks_height
> 0; blks_height
-= 8) {
563 for(lp1
= 0; lp1
< blks_width
; lp1
+= 2) {
564 for(lp2
= 0; lp2
< 4; ) {
566 cur_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[lp2
* 2];
567 ref_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[(lp2
* 2) - 1];
570 if(lp2
== 0 && flag1
!= 0) {
571 #ifdef WORDS_BIGENDIAN
572 lv1
= lv1
& 0xFF00FF00;
573 lv1
= (lv1
>> 8) | lv1
;
574 lv2
= lv2
& 0xFF00FF00;
575 lv2
= (lv2
>> 8) | lv2
;
577 lv1
= lv1
& 0x00FF00FF;
578 lv1
= (lv1
<< 8) | lv1
;
579 lv2
= lv2
& 0x00FF00FF;
580 lv2
= (lv2
<< 8) | lv2
;
584 switch(correction_type_sp
[lp2
& 0x01][k
]) {
586 cur_lp
[width_tbl
[1]] = le2me_32(((le2me_32(lv1
) >> 1) + correctionloworder_lp
[lp2
& 0x01][k
]) << 1);
587 cur_lp
[width_tbl
[1]+1] = le2me_32(((le2me_32(lv2
) >> 1) + correctionhighorder_lp
[lp2
& 0x01][k
]) << 1);
588 if(lp2
> 0 || strip
->ypos
!= 0 || flag1
== 0) {
589 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
590 cur_lp
[1] = ((cur_lp
[-width_tbl
[1]+1] >> 1) + (cur_lp
[width_tbl
[1]+1] >> 1)) & 0xFEFEFEFE;
592 cur_lp
[0] = cur_lp
[width_tbl
[1]];
593 cur_lp
[1] = cur_lp
[width_tbl
[1]+1];
599 cur_lp
[width_tbl
[1]] = le2me_32(((le2me_32(lv1
) >> 1) + correctionloworder_lp
[lp2
& 0x01][*buf1
]) << 1);
600 cur_lp
[width_tbl
[1]+1] = le2me_32(((le2me_32(lv2
) >> 1) + correctionloworder_lp
[lp2
& 0x01][k
]) << 1);
601 if(lp2
> 0 || strip
->ypos
!= 0 || flag1
== 0) {
602 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
603 cur_lp
[1] = ((cur_lp
[-width_tbl
[1]+1] >> 1) + (cur_lp
[width_tbl
[1]+1] >> 1)) & 0xFEFEFEFE;
605 cur_lp
[0] = cur_lp
[width_tbl
[1]];
606 cur_lp
[1] = cur_lp
[width_tbl
[1]+1];
615 for(i
= 0, j
= width_tbl
[1]; i
< 3; i
++, j
+= width_tbl
[1]) {
619 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
620 cur_lp
[1] = ((cur_lp
[-width_tbl
[1]+1] >> 1) + (cur_lp
[width_tbl
[1]+1] >> 1)) & 0xFEFEFEFE;
622 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1]) {
633 if(lp2
== 0 && flag1
!= 0) {
634 for(i
= 0, j
= width_tbl
[1]; i
< 5; i
++, j
+= width_tbl
[1]) {
638 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
639 cur_lp
[1] = ((cur_lp
[-width_tbl
[1]+1] >> 1) + (cur_lp
[width_tbl
[1]+1] >> 1)) & 0xFEFEFEFE;
641 for(i
= 0, j
= 0; i
< 6 - (lp2
* 2); i
++, j
+= width_tbl
[1]) {
652 RLE_V3_CHECK(buf1
,rle_v1
,rle_v2
,rle_v3
)
655 for(i
= 0, j
= width_tbl
[1]; i
< 7; i
++, j
+= width_tbl
[1]) {
659 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
660 cur_lp
[1] = ((cur_lp
[-width_tbl
[1]+1] >> 1) + (cur_lp
[width_tbl
[1]+1] >> 1)) & 0xFEFEFEFE;
662 for(i
= 0, j
= 0; i
< 8; i
++, j
+= width_tbl
[1]) {
668 RLE_V2_CHECK(buf1
,rle_v2
, rle_v3
,lp2
)
672 rle_v2
= (*buf1
) - 1;
675 LP2_CHECK(buf1
,rle_v3
,lp2
)
677 if(lp2
== 0 && flag1
!= 0) {
678 for(i
= 0, j
= width_tbl
[1]; i
< 7; i
++, j
+= width_tbl
[1]) {
682 cur_lp
[0] = ((cur_lp
[-width_tbl
[1]] >> 1) + (cur_lp
[width_tbl
[1]] >> 1)) & 0xFEFEFEFE;
683 cur_lp
[1] = ((cur_lp
[-width_tbl
[1]+1] >> 1) + (cur_lp
[width_tbl
[1]+1] >> 1)) & 0xFEFEFEFE;
685 for(i
= 0, j
= 0; i
< 8 - (lp2
* 2); i
++, j
+= width_tbl
[1]) {
710 av_log(s
->avctx
, AV_LOG_ERROR
, "UNTESTED.\n");
712 lv
= (lv1
& 0x7F) << 1;
715 for(i
= 0, j
= 0; i
< 8; i
++, j
+= width_tbl
[1])
717 LV1_CHECK(buf1
,rle_v3
,lv1
,lp2
)
728 cur_frm_pos
+= (((width
* 2) - blks_width
) * 4);
732 for( ; blks_height
> 0; blks_height
-= 8) {
733 for(lp1
= 0; lp1
< blks_width
; lp1
+= 2) {
734 for(lp2
= 0; lp2
< 4; ) {
736 cur_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[lp2
* 2];
737 ref_lp
= ((uint32_t *)ref_frm_pos
) + width_tbl
[lp2
* 2];
739 switch(correction_type_sp
[lp2
& 0x01][k
]) {
741 lv1
= correctionloworder_lp
[lp2
& 0x01][k
];
742 lv2
= correctionhighorder_lp
[lp2
& 0x01][k
];
743 cur_lp
[0] = le2me_32(((le2me_32(ref_lp
[0]) >> 1) + lv1
) << 1);
744 cur_lp
[1] = le2me_32(((le2me_32(ref_lp
[1]) >> 1) + lv2
) << 1);
745 cur_lp
[width_tbl
[1]] = le2me_32(((le2me_32(ref_lp
[width_tbl
[1]]) >> 1) + lv1
) << 1);
746 cur_lp
[width_tbl
[1]+1] = le2me_32(((le2me_32(ref_lp
[width_tbl
[1]+1]) >> 1) + lv2
) << 1);
751 lv1
= correctionloworder_lp
[lp2
& 0x01][*buf1
++];
752 lv2
= correctionloworder_lp
[lp2
& 0x01][k
];
753 cur_lp
[0] = le2me_32(((le2me_32(ref_lp
[0]) >> 1) + lv1
) << 1);
754 cur_lp
[1] = le2me_32(((le2me_32(ref_lp
[1]) >> 1) + lv2
) << 1);
755 cur_lp
[width_tbl
[1]] = le2me_32(((le2me_32(ref_lp
[width_tbl
[1]]) >> 1) + lv1
) << 1);
756 cur_lp
[width_tbl
[1]+1] = le2me_32(((le2me_32(ref_lp
[width_tbl
[1]+1]) >> 1) + lv2
) << 1);
762 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1]) {
763 cur_lp
[j
] = ref_lp
[j
];
764 cur_lp
[j
+1] = ref_lp
[j
+1];
772 for(i
= 0, j
= 0; i
< 6 - (lp2
* 2); i
++, j
+= width_tbl
[1]) {
773 cur_lp
[j
] = ref_lp
[j
];
774 cur_lp
[j
+1] = ref_lp
[j
+1];
782 RLE_V3_CHECK(buf1
,rle_v1
,rle_v2
,rle_v3
)
783 for(i
= 0, j
= 0; i
< 8; i
++, j
+= width_tbl
[1]) {
784 ((uint32_t *)cur_frm_pos
)[j
] = ((uint32_t *)ref_frm_pos
)[j
];
785 ((uint32_t *)cur_frm_pos
)[j
+1] = ((uint32_t *)ref_frm_pos
)[j
+1];
787 RLE_V2_CHECK(buf1
,rle_v2
, rle_v3
,lp2
)
791 rle_v2
= (*buf1
) - 1;
795 LP2_CHECK(buf1
,rle_v3
,lp2
)
798 for(i
= 0, j
= 0; i
< 8 - (lp2
* 2); i
++, j
+= width_tbl
[1]) {
799 cur_lp
[j
] = ref_lp
[j
];
800 cur_lp
[j
+1] = ref_lp
[j
+1];
806 av_log(s
->avctx
, AV_LOG_ERROR
, "UNTESTED.\n");
808 lv
= (lv1
& 0x7F) << 1;
811 for(i
= 0, j
= 0; i
< 8; i
++, j
+= width_tbl
[1])
812 ((uint32_t *)cur_frm_pos
)[j
] = ((uint32_t *)cur_frm_pos
)[j
+1] = lv
;
813 LV1_CHECK(buf1
,rle_v3
,lv1
,lp2
)
825 cur_frm_pos
+= (((width
* 2) - blks_width
) * 4);
826 ref_frm_pos
+= (((width
* 2) - blks_width
) * 4);
831 case 11: /********** CASE 11 **********/
832 if(ref_vectors
== NULL
)
835 for( ; blks_height
> 0; blks_height
-= 8) {
836 for(lp1
= 0; lp1
< blks_width
; lp1
++) {
837 for(lp2
= 0; lp2
< 4; ) {
839 cur_lp
= ((uint32_t *)cur_frm_pos
) + width_tbl
[lp2
* 2];
840 ref_lp
= ((uint32_t *)ref_frm_pos
) + width_tbl
[lp2
* 2];
842 switch(correction_type_sp
[lp2
& 0x01][k
]) {
844 cur_lp
[0] = le2me_32(((le2me_32(*ref_lp
) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1);
845 cur_lp
[width_tbl
[1]] = le2me_32(((le2me_32(ref_lp
[width_tbl
[1]]) >> 1) + correction_lp
[lp2
& 0x01][k
]) << 1);
850 lv1
= (unsigned short)(correction_lp
[lp2
& 0x01][*buf1
++]);
851 lv2
= (unsigned short)(correction_lp
[lp2
& 0x01][k
]);
852 res
= (unsigned short)(((le2me_16(((unsigned short *)ref_lp
)[0]) >> 1) + lv1
) << 1);
853 ((unsigned short *)cur_lp
)[0] = le2me_16(res
);
854 res
= (unsigned short)(((le2me_16(((unsigned short *)ref_lp
)[1]) >> 1) + lv2
) << 1);
855 ((unsigned short *)cur_lp
)[1] = le2me_16(res
);
856 res
= (unsigned short)(((le2me_16(((unsigned short *)ref_lp
)[width_tbl
[2]]) >> 1) + lv1
) << 1);
857 ((unsigned short *)cur_lp
)[width_tbl
[2]] = le2me_16(res
);
858 res
= (unsigned short)(((le2me_16(((unsigned short *)ref_lp
)[width_tbl
[2]+1]) >> 1) + lv2
) << 1);
859 ((unsigned short *)cur_lp
)[width_tbl
[2]+1] = le2me_16(res
);
865 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
866 cur_lp
[j
] = ref_lp
[j
];
873 for(i
= 0, j
= 0; i
< 6 - (lp2
* 2); i
++, j
+= width_tbl
[1])
874 cur_lp
[j
] = ref_lp
[j
];
881 RLE_V3_CHECK(buf1
,rle_v1
,rle_v2
,rle_v3
)
883 for(i
= 0, j
= 0; i
< 8; i
++, j
+= width_tbl
[1])
884 cur_lp
[j
] = ref_lp
[j
];
886 RLE_V2_CHECK(buf1
,rle_v2
, rle_v3
,lp2
)
890 rle_v2
= (*buf1
) - 1;
894 LP2_CHECK(buf1
,rle_v3
,lp2
)
897 for(i
= 0, j
= 0; i
< 8 - (lp2
* 2); i
++, j
+= width_tbl
[1])
898 cur_lp
[j
] = ref_lp
[j
];
903 av_log(s
->avctx
, AV_LOG_ERROR
, "UNTESTED.\n");
905 lv
= (lv1
& 0x7F) << 1;
908 for(i
= 0, j
= 0; i
< 4; i
++, j
+= width_tbl
[1])
910 LV1_CHECK(buf1
,rle_v3
,lv1
,lp2
)
922 cur_frm_pos
+= (((width
* 2) - blks_width
) * 4);
923 ref_frm_pos
+= (((width
* 2) - blks_width
) * 4);
932 if(strip
< strip_tbl
)
935 for( ; strip
>= strip_tbl
; strip
--) {
936 if(strip
->split_flag
!= 0) {
937 strip
->split_flag
= 0;
938 strip
->usl7
= (strip
-1)->usl7
;
940 if(strip
->split_direction
) {
941 strip
->xpos
+= strip
->width
;
942 strip
->width
= (strip
-1)->width
- strip
->width
;
943 if(region_160_width
<= strip
->xpos
&& width
< strip
->width
+ strip
->xpos
)
944 strip
->width
= width
- strip
->xpos
;
946 strip
->ypos
+= strip
->height
;
947 strip
->height
= (strip
-1)->height
- strip
->height
;
955 static av_cold
int indeo3_decode_init(AVCodecContext
*avctx
)
957 Indeo3DecodeContext
*s
= avctx
->priv_data
;
961 s
->width
= avctx
->width
;
962 s
->height
= avctx
->height
;
963 avctx
->pix_fmt
= PIX_FMT_YUV410P
;
965 if (!(ret
= build_modpred(s
)))
966 ret
= iv_alloc_frames(s
);
973 static unsigned long iv_decode_frame(Indeo3DecodeContext
*s
,
974 const uint8_t *buf
, int buf_size
)
976 unsigned int image_width
, image_height
,
977 chroma_width
, chroma_height
;
978 unsigned long flags
, cb_offset
, data_size
,
979 y_offset
, v_offset
, u_offset
, mc_vector_count
;
980 const uint8_t *hdr_pos
, *buf_pos
;
983 buf_pos
+= 18; /* skip OS header (16 bytes) and version number */
985 flags
= bytestream_get_le16(&buf_pos
);
986 data_size
= bytestream_get_le32(&buf_pos
);
987 cb_offset
= *buf_pos
++;
988 buf_pos
+= 3; /* skip reserved byte and checksum */
989 image_height
= bytestream_get_le16(&buf_pos
);
990 image_width
= bytestream_get_le16(&buf_pos
);
992 if(avcodec_check_dimensions(NULL
, image_width
, image_height
))
995 chroma_height
= ((image_height
>> 2) + 3) & 0x7ffc;
996 chroma_width
= ((image_width
>> 2) + 3) & 0x7ffc;
997 y_offset
= bytestream_get_le32(&buf_pos
);
998 v_offset
= bytestream_get_le32(&buf_pos
);
999 u_offset
= bytestream_get_le32(&buf_pos
);
1000 buf_pos
+= 4; /* reserved */
1002 if(data_size
== 0x80) return 4;
1005 s
->cur_frame
= s
->iv_frame
+ 1;
1006 s
->ref_frame
= s
->iv_frame
;
1008 s
->cur_frame
= s
->iv_frame
;
1009 s
->ref_frame
= s
->iv_frame
+ 1;
1012 buf_pos
= buf
+ 16 + y_offset
;
1013 mc_vector_count
= bytestream_get_le32(&buf_pos
);
1015 iv_Decode_Chunk(s
, s
->cur_frame
->Ybuf
, s
->ref_frame
->Ybuf
, image_width
,
1016 image_height
, buf_pos
+ mc_vector_count
* 2, cb_offset
, hdr_pos
, buf_pos
,
1017 FFMIN(image_width
, 160));
1019 if (!(s
->avctx
->flags
& CODEC_FLAG_GRAY
))
1022 buf_pos
= buf
+ 16 + v_offset
;
1023 mc_vector_count
= bytestream_get_le32(&buf_pos
);
1025 iv_Decode_Chunk(s
, s
->cur_frame
->Vbuf
, s
->ref_frame
->Vbuf
, chroma_width
,
1026 chroma_height
, buf_pos
+ mc_vector_count
* 2, cb_offset
, hdr_pos
, buf_pos
,
1027 FFMIN(chroma_width
, 40));
1029 buf_pos
= buf
+ 16 + u_offset
;
1030 mc_vector_count
= bytestream_get_le32(&buf_pos
);
1032 iv_Decode_Chunk(s
, s
->cur_frame
->Ubuf
, s
->ref_frame
->Ubuf
, chroma_width
,
1033 chroma_height
, buf_pos
+ mc_vector_count
* 2, cb_offset
, hdr_pos
, buf_pos
,
1034 FFMIN(chroma_width
, 40));
1041 static int indeo3_decode_frame(AVCodecContext
*avctx
,
1042 void *data
, int *data_size
,
1043 const uint8_t *buf
, int buf_size
)
1045 Indeo3DecodeContext
*s
=avctx
->priv_data
;
1046 uint8_t *src
, *dest
;
1049 iv_decode_frame(s
, buf
, buf_size
);
1051 if(s
->frame
.data
[0])
1052 avctx
->release_buffer(avctx
, &s
->frame
);
1054 s
->frame
.reference
= 0;
1055 if(avctx
->get_buffer(avctx
, &s
->frame
) < 0) {
1056 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed\n");
1060 src
= s
->cur_frame
->Ybuf
;
1061 dest
= s
->frame
.data
[0];
1062 for (y
= 0; y
< s
->height
; y
++) {
1063 memcpy(dest
, src
, s
->cur_frame
->y_w
);
1064 src
+= s
->cur_frame
->y_w
;
1065 dest
+= s
->frame
.linesize
[0];
1068 if (!(s
->avctx
->flags
& CODEC_FLAG_GRAY
))
1070 src
= s
->cur_frame
->Ubuf
;
1071 dest
= s
->frame
.data
[1];
1072 for (y
= 0; y
< s
->height
/ 4; y
++) {
1073 memcpy(dest
, src
, s
->cur_frame
->uv_w
);
1074 src
+= s
->cur_frame
->uv_w
;
1075 dest
+= s
->frame
.linesize
[1];
1078 src
= s
->cur_frame
->Vbuf
;
1079 dest
= s
->frame
.data
[2];
1080 for (y
= 0; y
< s
->height
/ 4; y
++) {
1081 memcpy(dest
, src
, s
->cur_frame
->uv_w
);
1082 src
+= s
->cur_frame
->uv_w
;
1083 dest
+= s
->frame
.linesize
[2];
1087 *data_size
=sizeof(AVFrame
);
1088 *(AVFrame
*)data
= s
->frame
;
1093 static av_cold
int indeo3_decode_end(AVCodecContext
*avctx
)
1095 Indeo3DecodeContext
*s
= avctx
->priv_data
;
1102 AVCodec indeo3_decoder
= {
1106 sizeof(Indeo3DecodeContext
),
1110 indeo3_decode_frame
,
1113 .long_name
= NULL_IF_CONFIG_SMALL("Intel Indeo 3"),