[ffmpeg-lucabe.git] / libavformat / utils.c (blob 59fefd2913483072989534a9f4ab005b5c6c668d)
Commit: Add frame_size as a codec parameter requirement for Speex
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "metadata.h"
25 #include "libavutil/avstring.h"
26 #include "riff.h"
27 #include <sys/time.h>
28 #include <time.h>
29 #include <strings.h>
31 #undef NDEBUG
32 #include <assert.h>
34 /**
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
46 /**
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
58 num += (den >> 1);
59 if (num >= den) {
60 val += num / den;
61 num = num % den;
63 f->val = val;
64 f->num = num;
65 f->den = den;
68 /**
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
76 int64_t num, den;
78 num = f->num + incr;
79 den = f->den;
80 if (num < 0) {
81 f->val += num / den;
82 num = num % den;
83 if (num < 0) {
84 num += den;
85 f->val--;
87 } else if (num >= den) {
88 f->val += num / den;
89 num = num % den;
91 f->num = num;
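/* Illustrative sketch (not part of the original file): how the AVFrac helpers
 * above behave. av_frac_init() biases 'num' by den/2 so the integer part
 * rounds to nearest; av_frac_add() carries fractional overflow into 'val'. */
#if 0
static void frac_example(void)
{
    AVFrac f;
    av_frac_init(&f, 0, 0, 9); /* val = 0, num = 4 (den/2 rounding bias), den = 9 */
    av_frac_add(&f, 3);        /* num = 7, val stays 0                            */
    av_frac_add(&f, 3);        /* num = 10 >= 9 -> val = 1, num = 1               */
}
#endif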
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
111 void av_register_input_format(AVInputFormat *format)
113 AVInputFormat **p;
114 p = &first_iformat;
115 while (*p != NULL) p = &(*p)->next;
116 *p = format;
117 format->next = NULL;
120 void av_register_output_format(AVOutputFormat *format)
122 AVOutputFormat **p;
123 p = &first_oformat;
124 while (*p != NULL) p = &(*p)->next;
125 *p = format;
126 format->next = NULL;
129 int match_ext(const char *filename, const char *extensions)
131 const char *ext, *p;
132 char ext1[32], *q;
134 if(!filename)
135 return 0;
137 ext = strrchr(filename, '.');
138 if (ext) {
139 ext++;
140 p = extensions;
141 for(;;) {
142 q = ext1;
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
144 *q++ = *p++;
145 *q = '\0';
146 if (!strcasecmp(ext1, ext))
147 return 1;
148 if (*p == '\0')
149 break;
150 p++;
153 return 0;
156 static int match_format(const char *name, const char *names)
158 const char *p;
159 int len, namelen;
161 if (!name || !names)
162 return 0;
164 namelen = strlen(name);
165 while ((p = strchr(names, ','))) {
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
168 return 1;
169 names = p+1;
171 return !strcasecmp(name, names);
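/* Behaviour sketch (illustrative, not part of the original file): the
 * comma-separated matching above is case-insensitive, e.g.
 *   match_format("MP4",  "mov,mp4,m4a") -> 1
 *   match_format("webm", "mov,mp4,m4a") -> 0 */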
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
187 #endif
188 /* Find the proper file type. */
189 fmt_found = NULL;
190 score_max = 0;
191 fmt = first_oformat;
192 while (fmt != NULL) {
193 score = 0;
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
195 score += 100;
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
197 score += 10;
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
200 score += 5;
202 if (score > score_max) {
203 score_max = score;
204 fmt_found = fmt;
206 fmt = fmt->next;
208 return fmt_found;
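/* Usage sketch (illustrative, not part of the original file): the scoring
 * above prefers an exact short name (+100) over a MIME type match (+10) over
 * a filename extension match (+5). */
#if 0
static void guess_format_example(void)
{
    AVOutputFormat *by_name = guess_format("matroska", NULL, NULL);
    AVOutputFormat *by_ext  = guess_format(NULL, "out.avi", NULL);
    /* by_name is the muxer registered as "matroska" (if compiled in);
     * by_ext is whichever registered muxer lists "avi" in its extensions. */
}
#endif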
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
216 if (fmt) {
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
223 if (stream_fmt)
224 fmt = stream_fmt;
227 return fmt;
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
239 #endif
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
242 return codec_id;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
245 else
246 return CODEC_ID_NONE;
249 AVInputFormat *av_find_input_format(const char *short_name)
251 AVInputFormat *fmt;
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
254 return fmt;
256 return NULL;
259 /* memory handling */
262 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
264 int ret= av_new_packet(pkt, size);
266 if(ret<0)
267 return ret;
269 pkt->pos= url_ftell(s);
271 ret= get_buffer(s, pkt->data, size);
272 if(ret<=0)
273 av_free_packet(pkt);
274 else
275 av_shrink_packet(pkt, ret);
277 return ret;
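/* Usage sketch (illustrative, not part of the original file): read up to
 * 'size' bytes from a ByteIOContext into a freshly allocated packet; the
 * packet is shrunk to the number of bytes actually read. */
#if 0
static int read_raw_block(ByteIOContext *pb, AVPacket *pkt)
{
    int ret = av_get_packet(pb, pkt, 4096); /* up to 4096 bytes */
    if (ret <= 0)
        return ret;       /* nothing read; av_get_packet() already freed pkt */
    return pkt->size;     /* may be smaller than 4096 near EOF */
}
#endif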
281 int av_filename_number_test(const char *filename)
283 char buf[1024];
284 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
287 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
289 AVInputFormat *fmt1, *fmt;
290 int score;
292 fmt = NULL;
293 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
294 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
295 continue;
296 score = 0;
297 if (fmt1->read_probe) {
298 score = fmt1->read_probe(pd);
299 } else if (fmt1->extensions) {
300 if (match_ext(pd->filename, fmt1->extensions)) {
301 score = 50;
304 if (score > *score_max) {
305 *score_max = score;
306 fmt = fmt1;
307 }else if (score == *score_max)
308 fmt = NULL;
310 return fmt;
313 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
314 int score=0;
315 return av_probe_input_format2(pd, is_opened, &score);
318 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
320 AVInputFormat *fmt;
321 fmt = av_probe_input_format2(pd, 1, &score);
323 if (fmt) {
324 if (!strcmp(fmt->name, "mp3")) {
325 st->codec->codec_id = CODEC_ID_MP3;
326 st->codec->codec_type = CODEC_TYPE_AUDIO;
327 } else if (!strcmp(fmt->name, "ac3")) {
328 st->codec->codec_id = CODEC_ID_AC3;
329 st->codec->codec_type = CODEC_TYPE_AUDIO;
330 } else if (!strcmp(fmt->name, "mpegvideo")) {
331 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
332 st->codec->codec_type = CODEC_TYPE_VIDEO;
333 } else if (!strcmp(fmt->name, "m4v")) {
334 st->codec->codec_id = CODEC_ID_MPEG4;
335 st->codec->codec_type = CODEC_TYPE_VIDEO;
336 } else if (!strcmp(fmt->name, "h264")) {
337 st->codec->codec_id = CODEC_ID_H264;
338 st->codec->codec_type = CODEC_TYPE_VIDEO;
339 } else if (!strcmp(fmt->name, "dts")) {
340 st->codec->codec_id = CODEC_ID_DTS;
341 st->codec->codec_type = CODEC_TYPE_AUDIO;
344 return !!fmt;
347 /************************************************************/
348 /* input media file */
351 * Open a media file from an IO stream. 'fmt' must be specified.
353 int av_open_input_stream(AVFormatContext **ic_ptr,
354 ByteIOContext *pb, const char *filename,
355 AVInputFormat *fmt, AVFormatParameters *ap)
357 int err;
358 AVFormatContext *ic;
359 AVFormatParameters default_ap;
361 if(!ap){
362 ap=&default_ap;
363 memset(ap, 0, sizeof(default_ap));
366 if(!ap->prealloced_context)
367 ic = avformat_alloc_context();
368 else
369 ic = *ic_ptr;
370 if (!ic) {
371 err = AVERROR(ENOMEM);
372 goto fail;
374 ic->iformat = fmt;
375 ic->pb = pb;
376 ic->duration = AV_NOPTS_VALUE;
377 ic->start_time = AV_NOPTS_VALUE;
378 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
380 /* allocate private data */
381 if (fmt->priv_data_size > 0) {
382 ic->priv_data = av_mallocz(fmt->priv_data_size);
383 if (!ic->priv_data) {
384 err = AVERROR(ENOMEM);
385 goto fail;
387 } else {
388 ic->priv_data = NULL;
391 if (ic->iformat->read_header) {
392 err = ic->iformat->read_header(ic, ap);
393 if (err < 0)
394 goto fail;
397 if (pb && !ic->data_offset)
398 ic->data_offset = url_ftell(ic->pb);
400 #if LIBAVFORMAT_VERSION_MAJOR < 53
401 ff_metadata_demux_compat(ic);
402 #endif
404 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
406 *ic_ptr = ic;
407 return 0;
408 fail:
409 if (ic) {
410 int i;
411 av_freep(&ic->priv_data);
412 for(i=0;i<ic->nb_streams;i++) {
413 AVStream *st = ic->streams[i];
414 if (st) {
415 av_free(st->priv_data);
416 av_free(st->codec->extradata);
418 av_free(st);
421 av_free(ic);
422 *ic_ptr = NULL;
423 return err;
426 /** size of probe buffer, for guessing file type from file contents */
427 #define PROBE_BUF_MIN 2048
428 #define PROBE_BUF_MAX (1<<20)
430 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
431 AVInputFormat *fmt,
432 int buf_size,
433 AVFormatParameters *ap)
435 int err, probe_size;
436 AVProbeData probe_data, *pd = &probe_data;
437 ByteIOContext *pb = NULL;
439 pd->filename = "";
440 if (filename)
441 pd->filename = filename;
442 pd->buf = NULL;
443 pd->buf_size = 0;
445 if (!fmt) {
446 /* guess format if no file can be opened */
447 fmt = av_probe_input_format(pd, 0);
450 /* Do not open file if the format does not need it. XXX: specific
451 hack needed to handle RTSP/TCP */
452 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
453 /* if no file needed do not try to open one */
454 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
455 goto fail;
457 if (buf_size > 0) {
458 url_setbufsize(pb, buf_size);
461 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
462 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
463 /* read probe data */
464 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
465 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
466 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
467 if (url_fseek(pb, 0, SEEK_SET) < 0) {
468 url_fclose(pb);
469 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
470 pb = NULL;
471 err = AVERROR(EIO);
472 goto fail;
475 /* guess file format */
476 fmt = av_probe_input_format2(pd, 1, &score);
478 av_freep(&pd->buf);
481 /* if still no format found, error */
482 if (!fmt) {
483 err = AVERROR_NOFMT;
484 goto fail;
487 /* check filename in case an image number is expected */
488 if (fmt->flags & AVFMT_NEEDNUMBER) {
489 if (!av_filename_number_test(filename)) {
490 err = AVERROR_NUMEXPECTED;
491 goto fail;
494 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
495 if (err)
496 goto fail;
497 return 0;
498 fail:
499 av_freep(&pd->buf);
500 if (pb)
501 url_fclose(pb);
502 if (ap && ap->prealloced_context)
503 av_free(*ic_ptr);
504 *ic_ptr = NULL;
505 return err;
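/* Usage sketch (illustrative, not part of the original file): the typical
 * demuxing sequence built on the functions in this file, with error handling
 * reduced to a minimum. */
#if 0
static int demux_example(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;                      /* probe + open failed */
    if (av_find_stream_info(ic) < 0)    /* fill in missing codec parameters */
        return -1;
    while (av_read_frame(ic, &pkt) >= 0) {
        /* ... feed pkt to a decoder ... */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
#endif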
509 /*******************************************************/
511 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
512 AVPacketList **plast_pktl){
513 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
514 if (!pktl)
515 return NULL;
517 if (*packet_buffer)
518 (*plast_pktl)->next = pktl;
519 else
520 *packet_buffer = pktl;
522 /* add the packet in the buffered packet list */
523 *plast_pktl = pktl;
524 pktl->pkt= *pkt;
525 return &pktl->pkt;
528 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
530 int ret, i;
531 AVStream *st;
533 for(;;){
534 AVPacketList *pktl = s->raw_packet_buffer;
536 if (pktl) {
537 *pkt = pktl->pkt;
538 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
539 !s->streams[pkt->stream_index]->probe_packets ||
540 s->raw_packet_buffer_remaining_size < pkt->size){
541 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
542 av_freep(&pd->buf);
543 pd->buf_size = 0;
544 s->raw_packet_buffer = pktl->next;
545 s->raw_packet_buffer_remaining_size += pkt->size;
546 av_free(pktl);
547 return 0;
551 av_init_packet(pkt);
552 ret= s->iformat->read_packet(s, pkt);
553 if (ret < 0) {
554 if (!pktl || ret == AVERROR(EAGAIN))
555 return ret;
556 for (i = 0; i < s->nb_streams; i++)
557 s->streams[i]->probe_packets = 0;
558 continue;
560 st= s->streams[pkt->stream_index];
562 switch(st->codec->codec_type){
563 case CODEC_TYPE_VIDEO:
564 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
565 break;
566 case CODEC_TYPE_AUDIO:
567 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
568 break;
569 case CODEC_TYPE_SUBTITLE:
570 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
571 break;
574 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
575 !st->probe_packets))
576 return ret;
578 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
579 s->raw_packet_buffer_remaining_size -= pkt->size;
581 if(st->codec->codec_id == CODEC_ID_PROBE){
582 AVProbeData *pd = &st->probe_data;
584 --st->probe_packets;
586 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
587 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
588 pd->buf_size += pkt->size;
589 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
591 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
592 set_codec_from_probe_data(st, pd, 1);
593 if(st->codec->codec_id != CODEC_ID_PROBE){
594 pd->buf_size=0;
595 av_freep(&pd->buf);
602 /**********************************************************/
605 * Get the number of samples of an audio frame. Return -1 on error.
607 static int get_audio_frame_size(AVCodecContext *enc, int size)
609 int frame_size;
611 if(enc->codec_id == CODEC_ID_VORBIS)
612 return -1;
614 if (enc->frame_size <= 1) {
615 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
617 if (bits_per_sample) {
618 if (enc->channels == 0)
619 return -1;
620 frame_size = (size << 3) / (bits_per_sample * enc->channels);
621 } else {
622 /* used for example by ADPCM codecs */
623 if (enc->bit_rate == 0)
624 return -1;
625 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
627 } else {
628 frame_size = enc->frame_size;
630 return frame_size;
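/* Worked example (illustrative, not part of the original file) for the
 * PCM-style branch above: 16-bit stereo PCM, size = 4096 bytes
 *   frame_size = (4096 << 3) / (16 * 2) = 1024 samples.
 * For the bitrate branch: size = 1000 bytes, sample_rate = 48000,
 * bit_rate = 64000
 *   frame_size = 1000 * 8 * 48000 / 64000 = 6000 samples. */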
635 * Compute the frame duration as a fraction *pnum / *pden, in seconds. Leaves *pnum and *pden at 0 if it is not available.
637 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
638 AVCodecParserContext *pc, AVPacket *pkt)
640 int frame_size;
642 *pnum = 0;
643 *pden = 0;
644 switch(st->codec->codec_type) {
645 case CODEC_TYPE_VIDEO:
646 if(st->time_base.num*1000LL > st->time_base.den){
647 *pnum = st->time_base.num;
648 *pden = st->time_base.den;
649 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
650 *pnum = st->codec->time_base.num;
651 *pden = st->codec->time_base.den;
652 if (pc && pc->repeat_pict) {
653 *pnum = (*pnum) * (1 + pc->repeat_pict);
656 break;
657 case CODEC_TYPE_AUDIO:
658 frame_size = get_audio_frame_size(st->codec, pkt->size);
659 if (frame_size < 0)
660 break;
661 *pnum = frame_size;
662 *pden = st->codec->sample_rate;
663 break;
664 default:
665 break;
669 static int is_intra_only(AVCodecContext *enc){
670 if(enc->codec_type == CODEC_TYPE_AUDIO){
671 return 1;
672 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
673 switch(enc->codec_id){
674 case CODEC_ID_MJPEG:
675 case CODEC_ID_MJPEGB:
676 case CODEC_ID_LJPEG:
677 case CODEC_ID_RAWVIDEO:
678 case CODEC_ID_DVVIDEO:
679 case CODEC_ID_HUFFYUV:
680 case CODEC_ID_FFVHUFF:
681 case CODEC_ID_ASV1:
682 case CODEC_ID_ASV2:
683 case CODEC_ID_VCR1:
684 case CODEC_ID_DNXHD:
685 case CODEC_ID_JPEG2000:
686 return 1;
687 default: break;
690 return 0;
693 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
694 int64_t dts, int64_t pts)
696 AVStream *st= s->streams[stream_index];
697 AVPacketList *pktl= s->packet_buffer;
699 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
700 return;
702 st->first_dts= dts - st->cur_dts;
703 st->cur_dts= dts;
705 for(; pktl; pktl= pktl->next){
706 if(pktl->pkt.stream_index != stream_index)
707 continue;
708 //FIXME think more about this check
709 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
710 pktl->pkt.pts += st->first_dts;
712 if(pktl->pkt.dts != AV_NOPTS_VALUE)
713 pktl->pkt.dts += st->first_dts;
715 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
716 st->start_time= pktl->pkt.pts;
718 if (st->start_time == AV_NOPTS_VALUE)
719 st->start_time = pts;
722 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
724 AVPacketList *pktl= s->packet_buffer;
725 int64_t cur_dts= 0;
727 if(st->first_dts != AV_NOPTS_VALUE){
728 cur_dts= st->first_dts;
729 for(; pktl; pktl= pktl->next){
730 if(pktl->pkt.stream_index == pkt->stream_index){
731 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
732 break;
733 cur_dts -= pkt->duration;
736 pktl= s->packet_buffer;
737 st->first_dts = cur_dts;
738 }else if(st->cur_dts)
739 return;
741 for(; pktl; pktl= pktl->next){
742 if(pktl->pkt.stream_index != pkt->stream_index)
743 continue;
744 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
745 && !pktl->pkt.duration){
746 pktl->pkt.dts= cur_dts;
747 if(!st->codec->has_b_frames)
748 pktl->pkt.pts= cur_dts;
749 cur_dts += pkt->duration;
750 pktl->pkt.duration= pkt->duration;
751 }else
752 break;
754 if(st->first_dts == AV_NOPTS_VALUE)
755 st->cur_dts= cur_dts;
758 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
759 AVCodecParserContext *pc, AVPacket *pkt)
761 int num, den, presentation_delayed, delay, i;
762 int64_t offset;
764 if (pc && pc->pict_type == FF_B_TYPE)
765 st->codec->has_b_frames = 1;
767 /* do we have a video B-frame ? */
768 delay= st->codec->has_b_frames;
769 presentation_delayed = 0;
770 /* XXX: need has_b_frame, but cannot get it if the codec is
771 not initialized */
772 if (delay &&
773 pc && pc->pict_type != FF_B_TYPE)
774 presentation_delayed = 1;
776 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
777 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
778 pkt->dts -= 1LL<<st->pts_wrap_bits;
781 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
782 // we take the conservative approach and discard both
783 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
784 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
785 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
786 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
789 if (pkt->duration == 0) {
790 compute_frame_duration(&num, &den, st, pc, pkt);
791 if (den && num) {
792 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
794 if(pkt->duration != 0 && s->packet_buffer)
795 update_initial_durations(s, st, pkt);
799 /* correct timestamps with byte offset if demuxers only have timestamps
800 on packet boundaries */
801 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
802 /* this will estimate bitrate based on this frame's duration and size */
803 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
804 if(pkt->pts != AV_NOPTS_VALUE)
805 pkt->pts += offset;
806 if(pkt->dts != AV_NOPTS_VALUE)
807 pkt->dts += offset;
810 if (pc && pc->dts_sync_point >= 0) {
811 // we have synchronization info from the parser
812 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
813 if (den > 0) {
814 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
815 if (pkt->dts != AV_NOPTS_VALUE) {
816 // got DTS from the stream, update reference timestamp
817 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
818 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
819 } else if (st->reference_dts != AV_NOPTS_VALUE) {
820 // compute DTS based on reference timestamp
821 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
822 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
824 if (pc->dts_sync_point > 0)
825 st->reference_dts = pkt->dts; // new reference
829 /* This may be redundant, but it should not hurt. */
830 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
831 presentation_delayed = 1;
833 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
834 /* interpolate PTS and DTS if they are not present */
835 //We skip H264 currently because delay and has_b_frames are not reliably set
836 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
837 if (presentation_delayed) {
838 /* DTS = decompression timestamp */
839 /* PTS = presentation timestamp */
840 if (pkt->dts == AV_NOPTS_VALUE)
841 pkt->dts = st->last_IP_pts;
842 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
843 if (pkt->dts == AV_NOPTS_VALUE)
844 pkt->dts = st->cur_dts;
846 /* this is tricky: the dts must be incremented by the duration
847 of the frame we are displaying, i.e. the last I- or P-frame */
848 if (st->last_IP_duration == 0)
849 st->last_IP_duration = pkt->duration;
850 if(pkt->dts != AV_NOPTS_VALUE)
851 st->cur_dts = pkt->dts + st->last_IP_duration;
852 st->last_IP_duration = pkt->duration;
853 st->last_IP_pts= pkt->pts;
854 /* cannot compute PTS if not present (we can compute it only
855 by knowing the future) */
856 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
857 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
858 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
859 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
860 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
861 pkt->pts += pkt->duration;
862 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
866 /* presentation is not delayed : PTS and DTS are the same */
867 if(pkt->pts == AV_NOPTS_VALUE)
868 pkt->pts = pkt->dts;
869 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
870 if(pkt->pts == AV_NOPTS_VALUE)
871 pkt->pts = st->cur_dts;
872 pkt->dts = pkt->pts;
873 if(pkt->pts != AV_NOPTS_VALUE)
874 st->cur_dts = pkt->pts + pkt->duration;
878 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
879 st->pts_buffer[0]= pkt->pts;
880 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
881 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
882 if(pkt->dts == AV_NOPTS_VALUE)
883 pkt->dts= st->pts_buffer[0];
884 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
885 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
887 if(pkt->dts > st->cur_dts)
888 st->cur_dts = pkt->dts;
891 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
893 /* update flags */
894 if(is_intra_only(st->codec))
895 pkt->flags |= PKT_FLAG_KEY;
896 else if (pc) {
897 pkt->flags = 0;
898 /* keyframe computation */
899 if (pc->key_frame == 1)
900 pkt->flags |= PKT_FLAG_KEY;
901 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
902 pkt->flags |= PKT_FLAG_KEY;
904 if (pc)
905 pkt->convergence_duration = pc->convergence_duration;
909 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
911 AVStream *st;
912 int len, ret, i;
914 av_init_packet(pkt);
916 for(;;) {
917 /* select current input stream component */
918 st = s->cur_st;
919 if (st) {
920 if (!st->need_parsing || !st->parser) {
921 /* no parsing needed: we just output the packet as is */
922 /* raw data support */
923 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
924 compute_pkt_fields(s, st, NULL, pkt);
925 s->cur_st = NULL;
926 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
927 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
928 ff_reduce_index(s, st->index);
929 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
931 break;
932 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
933 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
934 st->cur_ptr, st->cur_len,
935 st->cur_pkt.pts, st->cur_pkt.dts,
936 st->cur_pkt.pos);
937 st->cur_pkt.pts = AV_NOPTS_VALUE;
938 st->cur_pkt.dts = AV_NOPTS_VALUE;
939 /* increment read pointer */
940 st->cur_ptr += len;
941 st->cur_len -= len;
943 /* return packet if any */
944 if (pkt->size) {
945 got_packet:
946 pkt->duration = 0;
947 pkt->stream_index = st->index;
948 pkt->pts = st->parser->pts;
949 pkt->dts = st->parser->dts;
950 pkt->pos = st->parser->pos;
951 pkt->destruct = NULL;
952 compute_pkt_fields(s, st, st->parser, pkt);
954 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
955 ff_reduce_index(s, st->index);
956 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
957 0, 0, AVINDEX_KEYFRAME);
960 break;
962 } else {
963 /* free packet */
964 av_free_packet(&st->cur_pkt);
965 s->cur_st = NULL;
967 } else {
968 AVPacket cur_pkt;
969 /* read next packet */
970 ret = av_read_packet(s, &cur_pkt);
971 if (ret < 0) {
972 if (ret == AVERROR(EAGAIN))
973 return ret;
974 /* return the last frames, if any */
975 for(i = 0; i < s->nb_streams; i++) {
976 st = s->streams[i];
977 if (st->parser && st->need_parsing) {
978 av_parser_parse2(st->parser, st->codec,
979 &pkt->data, &pkt->size,
980 NULL, 0,
981 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
982 AV_NOPTS_VALUE);
983 if (pkt->size)
984 goto got_packet;
987 /* no more packets: really terminate parsing */
988 return ret;
990 st = s->streams[cur_pkt.stream_index];
991 st->cur_pkt= cur_pkt;
993 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
994 st->cur_pkt.dts != AV_NOPTS_VALUE &&
995 st->cur_pkt.pts < st->cur_pkt.dts){
996 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
997 st->cur_pkt.stream_index,
998 st->cur_pkt.pts,
999 st->cur_pkt.dts,
1000 st->cur_pkt.size);
1001 // av_free_packet(&st->cur_pkt);
1002 // return -1;
1005 if(s->debug & FF_FDEBUG_TS)
1006 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1007 st->cur_pkt.stream_index,
1008 st->cur_pkt.pts,
1009 st->cur_pkt.dts,
1010 st->cur_pkt.size,
1011 st->cur_pkt.flags);
1013 s->cur_st = st;
1014 st->cur_ptr = st->cur_pkt.data;
1015 st->cur_len = st->cur_pkt.size;
1016 if (st->need_parsing && !st->parser) {
1017 st->parser = av_parser_init(st->codec->codec_id);
1018 if (!st->parser) {
1019 /* no parser available: just output the raw packets */
1020 st->need_parsing = AVSTREAM_PARSE_NONE;
1021 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1022 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1024 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1025 st->parser->next_frame_offset=
1026 st->parser->cur_offset= st->cur_pkt.pos;
1031 if(s->debug & FF_FDEBUG_TS)
1032 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1033 pkt->stream_index,
1034 pkt->pts,
1035 pkt->dts,
1036 pkt->size,
1037 pkt->flags);
1039 return 0;
1042 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1044 AVPacketList *pktl;
1045 int eof=0;
1046 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1048 for(;;){
1049 pktl = s->packet_buffer;
1050 if (pktl) {
1051 AVPacket *next_pkt= &pktl->pkt;
1053 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1054 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1055 if( pktl->pkt.stream_index == next_pkt->stream_index
1056 && next_pkt->dts < pktl->pkt.dts
1057 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1058 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1059 next_pkt->pts= pktl->pkt.dts;
1061 pktl= pktl->next;
1063 pktl = s->packet_buffer;
1066 if( next_pkt->pts != AV_NOPTS_VALUE
1067 || next_pkt->dts == AV_NOPTS_VALUE
1068 || !genpts || eof){
1069 /* read packet from packet buffer, if there is data */
1070 *pkt = *next_pkt;
1071 s->packet_buffer = pktl->next;
1072 av_free(pktl);
1073 return 0;
1076 if(genpts){
1077 int ret= av_read_frame_internal(s, pkt);
1078 if(ret<0){
1079 if(pktl && ret != AVERROR(EAGAIN)){
1080 eof=1;
1081 continue;
1082 }else
1083 return ret;
1086 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1087 &s->packet_buffer_end)) < 0)
1088 return AVERROR(ENOMEM);
1089 }else{
1090 assert(!s->packet_buffer);
1091 return av_read_frame_internal(s, pkt);
1096 /* XXX: suppress the packet queue */
1097 static void flush_packet_queue(AVFormatContext *s)
1099 AVPacketList *pktl;
1101 for(;;) {
1102 pktl = s->packet_buffer;
1103 if (!pktl)
1104 break;
1105 s->packet_buffer = pktl->next;
1106 av_free_packet(&pktl->pkt);
1107 av_free(pktl);
1109 while(s->raw_packet_buffer){
1110 pktl = s->raw_packet_buffer;
1111 s->raw_packet_buffer = pktl->next;
1112 av_free_packet(&pktl->pkt);
1113 av_free(pktl);
1115 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1118 /*******************************************************/
1119 /* seek support */
1121 int av_find_default_stream_index(AVFormatContext *s)
1123 int first_audio_index = -1;
1124 int i;
1125 AVStream *st;
1127 if (s->nb_streams <= 0)
1128 return -1;
1129 for(i = 0; i < s->nb_streams; i++) {
1130 st = s->streams[i];
1131 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1132 return i;
1134 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1135 first_audio_index = i;
1137 return first_audio_index >= 0 ? first_audio_index : 0;
1141 * Flush the frame reader.
1143 void av_read_frame_flush(AVFormatContext *s)
1145 AVStream *st;
1146 int i;
1148 flush_packet_queue(s);
1150 s->cur_st = NULL;
1152 /* for each stream, reset read state */
1153 for(i = 0; i < s->nb_streams; i++) {
1154 st = s->streams[i];
1156 if (st->parser) {
1157 av_parser_close(st->parser);
1158 st->parser = NULL;
1159 av_free_packet(&st->cur_pkt);
1161 st->last_IP_pts = AV_NOPTS_VALUE;
1162 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1163 st->reference_dts = AV_NOPTS_VALUE;
1164 /* fail safe */
1165 st->cur_ptr = NULL;
1166 st->cur_len = 0;
1168 st->probe_packets = MAX_PROBE_PACKETS;
1172 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1173 int i;
1175 for(i = 0; i < s->nb_streams; i++) {
1176 AVStream *st = s->streams[i];
1178 st->cur_dts = av_rescale(timestamp,
1179 st->time_base.den * (int64_t)ref_st->time_base.num,
1180 st->time_base.num * (int64_t)ref_st->time_base.den);
1184 void ff_reduce_index(AVFormatContext *s, int stream_index)
1186 AVStream *st= s->streams[stream_index];
1187 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1189 if((unsigned)st->nb_index_entries >= max_entries){
1190 int i;
1191 for(i=0; 2*i<st->nb_index_entries; i++)
1192 st->index_entries[i]= st->index_entries[2*i];
1193 st->nb_index_entries= i;
1197 int av_add_index_entry(AVStream *st,
1198 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1200 AVIndexEntry *entries, *ie;
1201 int index;
1203 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1204 return -1;
1206 entries = av_fast_realloc(st->index_entries,
1207 &st->index_entries_allocated_size,
1208 (st->nb_index_entries + 1) *
1209 sizeof(AVIndexEntry));
1210 if(!entries)
1211 return -1;
1213 st->index_entries= entries;
1215 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1217 if(index<0){
1218 index= st->nb_index_entries++;
1219 ie= &entries[index];
1220 assert(index==0 || ie[-1].timestamp < timestamp);
1221 }else{
1222 ie= &entries[index];
1223 if(ie->timestamp != timestamp){
1224 if(ie->timestamp <= timestamp)
1225 return -1;
1226 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1227 st->nb_index_entries++;
1228 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1229 distance= ie->min_distance;
1232 ie->pos = pos;
1233 ie->timestamp = timestamp;
1234 ie->min_distance= distance;
1235 ie->size= size;
1236 ie->flags = flags;
1238 return index;
1241 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1242 int flags)
1244 AVIndexEntry *entries= st->index_entries;
1245 int nb_entries= st->nb_index_entries;
1246 int a, b, m;
1247 int64_t timestamp;
1249 a = - 1;
1250 b = nb_entries;
1252 while (b - a > 1) {
1253 m = (a + b) >> 1;
1254 timestamp = entries[m].timestamp;
1255 if(timestamp >= wanted_timestamp)
1256 b = m;
1257 if(timestamp <= wanted_timestamp)
1258 a = m;
1260 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1262 if(!(flags & AVSEEK_FLAG_ANY)){
1263 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1264 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1268 if(m == nb_entries)
1269 return -1;
1270 return m;
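/* Behaviour sketch (illustrative, not part of the original file): with index
 * timestamps {0, 1000, 2000} and wanted_timestamp = 1500, the binary search
 * above ends with a = 1 and b = 2, so
 *   AVSEEK_FLAG_BACKWARD -> entry 1 (ts 1000),
 *   default (forward)    -> entry 2 (ts 2000);
 * without AVSEEK_FLAG_ANY the result is then moved to the nearest entry
 * flagged AVINDEX_KEYFRAME in that direction, or -1 if none exists. */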
1273 #define DEBUG_SEEK
1275 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1276 AVInputFormat *avif= s->iformat;
1277 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1278 int64_t ts_min, ts_max, ts;
1279 int index;
1280 AVStream *st;
1282 if (stream_index < 0)
1283 return -1;
1285 #ifdef DEBUG_SEEK
1286 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1287 #endif
1289 ts_max=
1290 ts_min= AV_NOPTS_VALUE;
1291 pos_limit= -1; //gcc falsely says it may be uninitialized
1293 st= s->streams[stream_index];
1294 if(st->index_entries){
1295 AVIndexEntry *e;
1297 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1298 index= FFMAX(index, 0);
1299 e= &st->index_entries[index];
1301 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1302 pos_min= e->pos;
1303 ts_min= e->timestamp;
1304 #ifdef DEBUG_SEEK
1305 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1306 pos_min,ts_min);
1307 #endif
1308 }else{
1309 assert(index==0);
1312 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1313 assert(index < st->nb_index_entries);
1314 if(index >= 0){
1315 e= &st->index_entries[index];
1316 assert(e->timestamp >= target_ts);
1317 pos_max= e->pos;
1318 ts_max= e->timestamp;
1319 pos_limit= pos_max - e->min_distance;
1320 #ifdef DEBUG_SEEK
1321 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1322 pos_max,pos_limit, ts_max);
1323 #endif
1327 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1328 if(pos<0)
1329 return -1;
1331 /* do the seek */
1332 url_fseek(s->pb, pos, SEEK_SET);
1334 av_update_cur_dts(s, st, ts);
1336 return 0;
1339 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1340 int64_t pos, ts;
1341 int64_t start_pos, filesize;
1342 int no_change;
1344 #ifdef DEBUG_SEEK
1345 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1346 #endif
1348 if(ts_min == AV_NOPTS_VALUE){
1349 pos_min = s->data_offset;
1350 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1351 if (ts_min == AV_NOPTS_VALUE)
1352 return -1;
1355 if(ts_max == AV_NOPTS_VALUE){
1356 int step= 1024;
1357 filesize = url_fsize(s->pb);
1358 pos_max = filesize - 1;
1360 pos_max -= step;
1361 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1362 step += step;
1363 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1364 if (ts_max == AV_NOPTS_VALUE)
1365 return -1;
1367 for(;;){
1368 int64_t tmp_pos= pos_max + 1;
1369 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1370 if(tmp_ts == AV_NOPTS_VALUE)
1371 break;
1372 ts_max= tmp_ts;
1373 pos_max= tmp_pos;
1374 if(tmp_pos >= filesize)
1375 break;
1377 pos_limit= pos_max;
1380 if(ts_min > ts_max){
1381 return -1;
1382 }else if(ts_min == ts_max){
1383 pos_limit= pos_min;
1386 no_change=0;
1387 while (pos_min < pos_limit) {
1388 #ifdef DEBUG_SEEK
1389 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1390 pos_min, pos_max,
1391 ts_min, ts_max);
1392 #endif
1393 assert(pos_limit <= pos_max);
1395 if(no_change==0){
1396 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1397 // interpolate position (better than dichotomy)
1398 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1399 + pos_min - approximate_keyframe_distance;
1400 }else if(no_change==1){
1401 // bisection, if interpolation failed to change min or max pos last time
1402 pos = (pos_min + pos_limit)>>1;
1403 }else{
1404 /* linear search if bisection failed, can only happen if there
1405 are very few or no keyframes between min/max */
1406 pos=pos_min;
1408 if(pos <= pos_min)
1409 pos= pos_min + 1;
1410 else if(pos > pos_limit)
1411 pos= pos_limit;
1412 start_pos= pos;
1414 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1415 if(pos == pos_max)
1416 no_change++;
1417 else
1418 no_change=0;
1419 #ifdef DEBUG_SEEK
1420 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1421 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1422 start_pos, no_change);
1423 #endif
1424 if(ts == AV_NOPTS_VALUE){
1425 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1426 return -1;
1428 assert(ts != AV_NOPTS_VALUE);
1429 if (target_ts <= ts) {
1430 pos_limit = start_pos - 1;
1431 pos_max = pos;
1432 ts_max = ts;
1434 if (target_ts >= ts) {
1435 pos_min = pos;
1436 ts_min = ts;
1440 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1441 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1442 #ifdef DEBUG_SEEK
1443 pos_min = pos;
1444 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1445 pos_min++;
1446 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1447 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1448 pos, ts_min, target_ts, ts_max);
1449 #endif
1450 *ts_ret= ts;
1451 return pos;
1454 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1455 int64_t pos_min, pos_max;
1456 #if 0
1457 AVStream *st;
1459 if (stream_index < 0)
1460 return -1;
1462 st= s->streams[stream_index];
1463 #endif
1465 pos_min = s->data_offset;
1466 pos_max = url_fsize(s->pb) - 1;
1468 if (pos < pos_min) pos= pos_min;
1469 else if(pos > pos_max) pos= pos_max;
1471 url_fseek(s->pb, pos, SEEK_SET);
1473 #if 0
1474 av_update_cur_dts(s, st, ts);
1475 #endif
1476 return 0;
1479 static int av_seek_frame_generic(AVFormatContext *s,
1480 int stream_index, int64_t timestamp, int flags)
1482 int index, ret;
1483 AVStream *st;
1484 AVIndexEntry *ie;
1486 st = s->streams[stream_index];
1488 index = av_index_search_timestamp(st, timestamp, flags);
1490 if(index < 0 || index==st->nb_index_entries-1){
1491 int i;
1492 AVPacket pkt;
1494 if(st->nb_index_entries){
1495 assert(st->index_entries);
1496 ie= &st->index_entries[st->nb_index_entries-1];
1497 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1498 return ret;
1499 av_update_cur_dts(s, st, ie->timestamp);
1500 }else{
1501 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1502 return ret;
1504 for(i=0;; i++) {
1505 int ret;
1506 do{
1507 ret = av_read_frame(s, &pkt);
1508 }while(ret == AVERROR(EAGAIN));
1509 if(ret<0)
1510 break;
1511 av_free_packet(&pkt);
1512 if(stream_index == pkt.stream_index){
1513 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1514 break;
1517 index = av_index_search_timestamp(st, timestamp, flags);
1519 if (index < 0)
1520 return -1;
1522 av_read_frame_flush(s);
1523 if (s->iformat->read_seek){
1524 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1525 return 0;
1527 ie = &st->index_entries[index];
1528 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1529 return ret;
1530 av_update_cur_dts(s, st, ie->timestamp);
1532 return 0;
1535 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1537 int ret;
1538 AVStream *st;
1540 av_read_frame_flush(s);
1542 if(flags & AVSEEK_FLAG_BYTE)
1543 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1545 if(stream_index < 0){
1546 stream_index= av_find_default_stream_index(s);
1547 if(stream_index < 0)
1548 return -1;
1550 st= s->streams[stream_index];
1551 /* timestamp for default must be expressed in AV_TIME_BASE units */
1552 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1555 /* first, we try the format specific seek */
1556 if (s->iformat->read_seek)
1557 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1558 else
1559 ret = -1;
1560 if (ret >= 0) {
1561 return 0;
1564 if(s->iformat->read_timestamp)
1565 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1566 else
1567 return av_seek_frame_generic(s, stream_index, timestamp, flags);
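/* Usage sketch (illustrative, not part of the original file): seek to the
 * keyframe at or before t = 10 seconds on the default stream; when
 * stream_index < 0 the timestamp must be given in AV_TIME_BASE units, as the
 * rescale above shows. */
#if 0
static int seek_to_10s(AVFormatContext *s)
{
    return av_seek_frame(s, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
}
#endif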
1570 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1572 if(min_ts > ts || max_ts < ts)
1573 return -1;
1575 av_read_frame_flush(s);
1577 if (s->iformat->read_seek2)
1578 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1580 if(s->iformat->read_timestamp){
1581 //try to seek via read_timestamp()
1584 //Fallback to old API if new is not implemented but old is
1585 //Note the old API has somewhat different semantics
1586 if(s->iformat->read_seek || 1)
1587 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1589 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1592 /*******************************************************/
1595 * Return TRUE if the file has an accurate duration in at least one stream.
1597 * @return TRUE if at least one component stream has an accurate duration.
1599 static int av_has_duration(AVFormatContext *ic)
1601 int i;
1602 AVStream *st;
1604 for(i = 0;i < ic->nb_streams; i++) {
1605 st = ic->streams[i];
1606 if (st->duration != AV_NOPTS_VALUE)
1607 return 1;
1609 return 0;
1613 * Estimate the file timings from those of each component stream.
1615 * Also computes the global bitrate if possible.
1617 static void av_update_stream_timings(AVFormatContext *ic)
1619 int64_t start_time, start_time1, end_time, end_time1;
1620 int64_t duration, duration1;
1621 int i;
1622 AVStream *st;
1624 start_time = INT64_MAX;
1625 end_time = INT64_MIN;
1626 duration = INT64_MIN;
1627 for(i = 0;i < ic->nb_streams; i++) {
1628 st = ic->streams[i];
1629 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1630 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1631 if (start_time1 < start_time)
1632 start_time = start_time1;
1633 if (st->duration != AV_NOPTS_VALUE) {
1634 end_time1 = start_time1
1635 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1636 if (end_time1 > end_time)
1637 end_time = end_time1;
1640 if (st->duration != AV_NOPTS_VALUE) {
1641 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1642 if (duration1 > duration)
1643 duration = duration1;
1646 if (start_time != INT64_MAX) {
1647 ic->start_time = start_time;
1648 if (end_time != INT64_MIN) {
1649 if (end_time - start_time > duration)
1650 duration = end_time - start_time;
1653 if (duration != INT64_MIN) {
1654 ic->duration = duration;
1655 if (ic->file_size > 0) {
1656 /* compute the bitrate */
1657 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1658 (double)ic->duration;
1663 static void fill_all_stream_timings(AVFormatContext *ic)
1665 int i;
1666 AVStream *st;
1668 av_update_stream_timings(ic);
1669 for(i = 0;i < ic->nb_streams; i++) {
1670 st = ic->streams[i];
1671 if (st->start_time == AV_NOPTS_VALUE) {
1672 if(ic->start_time != AV_NOPTS_VALUE)
1673 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1674 if(ic->duration != AV_NOPTS_VALUE)
1675 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1680 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1682 int64_t filesize, duration;
1683 int bit_rate, i;
1684 AVStream *st;
1686 /* if bit_rate is already set, we believe it */
1687 if (ic->bit_rate == 0) {
1688 bit_rate = 0;
1689 for(i=0;i<ic->nb_streams;i++) {
1690 st = ic->streams[i];
1691 bit_rate += st->codec->bit_rate;
1693 ic->bit_rate = bit_rate;
1696 /* if duration is already set, we believe it */
1697 if (ic->duration == AV_NOPTS_VALUE &&
1698 ic->bit_rate != 0 &&
1699 ic->file_size != 0) {
1700 filesize = ic->file_size;
1701 if (filesize > 0) {
1702 for(i = 0; i < ic->nb_streams; i++) {
1703 st = ic->streams[i];
1704 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1705 if (st->duration == AV_NOPTS_VALUE)
1706 st->duration = duration;
1712 #define DURATION_MAX_READ_SIZE 250000
1714 /* only usable for MPEG-PS streams */
1715 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1717 AVPacket pkt1, *pkt = &pkt1;
1718 AVStream *st;
1719 int read_size, i, ret;
1720 int64_t end_time;
1721 int64_t filesize, offset, duration;
1723 ic->cur_st = NULL;
1725 /* flush packet queue */
1726 flush_packet_queue(ic);
1728 for(i=0;i<ic->nb_streams;i++) {
1729 st = ic->streams[i];
1730 if (st->parser) {
1731 av_parser_close(st->parser);
1732 st->parser= NULL;
1733 av_free_packet(&st->cur_pkt);
1737 /* we read the first packets to get the first PTS (not fully
1738 accurate, but it is enough now) */
1739 url_fseek(ic->pb, 0, SEEK_SET);
1740 read_size = 0;
1741 for(;;) {
1742 if (read_size >= DURATION_MAX_READ_SIZE)
1743 break;
1744 /* if all info is available, we can stop */
1745 for(i = 0;i < ic->nb_streams; i++) {
1746 st = ic->streams[i];
1747 if (st->start_time == AV_NOPTS_VALUE)
1748 break;
1750 if (i == ic->nb_streams)
1751 break;
1753 do{
1754 ret = av_read_packet(ic, pkt);
1755 }while(ret == AVERROR(EAGAIN));
1756 if (ret != 0)
1757 break;
1758 read_size += pkt->size;
1759 st = ic->streams[pkt->stream_index];
1760 if (pkt->pts != AV_NOPTS_VALUE) {
1761 if (st->start_time == AV_NOPTS_VALUE)
1762 st->start_time = pkt->pts;
1764 av_free_packet(pkt);
1767 /* estimate the end time (duration) */
1768 /* XXX: may need to support wrapping */
1769 filesize = ic->file_size;
1770 offset = filesize - DURATION_MAX_READ_SIZE;
1771 if (offset < 0)
1772 offset = 0;
1774 url_fseek(ic->pb, offset, SEEK_SET);
1775 read_size = 0;
1776 for(;;) {
1777 if (read_size >= DURATION_MAX_READ_SIZE)
1778 break;
1780 do{
1781 ret = av_read_packet(ic, pkt);
1782 }while(ret == AVERROR(EAGAIN));
1783 if (ret != 0)
1784 break;
1785 read_size += pkt->size;
1786 st = ic->streams[pkt->stream_index];
1787 if (pkt->pts != AV_NOPTS_VALUE &&
1788 st->start_time != AV_NOPTS_VALUE) {
1789 end_time = pkt->pts;
1790 duration = end_time - st->start_time;
1791 if (duration > 0) {
1792 if (st->duration == AV_NOPTS_VALUE ||
1793 st->duration < duration)
1794 st->duration = duration;
1797 av_free_packet(pkt);
1800 fill_all_stream_timings(ic);
1802 url_fseek(ic->pb, old_offset, SEEK_SET);
1803 for(i=0; i<ic->nb_streams; i++){
1804 st= ic->streams[i];
1805 st->cur_dts= st->first_dts;
1806 st->last_IP_pts = AV_NOPTS_VALUE;
1810 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1812 int64_t file_size;
1814 /* get the file size, if possible */
1815 if (ic->iformat->flags & AVFMT_NOFILE) {
1816 file_size = 0;
1817 } else {
1818 file_size = url_fsize(ic->pb);
1819 if (file_size < 0)
1820 file_size = 0;
1822 ic->file_size = file_size;
1824 if ((!strcmp(ic->iformat->name, "mpeg") ||
1825 !strcmp(ic->iformat->name, "mpegts")) &&
1826 file_size && !url_is_streamed(ic->pb)) {
1827 /* get accurate estimate from the PTSes */
1828 av_estimate_timings_from_pts(ic, old_offset);
1829 } else if (av_has_duration(ic)) {
1830 /* at least one component has timings - we use them for all
1831 the components */
1832 fill_all_stream_timings(ic);
1833 } else {
1834 /* less precise: use bitrate info */
1835 av_estimate_timings_from_bit_rate(ic);
1837 av_update_stream_timings(ic);
1839 #if 0
1841 int i;
1842 AVStream *st;
1843 for(i = 0;i < ic->nb_streams; i++) {
1844 st = ic->streams[i];
1845 printf("%d: start_time: %0.3f duration: %0.3f\n",
1846 i, (double)st->start_time / AV_TIME_BASE,
1847 (double)st->duration / AV_TIME_BASE);
1849 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1850 (double)ic->start_time / AV_TIME_BASE,
1851 (double)ic->duration / AV_TIME_BASE,
1852 ic->bit_rate / 1000);
1854 #endif
1857 static int has_codec_parameters(AVCodecContext *enc)
1859 int val;
1860 switch(enc->codec_type) {
1861 case CODEC_TYPE_AUDIO:
1862 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1863 if(!enc->frame_size &&
1864 (enc->codec_id == CODEC_ID_VORBIS ||
1865 enc->codec_id == CODEC_ID_AAC ||
1866 enc->codec_id == CODEC_ID_SPEEX))
1867 return 0;
1868 break;
1869 case CODEC_TYPE_VIDEO:
1870 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1871 break;
1872 default:
1873 val = 1;
1874 break;
1876 return enc->codec_id != CODEC_ID_NONE && val != 0;
1879 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1881 int16_t *samples;
1882 AVCodec *codec;
1883 int got_picture, data_size, ret=0;
1884 AVFrame picture;
1886 if(!st->codec->codec){
1887 codec = avcodec_find_decoder(st->codec->codec_id);
1888 if (!codec)
1889 return -1;
1890 ret = avcodec_open(st->codec, codec);
1891 if (ret < 0)
1892 return ret;
1895 if(!has_codec_parameters(st->codec)){
1896 switch(st->codec->codec_type) {
1897 case CODEC_TYPE_VIDEO:
1898 avcodec_get_frame_defaults(&picture);
1899 ret = avcodec_decode_video2(st->codec, &picture,
1900 &got_picture, avpkt);
1901 break;
1902 case CODEC_TYPE_AUDIO:
1903 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1904 samples = av_malloc(data_size);
1905 if (!samples)
1906 goto fail;
1907 ret = avcodec_decode_audio3(st->codec, samples,
1908 &data_size, avpkt);
1909 av_free(samples);
1910 break;
1911 default:
1912 break;
1915 fail:
1916 return ret;
1919 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1921 while (tags->id != CODEC_ID_NONE) {
1922 if (tags->id == id)
1923 return tags->tag;
1924 tags++;
1926 return 0;
1929 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1931 int i;
1932 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1933 if(tag == tags[i].tag)
1934 return tags[i].id;
1936 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1937 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1938 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1939 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1940 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1941 return tags[i].id;
1943 return CODEC_ID_NONE;
1946 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1948 int i;
1949 for(i=0; tags && tags[i]; i++){
1950 int tag= ff_codec_get_tag(tags[i], id);
1951 if(tag) return tag;
1953 return 0;
1956 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1958 int i;
1959 for(i=0; tags && tags[i]; i++){
1960 enum CodecID id= ff_codec_get_id(tags[i], tag);
1961 if(id!=CODEC_ID_NONE) return id;
1963 return CODEC_ID_NONE;
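/* Usage sketch (illustrative, not part of the original file): the helpers
 * above map between codec IDs and container-specific tags. A hypothetical
 * two-entry table is used here instead of a real table such as the RIFF ones. */
#if 0
static void codec_tag_example(void)
{
    static const AVCodecTag example_tags[] = {
        { CODEC_ID_MP3, 0x0055 },   /* WAVE_FORMAT_MPEGLAYER3 */
        { CODEC_ID_NONE, 0 },
    };
    enum CodecID id  = ff_codec_get_id(example_tags, 0x0055);        /* -> CODEC_ID_MP3 */
    unsigned int tag = ff_codec_get_tag(example_tags, CODEC_ID_MP3); /* -> 0x0055       */
}
#endif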
1966 static void compute_chapters_end(AVFormatContext *s)
1968 unsigned int i;
1970 for (i=0; i+1<s->nb_chapters; i++)
1971 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1972 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1973 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1974 s->chapters[i]->end = s->chapters[i+1]->start;
1977 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1978 assert(s->start_time != AV_NOPTS_VALUE);
1979 assert(s->duration > 0);
1980 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1981 AV_TIME_BASE_Q,
1982 s->chapters[i]->time_base);
1986 #define MAX_STD_TIMEBASES (60*12+5)
1987 static int get_std_framerate(int i){
1988 if(i<60*12) return i*1001;
1989 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1993 * Is the time base unreliable.
1994 * This is a heuristic to balance between quick acceptance of the values in
1995 * the headers vs. some extra checks.
1996 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1997 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1998 * And there are "variable" fps files this needs to detect as well.
2000 static int tb_unreliable(AVCodecContext *c){
2001 if( c->time_base.den >= 101L*c->time_base.num
2002 || c->time_base.den < 5L*c->time_base.num
2003 /* || c->codec_tag == AV_RL32("DIVX")
2004 || c->codec_tag == AV_RL32("XVID")*/
2005 || c->codec_id == CODEC_ID_MPEG2VIDEO
2006 || c->codec_id == CODEC_ID_H264
2008 return 1;
2009 return 0;
2012 int av_find_stream_info(AVFormatContext *ic)
2014 int i, count, ret, read_size, j;
2015 AVStream *st;
2016 AVPacket pkt1, *pkt;
2017 int64_t last_dts[MAX_STREAMS];
2018 int64_t duration_gcd[MAX_STREAMS]={0};
2019 int duration_count[MAX_STREAMS]={0};
2020 double (*duration_error)[MAX_STD_TIMEBASES];
2021 int64_t old_offset = url_ftell(ic->pb);
2022 int64_t codec_info_duration[MAX_STREAMS]={0};
2023 int codec_info_nb_frames[MAX_STREAMS]={0};
2025 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2026 if (!duration_error) return AVERROR(ENOMEM);
2028 for(i=0;i<ic->nb_streams;i++) {
2029 st = ic->streams[i];
2030 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2031 /* if(!st->time_base.num)
2032 st->time_base= */
2033 if(!st->codec->time_base.num)
2034 st->codec->time_base= st->time_base;
2036 //only for the split stuff
2037 if (!st->parser) {
2038 st->parser = av_parser_init(st->codec->codec_id);
2039 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2040 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2045 for(i=0;i<MAX_STREAMS;i++){
2046 last_dts[i]= AV_NOPTS_VALUE;
2049 count = 0;
2050 read_size = 0;
2051 for(;;) {
2052 if(url_interrupt_cb()){
2053 ret= AVERROR(EINTR);
2054 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2055 break;
2058 /* check if one codec still needs to be handled */
2059 for(i=0;i<ic->nb_streams;i++) {
2060 st = ic->streams[i];
2061 if (!has_codec_parameters(st->codec))
2062 break;
2063 /* variable fps and no guess at the real fps */
2064 if( tb_unreliable(st->codec)
2065 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2066 break;
2067 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2068 break;
2069 if(st->first_dts == AV_NOPTS_VALUE)
2070 break;
2072 if (i == ic->nb_streams) {
2073 /* NOTE: if the format has no header, then we need to read
2074 some packets to get most of the streams, so we cannot
2075 stop here */
2076 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2077 /* if we found the info for all the codecs, we can stop */
2078 ret = count;
2079 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2080 break;
2083 /* we did not get all the codec info, but we read too much data */
2084 if (read_size >= ic->probesize) {
2085 ret = count;
2086 av_log(ic, AV_LOG_DEBUG, "Probe size limit %d reached\n", ic->probesize);
2087 break;
2090 /* NOTE: a new stream can be added here if there is no header in the
2091 file (AVFMTCTX_NOHEADER) */
2092 ret = av_read_frame_internal(ic, &pkt1);
2093 if(ret == AVERROR(EAGAIN))
2094 continue;
2095 if (ret < 0) {
2096 /* EOF or error */
2097 ret = -1; /* we could not get all the codec parameters before EOF */
2098 for(i=0;i<ic->nb_streams;i++) {
2099 st = ic->streams[i];
2100 if (!has_codec_parameters(st->codec)){
2101 char buf[256];
2102 avcodec_string(buf, sizeof(buf), st->codec, 0);
2103 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2104 } else {
2105 ret = 0;
2108 break;
2111 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2112 if(av_dup_packet(pkt) < 0) {
2113 av_free(duration_error);
2114 return AVERROR(ENOMEM);
2117 read_size += pkt->size;
2119 st = ic->streams[pkt->stream_index];
2120 if(codec_info_nb_frames[st->index]>1) {
2121 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2122 av_log(ic, AV_LOG_DEBUG, "max_analyze_duration reached\n");
2123 break;
2125 codec_info_duration[st->index] += pkt->duration;
2127 if (pkt->duration != 0)
2128 codec_info_nb_frames[st->index]++;
2131 int index= pkt->stream_index;
2132 int64_t last= last_dts[index];
2133 int64_t duration= pkt->dts - last;
2135 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2136 double dur= duration * av_q2d(st->time_base);
2138 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2139 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2140 if(duration_count[index] < 2)
2141 memset(duration_error[index], 0, sizeof(*duration_error));
2142 for(i=1; i<MAX_STD_TIMEBASES; i++){
2143 int framerate= get_std_framerate(i);
2144 int ticks= lrintf(dur*framerate/(1001*12));
2145 double error= dur - ticks*1001*12/(double)framerate;
2146 duration_error[index][i] += error*error;
2148 duration_count[index]++;
2149 // ignore the first 4 values, they might have some random jitter
2150 if (duration_count[index] > 3)
2151 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2153 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2154 last_dts[pkt->stream_index]= pkt->dts;
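/* The statistics gathered above -- the squared rounding error of each packet
 * duration against every standard frame rate, plus the GCD of the raw DTS
 * deltas -- are used after the read loop to derive r_frame_rate for streams
 * whose time base is considered unreliable. */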
2156 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2157 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2158 if(i){
2159 st->codec->extradata_size= i;
2160 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2161 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2162 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
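/* The FF_INPUT_BUFFER_PADDING_SIZE bytes of zero padding appended above are
 * required for extradata buffers: optimized bitstream readers may read a few
 * bytes past the end of the buffer. */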
2166 /* if there is still no information, we try to open the codec and
2167 decompress the frame. We try to avoid that in most cases as
2168 it takes longer and uses more memory. For MPEG-4, we need to
2169 decompress for QuickTime. */
2170 if (!has_codec_parameters(st->codec) /*&&
2171 (st->codec->codec_id == CODEC_ID_FLV1 ||
2172 st->codec->codec_id == CODEC_ID_H264 ||
2173 st->codec->codec_id == CODEC_ID_H263 ||
2174 st->codec->codec_id == CODEC_ID_H261 ||
2175 st->codec->codec_id == CODEC_ID_VORBIS ||
2176 st->codec->codec_id == CODEC_ID_MJPEG ||
2177 st->codec->codec_id == CODEC_ID_PNG ||
2178 st->codec->codec_id == CODEC_ID_PAM ||
2179 st->codec->codec_id == CODEC_ID_PGM ||
2180 st->codec->codec_id == CODEC_ID_PGMYUV ||
2181 st->codec->codec_id == CODEC_ID_PBM ||
2182 st->codec->codec_id == CODEC_ID_PPM ||
2183 st->codec->codec_id == CODEC_ID_SHORTEN ||
2184 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2185 try_decode_frame(st, pkt);
2187 count++;
2190 // close codecs which were opened in try_decode_frame()
2191 for(i=0;i<ic->nb_streams;i++) {
2192 st = ic->streams[i];
2193 if(st->codec->codec)
2194 avcodec_close(st->codec);
2196 for(i=0;i<ic->nb_streams;i++) {
2197 st = ic->streams[i];
2198 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2199 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2200 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2202 // the check for tb_unreliable() is not completely correct, since it is not about handling
2203 // an unreliable/inexact time base, but a time base that is finer than necessary, such as
2204 // the one ipmovie.c produces.
2205 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2206 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2207 if(duration_count[i]
2208 && tb_unreliable(st->codec) /*&&
2209 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2210 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2211 int num = 0;
2212 double best_error= 2*av_q2d(st->time_base);
2213 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2215 for(j=1; j<MAX_STD_TIMEBASES; j++){
2216 double error= duration_error[i][j] * get_std_framerate(j);
2217 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2218 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2219 if(error < best_error){
2220 best_error= error;
2221 num = get_std_framerate(j);
2224 // do not increase frame rate by more than 1% in order to match a standard rate.
2225 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2226 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2229 if (!st->r_frame_rate.num){
2230 if( st->codec->time_base.den * (int64_t)st->time_base.num
2231 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2232 st->r_frame_rate.num = st->codec->time_base.den;
2233 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2234 }else{
2235 st->r_frame_rate.num = st->time_base.den;
2236 st->r_frame_rate.den = st->time_base.num;
2239 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2240 if(!st->codec->bits_per_coded_sample)
2241 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2245 av_estimate_timings(ic, old_offset);
2247 compute_chapters_end(ic);
2249 #if 0
2250 /* correct DTS for B-frame streams with no timestamps */
2251 for(i=0;i<ic->nb_streams;i++) {
2252 st = ic->streams[i];
2253 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2254 if(b-frames){
2255 ppktl = &ic->packet_buffer;
2256 while(ppkt1){
2257 if(ppkt1->stream_index != i)
2258 continue;
2259 if(ppkt1->pkt->dts < 0)
2260 break;
2261 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2262 break;
2263 ppkt1->pkt->dts -= delta;
2264 ppkt1= ppkt1->next;
2266 if(ppkt1)
2267 continue;
2268 st->cur_dts -= delta;
2272 #endif
2274 av_free(duration_error);
2276 return ret;
2279 /*******************************************************/
2281 int av_read_play(AVFormatContext *s)
2283 if (s->iformat->read_play)
2284 return s->iformat->read_play(s);
2285 if (s->pb)
2286 return av_url_read_fpause(s->pb, 0);
2287 return AVERROR(ENOSYS);
2290 int av_read_pause(AVFormatContext *s)
2292 if (s->iformat->read_pause)
2293 return s->iformat->read_pause(s);
2294 if (s->pb)
2295 return av_url_read_fpause(s->pb, 1);
2296 return AVERROR(ENOSYS);
2299 void av_close_input_stream(AVFormatContext *s)
2301 int i;
2302 AVStream *st;
2304 if (s->iformat->read_close)
2305 s->iformat->read_close(s);
2306 for(i=0;i<s->nb_streams;i++) {
2307 /* free all data in a stream component */
2308 st = s->streams[i];
2309 if (st->parser) {
2310 av_parser_close(st->parser);
2311 av_free_packet(&st->cur_pkt);
2313 av_metadata_free(&st->metadata);
2314 av_free(st->index_entries);
2315 av_free(st->codec->extradata);
2316 av_free(st->codec);
2317 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2318 av_free(st->filename);
2319 #endif
2320 av_free(st->priv_data);
2321 av_free(st);
2323 for(i=s->nb_programs-1; i>=0; i--) {
2324 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2325 av_freep(&s->programs[i]->provider_name);
2326 av_freep(&s->programs[i]->name);
2327 #endif
2328 av_metadata_free(&s->programs[i]->metadata);
2329 av_freep(&s->programs[i]->stream_index);
2330 av_freep(&s->programs[i]);
2332 av_freep(&s->programs);
2333 flush_packet_queue(s);
2334 av_freep(&s->priv_data);
2335 while(s->nb_chapters--) {
2336 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2337 av_free(s->chapters[s->nb_chapters]->title);
2338 #endif
2339 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2340 av_free(s->chapters[s->nb_chapters]);
2342 av_freep(&s->chapters);
2343 av_metadata_free(&s->metadata);
2344 av_free(s);
2347 void av_close_input_file(AVFormatContext *s)
2349 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2350 av_close_input_stream(s);
2351 if (pb)
2352 url_fclose(pb);
2355 AVStream *av_new_stream(AVFormatContext *s, int id)
2357 AVStream *st;
2358 int i;
2360 if (s->nb_streams >= MAX_STREAMS)
2361 return NULL;
2363 st = av_mallocz(sizeof(AVStream));
2364 if (!st)
2365 return NULL;
2367 st->codec= avcodec_alloc_context();
2368 if (s->iformat) {
2369 /* no default bitrate if decoding */
2370 st->codec->bit_rate = 0;
2372 st->index = s->nb_streams;
2373 st->id = id;
2374 st->start_time = AV_NOPTS_VALUE;
2375 st->duration = AV_NOPTS_VALUE;
2376 /* we set the current DTS to 0 so that formats without any timestamps
2377 but with durations get some timestamps; formats with some unknown
2378 timestamps have their first few packets buffered and the
2379 timestamps corrected before they are returned to the user */
2380 st->cur_dts = 0;
2381 st->first_dts = AV_NOPTS_VALUE;
2382 st->probe_packets = MAX_PROBE_PACKETS;
2384 /* default pts setting is MPEG-like */
2385 av_set_pts_info(st, 33, 1, 90000);
2386 st->last_IP_pts = AV_NOPTS_VALUE;
2387 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2388 st->pts_buffer[i]= AV_NOPTS_VALUE;
2389 st->reference_dts = AV_NOPTS_VALUE;
2391 st->sample_aspect_ratio = (AVRational){0,1};
2393 s->streams[s->nb_streams++] = st;
2394 return st;
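/* Illustrative use when preparing a stream for muxing (a minimal sketch, not
 * part of this file; error handling omitted and the AVFormatContext 'oc' is
 * assumed to have been set up by the caller):
 *
 *     AVStream *st = av_new_stream(oc, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type = CODEC_TYPE_VIDEO;
 *     st->codec->codec_id   = CODEC_ID_MPEG4;
 *     st->codec->width      = 640;
 *     st->codec->height     = 480;
 *     st->codec->time_base  = (AVRational){1, 25};
 */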
2397 AVProgram *av_new_program(AVFormatContext *ac, int id)
2399 AVProgram *program=NULL;
2400 int i;
2402 #ifdef DEBUG_SI
2403 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2404 #endif
2406 for(i=0; i<ac->nb_programs; i++)
2407 if(ac->programs[i]->id == id)
2408 program = ac->programs[i];
2410 if(!program){
2411 program = av_mallocz(sizeof(AVProgram));
2412 if (!program)
2413 return NULL;
2414 dynarray_add(&ac->programs, &ac->nb_programs, program);
2415 program->discard = AVDISCARD_NONE;
2417 program->id = id;
2419 return program;
2422 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2424 AVChapter *chapter = NULL;
2425 int i;
2427 for(i=0; i<s->nb_chapters; i++)
2428 if(s->chapters[i]->id == id)
2429 chapter = s->chapters[i];
2431 if(!chapter){
2432 chapter= av_mallocz(sizeof(AVChapter));
2433 if(!chapter)
2434 return NULL;
2435 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2437 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2438 av_free(chapter->title);
2439 #endif
2440 av_metadata_set(&chapter->metadata, "title", title);
2441 chapter->id = id;
2442 chapter->time_base= time_base;
2443 chapter->start = start;
2444 chapter->end = end;
2446 return chapter;
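/* Illustrative use from a demuxer (a minimal sketch; the values are made up):
 * a chapter running from 0 to 60 s, expressed in a 1/1000 time base, could be
 * registered as
 *
 *     ff_new_chapter(s, 1, (AVRational){1, 1000}, 0, 60000, "Chapter 1");
 */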
2449 /************************************************************/
2450 /* output media file */
2452 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2454 int ret;
2456 if (s->oformat->priv_data_size > 0) {
2457 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2458 if (!s->priv_data)
2459 return AVERROR(ENOMEM);
2460 } else
2461 s->priv_data = NULL;
2463 if (s->oformat->set_parameters) {
2464 ret = s->oformat->set_parameters(s, ap);
2465 if (ret < 0)
2466 return ret;
2468 return 0;
2471 int av_write_header(AVFormatContext *s)
2473 int ret, i;
2474 AVStream *st;
2476 // some sanity checks
2477 for(i=0;i<s->nb_streams;i++) {
2478 st = s->streams[i];
2480 switch (st->codec->codec_type) {
2481 case CODEC_TYPE_AUDIO:
2482 if(st->codec->sample_rate<=0){
2483 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2484 return -1;
2486 if(!st->codec->block_align)
2487 st->codec->block_align = st->codec->channels *
2488 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2489 break;
2490 case CODEC_TYPE_VIDEO:
2491 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2492 av_log(s, AV_LOG_ERROR, "time base not set\n");
2493 return -1;
2495 if(st->codec->width<=0 || st->codec->height<=0){
2496 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2497 return -1;
2499 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2500 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2501 return -1;
2503 break;
2506 if(s->oformat->codec_tag){
2507 if(st->codec->codec_tag){
2508 //FIXME
2509 //check that tag + id is in the table
2510 //if neither is in the table -> OK
2511 //if tag is in the table with another id -> FAIL
2512 //if id is in the table with another tag -> FAIL unless strict < ?
2513 }else
2514 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2517 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2518 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2519 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2522 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2523 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2524 if (!s->priv_data)
2525 return AVERROR(ENOMEM);
2528 #if LIBAVFORMAT_VERSION_MAJOR < 53
2529 ff_metadata_mux_compat(s);
2530 #endif
2532 if(s->oformat->write_header){
2533 ret = s->oformat->write_header(s);
2534 if (ret < 0)
2535 return ret;
2538 /* init PTS generation */
2539 for(i=0;i<s->nb_streams;i++) {
2540 int64_t den = AV_NOPTS_VALUE;
2541 st = s->streams[i];
2543 switch (st->codec->codec_type) {
2544 case CODEC_TYPE_AUDIO:
2545 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2546 break;
2547 case CODEC_TYPE_VIDEO:
2548 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2549 break;
2550 default:
2551 break;
2553 if (den != AV_NOPTS_VALUE) {
2554 if (den <= 0)
2555 return AVERROR_INVALIDDATA;
2556 av_frac_init(&st->pts, 0, 0, den);
2559 return 0;
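/* Typical muxing call sequence built around this function (a sketch of the
 * old-style API used in this tree; error checks omitted, 'oc' and 'pkt' are
 * assumed to be set up by the caller):
 *
 *     av_set_parameters(oc, NULL);
 *     av_write_header(oc);
 *     // ... one av_interleaved_write_frame(oc, &pkt) per encoded packet ...
 *     av_write_trailer(oc);
 */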
2562 //FIXME merge with compute_pkt_fields
2563 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2564 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2565 int num, den, frame_size, i;
2567 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2569 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2570 return -1;*/
2572 /* duration field */
2573 if (pkt->duration == 0) {
2574 compute_frame_duration(&num, &den, st, NULL, pkt);
2575 if (den && num) {
2576 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2580 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2581 pkt->pts= pkt->dts;
2583 //XXX/FIXME this is a temporary hack until all encoders output pts
2584 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2585 pkt->dts=
2586 // pkt->pts= st->cur_dts;
2587 pkt->pts= st->pts.val;
2590 //calculate dts from pts
2591 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2592 st->pts_buffer[0]= pkt->pts;
2593 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2594 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2595 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2596 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2598 pkt->dts= st->pts_buffer[0];
2601 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2602 av_log(st->codec, AV_LOG_ERROR, "error, non-monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2603 return -1;
2605 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2606 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2607 return -1;
2610 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2611 st->cur_dts= pkt->dts;
2612 st->pts.val= pkt->dts;
2614 /* update pts */
2615 switch (st->codec->codec_type) {
2616 case CODEC_TYPE_AUDIO:
2617 frame_size = get_audio_frame_size(st->codec, pkt->size);
2619 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2620 likely correspond to the encoder delay, but it would be better if we
2621 had the real timestamps from the encoder */
2622 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2623 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2625 break;
2626 case CODEC_TYPE_VIDEO:
2627 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2628 break;
2629 default:
2630 break;
2632 return 0;
2635 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2637 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2639 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2640 return ret;
2642 ret= s->oformat->write_packet(s, pkt);
2643 if(!ret)
2644 ret= url_ferror(s->pb);
2645 return ret;
2648 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2649 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2651 AVPacketList **next_point, *this_pktl;
2653 this_pktl = av_mallocz(sizeof(AVPacketList));
2654 this_pktl->pkt= *pkt;
2655 pkt->destruct= NULL; // do not free the original, only the copy
2656 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
2658 next_point = &s->packet_buffer;
2659 while(*next_point){
2660 if(compare(s, &(*next_point)->pkt, pkt))
2661 break;
2662 next_point= &(*next_point)->next;
2664 this_pktl->next= *next_point;
2665 *next_point= this_pktl;
2668 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2670 AVStream *st = s->streams[ pkt ->stream_index];
2671 AVStream *st2= s->streams[ next->stream_index];
2672 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2673 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2675 if (pkt->dts == AV_NOPTS_VALUE)
2676 return 0;
2678 return next->dts * left > pkt->dts * right; //FIXME this can overflow
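/* The cross-multiplication above compares next->dts and pkt->dts in seconds
 * without converting to a common time base: it returns nonzero when the
 * queued packet 'next' has a strictly later DTS than the incoming 'pkt',
 * which is what ff_interleave_add_packet() uses to keep the buffer sorted. */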
2681 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2682 AVPacketList *pktl;
2683 int stream_count=0;
2684 int streams[MAX_STREAMS];
2686 if(pkt){
2687 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2690 memset(streams, 0, sizeof(streams));
2691 pktl= s->packet_buffer;
2692 while(pktl){
2693 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2694 if(streams[ pktl->pkt.stream_index ] == 0)
2695 stream_count++;
2696 streams[ pktl->pkt.stream_index ]++;
2697 pktl= pktl->next;
2700 if(stream_count && (s->nb_streams == stream_count || flush)){
2701 pktl= s->packet_buffer;
2702 *out= pktl->pkt;
2704 s->packet_buffer= pktl->next;
2705 av_freep(&pktl);
2706 return 1;
2707 }else{
2708 av_init_packet(out);
2709 return 0;
2714 * Interleaves an AVPacket correctly so it can be muxed.
2715 * @param out the interleaved packet will be output here
2716 * @param in the input packet
2717 * @param flush 1 if no further packets are available as input and all
2718 * remaining packets should be output
2719 * @return 1 if a packet was output, 0 if no packet could be output,
2720 * < 0 if an error occurred
2722 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2723 if(s->oformat->interleave_packet)
2724 return s->oformat->interleave_packet(s, out, in, flush);
2725 else
2726 return av_interleave_packet_per_dts(s, out, in, flush);
2729 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2730 AVStream *st= s->streams[ pkt->stream_index];
2732 //FIXME/XXX/HACK drop zero-sized packets
2733 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2734 return 0;
2736 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2737 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2738 return -1;
2740 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2741 return -1;
2743 for(;;){
2744 AVPacket opkt;
2745 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2746 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2747 return ret;
2749 ret= s->oformat->write_packet(s, &opkt);
2751 av_free_packet(&opkt);
2752 pkt= NULL;
2754 if(ret<0)
2755 return ret;
2756 if(url_ferror(s->pb))
2757 return url_ferror(s->pb);
2761 int av_write_trailer(AVFormatContext *s)
2763 int ret, i;
2765 for(;;){
2766 AVPacket pkt;
2767 ret= av_interleave_packet(s, &pkt, NULL, 1);
2768 if(ret<0) //FIXME cleanup needed for ret<0 ?
2769 goto fail;
2770 if(!ret)
2771 break;
2773 ret= s->oformat->write_packet(s, &pkt);
2775 av_free_packet(&pkt);
2777 if(ret<0)
2778 goto fail;
2779 if(url_ferror(s->pb))
2780 goto fail;
2783 if(s->oformat->write_trailer)
2784 ret = s->oformat->write_trailer(s);
2785 fail:
2786 if(ret == 0)
2787 ret=url_ferror(s->pb);
2788 for(i=0;i<s->nb_streams;i++)
2789 av_freep(&s->streams[i]->priv_data);
2790 av_freep(&s->priv_data);
2791 return ret;
2794 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2796 int i, j;
2797 AVProgram *program=NULL;
2798 void *tmp;
2800 for(i=0; i<ac->nb_programs; i++){
2801 if(ac->programs[i]->id != progid)
2802 continue;
2803 program = ac->programs[i];
2804 for(j=0; j<program->nb_stream_indexes; j++)
2805 if(program->stream_index[j] == idx)
2806 return;
2808 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2809 if(!tmp)
2810 return;
2811 program->stream_index = tmp;
2812 program->stream_index[program->nb_stream_indexes++] = idx;
2813 return;
2817 static void print_fps(double d, const char *postfix){
2818 uint64_t v= lrintf(d*100);
2819 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2820 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2821 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
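/* In the per-stream dump below, "tbr" is the estimated real frame rate
 * (r_frame_rate), "tbn" is the stream time base expressed in ticks per
 * second, and "tbc" is the codec time base in ticks per second. */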
2824 /* "user interface" functions */
2825 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2827 char buf[256];
2828 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2829 AVStream *st = ic->streams[i];
2830 int g = av_gcd(st->time_base.num, st->time_base.den);
2831 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2832 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2833 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2834 /* the pid is important information, so we display it */
2835 /* XXX: add a generic system */
2836 if (flags & AVFMT_SHOW_IDS)
2837 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2838 if (lang)
2839 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2840 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2841 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2842 if (st->sample_aspect_ratio.num && // default
2843 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2844 AVRational display_aspect_ratio;
2845 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2846 st->codec->width*st->sample_aspect_ratio.num,
2847 st->codec->height*st->sample_aspect_ratio.den,
2848 1024*1024);
2849 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2850 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2851 display_aspect_ratio.num, display_aspect_ratio.den);
2853 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2854 if(st->r_frame_rate.den && st->r_frame_rate.num)
2855 print_fps(av_q2d(st->r_frame_rate), "tbr");
2856 if(st->time_base.den && st->time_base.num)
2857 print_fps(1/av_q2d(st->time_base), "tbn");
2858 if(st->codec->time_base.den && st->codec->time_base.num)
2859 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2861 av_log(NULL, AV_LOG_INFO, "\n");
2864 void dump_format(AVFormatContext *ic,
2865 int index,
2866 const char *url,
2867 int is_output)
2869 int i;
2871 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2872 is_output ? "Output" : "Input",
2873 index,
2874 is_output ? ic->oformat->name : ic->iformat->name,
2875 is_output ? "to" : "from", url);
2876 if (!is_output) {
2877 av_log(NULL, AV_LOG_INFO, " Duration: ");
2878 if (ic->duration != AV_NOPTS_VALUE) {
2879 int hours, mins, secs, us;
2880 secs = ic->duration / AV_TIME_BASE;
2881 us = ic->duration % AV_TIME_BASE;
2882 mins = secs / 60;
2883 secs %= 60;
2884 hours = mins / 60;
2885 mins %= 60;
2886 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2887 (100 * us) / AV_TIME_BASE);
2888 } else {
2889 av_log(NULL, AV_LOG_INFO, "N/A");
2891 if (ic->start_time != AV_NOPTS_VALUE) {
2892 int secs, us;
2893 av_log(NULL, AV_LOG_INFO, ", start: ");
2894 secs = ic->start_time / AV_TIME_BASE;
2895 us = ic->start_time % AV_TIME_BASE;
2896 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2897 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2899 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2900 if (ic->bit_rate) {
2901 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2902 } else {
2903 av_log(NULL, AV_LOG_INFO, "N/A");
2905 av_log(NULL, AV_LOG_INFO, "\n");
2907 if(ic->nb_programs) {
2908 int j, k;
2909 for(j=0; j<ic->nb_programs; j++) {
2910 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2911 "name", NULL, 0);
2912 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2913 name ? name->value : "");
2914 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2915 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2917 } else
2918 for(i=0;i<ic->nb_streams;i++)
2919 dump_stream_format(ic, i, index, is_output);
2920 if (ic->metadata) {
2921 AVMetadataTag *tag=NULL;
2922 av_log(NULL, AV_LOG_INFO, " Metadata\n");
2923 while((tag=av_metadata_get(ic->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
2924 av_log(NULL, AV_LOG_INFO, " %-16s: %s\n", tag->key, tag->value);
2930 #if LIBAVFORMAT_VERSION_MAJOR < 53
2931 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2933 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2936 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2938 AVRational frame_rate;
2939 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2940 *frame_rate_num= frame_rate.num;
2941 *frame_rate_den= frame_rate.den;
2942 return ret;
2944 #endif
2946 int64_t av_gettime(void)
2948 struct timeval tv;
2949 gettimeofday(&tv,NULL);
2950 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2953 int64_t parse_date(const char *datestr, int duration)
2955 const char *p;
2956 int64_t t;
2957 struct tm dt;
2958 int i;
2959 static const char * const date_fmt[] = {
2960 "%Y-%m-%d",
2961 "%Y%m%d",
2963 static const char * const time_fmt[] = {
2964 "%H:%M:%S",
2965 "%H%M%S",
2967 const char *q;
2968 int is_utc, len;
2969 char lastch;
2970 int negative = 0;
2972 #undef time
2973 time_t now = time(0);
2975 len = strlen(datestr);
2976 if (len > 0)
2977 lastch = datestr[len - 1];
2978 else
2979 lastch = '\0';
2980 is_utc = (lastch == 'z' || lastch == 'Z');
2982 memset(&dt, 0, sizeof(dt));
2984 p = datestr;
2985 q = NULL;
2986 if (!duration) {
2987 if (!strncasecmp(datestr, "now", len))
2988 return (int64_t) now * 1000000;
2990 /* parse the year-month-day part */
2991 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2992 q = small_strptime(p, date_fmt[i], &dt);
2993 if (q) {
2994 break;
2998 /* if the year-month-day part is missing, then take the
2999 * current date */
3000 if (!q) {
3001 if (is_utc) {
3002 dt = *gmtime(&now);
3003 } else {
3004 dt = *localtime(&now);
3006 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
3007 } else {
3008 p = q;
3011 if (*p == 'T' || *p == 't' || *p == ' ')
3012 p++;
3014 /* parse the hour-minute-second part */
3015 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3016 q = small_strptime(p, time_fmt[i], &dt);
3017 if (q) {
3018 break;
3021 } else {
3022 /* parse datestr as a duration */
3023 if (p[0] == '-') {
3024 negative = 1;
3025 ++p;
3027 /* parse datestr as HH:MM:SS */
3028 q = small_strptime(p, time_fmt[0], &dt);
3029 if (!q) {
3030 /* parse datestr as S+ */
3031 dt.tm_sec = strtol(p, (char **)&q, 10);
3032 if (q == p)
3033 /* the parsing didn't succeed */
3034 return INT64_MIN;
3035 dt.tm_min = 0;
3036 dt.tm_hour = 0;
3040 /* Now we have all the fields that we can get */
3041 if (!q) {
3042 return INT64_MIN;
3045 if (duration) {
3046 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3047 } else {
3048 dt.tm_isdst = -1; /* unknown */
3049 if (is_utc) {
3050 t = mktimegm(&dt);
3051 } else {
3052 t = mktime(&dt);
3056 t *= 1000000;
3058 /* parse the fractional ".m..." part (stored as microseconds) */
3059 if (*q == '.') {
3060 int val, n;
3061 q++;
3062 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3063 if (!isdigit(*q))
3064 break;
3065 val += n * (*q - '0');
3067 t += val;
3069 return negative ? -t : t;
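/* Examples (illustrative, derived from the parsing above): with duration=0,
 * "now" returns the current time and "2009-03-01 12:00:00" is parsed as an
 * absolute date; with duration=1, "00:01:30.5" yields 90500000 (microseconds)
 * and "-15" yields -15000000. All results are in AV_TIME_BASE units. */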
3072 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3074 const char *p;
3075 char tag[128], *q;
3077 p = info;
3078 if (*p == '?')
3079 p++;
3080 for(;;) {
3081 q = tag;
3082 while (*p != '\0' && *p != '=' && *p != '&') {
3083 if ((q - tag) < sizeof(tag) - 1)
3084 *q++ = *p;
3085 p++;
3087 *q = '\0';
3088 q = arg;
3089 if (*p == '=') {
3090 p++;
3091 while (*p != '&' && *p != '\0') {
3092 if ((q - arg) < arg_size - 1) {
3093 if (*p == '+')
3094 *q++ = ' ';
3095 else
3096 *q++ = *p;
3098 p++;
3100 *q = '\0';
3102 if (!strcmp(tag, tag1))
3103 return 1;
3104 if (*p != '&')
3105 break;
3106 p++;
3108 return 0;
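/* Example (illustrative): given info = "?codec=mp3&title=My+Song",
 * find_info_tag(buf, sizeof(buf), "title", info) returns 1 and stores
 * "My Song" in buf ('+' is decoded as a space). */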
3111 int av_get_frame_filename(char *buf, int buf_size,
3112 const char *path, int number)
3114 const char *p;
3115 char *q, buf1[20], c;
3116 int nd, len, percentd_found;
3118 q = buf;
3119 p = path;
3120 percentd_found = 0;
3121 for(;;) {
3122 c = *p++;
3123 if (c == '\0')
3124 break;
3125 if (c == '%') {
3126 do {
3127 nd = 0;
3128 while (isdigit(*p)) {
3129 nd = nd * 10 + *p++ - '0';
3131 c = *p++;
3132 } while (isdigit(c));
3134 switch(c) {
3135 case '%':
3136 goto addchar;
3137 case 'd':
3138 if (percentd_found)
3139 goto fail;
3140 percentd_found = 1;
3141 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3142 len = strlen(buf1);
3143 if ((q - buf + len) > buf_size - 1)
3144 goto fail;
3145 memcpy(q, buf1, len);
3146 q += len;
3147 break;
3148 default:
3149 goto fail;
3151 } else {
3152 addchar:
3153 if ((q - buf) < buf_size - 1)
3154 *q++ = c;
3157 if (!percentd_found)
3158 goto fail;
3159 *q = '\0';
3160 return 0;
3161 fail:
3162 *q = '\0';
3163 return -1;
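/* Example (illustrative): av_get_frame_filename(buf, sizeof(buf),
 * "frame%04d.png", 7) produces "frame0007.png". Exactly one %d-style
 * sequence is required; "%%" escapes a literal '%', and the function
 * returns -1 if the pattern is malformed or the buffer is too small. */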
3166 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3168 int len, i, j, c;
3169 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3171 for(i=0;i<size;i+=16) {
3172 len = size - i;
3173 if (len > 16)
3174 len = 16;
3175 PRINT("%08x ", i);
3176 for(j=0;j<16;j++) {
3177 if (j < len)
3178 PRINT(" %02x", buf[i+j]);
3179 else
3180 PRINT(" ");
3182 PRINT(" ");
3183 for(j=0;j<len;j++) {
3184 c = buf[i+j];
3185 if (c < ' ' || c > '~')
3186 c = '.';
3187 PRINT("%c", c);
3189 PRINT("\n");
3191 #undef PRINT
3194 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3196 hex_dump_internal(NULL, f, 0, buf, size);
3199 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3201 hex_dump_internal(avcl, NULL, level, buf, size);
3204 //FIXME needs to know the time_base
3205 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3207 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3208 PRINT("stream #%d:\n", pkt->stream_index);
3209 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3210 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3211 /* DTS is _always_ valid after av_read_frame() */
3212 PRINT(" dts=");
3213 if (pkt->dts == AV_NOPTS_VALUE)
3214 PRINT("N/A");
3215 else
3216 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3217 /* PTS may not be known if B-frames are present. */
3218 PRINT(" pts=");
3219 if (pkt->pts == AV_NOPTS_VALUE)
3220 PRINT("N/A");
3221 else
3222 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3223 PRINT("\n");
3224 PRINT(" size=%d\n", pkt->size);
3225 #undef PRINT
3226 if (dump_payload)
3227 av_hex_dump(f, pkt->data, pkt->size);
3230 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3232 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3235 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3237 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3240 void url_split(char *proto, int proto_size,
3241 char *authorization, int authorization_size,
3242 char *hostname, int hostname_size,
3243 int *port_ptr,
3244 char *path, int path_size,
3245 const char *url)
3247 const char *p, *ls, *at, *col, *brk;
3249 if (port_ptr) *port_ptr = -1;
3250 if (proto_size > 0) proto[0] = 0;
3251 if (authorization_size > 0) authorization[0] = 0;
3252 if (hostname_size > 0) hostname[0] = 0;
3253 if (path_size > 0) path[0] = 0;
3255 /* parse protocol */
3256 if ((p = strchr(url, ':'))) {
3257 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3258 p++; /* skip ':' */
3259 if (*p == '/') p++;
3260 if (*p == '/') p++;
3261 } else {
3262 /* no protocol means plain filename */
3263 av_strlcpy(path, url, path_size);
3264 return;
3267 /* separate path from hostname */
3268 ls = strchr(p, '/');
3269 if(!ls)
3270 ls = strchr(p, '?');
3271 if(ls)
3272 av_strlcpy(path, ls, path_size);
3273 else
3274 ls = &p[strlen(p)]; // XXX
3276 /* the rest is hostname, use that to parse auth/port */
3277 if (ls != p) {
3278 /* authorization (user[:pass]@hostname) */
3279 if ((at = strchr(p, '@')) && at < ls) {
3280 av_strlcpy(authorization, p,
3281 FFMIN(authorization_size, at + 1 - p));
3282 p = at + 1; /* skip '@' */
3285 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3286 /* [host]:port */
3287 av_strlcpy(hostname, p + 1,
3288 FFMIN(hostname_size, brk - p));
3289 if (brk[1] == ':' && port_ptr)
3290 *port_ptr = atoi(brk + 2);
3291 } else if ((col = strchr(p, ':')) && col < ls) {
3292 av_strlcpy(hostname, p,
3293 FFMIN(col + 1 - p, hostname_size));
3294 if (port_ptr) *port_ptr = atoi(col + 1);
3295 } else
3296 av_strlcpy(hostname, p,
3297 FFMIN(ls + 1 - p, hostname_size));
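/* Example (illustrative): url_split() applied to
 * "http://user:pass@example.com:8080/path?x=1" yields proto "http",
 * authorization "user:pass", hostname "example.com", port 8080 and
 * path "/path?x=1" (the query string stays attached to the path). */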
3301 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3303 int i;
3304 static const char hex_table[16] = { '0', '1', '2', '3',
3305 '4', '5', '6', '7',
3306 '8', '9', 'A', 'B',
3307 'C', 'D', 'E', 'F' };
3309 for(i = 0; i < s; i++) {
3310 buff[i * 2] = hex_table[src[i] >> 4];
3311 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3314 return buff;
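/* Note: ff_data_to_hex() writes exactly 2*s characters (e.g. {0xAB, 0xCD}
 * becomes "ABCD") and does not nul-terminate the output; callers must
 * terminate or size the buffer themselves. */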
3317 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3318 unsigned int pts_num, unsigned int pts_den)
3320 s->pts_wrap_bits = pts_wrap_bits;
3322 if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
3323 if(s->time_base.num != pts_num)
3324 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
3325 }else
3326 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3328 if(!s->time_base.num || !s->time_base.den)
3329 s->time_base.num= s->time_base.den= 0;