1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "metadata.h"
25 #include "libavutil/avstring.h"
26 #include "riff.h"
27 #include <sys/time.h>
28 #include <time.h>
29 #include <strings.h>
31 #undef NDEBUG
32 #include <assert.h>
34 /**
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
46 /**
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
58 num += (den >> 1);
59 if (num >= den) {
60 val += num / den;
61 num = num % den;
63 f->val = val;
64 f->num = num;
65 f->den = den;
68 /**
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
76 int64_t num, den;
78 num = f->num + incr;
79 den = f->den;
80 if (num < 0) {
81 f->val += num / den;
82 num = num % den;
83 if (num < 0) {
84 num += den;
85 f->val--;
87 } else if (num >= den) {
88 f->val += num / den;
89 num = num % den;
91 f->num = num;
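/* Illustrative sketch (not from the original source): AVFrac stores an exact
 * rational timestamp val + num/den, so repeated additions do not accumulate
 * rounding error. For a 30000/1001 fps stream timed in a 1/90000 time base,
 * every frame advances the clock by 90000*1001/30000 = 3003 ticks exactly:
 *
 *     AVFrac f;
 *     av_frac_init(&f, 0, 0, 30000);    // val=0, remainder carried in num/den
 *     av_frac_add(&f, 90000LL * 1001);  // one frame: f.val becomes 3003
 *     // after N frames f.val == N*3003; the fractional part stays in f.num
 */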
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
111 void av_register_input_format(AVInputFormat *format)
113 AVInputFormat **p;
114 p = &first_iformat;
115 while (*p != NULL) p = &(*p)->next;
116 *p = format;
117 format->next = NULL;
120 void av_register_output_format(AVOutputFormat *format)
122 AVOutputFormat **p;
123 p = &first_oformat;
124 while (*p != NULL) p = &(*p)->next;
125 *p = format;
126 format->next = NULL;
129 int match_ext(const char *filename, const char *extensions)
131 const char *ext, *p;
132 char ext1[32], *q;
134 if(!filename)
135 return 0;
137 ext = strrchr(filename, '.');
138 if (ext) {
139 ext++;
140 p = extensions;
141 for(;;) {
142 q = ext1;
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
144 *q++ = *p++;
145 *q = '\0';
146 if (!strcasecmp(ext1, ext))
147 return 1;
148 if (*p == '\0')
149 break;
150 p++;
153 return 0;
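/* Example (illustrative): match_ext() compares the part after the last '.'
 * case-insensitively against a comma-separated list, so
 *     match_ext("clip.MKV",  "mkv,webm")  -> 1
 *     match_ext("clip.mpeg", "mpg,mpeg")  -> 1
 *     match_ext("clip.wav",  "mkv,webm")  -> 0
 * Extensions are truncated to 31 characters by the ext1[] buffer above. */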
156 static int match_format(const char *name, const char *names)
158 const char *p;
159 int len, namelen;
161 if (!name || !names)
162 return 0;
164 namelen = strlen(name);
165 while ((p = strchr(names, ','))) {
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
168 return 1;
169 names = p+1;
171 return !strcasecmp(name, names);
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
187 #endif
188 /* Find the proper file type. */
189 fmt_found = NULL;
190 score_max = 0;
191 fmt = first_oformat;
192 while (fmt != NULL) {
193 score = 0;
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
195 score += 100;
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
197 score += 10;
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
200 score += 5;
202 if (score > score_max) {
203 score_max = score;
204 fmt_found = fmt;
206 fmt = fmt->next;
208 return fmt_found;
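/* Usage sketch (illustrative): the scoring above prefers an explicit short
 * name (100) over a MIME type match (10) over an extension match (5):
 *
 *     AVOutputFormat *ofmt;
 *     ofmt = guess_format(NULL, "out.mkv", NULL);   // matched by extension
 *     ofmt = guess_format("matroska", NULL, NULL);  // matched by short name
 *
 * A NULL return means no registered muxer matched at all. */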
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
216 if (fmt) {
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
223 if (stream_fmt)
224 fmt = stream_fmt;
227 return fmt;
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
239 #endif
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
242 return codec_id;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
245 else
246 return CODEC_ID_NONE;
249 AVInputFormat *av_find_input_format(const char *short_name)
251 AVInputFormat *fmt;
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
254 return fmt;
256 return NULL;
259 /* memory handling */
262 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
264 int ret= av_new_packet(pkt, size);
266 if(ret<0)
267 return ret;
269 pkt->pos= url_ftell(s);
271 ret= get_buffer(s, pkt->data, size);
272 if(ret<=0)
273 av_free_packet(pkt);
274 else
275 av_shrink_packet(pkt, ret);
277 return ret;
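/* Usage sketch (illustrative, hypothetical demuxer): a minimal read_packet()
 * callback can rely on av_get_packet() to allocate the packet, record
 * pkt->pos and shrink it to the number of bytes actually read:
 *
 *     static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
 *     {
 *         int ret = av_get_packet(s->pb, pkt, 4096); // arbitrary chunk size
 *         if (ret < 0)
 *             return ret;
 *         pkt->stream_index = 0;
 *         return ret;
 *     }
 */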
281 int av_filename_number_test(const char *filename)
283 char buf[1024];
284 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
287 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
289 AVInputFormat *fmt1, *fmt;
290 int score;
292 fmt = NULL;
293 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
294 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
295 continue;
296 score = 0;
297 if (fmt1->read_probe) {
298 score = fmt1->read_probe(pd);
299 } else if (fmt1->extensions) {
300 if (match_ext(pd->filename, fmt1->extensions)) {
301 score = 50;
304 if (score > *score_max) {
305 *score_max = score;
306 fmt = fmt1;
307 }else if (score == *score_max)
308 fmt = NULL;
310 return fmt;
313 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
314 int score=0;
315 return av_probe_input_format2(pd, is_opened, &score);
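/* Example (illustrative): probing an already-buffered file head directly.
 * The buffer is expected to have AVPROBE_PADDING_SIZE zeroed bytes appended.
 *
 *     AVProbeData pd = { "clip.ts", buf, buf_size };
 *     AVInputFormat *ifmt = av_probe_input_format(&pd, 1);
 *
 * Each demuxer's read_probe() returns up to AVPROBE_SCORE_MAX; a bare
 * extension match only scores 50, and a tie between two demuxers makes
 * av_probe_input_format2() return NULL. */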
318 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
320 AVInputFormat *fmt;
321 fmt = av_probe_input_format2(pd, 1, &score);
323 if (fmt) {
324 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
325 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
326 if (!strcmp(fmt->name, "mp3")) {
327 st->codec->codec_id = CODEC_ID_MP3;
328 st->codec->codec_type = CODEC_TYPE_AUDIO;
329 } else if (!strcmp(fmt->name, "ac3")) {
330 st->codec->codec_id = CODEC_ID_AC3;
331 st->codec->codec_type = CODEC_TYPE_AUDIO;
332 } else if (!strcmp(fmt->name, "eac3")) {
333 st->codec->codec_id = CODEC_ID_EAC3;
334 st->codec->codec_type = CODEC_TYPE_AUDIO;
335 } else if (!strcmp(fmt->name, "mpegvideo")) {
336 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
337 st->codec->codec_type = CODEC_TYPE_VIDEO;
338 } else if (!strcmp(fmt->name, "m4v")) {
339 st->codec->codec_id = CODEC_ID_MPEG4;
340 st->codec->codec_type = CODEC_TYPE_VIDEO;
341 } else if (!strcmp(fmt->name, "h264")) {
342 st->codec->codec_id = CODEC_ID_H264;
343 st->codec->codec_type = CODEC_TYPE_VIDEO;
344 } else if (!strcmp(fmt->name, "dts")) {
345 st->codec->codec_id = CODEC_ID_DTS;
346 st->codec->codec_type = CODEC_TYPE_AUDIO;
349 return !!fmt;
352 /************************************************************/
353 /* input media file */
356 * Open a media file from an IO stream. 'fmt' must be specified.
358 int av_open_input_stream(AVFormatContext **ic_ptr,
359 ByteIOContext *pb, const char *filename,
360 AVInputFormat *fmt, AVFormatParameters *ap)
362 int err;
363 AVFormatContext *ic;
364 AVFormatParameters default_ap;
366 if(!ap){
367 ap=&default_ap;
368 memset(ap, 0, sizeof(default_ap));
371 if(!ap->prealloced_context)
372 ic = avformat_alloc_context();
373 else
374 ic = *ic_ptr;
375 if (!ic) {
376 err = AVERROR(ENOMEM);
377 goto fail;
379 ic->iformat = fmt;
380 ic->pb = pb;
381 ic->duration = AV_NOPTS_VALUE;
382 ic->start_time = AV_NOPTS_VALUE;
383 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
385 /* allocate private data */
386 if (fmt->priv_data_size > 0) {
387 ic->priv_data = av_mallocz(fmt->priv_data_size);
388 if (!ic->priv_data) {
389 err = AVERROR(ENOMEM);
390 goto fail;
392 } else {
393 ic->priv_data = NULL;
396 if (ic->iformat->read_header) {
397 err = ic->iformat->read_header(ic, ap);
398 if (err < 0)
399 goto fail;
402 if (pb && !ic->data_offset)
403 ic->data_offset = url_ftell(ic->pb);
405 #if LIBAVFORMAT_VERSION_MAJOR < 53
406 ff_metadata_demux_compat(ic);
407 #endif
409 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
411 *ic_ptr = ic;
412 return 0;
413 fail:
414 if (ic) {
415 int i;
416 av_freep(&ic->priv_data);
417 for(i=0;i<ic->nb_streams;i++) {
418 AVStream *st = ic->streams[i];
419 if (st) {
420 av_free(st->priv_data);
421 av_free(st->codec->extradata);
423 av_free(st);
426 av_free(ic);
427 *ic_ptr = NULL;
428 return err;
431 /** size of probe buffer, for guessing file type from file contents */
432 #define PROBE_BUF_MIN 2048
433 #define PROBE_BUF_MAX (1<<20)
435 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
436 AVInputFormat *fmt,
437 int buf_size,
438 AVFormatParameters *ap)
440 int err, probe_size;
441 AVProbeData probe_data, *pd = &probe_data;
442 ByteIOContext *pb = NULL;
443 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
445 pd->filename = "";
446 if (filename)
447 pd->filename = filename;
448 pd->buf = NULL;
449 pd->buf_size = 0;
451 if (!fmt) {
452 /* guess format if no file can be opened */
453 fmt = av_probe_input_format(pd, 0);
456 /* Do not open file if the format does not need it. XXX: specific
457 hack needed to handle RTSP/TCP */
458 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
459 /* if no file needed do not try to open one */
460 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
461 goto fail;
463 if (buf_size > 0) {
464 url_setbufsize(pb, buf_size);
467 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
468 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
469 /* read probe data */
470 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
471 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
472 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
473 if (url_fseek(pb, 0, SEEK_SET) < 0) {
474 url_fclose(pb);
475 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
476 pb = NULL;
477 err = AVERROR(EIO);
478 goto fail;
481 /* guess file format */
482 fmt = av_probe_input_format2(pd, 1, &score);
483 if(fmt){
484 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
485 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
486 }else
487 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
490 av_freep(&pd->buf);
493 /* if still no format found, error */
494 if (!fmt) {
495 err = AVERROR_NOFMT;
496 goto fail;
499 /* check filename in case an image number is expected */
500 if (fmt->flags & AVFMT_NEEDNUMBER) {
501 if (!av_filename_number_test(filename)) {
502 err = AVERROR_NUMEXPECTED;
503 goto fail;
506 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
507 if (err)
508 goto fail;
509 return 0;
510 fail:
511 av_freep(&pd->buf);
512 if (pb)
513 url_fclose(pb);
514 if (ap && ap->prealloced_context)
515 av_free(*ic_ptr);
516 *ic_ptr = NULL;
517 return err;
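/* Usage sketch (illustrative): the typical demuxing sequence with this API.
 * Error handling is reduced to a minimum.
 *
 *     AVFormatContext *ic = NULL;
 *     AVPacket pkt;
 *
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;                       // probing or read_header() failed
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;                       // codec parameters not found
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // ... dispatch pkt by pkt.stream_index to the matching decoder ...
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */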
521 /*******************************************************/
523 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
524 AVPacketList **plast_pktl){
525 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
526 if (!pktl)
527 return NULL;
529 if (*packet_buffer)
530 (*plast_pktl)->next = pktl;
531 else
532 *packet_buffer = pktl;
534 /* add the packet in the buffered packet list */
535 *plast_pktl = pktl;
536 pktl->pkt= *pkt;
537 return &pktl->pkt;
540 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
542 int ret, i;
543 AVStream *st;
545 for(;;){
546 AVPacketList *pktl = s->raw_packet_buffer;
548 if (pktl) {
549 *pkt = pktl->pkt;
550 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
551 !s->streams[pkt->stream_index]->probe_packets ||
552 s->raw_packet_buffer_remaining_size < pkt->size){
553 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
554 av_freep(&pd->buf);
555 pd->buf_size = 0;
556 s->raw_packet_buffer = pktl->next;
557 s->raw_packet_buffer_remaining_size += pkt->size;
558 av_free(pktl);
559 return 0;
563 av_init_packet(pkt);
564 ret= s->iformat->read_packet(s, pkt);
565 if (ret < 0) {
566 if (!pktl || ret == AVERROR(EAGAIN))
567 return ret;
568 for (i = 0; i < s->nb_streams; i++)
569 s->streams[i]->probe_packets = 0;
570 continue;
572 st= s->streams[pkt->stream_index];
574 switch(st->codec->codec_type){
575 case CODEC_TYPE_VIDEO:
576 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
577 break;
578 case CODEC_TYPE_AUDIO:
579 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
580 break;
581 case CODEC_TYPE_SUBTITLE:
582 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
583 break;
586 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
587 !st->probe_packets))
588 return ret;
590 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
591 s->raw_packet_buffer_remaining_size -= pkt->size;
593 if(st->codec->codec_id == CODEC_ID_PROBE){
594 AVProbeData *pd = &st->probe_data;
596 --st->probe_packets;
598 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
599 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
600 pd->buf_size += pkt->size;
601 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
603 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
604 set_codec_from_probe_data(s, st, pd, 1);
605 if(st->codec->codec_id != CODEC_ID_PROBE){
606 pd->buf_size=0;
607 av_freep(&pd->buf);
614 /**********************************************************/
617 * Get the number of samples of an audio frame. Return -1 on error.
619 static int get_audio_frame_size(AVCodecContext *enc, int size)
621 int frame_size;
623 if(enc->codec_id == CODEC_ID_VORBIS)
624 return -1;
626 if (enc->frame_size <= 1) {
627 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
629 if (bits_per_sample) {
630 if (enc->channels == 0)
631 return -1;
632 frame_size = (size << 3) / (bits_per_sample * enc->channels);
633 } else {
634 /* used for example by ADPCM codecs */
635 if (enc->bit_rate == 0)
636 return -1;
637 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
639 } else {
640 frame_size = enc->frame_size;
642 return frame_size;
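/* Example (illustrative): for 16-bit stereo PCM av_get_bits_per_sample()
 * returns 16, so a 4096-byte packet holds (4096*8) / (16*2) = 1024 samples.
 * Codecs without a fixed bits-per-sample value fall back to the bitrate:
 * frame_size = size * 8 * sample_rate / bit_rate. */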
647 * Compute the frame duration, in seconds, as the fraction *pnum / *pden. Both are set to 0 if it is not available.
649 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
650 AVCodecParserContext *pc, AVPacket *pkt)
652 int frame_size;
654 *pnum = 0;
655 *pden = 0;
656 switch(st->codec->codec_type) {
657 case CODEC_TYPE_VIDEO:
658 if(st->time_base.num*1000LL > st->time_base.den){
659 *pnum = st->time_base.num;
660 *pden = st->time_base.den;
661 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
662 *pnum = st->codec->time_base.num;
663 *pden = st->codec->time_base.den;
664 if (pc && pc->repeat_pict) {
665 *pnum = (*pnum) * (1 + pc->repeat_pict);
668 break;
669 case CODEC_TYPE_AUDIO:
670 frame_size = get_audio_frame_size(st->codec, pkt->size);
671 if (frame_size < 0)
672 break;
673 *pnum = frame_size;
674 *pden = st->codec->sample_rate;
675 break;
676 default:
677 break;
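/* Example (illustrative): for 25 fps video with a 1/25 codec time base the
 * result is *pnum/*pden = 1/25 s per frame (doubled to 2/25 when the parser
 * reports repeat_pict == 1); for 48 kHz audio packets carrying 1024 samples
 * it is 1024/48000 s. */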
681 static int is_intra_only(AVCodecContext *enc){
682 if(enc->codec_type == CODEC_TYPE_AUDIO){
683 return 1;
684 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
685 switch(enc->codec_id){
686 case CODEC_ID_MJPEG:
687 case CODEC_ID_MJPEGB:
688 case CODEC_ID_LJPEG:
689 case CODEC_ID_RAWVIDEO:
690 case CODEC_ID_DVVIDEO:
691 case CODEC_ID_HUFFYUV:
692 case CODEC_ID_FFVHUFF:
693 case CODEC_ID_ASV1:
694 case CODEC_ID_ASV2:
695 case CODEC_ID_VCR1:
696 case CODEC_ID_DNXHD:
697 case CODEC_ID_JPEG2000:
698 return 1;
699 default: break;
702 return 0;
705 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
706 int64_t dts, int64_t pts)
708 AVStream *st= s->streams[stream_index];
709 AVPacketList *pktl= s->packet_buffer;
711 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
712 return;
714 st->first_dts= dts - st->cur_dts;
715 st->cur_dts= dts;
717 for(; pktl; pktl= pktl->next){
718 if(pktl->pkt.stream_index != stream_index)
719 continue;
720 //FIXME think more about this check
721 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
722 pktl->pkt.pts += st->first_dts;
724 if(pktl->pkt.dts != AV_NOPTS_VALUE)
725 pktl->pkt.dts += st->first_dts;
727 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
728 st->start_time= pktl->pkt.pts;
730 if (st->start_time == AV_NOPTS_VALUE)
731 st->start_time = pts;
734 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
736 AVPacketList *pktl= s->packet_buffer;
737 int64_t cur_dts= 0;
739 if(st->first_dts != AV_NOPTS_VALUE){
740 cur_dts= st->first_dts;
741 for(; pktl; pktl= pktl->next){
742 if(pktl->pkt.stream_index == pkt->stream_index){
743 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
744 break;
745 cur_dts -= pkt->duration;
748 pktl= s->packet_buffer;
749 st->first_dts = cur_dts;
750 }else if(st->cur_dts)
751 return;
753 for(; pktl; pktl= pktl->next){
754 if(pktl->pkt.stream_index != pkt->stream_index)
755 continue;
756 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
757 && !pktl->pkt.duration){
758 pktl->pkt.dts= cur_dts;
759 if(!st->codec->has_b_frames)
760 pktl->pkt.pts= cur_dts;
761 cur_dts += pkt->duration;
762 pktl->pkt.duration= pkt->duration;
763 }else
764 break;
766 if(st->first_dts == AV_NOPTS_VALUE)
767 st->cur_dts= cur_dts;
770 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
771 AVCodecParserContext *pc, AVPacket *pkt)
773 int num, den, presentation_delayed, delay, i;
774 int64_t offset;
776 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
777 //FIXME Set low_delay = 0 when has_b_frames = 1
778 st->codec->has_b_frames = 1;
780 /* do we have a video B-frame ? */
781 delay= st->codec->has_b_frames;
782 presentation_delayed = 0;
783 /* XXX: need has_b_frame, but cannot get it if the codec is
784 not initialized */
785 if (delay &&
786 pc && pc->pict_type != FF_B_TYPE)
787 presentation_delayed = 1;
789 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
790 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
791 pkt->dts -= 1LL<<st->pts_wrap_bits;
794 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
795 // we take the conservative approach and discard both
796 // Note, if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
797 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
798 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
799 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
802 if (pkt->duration == 0) {
803 compute_frame_duration(&num, &den, st, pc, pkt);
804 if (den && num) {
805 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
807 if(pkt->duration != 0 && s->packet_buffer)
808 update_initial_durations(s, st, pkt);
812 /* correct timestamps with byte offset if demuxers only have timestamps
813 on packet boundaries */
814 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
815 /* this will estimate bitrate based on this frame's duration and size */
816 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
817 if(pkt->pts != AV_NOPTS_VALUE)
818 pkt->pts += offset;
819 if(pkt->dts != AV_NOPTS_VALUE)
820 pkt->dts += offset;
823 if (pc && pc->dts_sync_point >= 0) {
824 // we have synchronization info from the parser
825 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
826 if (den > 0) {
827 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
828 if (pkt->dts != AV_NOPTS_VALUE) {
829 // got DTS from the stream, update reference timestamp
830 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
831 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
832 } else if (st->reference_dts != AV_NOPTS_VALUE) {
833 // compute DTS based on reference timestamp
834 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
835 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
837 if (pc->dts_sync_point > 0)
838 st->reference_dts = pkt->dts; // new reference
842 /* This may be redundant, but it should not hurt. */
843 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
844 presentation_delayed = 1;
846 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
847 /* interpolate PTS and DTS if they are not present */
848 //We skip H264 currently because delay and has_b_frames are not reliably set
849 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
850 if (presentation_delayed) {
851 /* DTS = decompression timestamp */
852 /* PTS = presentation timestamp */
853 if (pkt->dts == AV_NOPTS_VALUE)
854 pkt->dts = st->last_IP_pts;
855 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
856 if (pkt->dts == AV_NOPTS_VALUE)
857 pkt->dts = st->cur_dts;
859 /* this is tricky: the dts must be incremented by the duration
860 of the frame we are displaying, i.e. the last I- or P-frame */
861 if (st->last_IP_duration == 0)
862 st->last_IP_duration = pkt->duration;
863 if(pkt->dts != AV_NOPTS_VALUE)
864 st->cur_dts = pkt->dts + st->last_IP_duration;
865 st->last_IP_duration = pkt->duration;
866 st->last_IP_pts= pkt->pts;
867 /* cannot compute PTS if not present (we can compute it only
868 by knowing the future) */
869 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
870 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
871 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
872 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
873 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
874 pkt->pts += pkt->duration;
875 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
879 /* presentation is not delayed : PTS and DTS are the same */
880 if(pkt->pts == AV_NOPTS_VALUE)
881 pkt->pts = pkt->dts;
882 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
883 if(pkt->pts == AV_NOPTS_VALUE)
884 pkt->pts = st->cur_dts;
885 pkt->dts = pkt->pts;
886 if(pkt->pts != AV_NOPTS_VALUE)
887 st->cur_dts = pkt->pts + pkt->duration;
891 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
892 st->pts_buffer[0]= pkt->pts;
893 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
894 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
895 if(pkt->dts == AV_NOPTS_VALUE)
896 pkt->dts= st->pts_buffer[0];
897 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
898 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
900 if(pkt->dts > st->cur_dts)
901 st->cur_dts = pkt->dts;
904 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
906 /* update flags */
907 if(is_intra_only(st->codec))
908 pkt->flags |= PKT_FLAG_KEY;
909 else if (pc) {
910 pkt->flags = 0;
911 /* keyframe computation */
912 if (pc->key_frame == 1)
913 pkt->flags |= PKT_FLAG_KEY;
914 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
915 pkt->flags |= PKT_FLAG_KEY;
917 if (pc)
918 pkt->convergence_duration = pc->convergence_duration;
922 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
924 AVStream *st;
925 int len, ret, i;
927 av_init_packet(pkt);
929 for(;;) {
930 /* select current input stream component */
931 st = s->cur_st;
932 if (st) {
933 if (!st->need_parsing || !st->parser) {
934 /* no parsing needed: we just output the packet as is */
935 /* raw data support */
936 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
937 compute_pkt_fields(s, st, NULL, pkt);
938 s->cur_st = NULL;
939 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
940 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
941 ff_reduce_index(s, st->index);
942 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
944 break;
945 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
946 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
947 st->cur_ptr, st->cur_len,
948 st->cur_pkt.pts, st->cur_pkt.dts,
949 st->cur_pkt.pos);
950 st->cur_pkt.pts = AV_NOPTS_VALUE;
951 st->cur_pkt.dts = AV_NOPTS_VALUE;
952 /* increment read pointer */
953 st->cur_ptr += len;
954 st->cur_len -= len;
956 /* return packet if any */
957 if (pkt->size) {
958 got_packet:
959 pkt->duration = 0;
960 pkt->stream_index = st->index;
961 pkt->pts = st->parser->pts;
962 pkt->dts = st->parser->dts;
963 pkt->pos = st->parser->pos;
964 pkt->destruct = NULL;
965 compute_pkt_fields(s, st, st->parser, pkt);
967 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
968 ff_reduce_index(s, st->index);
969 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
970 0, 0, AVINDEX_KEYFRAME);
973 break;
975 } else {
976 /* free packet */
977 av_free_packet(&st->cur_pkt);
978 s->cur_st = NULL;
980 } else {
981 AVPacket cur_pkt;
982 /* read next packet */
983 ret = av_read_packet(s, &cur_pkt);
984 if (ret < 0) {
985 if (ret == AVERROR(EAGAIN))
986 return ret;
987 /* return the last frames, if any */
988 for(i = 0; i < s->nb_streams; i++) {
989 st = s->streams[i];
990 if (st->parser && st->need_parsing) {
991 av_parser_parse2(st->parser, st->codec,
992 &pkt->data, &pkt->size,
993 NULL, 0,
994 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
995 AV_NOPTS_VALUE);
996 if (pkt->size)
997 goto got_packet;
1000 /* no more packets: really terminate parsing */
1001 return ret;
1003 st = s->streams[cur_pkt.stream_index];
1004 st->cur_pkt= cur_pkt;
1006 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1007 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1008 st->cur_pkt.pts < st->cur_pkt.dts){
1009 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1010 st->cur_pkt.stream_index,
1011 st->cur_pkt.pts,
1012 st->cur_pkt.dts,
1013 st->cur_pkt.size);
1014 // av_free_packet(&st->cur_pkt);
1015 // return -1;
1018 if(s->debug & FF_FDEBUG_TS)
1019 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1020 st->cur_pkt.stream_index,
1021 st->cur_pkt.pts,
1022 st->cur_pkt.dts,
1023 st->cur_pkt.size,
1024 st->cur_pkt.flags);
1026 s->cur_st = st;
1027 st->cur_ptr = st->cur_pkt.data;
1028 st->cur_len = st->cur_pkt.size;
1029 if (st->need_parsing && !st->parser) {
1030 st->parser = av_parser_init(st->codec->codec_id);
1031 if (!st->parser) {
1032 /* no parser available: just output the raw packets */
1033 st->need_parsing = AVSTREAM_PARSE_NONE;
1034 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1035 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1037 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1038 st->parser->next_frame_offset=
1039 st->parser->cur_offset= st->cur_pkt.pos;
1044 if(s->debug & FF_FDEBUG_TS)
1045 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1046 pkt->stream_index,
1047 pkt->pts,
1048 pkt->dts,
1049 pkt->size,
1050 pkt->flags);
1052 return 0;
1055 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1057 AVPacketList *pktl;
1058 int eof=0;
1059 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1061 for(;;){
1062 pktl = s->packet_buffer;
1063 if (pktl) {
1064 AVPacket *next_pkt= &pktl->pkt;
1066 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1067 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1068 if( pktl->pkt.stream_index == next_pkt->stream_index
1069 && next_pkt->dts < pktl->pkt.dts
1070 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1071 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1072 next_pkt->pts= pktl->pkt.dts;
1074 pktl= pktl->next;
1076 pktl = s->packet_buffer;
1079 if( next_pkt->pts != AV_NOPTS_VALUE
1080 || next_pkt->dts == AV_NOPTS_VALUE
1081 || !genpts || eof){
1082 /* read packet from packet buffer, if there is data */
1083 *pkt = *next_pkt;
1084 s->packet_buffer = pktl->next;
1085 av_free(pktl);
1086 return 0;
1089 if(genpts){
1090 int ret= av_read_frame_internal(s, pkt);
1091 if(ret<0){
1092 if(pktl && ret != AVERROR(EAGAIN)){
1093 eof=1;
1094 continue;
1095 }else
1096 return ret;
1099 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1100 &s->packet_buffer_end)) < 0)
1101 return AVERROR(ENOMEM);
1102 }else{
1103 assert(!s->packet_buffer);
1104 return av_read_frame_internal(s, pkt);
1109 /* XXX: suppress the packet queue */
1110 static void flush_packet_queue(AVFormatContext *s)
1112 AVPacketList *pktl;
1114 for(;;) {
1115 pktl = s->packet_buffer;
1116 if (!pktl)
1117 break;
1118 s->packet_buffer = pktl->next;
1119 av_free_packet(&pktl->pkt);
1120 av_free(pktl);
1122 while(s->raw_packet_buffer){
1123 pktl = s->raw_packet_buffer;
1124 s->raw_packet_buffer = pktl->next;
1125 av_free_packet(&pktl->pkt);
1126 av_free(pktl);
1128 s->packet_buffer_end=
1129 s->raw_packet_buffer_end= NULL;
1130 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1133 /*******************************************************/
1134 /* seek support */
1136 int av_find_default_stream_index(AVFormatContext *s)
1138 int first_audio_index = -1;
1139 int i;
1140 AVStream *st;
1142 if (s->nb_streams <= 0)
1143 return -1;
1144 for(i = 0; i < s->nb_streams; i++) {
1145 st = s->streams[i];
1146 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1147 return i;
1149 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1150 first_audio_index = i;
1152 return first_audio_index >= 0 ? first_audio_index : 0;
1156 * Flush the frame reader.
1158 void av_read_frame_flush(AVFormatContext *s)
1160 AVStream *st;
1161 int i;
1163 flush_packet_queue(s);
1165 s->cur_st = NULL;
1167 /* for each stream, reset read state */
1168 for(i = 0; i < s->nb_streams; i++) {
1169 st = s->streams[i];
1171 if (st->parser) {
1172 av_parser_close(st->parser);
1173 st->parser = NULL;
1174 av_free_packet(&st->cur_pkt);
1176 st->last_IP_pts = AV_NOPTS_VALUE;
1177 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1178 st->reference_dts = AV_NOPTS_VALUE;
1179 /* fail safe */
1180 st->cur_ptr = NULL;
1181 st->cur_len = 0;
1183 st->probe_packets = MAX_PROBE_PACKETS;
1187 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1188 int i;
1190 for(i = 0; i < s->nb_streams; i++) {
1191 AVStream *st = s->streams[i];
1193 st->cur_dts = av_rescale(timestamp,
1194 st->time_base.den * (int64_t)ref_st->time_base.num,
1195 st->time_base.num * (int64_t)ref_st->time_base.den);
1199 void ff_reduce_index(AVFormatContext *s, int stream_index)
1201 AVStream *st= s->streams[stream_index];
1202 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1204 if((unsigned)st->nb_index_entries >= max_entries){
1205 int i;
1206 for(i=0; 2*i<st->nb_index_entries; i++)
1207 st->index_entries[i]= st->index_entries[2*i];
1208 st->nb_index_entries= i;
1212 int av_add_index_entry(AVStream *st,
1213 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1215 AVIndexEntry *entries, *ie;
1216 int index;
1218 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1219 return -1;
1221 entries = av_fast_realloc(st->index_entries,
1222 &st->index_entries_allocated_size,
1223 (st->nb_index_entries + 1) *
1224 sizeof(AVIndexEntry));
1225 if(!entries)
1226 return -1;
1228 st->index_entries= entries;
1230 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1232 if(index<0){
1233 index= st->nb_index_entries++;
1234 ie= &entries[index];
1235 assert(index==0 || ie[-1].timestamp < timestamp);
1236 }else{
1237 ie= &entries[index];
1238 if(ie->timestamp != timestamp){
1239 if(ie->timestamp <= timestamp)
1240 return -1;
1241 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1242 st->nb_index_entries++;
1243 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1244 distance= ie->min_distance;
1247 ie->pos = pos;
1248 ie->timestamp = timestamp;
1249 ie->min_distance= distance;
1250 ie->size= size;
1251 ie->flags = flags;
1253 return index;
1256 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1257 int flags)
1259 AVIndexEntry *entries= st->index_entries;
1260 int nb_entries= st->nb_index_entries;
1261 int a, b, m;
1262 int64_t timestamp;
1264 a = - 1;
1265 b = nb_entries;
1267 while (b - a > 1) {
1268 m = (a + b) >> 1;
1269 timestamp = entries[m].timestamp;
1270 if(timestamp >= wanted_timestamp)
1271 b = m;
1272 if(timestamp <= wanted_timestamp)
1273 a = m;
1275 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1277 if(!(flags & AVSEEK_FLAG_ANY)){
1278 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1279 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1283 if(m == nb_entries)
1284 return -1;
1285 return m;
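/* Example (illustrative): with index entries at timestamps {0, 40, 80, 120}
 * and wanted_timestamp == 50,
 *     av_index_search_timestamp(st, 50, AVSEEK_FLAG_BACKWARD) -> entry at 40
 *     av_index_search_timestamp(st, 50, 0)                    -> entry at 80
 * and without AVSEEK_FLAG_ANY the result is further moved to the nearest
 * AVINDEX_KEYFRAME entry in the chosen direction. */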
1288 #define DEBUG_SEEK
1290 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1291 AVInputFormat *avif= s->iformat;
1292 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1293 int64_t ts_min, ts_max, ts;
1294 int index;
1295 AVStream *st;
1297 if (stream_index < 0)
1298 return -1;
1300 #ifdef DEBUG_SEEK
1301 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1302 #endif
1304 ts_max=
1305 ts_min= AV_NOPTS_VALUE;
1306 pos_limit= -1; //gcc falsely says it may be uninitialized
1308 st= s->streams[stream_index];
1309 if(st->index_entries){
1310 AVIndexEntry *e;
1312 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1313 index= FFMAX(index, 0);
1314 e= &st->index_entries[index];
1316 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1317 pos_min= e->pos;
1318 ts_min= e->timestamp;
1319 #ifdef DEBUG_SEEK
1320 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1321 pos_min,ts_min);
1322 #endif
1323 }else{
1324 assert(index==0);
1327 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1328 assert(index < st->nb_index_entries);
1329 if(index >= 0){
1330 e= &st->index_entries[index];
1331 assert(e->timestamp >= target_ts);
1332 pos_max= e->pos;
1333 ts_max= e->timestamp;
1334 pos_limit= pos_max - e->min_distance;
1335 #ifdef DEBUG_SEEK
1336 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1337 pos_max,pos_limit, ts_max);
1338 #endif
1342 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1343 if(pos<0)
1344 return -1;
1346 /* do the seek */
1347 url_fseek(s->pb, pos, SEEK_SET);
1349 av_update_cur_dts(s, st, ts);
1351 return 0;
1354 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1355 int64_t pos, ts;
1356 int64_t start_pos, filesize;
1357 int no_change;
1359 #ifdef DEBUG_SEEK
1360 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1361 #endif
1363 if(ts_min == AV_NOPTS_VALUE){
1364 pos_min = s->data_offset;
1365 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1366 if (ts_min == AV_NOPTS_VALUE)
1367 return -1;
1370 if(ts_max == AV_NOPTS_VALUE){
1371 int step= 1024;
1372 filesize = url_fsize(s->pb);
1373 pos_max = filesize - 1;
1375 pos_max -= step;
1376 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1377 step += step;
1378 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1379 if (ts_max == AV_NOPTS_VALUE)
1380 return -1;
1382 for(;;){
1383 int64_t tmp_pos= pos_max + 1;
1384 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1385 if(tmp_ts == AV_NOPTS_VALUE)
1386 break;
1387 ts_max= tmp_ts;
1388 pos_max= tmp_pos;
1389 if(tmp_pos >= filesize)
1390 break;
1392 pos_limit= pos_max;
1395 if(ts_min > ts_max){
1396 return -1;
1397 }else if(ts_min == ts_max){
1398 pos_limit= pos_min;
1401 no_change=0;
1402 while (pos_min < pos_limit) {
1403 #ifdef DEBUG_SEEK
1404 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1405 pos_min, pos_max,
1406 ts_min, ts_max);
1407 #endif
1408 assert(pos_limit <= pos_max);
1410 if(no_change==0){
1411 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1412 // interpolate position (better than dichotomy)
1413 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1414 + pos_min - approximate_keyframe_distance;
1415 }else if(no_change==1){
1416 // bisection, if interpolation failed to change min or max pos last time
1417 pos = (pos_min + pos_limit)>>1;
1418 }else{
1419 /* linear search if bisection failed, can only happen if there
1420 are very few or no keyframes between min/max */
1421 pos=pos_min;
1423 if(pos <= pos_min)
1424 pos= pos_min + 1;
1425 else if(pos > pos_limit)
1426 pos= pos_limit;
1427 start_pos= pos;
1429 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1430 if(pos == pos_max)
1431 no_change++;
1432 else
1433 no_change=0;
1434 #ifdef DEBUG_SEEK
1435 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1436 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1437 start_pos, no_change);
1438 #endif
1439 if(ts == AV_NOPTS_VALUE){
1440 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1441 return -1;
1443 assert(ts != AV_NOPTS_VALUE);
1444 if (target_ts <= ts) {
1445 pos_limit = start_pos - 1;
1446 pos_max = pos;
1447 ts_max = ts;
1449 if (target_ts >= ts) {
1450 pos_min = pos;
1451 ts_min = ts;
1455 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1456 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1457 #ifdef DEBUG_SEEK
1458 pos_min = pos;
1459 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1460 pos_min++;
1461 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1462 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1463 pos, ts_min, target_ts, ts_max);
1464 #endif
1465 *ts_ret= ts;
1466 return pos;
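/* Example (illustrative): with ts_min=0 at pos_min=0 and ts_max=100 at
 * pos_max=1000000, a search for target_ts=25 first probes near
 *     pos = 25 * 1000000 / 100 - approximate_keyframe_distance
 * i.e. linear interpolation biased back by one keyframe interval. Only when
 * interpolation fails to narrow the interval does the loop fall back to
 * bisection, and then to a linear scan from pos_min. */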
1469 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1470 int64_t pos_min, pos_max;
1471 #if 0
1472 AVStream *st;
1474 if (stream_index < 0)
1475 return -1;
1477 st= s->streams[stream_index];
1478 #endif
1480 pos_min = s->data_offset;
1481 pos_max = url_fsize(s->pb) - 1;
1483 if (pos < pos_min) pos= pos_min;
1484 else if(pos > pos_max) pos= pos_max;
1486 url_fseek(s->pb, pos, SEEK_SET);
1488 #if 0
1489 av_update_cur_dts(s, st, ts);
1490 #endif
1491 return 0;
1494 static int av_seek_frame_generic(AVFormatContext *s,
1495 int stream_index, int64_t timestamp, int flags)
1497 int index, ret;
1498 AVStream *st;
1499 AVIndexEntry *ie;
1501 st = s->streams[stream_index];
1503 index = av_index_search_timestamp(st, timestamp, flags);
1505 if(index < 0 || index==st->nb_index_entries-1){
1506 int i;
1507 AVPacket pkt;
1509 if(st->nb_index_entries){
1510 assert(st->index_entries);
1511 ie= &st->index_entries[st->nb_index_entries-1];
1512 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1513 return ret;
1514 av_update_cur_dts(s, st, ie->timestamp);
1515 }else{
1516 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1517 return ret;
1519 for(i=0;; i++) {
1520 int ret;
1521 do{
1522 ret = av_read_frame(s, &pkt);
1523 }while(ret == AVERROR(EAGAIN));
1524 if(ret<0)
1525 break;
1526 av_free_packet(&pkt);
1527 if(stream_index == pkt.stream_index){
1528 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1529 break;
1532 index = av_index_search_timestamp(st, timestamp, flags);
1534 if (index < 0)
1535 return -1;
1537 av_read_frame_flush(s);
1538 if (s->iformat->read_seek){
1539 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1540 return 0;
1542 ie = &st->index_entries[index];
1543 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1544 return ret;
1545 av_update_cur_dts(s, st, ie->timestamp);
1547 return 0;
1550 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1552 int ret;
1553 AVStream *st;
1555 av_read_frame_flush(s);
1557 if(flags & AVSEEK_FLAG_BYTE)
1558 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1560 if(stream_index < 0){
1561 stream_index= av_find_default_stream_index(s);
1562 if(stream_index < 0)
1563 return -1;
1565 st= s->streams[stream_index];
1566 /* timestamp for default must be expressed in AV_TIME_BASE units */
1567 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1570 /* first, we try the format specific seek */
1571 if (s->iformat->read_seek)
1572 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1573 else
1574 ret = -1;
1575 if (ret >= 0) {
1576 return 0;
1579 if(s->iformat->read_timestamp)
1580 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1581 else
1582 return av_seek_frame_generic(s, stream_index, timestamp, flags);
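/* Usage sketch (illustrative): seek to the keyframe at or before the 10 s
 * mark. With stream_index == -1 the timestamp is given in AV_TIME_BASE units
 * and rescaled above to the default stream's time base:
 *
 *     if (av_seek_frame(ic, -1, 10LL * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
 *         av_log(ic, AV_LOG_ERROR, "seek to 10s failed\n");
 */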
1585 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1587 if(min_ts > ts || max_ts < ts)
1588 return -1;
1590 av_read_frame_flush(s);
1592 if (s->iformat->read_seek2)
1593 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1595 if(s->iformat->read_timestamp){
1596 //try to seek via read_timestamp()
1599 //Fallback to old API if new is not implemented but old is
1600 //Note the old has somewhat different semantics
1601 if(s->iformat->read_seek || 1)
1602 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1604 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1607 /*******************************************************/
1610 * Returns TRUE if the file has an accurate duration in at least one of its streams.
1612 * @return TRUE if the stream has accurate duration for at least one component.
1614 static int av_has_duration(AVFormatContext *ic)
1616 int i;
1617 AVStream *st;
1619 for(i = 0;i < ic->nb_streams; i++) {
1620 st = ic->streams[i];
1621 if (st->duration != AV_NOPTS_VALUE)
1622 return 1;
1624 return 0;
1628 * Estimate the stream timings from those of each component.
1630 * Also computes the global bitrate if possible.
1632 static void av_update_stream_timings(AVFormatContext *ic)
1634 int64_t start_time, start_time1, end_time, end_time1;
1635 int64_t duration, duration1;
1636 int i;
1637 AVStream *st;
1639 start_time = INT64_MAX;
1640 end_time = INT64_MIN;
1641 duration = INT64_MIN;
1642 for(i = 0;i < ic->nb_streams; i++) {
1643 st = ic->streams[i];
1644 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1645 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1646 if (start_time1 < start_time)
1647 start_time = start_time1;
1648 if (st->duration != AV_NOPTS_VALUE) {
1649 end_time1 = start_time1
1650 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1651 if (end_time1 > end_time)
1652 end_time = end_time1;
1655 if (st->duration != AV_NOPTS_VALUE) {
1656 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1657 if (duration1 > duration)
1658 duration = duration1;
1661 if (start_time != INT64_MAX) {
1662 ic->start_time = start_time;
1663 if (end_time != INT64_MIN) {
1664 if (end_time - start_time > duration)
1665 duration = end_time - start_time;
1668 if (duration != INT64_MIN) {
1669 ic->duration = duration;
1670 if (ic->file_size > 0) {
1671 /* compute the bitrate */
1672 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1673 (double)ic->duration;
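/* Example (illustrative): a 90 000 000 byte file with a 60 second duration
 * (60 * AV_TIME_BASE microseconds) yields
 *     90e6 * 8 * AV_TIME_BASE / (60 * AV_TIME_BASE) = 12 000 000 bit/s. */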
1678 static void fill_all_stream_timings(AVFormatContext *ic)
1680 int i;
1681 AVStream *st;
1683 av_update_stream_timings(ic);
1684 for(i = 0;i < ic->nb_streams; i++) {
1685 st = ic->streams[i];
1686 if (st->start_time == AV_NOPTS_VALUE) {
1687 if(ic->start_time != AV_NOPTS_VALUE)
1688 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1689 if(ic->duration != AV_NOPTS_VALUE)
1690 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1695 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1697 int64_t filesize, duration;
1698 int bit_rate, i;
1699 AVStream *st;
1701 /* if bit_rate is already set, we believe it */
1702 if (ic->bit_rate == 0) {
1703 bit_rate = 0;
1704 for(i=0;i<ic->nb_streams;i++) {
1705 st = ic->streams[i];
1706 bit_rate += st->codec->bit_rate;
1708 ic->bit_rate = bit_rate;
1711 /* if duration is already set, we believe it */
1712 if (ic->duration == AV_NOPTS_VALUE &&
1713 ic->bit_rate != 0 &&
1714 ic->file_size != 0) {
1715 filesize = ic->file_size;
1716 if (filesize > 0) {
1717 for(i = 0; i < ic->nb_streams; i++) {
1718 st = ic->streams[i];
1719 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1720 if (st->duration == AV_NOPTS_VALUE)
1721 st->duration = duration;
1727 #define DURATION_MAX_READ_SIZE 250000
1729 /* only usable for MPEG-PS streams */
1730 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1732 AVPacket pkt1, *pkt = &pkt1;
1733 AVStream *st;
1734 int read_size, i, ret;
1735 int64_t end_time;
1736 int64_t filesize, offset, duration;
1738 ic->cur_st = NULL;
1740 /* flush packet queue */
1741 flush_packet_queue(ic);
1743 for(i=0;i<ic->nb_streams;i++) {
1744 st = ic->streams[i];
1745 if (st->parser) {
1746 av_parser_close(st->parser);
1747 st->parser= NULL;
1748 av_free_packet(&st->cur_pkt);
1752 /* we read the first packets to get the first PTS (not fully
1753 accurate, but it is enough now) */
1754 url_fseek(ic->pb, 0, SEEK_SET);
1755 read_size = 0;
1756 for(;;) {
1757 if (read_size >= DURATION_MAX_READ_SIZE)
1758 break;
1759 /* if all info is available, we can stop */
1760 for(i = 0;i < ic->nb_streams; i++) {
1761 st = ic->streams[i];
1762 if (st->start_time == AV_NOPTS_VALUE)
1763 break;
1765 if (i == ic->nb_streams)
1766 break;
1768 do{
1769 ret = av_read_packet(ic, pkt);
1770 }while(ret == AVERROR(EAGAIN));
1771 if (ret != 0)
1772 break;
1773 read_size += pkt->size;
1774 st = ic->streams[pkt->stream_index];
1775 if (pkt->pts != AV_NOPTS_VALUE) {
1776 if (st->start_time == AV_NOPTS_VALUE)
1777 st->start_time = pkt->pts;
1779 av_free_packet(pkt);
1782 /* estimate the end time (duration) */
1783 /* XXX: may need to support wrapping */
1784 filesize = ic->file_size;
1785 offset = filesize - DURATION_MAX_READ_SIZE;
1786 if (offset < 0)
1787 offset = 0;
1789 url_fseek(ic->pb, offset, SEEK_SET);
1790 read_size = 0;
1791 for(;;) {
1792 if (read_size >= DURATION_MAX_READ_SIZE)
1793 break;
1795 do{
1796 ret = av_read_packet(ic, pkt);
1797 }while(ret == AVERROR(EAGAIN));
1798 if (ret != 0)
1799 break;
1800 read_size += pkt->size;
1801 st = ic->streams[pkt->stream_index];
1802 if (pkt->pts != AV_NOPTS_VALUE &&
1803 st->start_time != AV_NOPTS_VALUE) {
1804 end_time = pkt->pts;
1805 duration = end_time - st->start_time;
1806 if (duration > 0) {
1807 if (st->duration == AV_NOPTS_VALUE ||
1808 st->duration < duration)
1809 st->duration = duration;
1812 av_free_packet(pkt);
1815 fill_all_stream_timings(ic);
1817 url_fseek(ic->pb, old_offset, SEEK_SET);
1818 for(i=0; i<ic->nb_streams; i++){
1819 st= ic->streams[i];
1820 st->cur_dts= st->first_dts;
1821 st->last_IP_pts = AV_NOPTS_VALUE;
1825 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1827 int64_t file_size;
1829 /* get the file size, if possible */
1830 if (ic->iformat->flags & AVFMT_NOFILE) {
1831 file_size = 0;
1832 } else {
1833 file_size = url_fsize(ic->pb);
1834 if (file_size < 0)
1835 file_size = 0;
1837 ic->file_size = file_size;
1839 if ((!strcmp(ic->iformat->name, "mpeg") ||
1840 !strcmp(ic->iformat->name, "mpegts")) &&
1841 file_size && !url_is_streamed(ic->pb)) {
1842 /* get accurate estimate from the PTSes */
1843 av_estimate_timings_from_pts(ic, old_offset);
1844 } else if (av_has_duration(ic)) {
1845 /* at least one component has timings - we use them for all
1846 the components */
1847 fill_all_stream_timings(ic);
1848 } else {
1849 /* less precise: use bitrate info */
1850 av_estimate_timings_from_bit_rate(ic);
1852 av_update_stream_timings(ic);
1854 #if 0
1856 int i;
1857 AVStream *st;
1858 for(i = 0;i < ic->nb_streams; i++) {
1859 st = ic->streams[i];
1860 printf("%d: start_time: %0.3f duration: %0.3f\n",
1861 i, (double)st->start_time / AV_TIME_BASE,
1862 (double)st->duration / AV_TIME_BASE);
1864 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1865 (double)ic->start_time / AV_TIME_BASE,
1866 (double)ic->duration / AV_TIME_BASE,
1867 ic->bit_rate / 1000);
1869 #endif
1872 static int has_codec_parameters(AVCodecContext *enc)
1874 int val;
1875 switch(enc->codec_type) {
1876 case CODEC_TYPE_AUDIO:
1877 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1878 if(!enc->frame_size &&
1879 (enc->codec_id == CODEC_ID_VORBIS ||
1880 enc->codec_id == CODEC_ID_AAC ||
1881 enc->codec_id == CODEC_ID_SPEEX))
1882 return 0;
1883 break;
1884 case CODEC_TYPE_VIDEO:
1885 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1886 break;
1887 default:
1888 val = 1;
1889 break;
1891 return enc->codec_id != CODEC_ID_NONE && val != 0;
1894 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1896 int16_t *samples;
1897 AVCodec *codec;
1898 int got_picture, data_size, ret=0;
1899 AVFrame picture;
1901 if(!st->codec->codec){
1902 codec = avcodec_find_decoder(st->codec->codec_id);
1903 if (!codec)
1904 return -1;
1905 ret = avcodec_open(st->codec, codec);
1906 if (ret < 0)
1907 return ret;
1910 if(!has_codec_parameters(st->codec)){
1911 switch(st->codec->codec_type) {
1912 case CODEC_TYPE_VIDEO:
1913 avcodec_get_frame_defaults(&picture);
1914 ret = avcodec_decode_video2(st->codec, &picture,
1915 &got_picture, avpkt);
1916 break;
1917 case CODEC_TYPE_AUDIO:
1918 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1919 samples = av_malloc(data_size);
1920 if (!samples)
1921 goto fail;
1922 ret = avcodec_decode_audio3(st->codec, samples,
1923 &data_size, avpkt);
1924 av_free(samples);
1925 break;
1926 default:
1927 break;
1930 fail:
1931 return ret;
1934 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1936 while (tags->id != CODEC_ID_NONE) {
1937 if (tags->id == id)
1938 return tags->tag;
1939 tags++;
1941 return 0;
1944 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1946 int i;
1947 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1948 if(tag == tags[i].tag)
1949 return tags[i].id;
1951 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1952 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1953 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1954 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1955 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1956 return tags[i].id;
1958 return CODEC_ID_NONE;
1961 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1963 int i;
1964 for(i=0; tags && tags[i]; i++){
1965 int tag= ff_codec_get_tag(tags[i], id);
1966 if(tag) return tag;
1968 return 0;
1971 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1973 int i;
1974 for(i=0; tags && tags[i]; i++){
1975 enum CodecID id= ff_codec_get_id(tags[i], tag);
1976 if(id!=CODEC_ID_NONE) return id;
1978 return CODEC_ID_NONE;
1981 static void compute_chapters_end(AVFormatContext *s)
1983 unsigned int i;
1985 for (i=0; i+1<s->nb_chapters; i++)
1986 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1987 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1988 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1989 s->chapters[i]->end = s->chapters[i+1]->start;
1992 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1993 assert(s->start_time != AV_NOPTS_VALUE);
1994 assert(s->duration > 0);
1995 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1996 AV_TIME_BASE_Q,
1997 s->chapters[i]->time_base);
2001 #define MAX_STD_TIMEBASES (60*12+5)
2002 static int get_std_framerate(int i){
2003 if(i<60*12) return i*1001;
2004 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
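/* Example (illustrative reading): together with duration_error[] below this
 * enumerates the candidate frame rates a stream is tested against during
 * av_find_stream_info(): indices below 60*12 stand for the exact rates
 * i/12 fps (1/12 fps steps up to ~59.9, covering 24, 25, 30, 50, 60), and
 * the last five entries stand for the NTSC-style rates 24/1.001, 30/1.001,
 * 60/1.001, 12/1.001 and 15/1.001 fps. */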
2008 * Is the time base unreliable.
2009 * This is a heuristic to balance between quick acceptance of the values in
2010 * the headers vs. some extra checks.
2011 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2012 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2013 * And there are "variable" fps files this needs to detect as well.
2015 static int tb_unreliable(AVCodecContext *c){
2016 if( c->time_base.den >= 101L*c->time_base.num
2017 || c->time_base.den < 5L*c->time_base.num
2018 /* || c->codec_tag == AV_RL32("DIVX")
2019 || c->codec_tag == AV_RL32("XVID")*/
2020 || c->codec_id == CODEC_ID_MPEG2VIDEO
2021 || c->codec_id == CODEC_ID_H264
2023 return 1;
2024 return 0;
2027 int av_find_stream_info(AVFormatContext *ic)
2029 int i, count, ret, read_size, j;
2030 AVStream *st;
2031 AVPacket pkt1, *pkt;
2032 int64_t last_dts[MAX_STREAMS];
2033 int64_t duration_gcd[MAX_STREAMS]={0};
2034 int duration_count[MAX_STREAMS]={0};
2035 double (*duration_error)[MAX_STD_TIMEBASES];
2036 int64_t old_offset = url_ftell(ic->pb);
2037 int64_t codec_info_duration[MAX_STREAMS]={0};
2038 int codec_info_nb_frames[MAX_STREAMS]={0};
2040 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2041 if (!duration_error) return AVERROR(ENOMEM);
2043 for(i=0;i<ic->nb_streams;i++) {
2044 st = ic->streams[i];
2045 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2046 /* if(!st->time_base.num)
2047 st->time_base= */
2048 if(!st->codec->time_base.num)
2049 st->codec->time_base= st->time_base;
2051 //only for the split stuff
2052 if (!st->parser) {
2053 st->parser = av_parser_init(st->codec->codec_id);
2054 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2055 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2060 for(i=0;i<MAX_STREAMS;i++){
2061 last_dts[i]= AV_NOPTS_VALUE;
2064 count = 0;
2065 read_size = 0;
2066 for(;;) {
2067 if(url_interrupt_cb()){
2068 ret= AVERROR(EINTR);
2069 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2070 break;
2073 /* check if one codec still needs to be handled */
2074 for(i=0;i<ic->nb_streams;i++) {
2075 st = ic->streams[i];
2076 if (!has_codec_parameters(st->codec))
2077 break;
2078 /* variable fps and no guess at the real fps */
2079 if( tb_unreliable(st->codec)
2080 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2081 break;
2082 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2083 break;
2084 if(st->first_dts == AV_NOPTS_VALUE)
2085 break;
2087 if (i == ic->nb_streams) {
2088 /* NOTE: if the format has no header, then we need to read
2089 some packets to get most of the streams, so we cannot
2090 stop here */
2091 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2092 /* if we found the info for all the codecs, we can stop */
2093 ret = count;
2094 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2095 break;
2098 /* we did not get all the codec info, but we read too much data */
2099 if (read_size >= ic->probesize) {
2100 ret = count;
2101 av_log(ic, AV_LOG_WARNING, "MAX_READ_SIZE:%d reached\n", ic->probesize);
2102 break;
2105 /* NOTE: a new stream can be added here if the file has no header
2106 (AVFMTCTX_NOHEADER) */
2107 ret = av_read_frame_internal(ic, &pkt1);
2108 if(ret == AVERROR(EAGAIN))
2109 continue;
2110 if (ret < 0) {
2111 /* EOF or error */
2112 ret = -1; /* we could not have all the codec parameters before EOF */
2113 for(i=0;i<ic->nb_streams;i++) {
2114 st = ic->streams[i];
2115 if (!has_codec_parameters(st->codec)){
2116 char buf[256];
2117 avcodec_string(buf, sizeof(buf), st->codec, 0);
2118 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2119 } else {
2120 ret = 0;
2123 break;
2126 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2127 if(av_dup_packet(pkt) < 0) {
2128 av_free(duration_error);
2129 return AVERROR(ENOMEM);
2132 read_size += pkt->size;
2134 st = ic->streams[pkt->stream_index];
2135 if(codec_info_nb_frames[st->index]>1) {
2136 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2137 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2138 break;
2140 codec_info_duration[st->index] += pkt->duration;
2142 if (pkt->duration != 0)
2143 codec_info_nb_frames[st->index]++;
2146 int index= pkt->stream_index;
2147 int64_t last= last_dts[index];
2148 int64_t duration= pkt->dts - last;
2150 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2151 double dur= duration * av_q2d(st->time_base);
2153 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2154 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2155 if(duration_count[index] < 2)
2156 memset(duration_error[index], 0, sizeof(*duration_error));
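// for every candidate standard frame rate, accumulate the squared error
// between the observed inter-packet duration and the nearest whole number
// of frames at that rate; the best-fitting rate is selected after probing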
2157 for(i=1; i<MAX_STD_TIMEBASES; i++){
2158 int framerate= get_std_framerate(i);
2159 int ticks= lrintf(dur*framerate/(1001*12));
2160 double error= dur - ticks*1001*12/(double)framerate;
2161 duration_error[index][i] += error*error;
2163 duration_count[index]++;
2164 // ignore the first 4 values, they might have some random jitter
2165 if (duration_count[index] > 3)
2166 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
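// duration_gcd converges on the smallest DTS step of the stream and is
// used below to coarsen a time base that is finer than necessary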
2168 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2169 last_dts[pkt->stream_index]= pkt->dts;
2171 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2172 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2173 if(i){
2174 st->codec->extradata_size= i;
2175 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2176 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2177 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2181 /* if still no information, we try to open the codec and to
2182 decompress the frame. We try to avoid that in most cases as
2183 it takes longer and uses more memory. For MPEG-4, we need to
2184 decompress for QuickTime. */
2185 if (!has_codec_parameters(st->codec) /*&&
2186 (st->codec->codec_id == CODEC_ID_FLV1 ||
2187 st->codec->codec_id == CODEC_ID_H264 ||
2188 st->codec->codec_id == CODEC_ID_H263 ||
2189 st->codec->codec_id == CODEC_ID_H261 ||
2190 st->codec->codec_id == CODEC_ID_VORBIS ||
2191 st->codec->codec_id == CODEC_ID_MJPEG ||
2192 st->codec->codec_id == CODEC_ID_PNG ||
2193 st->codec->codec_id == CODEC_ID_PAM ||
2194 st->codec->codec_id == CODEC_ID_PGM ||
2195 st->codec->codec_id == CODEC_ID_PGMYUV ||
2196 st->codec->codec_id == CODEC_ID_PBM ||
2197 st->codec->codec_id == CODEC_ID_PPM ||
2198 st->codec->codec_id == CODEC_ID_SHORTEN ||
2199 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2200 try_decode_frame(st, pkt);
2202 count++;
2205 // close codecs which were opened in try_decode_frame()
2206 for(i=0;i<ic->nb_streams;i++) {
2207 st = ic->streams[i];
2208 if(st->codec->codec)
2209 avcodec_close(st->codec);
2211 for(i=0;i<ic->nb_streams;i++) {
2212 st = ic->streams[i];
2213 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2214 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2215 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2217 // the check for tb_unreliable() is not completely correct, since this is not about handling
2218 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2219 // ipmovie.c produces.
2220 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2221 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2222 if(duration_count[i]
2223 && tb_unreliable(st->codec) /*&&
2224 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2225 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2226 int num = 0;
2227 double best_error= 2*av_q2d(st->time_base);
2228 best_error= best_error*best_error*duration_count[i]*1000*12*30;
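// keep the candidate standard rate with the smallest accumulated timing
// error; the initial best_error is a tolerance derived from the stream
// time base, so r_frame_rate is only changed when a candidate beats it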
2230 for(j=1; j<MAX_STD_TIMEBASES; j++){
2231 double error= duration_error[i][j] * get_std_framerate(j);
2232 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2233 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2234 if(error < best_error){
2235 best_error= error;
2236 num = get_std_framerate(j);
2239 // do not increase frame rate by more than 1 % in order to match a standard rate.
2240 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2241 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
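// if no rate could be guessed, fall back to the lower of the codec frame
// rate (taking ticks_per_frame into account) and the container tick rate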
2244 if (!st->r_frame_rate.num){
2245 if( st->codec->time_base.den * (int64_t)st->time_base.num
2246 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2247 st->r_frame_rate.num = st->codec->time_base.den;
2248 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2249 }else{
2250 st->r_frame_rate.num = st->time_base.den;
2251 st->r_frame_rate.den = st->time_base.num;
2254 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2255 if(!st->codec->bits_per_coded_sample)
2256 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2260 av_estimate_timings(ic, old_offset);
2262 compute_chapters_end(ic);
2264 #if 0
2265 /* correct DTS for B-frame streams with no timestamps */
2266 for(i=0;i<ic->nb_streams;i++) {
2267 st = ic->streams[i];
2268 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2269 if(b-frames){
2270 ppktl = &ic->packet_buffer;
2271 while(ppkt1){
2272 if(ppkt1->stream_index != i)
2273 continue;
2274 if(ppkt1->pkt->dts < 0)
2275 break;
2276 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2277 break;
2278 ppkt1->pkt->dts -= delta;
2279 ppkt1= ppkt1->next;
2281 if(ppkt1)
2282 continue;
2283 st->cur_dts -= delta;
2287 #endif
2289 av_free(duration_error);
2291 return ret;
2294 /*******************************************************/
2296 int av_read_play(AVFormatContext *s)
2298 if (s->iformat->read_play)
2299 return s->iformat->read_play(s);
2300 if (s->pb)
2301 return av_url_read_fpause(s->pb, 0);
2302 return AVERROR(ENOSYS);
2305 int av_read_pause(AVFormatContext *s)
2307 if (s->iformat->read_pause)
2308 return s->iformat->read_pause(s);
2309 if (s->pb)
2310 return av_url_read_fpause(s->pb, 1);
2311 return AVERROR(ENOSYS);
2314 void av_close_input_stream(AVFormatContext *s)
2316 int i;
2317 AVStream *st;
2319 if (s->iformat->read_close)
2320 s->iformat->read_close(s);
2321 for(i=0;i<s->nb_streams;i++) {
2322 /* free all data in a stream component */
2323 st = s->streams[i];
2324 if (st->parser) {
2325 av_parser_close(st->parser);
2326 av_free_packet(&st->cur_pkt);
2328 av_metadata_free(&st->metadata);
2329 av_free(st->index_entries);
2330 av_free(st->codec->extradata);
2331 av_free(st->codec);
2332 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2333 av_free(st->filename);
2334 #endif
2335 av_free(st->priv_data);
2336 av_free(st);
2338 for(i=s->nb_programs-1; i>=0; i--) {
2339 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2340 av_freep(&s->programs[i]->provider_name);
2341 av_freep(&s->programs[i]->name);
2342 #endif
2343 av_metadata_free(&s->programs[i]->metadata);
2344 av_freep(&s->programs[i]->stream_index);
2345 av_freep(&s->programs[i]);
2347 av_freep(&s->programs);
2348 flush_packet_queue(s);
2349 av_freep(&s->priv_data);
2350 while(s->nb_chapters--) {
2351 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2352 av_free(s->chapters[s->nb_chapters]->title);
2353 #endif
2354 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2355 av_free(s->chapters[s->nb_chapters]);
2357 av_freep(&s->chapters);
2358 av_metadata_free(&s->metadata);
2359 av_free(s);
2362 void av_close_input_file(AVFormatContext *s)
2364 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2365 av_close_input_stream(s);
2366 if (pb)
2367 url_fclose(pb);
2370 AVStream *av_new_stream(AVFormatContext *s, int id)
2372 AVStream *st;
2373 int i;
2375 if (s->nb_streams >= MAX_STREAMS)
2376 return NULL;
2378 st = av_mallocz(sizeof(AVStream));
2379 if (!st)
2380 return NULL;
2382 st->codec= avcodec_alloc_context();
2383 if (s->iformat) {
2384 /* no default bitrate if decoding */
2385 st->codec->bit_rate = 0;
2387 st->index = s->nb_streams;
2388 st->id = id;
2389 st->start_time = AV_NOPTS_VALUE;
2390 st->duration = AV_NOPTS_VALUE;
2391 /* we set the current DTS to 0 so that formats without any timestamps
2392 but with durations still get some timestamps; formats with some unknown
2393 timestamps have their first few packets buffered and the
2394 timestamps corrected before they are returned to the user */
2395 st->cur_dts = 0;
2396 st->first_dts = AV_NOPTS_VALUE;
2397 st->probe_packets = MAX_PROBE_PACKETS;
2399 /* default pts setting is MPEG-like */
2400 av_set_pts_info(st, 33, 1, 90000);
2401 st->last_IP_pts = AV_NOPTS_VALUE;
2402 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2403 st->pts_buffer[i]= AV_NOPTS_VALUE;
2404 st->reference_dts = AV_NOPTS_VALUE;
2406 st->sample_aspect_ratio = (AVRational){0,1};
2408 s->streams[s->nb_streams++] = st;
2409 return st;
2412 AVProgram *av_new_program(AVFormatContext *ac, int id)
2414 AVProgram *program=NULL;
2415 int i;
2417 #ifdef DEBUG_SI
2418 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2419 #endif
2421 for(i=0; i<ac->nb_programs; i++)
2422 if(ac->programs[i]->id == id)
2423 program = ac->programs[i];
2425 if(!program){
2426 program = av_mallocz(sizeof(AVProgram));
2427 if (!program)
2428 return NULL;
2429 dynarray_add(&ac->programs, &ac->nb_programs, program);
2430 program->discard = AVDISCARD_NONE;
2432 program->id = id;
2434 return program;
2437 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2439 AVChapter *chapter = NULL;
2440 int i;
2442 for(i=0; i<s->nb_chapters; i++)
2443 if(s->chapters[i]->id == id)
2444 chapter = s->chapters[i];
2446 if(!chapter){
2447 chapter= av_mallocz(sizeof(AVChapter));
2448 if(!chapter)
2449 return NULL;
2450 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2452 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2453 av_free(chapter->title);
2454 #endif
2455 av_metadata_set(&chapter->metadata, "title", title);
2456 chapter->id = id;
2457 chapter->time_base= time_base;
2458 chapter->start = start;
2459 chapter->end = end;
2461 return chapter;
2464 /************************************************************/
2465 /* output media file */
2467 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2469 int ret;
2471 if (s->oformat->priv_data_size > 0) {
2472 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2473 if (!s->priv_data)
2474 return AVERROR(ENOMEM);
2475 } else
2476 s->priv_data = NULL;
2478 if (s->oformat->set_parameters) {
2479 ret = s->oformat->set_parameters(s, ap);
2480 if (ret < 0)
2481 return ret;
2483 return 0;
2486 int av_write_header(AVFormatContext *s)
2488 int ret, i;
2489 AVStream *st;
2491 // some sanity checks
2492 for(i=0;i<s->nb_streams;i++) {
2493 st = s->streams[i];
2495 switch (st->codec->codec_type) {
2496 case CODEC_TYPE_AUDIO:
2497 if(st->codec->sample_rate<=0){
2498 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2499 return -1;
2501 if(!st->codec->block_align)
2502 st->codec->block_align = st->codec->channels *
2503 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2504 break;
2505 case CODEC_TYPE_VIDEO:
2506 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2507 av_log(s, AV_LOG_ERROR, "time base not set\n");
2508 return -1;
2510 if(st->codec->width<=0 || st->codec->height<=0){
2511 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2512 return -1;
2514 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2515 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2516 return -1;
2518 break;
2521 if(s->oformat->codec_tag){
2522 if(st->codec->codec_tag){
2523 //FIXME
2524 //check that tag + id is in the table
2525 //if neither is in the table -> OK
2526 //if tag is in the table with another id -> FAIL
2527 //if id is in the table with another tag -> FAIL unless strict < ?
2528 }else
2529 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2532 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2533 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2534 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2537 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2538 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2539 if (!s->priv_data)
2540 return AVERROR(ENOMEM);
2543 #if LIBAVFORMAT_VERSION_MAJOR < 53
2544 ff_metadata_mux_compat(s);
2545 #endif
2547 if(s->oformat->write_header){
2548 ret = s->oformat->write_header(s);
2549 if (ret < 0)
2550 return ret;
2553 /* init PTS generation */
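/* st->pts counts fractions of a stream time-base unit; choosing
   den = time_base.num * sample_rate (audio) or
   den = time_base.num * codec time_base.den (video) lets whole frames be
   added later via av_frac_add() without rounding error. */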
2554 for(i=0;i<s->nb_streams;i++) {
2555 int64_t den = AV_NOPTS_VALUE;
2556 st = s->streams[i];
2558 switch (st->codec->codec_type) {
2559 case CODEC_TYPE_AUDIO:
2560 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2561 break;
2562 case CODEC_TYPE_VIDEO:
2563 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2564 break;
2565 default:
2566 break;
2568 if (den != AV_NOPTS_VALUE) {
2569 if (den <= 0)
2570 return AVERROR_INVALIDDATA;
2571 av_frac_init(&st->pts, 0, 0, den);
2574 return 0;
2577 //FIXME merge with compute_pkt_fields
2578 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2579 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2580 int num, den, frame_size, i;
2582 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2584 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2585 return -1;*/
2587 /* duration field */
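// if the caller did not set a duration, derive one from
// compute_frame_duration(), rescaled (with ticks_per_frame) into stream
// time-base units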
2588 if (pkt->duration == 0) {
2589 compute_frame_duration(&num, &den, st, NULL, pkt);
2590 if (den && num) {
2591 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2595 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2596 pkt->pts= pkt->dts;
2598 //XXX/FIXME this is a temporary hack until all encoders output pts
2599 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2600 pkt->dts=
2601 // pkt->pts= st->cur_dts;
2602 pkt->pts= st->pts.val;
2605 //calculate dts from pts
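// pts_buffer keeps the last delay+1 PTS values in sorted order (missing
// slots are primed with negative multiples of the packet duration); after
// inserting the new PTS, the smallest buffered value becomes the DTS,
// which delays it by the B-frame reorder depth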
2606 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2607 st->pts_buffer[0]= pkt->pts;
2608 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2609 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2610 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2611 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2613 pkt->dts= st->pts_buffer[0];
2616 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2617 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2618 return -1;
2620 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2621 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2622 return -1;
2625 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2626 st->cur_dts= pkt->dts;
2627 st->pts.val= pkt->dts;
2629 /* update pts */
2630 switch (st->codec->codec_type) {
2631 case CODEC_TYPE_AUDIO:
2632 frame_size = get_audio_frame_size(st->codec, pkt->size);
2634 /* HACK/FIXME, we skip the initial zero-sized packets as they are most
2635 likely equal to the encoder delay, but it would be better if we
2636 had the real timestamps from the encoder */
2637 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2638 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2640 break;
2641 case CODEC_TYPE_VIDEO:
2642 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2643 break;
2644 default:
2645 break;
2647 return 0;
2650 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2652 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2654 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2655 return ret;
2657 ret= s->oformat->write_packet(s, pkt);
2658 if(!ret)
2659 ret= url_ferror(s->pb);
2660 return ret;
2663 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2664 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2666 AVPacketList **next_point, *this_pktl;
2668 this_pktl = av_mallocz(sizeof(AVPacketList));
2669 this_pktl->pkt= *pkt;
2670 pkt->destruct= NULL; // do not free original but only the copy
2671 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
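// the search for the insertion point starts after the last buffered packet
// of the same stream; if the queue tail already sorts after the new packet
// a linear scan from there finds the spot, otherwise append at the tail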
2673 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2674 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2675 }else
2676 next_point = &s->packet_buffer;
2678 if(*next_point){
2679 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2680 while(!compare(s, &(*next_point)->pkt, pkt)){
2681 next_point= &(*next_point)->next;
2683 goto next_non_null;
2684 }else{
2685 next_point = &(s->packet_buffer_end->next);
2688 assert(!*next_point);
2690 s->packet_buffer_end= this_pktl;
2691 next_non_null:
2693 this_pktl->next= *next_point;
2695 s->streams[pkt->stream_index]->last_in_packet_buffer=
2696 *next_point= this_pktl;
2699 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2701 AVStream *st = s->streams[ pkt ->stream_index];
2702 AVStream *st2= s->streams[ next->stream_index];
2703 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2704 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
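// cross-multiply with the other stream's time base so DTS values from
// streams with different time bases can be compared without division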
2706 if (pkt->dts == AV_NOPTS_VALUE)
2707 return 0;
2709 return next->dts * left > pkt->dts * right; //FIXME this can overflow
2712 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2713 AVPacketList *pktl;
2714 int stream_count=0;
2715 int i;
2717 if(pkt){
2718 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2721 for(i=0; i < s->nb_streams; i++)
2722 stream_count+= !!s->streams[i]->last_in_packet_buffer;
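// only hand out the head of the queue once every stream has at least one
// packet buffered (or flush is set), so packets come out interleaved by DTS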
2724 if(stream_count && (s->nb_streams == stream_count || flush)){
2725 pktl= s->packet_buffer;
2726 *out= pktl->pkt;
2728 s->packet_buffer= pktl->next;
2729 if(!s->packet_buffer)
2730 s->packet_buffer_end= NULL;
2732 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2733 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
2734 av_freep(&pktl);
2735 return 1;
2736 }else{
2737 av_init_packet(out);
2738 return 0;
2743 * Interleaves an AVPacket correctly so it can be muxed.
2744 * @param out the interleaved packet will be output here
2745 * @param in the input packet
2746 * @param flush 1 if no further packets are available as input and all
2747 * remaining packets should be output
2748 * @return 1 if a packet was output, 0 if no packet could be output,
2749 * < 0 if an error occurred
2751 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2752 if(s->oformat->interleave_packet)
2753 return s->oformat->interleave_packet(s, out, in, flush);
2754 else
2755 return av_interleave_packet_per_dts(s, out, in, flush);
2758 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2759 AVStream *st= s->streams[ pkt->stream_index];
2761 //FIXME/XXX/HACK drop zero-sized packets
2762 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2763 return 0;
2765 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2766 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2767 return -1;
2769 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2770 return -1;
2772 for(;;){
2773 AVPacket opkt;
2774 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2775 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2776 return ret;
2778 ret= s->oformat->write_packet(s, &opkt);
2780 av_free_packet(&opkt);
2781 pkt= NULL;
2783 if(ret<0)
2784 return ret;
2785 if(url_ferror(s->pb))
2786 return url_ferror(s->pb);
2790 int av_write_trailer(AVFormatContext *s)
2792 int ret, i;
2794 for(;;){
2795 AVPacket pkt;
2796 ret= av_interleave_packet(s, &pkt, NULL, 1);
2797 if(ret<0) //FIXME cleanup needed for ret<0 ?
2798 goto fail;
2799 if(!ret)
2800 break;
2802 ret= s->oformat->write_packet(s, &pkt);
2804 av_free_packet(&pkt);
2806 if(ret<0)
2807 goto fail;
2808 if(url_ferror(s->pb))
2809 goto fail;
2812 if(s->oformat->write_trailer)
2813 ret = s->oformat->write_trailer(s);
2814 fail:
2815 if(ret == 0)
2816 ret=url_ferror(s->pb);
2817 for(i=0;i<s->nb_streams;i++)
2818 av_freep(&s->streams[i]->priv_data);
2819 av_freep(&s->priv_data);
2820 return ret;
2823 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2825 int i, j;
2826 AVProgram *program=NULL;
2827 void *tmp;
2829 for(i=0; i<ac->nb_programs; i++){
2830 if(ac->programs[i]->id != progid)
2831 continue;
2832 program = ac->programs[i];
2833 for(j=0; j<program->nb_stream_indexes; j++)
2834 if(program->stream_index[j] == idx)
2835 return;
2837 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2838 if(!tmp)
2839 return;
2840 program->stream_index = tmp;
2841 program->stream_index[program->nb_stream_indexes++] = idx;
2842 return;
2846 static void print_fps(double d, const char *postfix){
2847 uint64_t v= lrintf(d*100);
2848 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2849 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2850 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2853 /* "user interface" functions */
2854 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2856 char buf[256];
2857 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2858 AVStream *st = ic->streams[i];
2859 int g = av_gcd(st->time_base.num, st->time_base.den);
2860 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2861 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2862 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2863 /* the PID is important information, so we display it */
2864 /* XXX: add a generic system */
2865 if (flags & AVFMT_SHOW_IDS)
2866 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2867 if (lang)
2868 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2869 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2870 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2871 if (st->sample_aspect_ratio.num && // default
2872 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2873 AVRational display_aspect_ratio;
2874 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2875 st->codec->width*st->sample_aspect_ratio.num,
2876 st->codec->height*st->sample_aspect_ratio.den,
2877 1024*1024);
2878 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2879 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2880 display_aspect_ratio.num, display_aspect_ratio.den);
2882 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2883 if(st->r_frame_rate.den && st->r_frame_rate.num)
2884 print_fps(av_q2d(st->r_frame_rate), "tbr");
2885 if(st->time_base.den && st->time_base.num)
2886 print_fps(1/av_q2d(st->time_base), "tbn");
2887 if(st->codec->time_base.den && st->codec->time_base.num)
2888 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2890 av_log(NULL, AV_LOG_INFO, "\n");
2893 void dump_format(AVFormatContext *ic,
2894 int index,
2895 const char *url,
2896 int is_output)
2898 int i;
2900 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2901 is_output ? "Output" : "Input",
2902 index,
2903 is_output ? ic->oformat->name : ic->iformat->name,
2904 is_output ? "to" : "from", url);
2905 if (!is_output) {
2906 av_log(NULL, AV_LOG_INFO, " Duration: ");
2907 if (ic->duration != AV_NOPTS_VALUE) {
2908 int hours, mins, secs, us;
2909 secs = ic->duration / AV_TIME_BASE;
2910 us = ic->duration % AV_TIME_BASE;
2911 mins = secs / 60;
2912 secs %= 60;
2913 hours = mins / 60;
2914 mins %= 60;
2915 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2916 (100 * us) / AV_TIME_BASE);
2917 } else {
2918 av_log(NULL, AV_LOG_INFO, "N/A");
2920 if (ic->start_time != AV_NOPTS_VALUE) {
2921 int secs, us;
2922 av_log(NULL, AV_LOG_INFO, ", start: ");
2923 secs = ic->start_time / AV_TIME_BASE;
2924 us = ic->start_time % AV_TIME_BASE;
2925 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2926 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2928 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2929 if (ic->bit_rate) {
2930 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2931 } else {
2932 av_log(NULL, AV_LOG_INFO, "N/A");
2934 av_log(NULL, AV_LOG_INFO, "\n");
2936 if(ic->nb_programs) {
2937 int j, k;
2938 for(j=0; j<ic->nb_programs; j++) {
2939 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2940 "name", NULL, 0);
2941 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2942 name ? name->value : "");
2943 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2944 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2946 } else
2947 for(i=0;i<ic->nb_streams;i++)
2948 dump_stream_format(ic, i, index, is_output);
2949 if (ic->metadata) {
2950 AVMetadataTag *tag=NULL;
2951 av_log(NULL, AV_LOG_INFO, " Metadata\n");
2952 while((tag=av_metadata_get(ic->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
2953 av_log(NULL, AV_LOG_INFO, " %-16s: %s\n", tag->key, tag->value);
2959 #if LIBAVFORMAT_VERSION_MAJOR < 53
2960 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2962 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2965 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2967 AVRational frame_rate;
2968 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2969 *frame_rate_num= frame_rate.num;
2970 *frame_rate_den= frame_rate.den;
2971 return ret;
2973 #endif
2975 int64_t av_gettime(void)
2977 struct timeval tv;
2978 gettimeofday(&tv,NULL);
2979 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
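/* parse_date() accepts absolute dates such as "2009-06-15T12:30:00" (or
   "now"; a trailing 'Z' selects UTC) and, with duration set, durations
   such as "12:34:56", "123" or "-7.5"; the result is in microseconds,
   or INT64_MIN on failure. */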
2982 int64_t parse_date(const char *datestr, int duration)
2984 const char *p;
2985 int64_t t;
2986 struct tm dt;
2987 int i;
2988 static const char * const date_fmt[] = {
2989 "%Y-%m-%d",
2990 "%Y%m%d",
2992 static const char * const time_fmt[] = {
2993 "%H:%M:%S",
2994 "%H%M%S",
2996 const char *q;
2997 int is_utc, len;
2998 char lastch;
2999 int negative = 0;
3001 #undef time
3002 time_t now = time(0);
3004 len = strlen(datestr);
3005 if (len > 0)
3006 lastch = datestr[len - 1];
3007 else
3008 lastch = '\0';
3009 is_utc = (lastch == 'z' || lastch == 'Z');
3011 memset(&dt, 0, sizeof(dt));
3013 p = datestr;
3014 q = NULL;
3015 if (!duration) {
3016 if (!strncasecmp(datestr, "now", len))
3017 return (int64_t) now * 1000000;
3019 /* parse the year-month-day part */
3020 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
3021 q = small_strptime(p, date_fmt[i], &dt);
3022 if (q) {
3023 break;
3027 /* if the year-month-day part is missing, then take the
3028 * current year-month-day time */
3029 if (!q) {
3030 if (is_utc) {
3031 dt = *gmtime(&now);
3032 } else {
3033 dt = *localtime(&now);
3035 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
3036 } else {
3037 p = q;
3040 if (*p == 'T' || *p == 't' || *p == ' ')
3041 p++;
3043 /* parse the hour-minute-second part */
3044 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3045 q = small_strptime(p, time_fmt[i], &dt);
3046 if (q) {
3047 break;
3050 } else {
3051 /* parse datestr as a duration */
3052 if (p[0] == '-') {
3053 negative = 1;
3054 ++p;
3056 /* parse datestr as HH:MM:SS */
3057 q = small_strptime(p, time_fmt[0], &dt);
3058 if (!q) {
3059 /* parse datestr as S+ */
3060 dt.tm_sec = strtol(p, (char **)&q, 10);
3061 if (q == p)
3062 /* the parsing didn't succeed */
3063 return INT64_MIN;
3064 dt.tm_min = 0;
3065 dt.tm_hour = 0;
3069 /* Now we have all the fields that we can get */
3070 if (!q) {
3071 return INT64_MIN;
3074 if (duration) {
3075 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3076 } else {
3077 dt.tm_isdst = -1; /* unknown */
3078 if (is_utc) {
3079 t = mktimegm(&dt);
3080 } else {
3081 t = mktime(&dt);
3085 t *= 1000000;
3087 /* parse the .m... part */
3088 if (*q == '.') {
3089 int val, n;
3090 q++;
3091 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3092 if (!isdigit(*q))
3093 break;
3094 val += n * (*q - '0');
3096 t += val;
3098 return negative ? -t : t;
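/* find_info_tag() scans a '?key=value&key=value' string: e.g. with
   info = "?title=My+Movie&id=3", looking up "title" returns 1 and stores
   "My Movie" in arg ('+' is decoded as a space). */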
3101 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3103 const char *p;
3104 char tag[128], *q;
3106 p = info;
3107 if (*p == '?')
3108 p++;
3109 for(;;) {
3110 q = tag;
3111 while (*p != '\0' && *p != '=' && *p != '&') {
3112 if ((q - tag) < sizeof(tag) - 1)
3113 *q++ = *p;
3114 p++;
3116 *q = '\0';
3117 q = arg;
3118 if (*p == '=') {
3119 p++;
3120 while (*p != '&' && *p != '\0') {
3121 if ((q - arg) < arg_size - 1) {
3122 if (*p == '+')
3123 *q++ = ' ';
3124 else
3125 *q++ = *p;
3127 p++;
3129 *q = '\0';
3131 if (!strcmp(tag, tag1))
3132 return 1;
3133 if (*p != '&')
3134 break;
3135 p++;
3137 return 0;
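/* av_get_frame_filename() substitutes the frame number for the single
   %d (or %0Nd) sequence in path: e.g. path "img-%04d.png" and number 7
   yield "img-0007.png"; paths with no such sequence, or more than one,
   make it fail with -1. */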
3140 int av_get_frame_filename(char *buf, int buf_size,
3141 const char *path, int number)
3143 const char *p;
3144 char *q, buf1[20], c;
3145 int nd, len, percentd_found;
3147 q = buf;
3148 p = path;
3149 percentd_found = 0;
3150 for(;;) {
3151 c = *p++;
3152 if (c == '\0')
3153 break;
3154 if (c == '%') {
3155 do {
3156 nd = 0;
3157 while (isdigit(*p)) {
3158 nd = nd * 10 + *p++ - '0';
3160 c = *p++;
3161 } while (isdigit(c));
3163 switch(c) {
3164 case '%':
3165 goto addchar;
3166 case 'd':
3167 if (percentd_found)
3168 goto fail;
3169 percentd_found = 1;
3170 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3171 len = strlen(buf1);
3172 if ((q - buf + len) > buf_size - 1)
3173 goto fail;
3174 memcpy(q, buf1, len);
3175 q += len;
3176 break;
3177 default:
3178 goto fail;
3180 } else {
3181 addchar:
3182 if ((q - buf) < buf_size - 1)
3183 *q++ = c;
3186 if (!percentd_found)
3187 goto fail;
3188 *q = '\0';
3189 return 0;
3190 fail:
3191 *q = '\0';
3192 return -1;
3195 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3197 int len, i, j, c;
3198 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3200 for(i=0;i<size;i+=16) {
3201 len = size - i;
3202 if (len > 16)
3203 len = 16;
3204 PRINT("%08x ", i);
3205 for(j=0;j<16;j++) {
3206 if (j < len)
3207 PRINT(" %02x", buf[i+j]);
3208 else
3209 PRINT(" ");
3211 PRINT(" ");
3212 for(j=0;j<len;j++) {
3213 c = buf[i+j];
3214 if (c < ' ' || c > '~')
3215 c = '.';
3216 PRINT("%c", c);
3218 PRINT("\n");
3220 #undef PRINT
3223 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3225 hex_dump_internal(NULL, f, 0, buf, size);
3228 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3230 hex_dump_internal(avcl, NULL, level, buf, size);
3233 //FIXME needs to know the time_base
3234 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3236 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3237 PRINT("stream #%d:\n", pkt->stream_index);
3238 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3239 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3240 /* DTS is _always_ valid after av_read_frame() */
3241 PRINT(" dts=");
3242 if (pkt->dts == AV_NOPTS_VALUE)
3243 PRINT("N/A");
3244 else
3245 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3246 /* PTS may not be known if B-frames are present. */
3247 PRINT(" pts=");
3248 if (pkt->pts == AV_NOPTS_VALUE)
3249 PRINT("N/A");
3250 else
3251 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3252 PRINT("\n");
3253 PRINT(" size=%d\n", pkt->size);
3254 #undef PRINT
3255 if (dump_payload)
3256 av_hex_dump(f, pkt->data, pkt->size);
3259 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3261 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3264 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3266 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
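/* url_split() splits e.g. "http://user:pass@host.example.com:8080/live?x=1"
   into proto "http", authorization "user:pass", hostname "host.example.com",
   port 8080 and path "/live?x=1"; missing pieces are returned as empty
   strings (the port as -1). */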
3269 void url_split(char *proto, int proto_size,
3270 char *authorization, int authorization_size,
3271 char *hostname, int hostname_size,
3272 int *port_ptr,
3273 char *path, int path_size,
3274 const char *url)
3276 const char *p, *ls, *at, *col, *brk;
3278 if (port_ptr) *port_ptr = -1;
3279 if (proto_size > 0) proto[0] = 0;
3280 if (authorization_size > 0) authorization[0] = 0;
3281 if (hostname_size > 0) hostname[0] = 0;
3282 if (path_size > 0) path[0] = 0;
3284 /* parse protocol */
3285 if ((p = strchr(url, ':'))) {
3286 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3287 p++; /* skip ':' */
3288 if (*p == '/') p++;
3289 if (*p == '/') p++;
3290 } else {
3291 /* no protocol means plain filename */
3292 av_strlcpy(path, url, path_size);
3293 return;
3296 /* separate path from hostname */
3297 ls = strchr(p, '/');
3298 if(!ls)
3299 ls = strchr(p, '?');
3300 if(ls)
3301 av_strlcpy(path, ls, path_size);
3302 else
3303 ls = &p[strlen(p)]; // XXX
3305 /* the rest is hostname, use that to parse auth/port */
3306 if (ls != p) {
3307 /* authorization (user[:pass]@hostname) */
3308 if ((at = strchr(p, '@')) && at < ls) {
3309 av_strlcpy(authorization, p,
3310 FFMIN(authorization_size, at + 1 - p));
3311 p = at + 1; /* skip '@' */
3314 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3315 /* [host]:port */
3316 av_strlcpy(hostname, p + 1,
3317 FFMIN(hostname_size, brk - p));
3318 if (brk[1] == ':' && port_ptr)
3319 *port_ptr = atoi(brk + 2);
3320 } else if ((col = strchr(p, ':')) && col < ls) {
3321 av_strlcpy(hostname, p,
3322 FFMIN(col + 1 - p, hostname_size));
3323 if (port_ptr) *port_ptr = atoi(col + 1);
3324 } else
3325 av_strlcpy(hostname, p,
3326 FFMIN(ls + 1 - p, hostname_size));
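/* ff_data_to_hex() writes the s input bytes as 2*s uppercase hex digits;
   note that it does not NUL-terminate buff, the caller has to do that. */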
3330 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3332 int i;
3333 static const char hex_table[16] = { '0', '1', '2', '3',
3334 '4', '5', '6', '7',
3335 '8', '9', 'A', 'B',
3336 'C', 'D', 'E', 'F' };
3338 for(i = 0; i < s; i++) {
3339 buff[i * 2] = hex_table[src[i] >> 4];
3340 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3343 return buff;
3346 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3347 unsigned int pts_num, unsigned int pts_den)
3349 s->pts_wrap_bits = pts_wrap_bits;
3351 if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
3352 if(s->time_base.num != pts_num)
3353 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
3354 }else
3355 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3357 if(!s->time_base.num || !s->time_base.den)
3358 s->time_base.num= s->time_base.den= 0;