// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/frame_processor.h"

#include <cstdlib>

#include "base/stl_util.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"

namespace media {

// Helper class to capture per-track details needed by a frame processor. Some
// of this information may be duplicated in the short-term in the associated
// ChunkDemuxerStream and SourceBufferStream for a track.
// This parallels each of a SourceBuffer's track buffers in the MSE spec at
// http://www.w3.org/TR/media-source/#track-buffers.
class MseTrackBuffer {
 public:
  explicit MseTrackBuffer(ChunkDemuxerStream* stream);
  ~MseTrackBuffer();

  // Get/set |last_decode_timestamp_|.
  DecodeTimestamp last_decode_timestamp() const {
    return last_decode_timestamp_;
  }
  void set_last_decode_timestamp(DecodeTimestamp timestamp) {
    last_decode_timestamp_ = timestamp;
  }

  // Get/set |last_frame_duration_|.
  base::TimeDelta last_frame_duration() const {
    return last_frame_duration_;
  }
  void set_last_frame_duration(base::TimeDelta duration) {
    last_frame_duration_ = duration;
  }

  // Gets |highest_presentation_timestamp_|.
  base::TimeDelta highest_presentation_timestamp() const {
    return highest_presentation_timestamp_;
  }

  // Get/set |needs_random_access_point_|.
  bool needs_random_access_point() const {
    return needs_random_access_point_;
  }
  void set_needs_random_access_point(bool needs_random_access_point) {
    needs_random_access_point_ = needs_random_access_point;
  }

  // Gets a pointer to this track's ChunkDemuxerStream.
  ChunkDemuxerStream* stream() const { return stream_; }

  // Unsets |last_decode_timestamp_|, unsets |last_frame_duration_|,
  // unsets |highest_presentation_timestamp_|, and sets
  // |needs_random_access_point_| to true.
  void Reset();

  // If |highest_presentation_timestamp_| is unset or |timestamp| is greater
  // than |highest_presentation_timestamp_|, sets
  // |highest_presentation_timestamp_| to |timestamp|. Note that bidirectional
  // prediction between coded frames can cause |timestamp| to not be
  // monotonically increasing even though the decode timestamps are
  // monotonically increasing.
  void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);

  // Adds |frame| to the end of |processed_frames_|.
  void EnqueueProcessedFrame(const scoped_refptr<StreamParserBuffer>& frame);

  // Appends |processed_frames_|, if not empty, to |stream_| and clears
  // |processed_frames_|. Returns false if the append failed, true otherwise.
  // |processed_frames_| is cleared in both cases.
  bool FlushProcessedFrames();

 private:
  // The decode timestamp of the last coded frame appended in the current coded
  // frame group. Initially kNoDecodeTimestamp(), meaning "unset".
  DecodeTimestamp last_decode_timestamp_;

  // The coded frame duration of the last coded frame appended in the current
  // coded frame group. Initially kNoTimestamp(), meaning "unset".
  base::TimeDelta last_frame_duration_;

  // The highest presentation timestamp encountered in a coded frame appended
  // in the current coded frame group. Initially kNoTimestamp(), meaning
  // "unset".
  base::TimeDelta highest_presentation_timestamp_;

  // Keeps track of whether the track buffer is waiting for a random access
  // point coded frame. Initially set to true to indicate that a random access
  // point coded frame is needed before anything can be added to the track
  // buffer.
  bool needs_random_access_point_;

  // Pointer to the stream associated with this track. The stream is not owned
  // by |this|.
  ChunkDemuxerStream* const stream_;

  // Queue of processed frames that have not yet been appended to |stream_|.
  // EnqueueProcessedFrame() adds to this queue, and FlushProcessedFrames()
  // clears it.
  StreamParser::BufferQueue processed_frames_;

  DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer);
};

MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream)
    : last_decode_timestamp_(kNoDecodeTimestamp()),
      last_frame_duration_(kNoTimestamp()),
      highest_presentation_timestamp_(kNoTimestamp()),
      needs_random_access_point_(true),
      stream_(stream) {
  DCHECK(stream_);
}

MseTrackBuffer::~MseTrackBuffer() {
  DVLOG(2) << __FUNCTION__ << "()";
}

void MseTrackBuffer::Reset() {
  DVLOG(2) << __FUNCTION__ << "()";

  last_decode_timestamp_ = kNoDecodeTimestamp();
  last_frame_duration_ = kNoTimestamp();
  highest_presentation_timestamp_ = kNoTimestamp();
  needs_random_access_point_ = true;
}

void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
    base::TimeDelta timestamp) {
  if (highest_presentation_timestamp_ == kNoTimestamp() ||
      timestamp > highest_presentation_timestamp_) {
    highest_presentation_timestamp_ = timestamp;
  }
}

void MseTrackBuffer::EnqueueProcessedFrame(
    const scoped_refptr<StreamParserBuffer>& frame) {
  processed_frames_.push_back(frame);
}

bool MseTrackBuffer::FlushProcessedFrames() {
  if (processed_frames_.empty())
    return true;

  bool result = stream_->Append(processed_frames_);
  processed_frames_.clear();
  DVLOG_IF(3, !result) << __FUNCTION__
                       << "(): Failure appending processed frames to stream";

  return result;
}

FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb)
    : sequence_mode_(false),
      group_start_timestamp_(kNoTimestamp()),
      update_duration_cb_(update_duration_cb) {
  DVLOG(2) << __FUNCTION__ << "()";
  DCHECK(!update_duration_cb.is_null());
}

FrameProcessor::~FrameProcessor() {
  DVLOG(2) << __FUNCTION__ << "()";
  STLDeleteValues(&track_buffers_);
}

void FrameProcessor::SetSequenceMode(bool sequence_mode) {
  DVLOG(2) << __FUNCTION__ << "(" << sequence_mode << ")";

  // Per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/media-source.html#widl-SourceBuffer-mode
  // Step 7: If the new mode equals "sequence", then set the group start
  // timestamp to the group end timestamp.
  if (sequence_mode) {
    DCHECK(kNoTimestamp() != group_end_timestamp_);
    group_start_timestamp_ = group_end_timestamp_;
  }

  // Step 8: Update the attribute to the new mode.
  sequence_mode_ = sequence_mode;
}

bool FrameProcessor::ProcessFrames(
    const StreamParser::BufferQueue& audio_buffers,
    const StreamParser::BufferQueue& video_buffers,
    const StreamParser::TextBufferQueueMap& text_map,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    bool* new_media_segment,
    base::TimeDelta* timestamp_offset) {
  StreamParser::BufferQueue frames;
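  // Merge the parser's audio, video, and text buffers into a single queue,
  // ordered by decode timestamp, so the coded frame processing loop below
  // sees them in decode order.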
  if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
    DVLOG(2) << "Parse error discovered while merging parser's buffers";
    return false;
  }

  DCHECK(!frames.empty());

  // Implements the coded frame processing algorithm's outer loop for step 1.
  // Note that ProcessFrame() implements an inner loop for a single frame that
  // handles "jump to the Loop Top step to restart processing of the current
  // coded frame" per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
  //     media-source.html#sourcebuffer-coded-frame-processing
  // 1. For each coded frame in the media segment run the following steps:
  for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
       frames_itr != frames.end(); ++frames_itr) {
    if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
                      timestamp_offset, new_media_segment)) {
      FlushProcessedFrames();
      return false;
    }
  }

  if (!FlushProcessedFrames())
    return false;

  // Steps 2 - 4 are handled by the WebMediaPlayer / Pipeline / Media Element.

  // Step 5:
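  //    If the media segment contains data beyond the current duration, then
  //    run the duration change algorithm with new duration set to the maximum
  //    of the current duration and the group end timestamp.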
  update_duration_cb_.Run(group_end_timestamp_);

  return true;
}

void FrameProcessor::SetGroupStartTimestampIfInSequenceMode(
    base::TimeDelta timestamp_offset) {
  DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
  DCHECK(kNoTimestamp() != timestamp_offset);
  if (sequence_mode_)
    group_start_timestamp_ = timestamp_offset;

  // Changes to timestampOffset should invalidate the preroll buffer.
  audio_preroll_buffer_ = NULL;
}

bool FrameProcessor::AddTrack(StreamParser::TrackId id,
                              ChunkDemuxerStream* stream) {
  DVLOG(2) << __FUNCTION__ << "(): id=" << id;

  MseTrackBuffer* existing_track = FindTrack(id);
  DCHECK(!existing_track);
  if (existing_track)
    return false;

  track_buffers_[id] = new MseTrackBuffer(stream);
  return true;
}

bool FrameProcessor::UpdateTrack(StreamParser::TrackId old_id,
                                 StreamParser::TrackId new_id) {
  DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;

  if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id))
    return false;

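  // Move the MseTrackBuffer pointer to |new_id|. The map owns these raw
  // pointers (they are deleted via STLDeleteValues() in the destructor), so
  // only the stale |old_id| key is erased here, not the value.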
  track_buffers_[new_id] = track_buffers_[old_id];
  CHECK_EQ(1u, track_buffers_.erase(old_id));
  return true;
}

void FrameProcessor::SetAllTrackBuffersNeedRandomAccessPoint() {
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    itr->second->set_needs_random_access_point(true);
  }
}

void FrameProcessor::Reset() {
  DVLOG(2) << __FUNCTION__ << "()";
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end(); ++itr) {
    itr->second->Reset();
  }
}

void FrameProcessor::OnPossibleAudioConfigUpdate(
    const AudioDecoderConfig& config) {
  DCHECK(config.IsValidConfig());

  // Always clear the preroll buffer when a config update is received.
  audio_preroll_buffer_ = NULL;

  if (config.Matches(current_audio_config_))
    return;

  current_audio_config_ = config;
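  // Cache the duration of a single audio sample. It is used below in
  // HandlePartialAppendWindowTrimming() as the tolerance for deciding whether
  // a saved preroll buffer directly precedes a trimmed buffer.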
  sample_duration_ = base::TimeDelta::FromSecondsD(
      1.0 / current_audio_config_.samples_per_second());
}

MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) {
  TrackBufferMap::iterator itr = track_buffers_.find(id);
  if (itr == track_buffers_.end())
    return NULL;

  return itr->second;
}

void FrameProcessor::NotifyNewMediaSegmentStarting(
    DecodeTimestamp segment_timestamp) {
  DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";

  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    itr->second->stream()->OnNewMediaSegment(segment_timestamp);
  }
}

bool FrameProcessor::FlushProcessedFrames() {
  DVLOG(2) << __FUNCTION__ << "()";

  bool result = true;
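  // Attempt to flush every track's queue, even if an earlier track's append
  // fails, so that all queues are cleared; report failure if any append
  // failed.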
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    if (!itr->second->FlushProcessedFrames())
      result = false;
  }

  return result;
}

bool FrameProcessor::HandlePartialAppendWindowTrimming(
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    const scoped_refptr<StreamParserBuffer>& buffer) {
  DCHECK(buffer->duration() > base::TimeDelta());
  DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
  DCHECK(buffer->IsKeyframe());

  const base::TimeDelta frame_end_timestamp =
      buffer->timestamp() + buffer->duration();

  // If the buffer is entirely before |append_window_start|, save it as preroll
  // for the first buffer which overlaps |append_window_start|.
  if (buffer->timestamp() < append_window_start &&
      frame_end_timestamp <= append_window_start) {
    audio_preroll_buffer_ = buffer;
    return false;
  }

  // If the buffer is entirely after |append_window_end| there's nothing to do.
  if (buffer->timestamp() >= append_window_end)
    return false;

  DCHECK(buffer->timestamp() >= append_window_start ||
         frame_end_timestamp > append_window_start);

  bool processed_buffer = false;

  // If we have a preroll buffer, see if we can attach it to the first buffer
  // overlapping or after |append_window_start|.
  if (audio_preroll_buffer_.get()) {
    // We only want to use the preroll buffer if it directly precedes (less
    // than one sample apart) the current buffer.
    const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
                                  audio_preroll_buffer_->duration() -
                                  buffer->timestamp()).InMicroseconds());
    if (delta < sample_duration_.InMicroseconds()) {
      DVLOG(1) << "Attaching audio preroll buffer ["
               << audio_preroll_buffer_->timestamp().InSecondsF() << ", "
               << (audio_preroll_buffer_->timestamp() +
                   audio_preroll_buffer_->duration()).InSecondsF() << ") to "
               << buffer->timestamp().InSecondsF();
      buffer->SetPrerollBuffer(audio_preroll_buffer_);
      processed_buffer = true;
    } else {
      // TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
    }
    audio_preroll_buffer_ = NULL;
  }

  // See if a partial discard can be done around |append_window_start|.
  if (buffer->timestamp() < append_window_start) {
    DVLOG(1) << "Truncating buffer which overlaps append window start."
             << " presentation_timestamp " << buffer->timestamp().InSecondsF()
             << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
             << " append_window_start " << append_window_start.InSecondsF();

    // Mark the overlapping portion of the buffer for discard.
    buffer->set_discard_padding(std::make_pair(
        append_window_start - buffer->timestamp(), base::TimeDelta()));

    // Adjust the timestamp of this buffer forward to |append_window_start| and
    // decrease the duration to compensate.
    buffer->set_timestamp(append_window_start);
    buffer->SetDecodeTimestamp(
        DecodeTimestamp::FromPresentationTime(append_window_start));
    buffer->set_duration(frame_end_timestamp - append_window_start);
    processed_buffer = true;
  }

  // See if a partial discard can be done around |append_window_end|.
  if (frame_end_timestamp > append_window_end) {
    DVLOG(1) << "Truncating buffer which overlaps append window end."
             << " presentation_timestamp " << buffer->timestamp().InSecondsF()
             << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
             << " append_window_end " << append_window_end.InSecondsF();

    // Mark the overlapping portion of the buffer for discard.
    buffer->set_discard_padding(
        std::make_pair(buffer->discard_padding().first,
                       frame_end_timestamp - append_window_end));

    // Decrease the duration of the buffer to remove the discarded portion.
    buffer->set_duration(append_window_end - buffer->timestamp());
    processed_buffer = true;
  }

  return processed_buffer;
}

bool FrameProcessor::ProcessFrame(
    const scoped_refptr<StreamParserBuffer>& frame,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    base::TimeDelta* timestamp_offset,
    bool* new_media_segment) {
  // Implements the loop within step 1 of the coded frame processing algorithm
  // for a single input frame per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
  //     media-source.html#sourcebuffer-coded-frame-processing

  while (true) {
    // 1. Loop Top: Let presentation timestamp be a double precision floating
    //    point representation of the coded frame's presentation timestamp in
    //    seconds.
    // 2. Let decode timestamp be a double precision floating point
    //    representation of the coded frame's decode timestamp in seconds.
    // 3. Let frame duration be a double precision floating point
    //    representation of the coded frame's duration in seconds.
    // We use base::TimeDelta and DecodeTimestamp instead of double.
    base::TimeDelta presentation_timestamp = frame->timestamp();
    DecodeTimestamp decode_timestamp = frame->GetDecodeTimestamp();
    base::TimeDelta frame_duration = frame->duration();

    DVLOG(3) << __FUNCTION__ << ": Processing frame "
             << "Type=" << frame->type()
             << ", TrackID=" << frame->track_id()
             << ", PTS=" << presentation_timestamp.InSecondsF()
             << ", DTS=" << decode_timestamp.InSecondsF()
             << ", DUR=" << frame_duration.InSecondsF()
             << ", RAP=" << frame->IsKeyframe();

    // Sanity check the timestamps.
    if (presentation_timestamp == kNoTimestamp()) {
      DVLOG(2) << __FUNCTION__ << ": Unknown frame PTS";
      return false;
    }
    if (decode_timestamp == kNoDecodeTimestamp()) {
      DVLOG(2) << __FUNCTION__ << ": Unknown frame DTS";
      return false;
    }
    if (decode_timestamp.ToPresentationTime() > presentation_timestamp) {
      // TODO(wolenetz): Determine whether DTS>PTS should really be allowed.
      // See http://crbug.com/354518.
      DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
               << decode_timestamp.InSecondsF() << ") > PTS("
               << presentation_timestamp.InSecondsF() << ")";
    }

    // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
    // frame durations. For now, we allow non-negative frame durations.
    // See http://crbug.com/351166.
    if (frame_duration == kNoTimestamp()) {
      DVLOG(2) << __FUNCTION__ << ": Frame missing duration (kNoTimestamp())";
      return false;
    }
    if (frame_duration < base::TimeDelta()) {
      DVLOG(2) << __FUNCTION__ << ": Frame duration negative: "
               << frame_duration.InSecondsF();
      return false;
    }

    // 4. If mode equals "sequence" and group start timestamp is set, then run
    //    the following steps:
    if (sequence_mode_ && group_start_timestamp_ != kNoTimestamp()) {
      // 4.1. Set timestampOffset equal to group start timestamp -
      //      presentation timestamp.
      *timestamp_offset = group_start_timestamp_ - presentation_timestamp;

      DVLOG(3) << __FUNCTION__ << ": updated timestampOffset is now "
               << timestamp_offset->InSecondsF();

      // 4.2. Set group end timestamp equal to group start timestamp.
      group_end_timestamp_ = group_start_timestamp_;

      // 4.3. Set the need random access point flag on all track buffers to
      //      true.
      SetAllTrackBuffersNeedRandomAccessPoint();

      // 4.4. Unset group start timestamp.
      group_start_timestamp_ = kNoTimestamp();
    }

    // 5. If timestampOffset is not 0, then run the following steps:
    if (*timestamp_offset != base::TimeDelta()) {
      // 5.1. Add timestampOffset to the presentation timestamp.
      // Note: |frame| PTS is only updated if it survives discontinuity
      // processing.
      presentation_timestamp += *timestamp_offset;

      // 5.2. Add timestampOffset to the decode timestamp.
      // Note: |frame| DTS is only updated if it survives discontinuity
      // processing.
      decode_timestamp += *timestamp_offset;
    }

    // 6. Let track buffer equal the track buffer that the coded frame will be
    //    added to.

    // Remap audio and video track types to their special singleton
    // identifiers.
    StreamParser::TrackId track_id = kAudioTrackId;
    switch (frame->type()) {
      case DemuxerStream::AUDIO:
        break;
      case DemuxerStream::VIDEO:
        track_id = kVideoTrackId;
        break;
      case DemuxerStream::TEXT:
        track_id = frame->track_id();
        break;
      case DemuxerStream::UNKNOWN:
      case DemuxerStream::NUM_TYPES:
        DCHECK(false) << ": Invalid frame type " << frame->type();
        return false;
    }

    MseTrackBuffer* track_buffer = FindTrack(track_id);
    if (!track_buffer) {
      DVLOG(2) << __FUNCTION__ << ": Unknown track: type=" << frame->type()
               << ", frame processor track id=" << track_id
               << ", parser track id=" << frame->track_id();
      return false;
    }

    // 7. If last decode timestamp for track buffer is set and decode timestamp
    //    is less than last decode timestamp
    //    OR
    //    If last decode timestamp for track buffer is set and the difference
    //    between decode timestamp and last decode timestamp is greater than 2
    //    times last frame duration:
    DecodeTimestamp last_decode_timestamp =
        track_buffer->last_decode_timestamp();
    if (last_decode_timestamp != kNoDecodeTimestamp()) {
      base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp;
      if (dts_delta < base::TimeDelta() ||
          dts_delta > 2 * track_buffer->last_frame_duration()) {
        // 7.1. If mode equals "segments": Set group end timestamp to
        //      presentation timestamp.
        //      If mode equals "sequence": Set group start timestamp equal to
        //      the group end timestamp.
        if (!sequence_mode_) {
          group_end_timestamp_ = presentation_timestamp;
          // This triggers a discontinuity, so we need to treat the next frames
          // appended within the append window as if they were the beginning of
          // a new segment.
          *new_media_segment = true;
        } else {
          DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
                   << group_end_timestamp_.InSecondsF();
          DCHECK(kNoTimestamp() != group_end_timestamp_);
          group_start_timestamp_ = group_end_timestamp_;
        }

        // 7.2. - 7.5.:
        Reset();

        // 7.6. Jump to the Loop Top step above to restart processing of the
        //      current coded frame.
        DVLOG(3) << __FUNCTION__ << ": Discontinuity: reprocessing frame";
        continue;
      }
    }

    // 9. Let frame end timestamp equal the sum of presentation timestamp and
    //    frame duration.
    base::TimeDelta frame_end_timestamp =
        presentation_timestamp + frame_duration;

    // 10. If presentation timestamp is less than appendWindowStart, then set
    //     the need random access point flag to true, drop the coded frame,
    //     and jump to the top of the loop to start processing the next coded
    //     frame.
    // Note: We keep the result of a partial discard of a buffer that overlaps
    //       |append_window_start| and does not end after |append_window_end|.
    // 11. If frame end timestamp is greater than appendWindowEnd, then set the
    //     need random access point flag to true, drop the coded frame, and
    //     jump to the top of the loop to start processing the next coded
    //     frame.
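    // Note: The (possibly offset-adjusted) timestamps are written back to
    // |frame| before append window filtering so that any partial trimming in
    // HandlePartialAppendWindowTrimming() operates on the shifted values.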
    frame->set_timestamp(presentation_timestamp);
    frame->SetDecodeTimestamp(decode_timestamp);
    if (track_buffer->stream()->supports_partial_append_window_trimming() &&
        HandlePartialAppendWindowTrimming(append_window_start,
                                          append_window_end,
                                          frame)) {
      // If |frame| was front-trimmed, a discontinuity may exist, so treat the
      // next frames appended as if they were the beginning of a new media
      // segment.
      if (frame->timestamp() != presentation_timestamp && !sequence_mode_)
        *new_media_segment = true;

      // |frame| has been partially trimmed or had preroll added. Though
      // |frame|'s duration may have changed, do not update |frame_duration|
      // here, so that |track_buffer|'s last frame duration update below uses
      // the original frame duration and reduces spurious discontinuity
      // detection.
      decode_timestamp = frame->GetDecodeTimestamp();
      presentation_timestamp = frame->timestamp();
      frame_end_timestamp = frame->timestamp() + frame->duration();
    }

    if (presentation_timestamp < append_window_start ||
        frame_end_timestamp > append_window_end) {
      track_buffer->set_needs_random_access_point(true);
      DVLOG(3) << "Dropping frame that is outside append window.";
      return true;
    }

    // Note: This step is relocated, relative to the April 1 spec, so that
    // append window processing can first filter out coded frames whose PTS
    // became negative after being shifted by |timestamp_offset|.
    // 8. If the presentation timestamp or decode timestamp is less than the
    //    presentation start time, then run the end of stream algorithm with
    //    the error parameter set to "decode", and abort these steps.
    DCHECK(presentation_timestamp >= base::TimeDelta());
    if (decode_timestamp < DecodeTimestamp()) {
      // B-frames may still result in negative DTS here after being shifted by
      // |timestamp_offset|.
      DVLOG(2) << __FUNCTION__
               << ": frame PTS=" << presentation_timestamp.InSecondsF()
               << " has negative DTS=" << decode_timestamp.InSecondsF()
               << " after applying timestampOffset, handling any"
               << " discontinuity, and filtering against append window";
      return false;
    }

    // 12. If the need random access point flag on track buffer equals true,
    //     then run the following steps:
    if (track_buffer->needs_random_access_point()) {
      // 12.1. If the coded frame is not a random access point, then drop the
      //       coded frame and jump to the top of the loop to start processing
      //       the next coded frame.
      if (!frame->IsKeyframe()) {
        DVLOG(3) << __FUNCTION__
                 << ": Dropping frame that is not a random access point";
        return true;
      }

      // 12.2. Set the need random access point flag on track buffer to false.
      track_buffer->set_needs_random_access_point(false);
    }

    // We now have a processed buffer to append to the track buffer's stream.
    // If it is the first in a new media segment or follows a discontinuity,
    // notify all the track buffers' streams that a new segment is beginning.
    if (*new_media_segment) {
      // First, complete the append to track buffer streams of the previous
      // media segment's frames, if any.
      if (!FlushProcessedFrames())
        return false;

      *new_media_segment = false;

      // TODO(acolwell/wolenetz): This should be changed to a presentation
      // timestamp. See http://crbug.com/402502
      NotifyNewMediaSegmentStarting(decode_timestamp);
    }

    DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
             << "PTS=" << presentation_timestamp.InSecondsF()
             << ", DTS=" << decode_timestamp.InSecondsF();

    // Steps 13-18: Note, we optimize by appending groups of contiguous
    // processed frames for each track buffer at the end of ProcessFrames() or
    // prior to NotifyNewMediaSegmentStarting().
    // TODO(wolenetz): Refactor SourceBufferStream to conform to spec GC
    // timing. See http://crbug.com/371197.
    track_buffer->EnqueueProcessedFrame(frame);

    // 19. Set last decode timestamp for track buffer to decode timestamp.
    track_buffer->set_last_decode_timestamp(decode_timestamp);

    // 20. Set last frame duration for track buffer to frame duration.
    track_buffer->set_last_frame_duration(frame_duration);

    // 21. If highest presentation timestamp for track buffer is unset or frame
    //     end timestamp is greater than highest presentation timestamp, then
    //     set highest presentation timestamp for track buffer to frame end
    //     timestamp.
    track_buffer->SetHighestPresentationTimestampIfIncreased(
        frame_end_timestamp);

    // 22. If frame end timestamp is greater than group end timestamp, then set
    //     group end timestamp equal to frame end timestamp.
    if (frame_end_timestamp > group_end_timestamp_)
      group_end_timestamp_ = frame_end_timestamp;
    DCHECK(group_end_timestamp_ >= base::TimeDelta());

    return true;
  }

  NOTREACHED();
  return false;
}

}  // namespace media