/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
***/
32 #include <pulse/xmalloc.h>
34 #include <pulsecore/log.h>
35 #include <pulsecore/mcalign.h>
36 #include <pulsecore/macro.h>
37 #include <pulsecore/flist.h>
39 #include "memblockq.h"
42 struct list_item
*next
, *prev
;
47 PA_STATIC_FLIST_DECLARE(list_items
, 0, pa_xfree
);
50 struct list_item
*blocks
, *blocks_tail
;
51 struct list_item
*current_read
, *current_write
;
53 size_t maxlength
, tlength
, base
, prebuf
, minreq
, maxrewind
;
54 int64_t read_index
, write_index
;
62 pa_memblockq
* pa_memblockq_new(
70 pa_memchunk
*silence
) {
76 bq
= pa_xnew(pa_memblockq
, 1);
77 bq
->blocks
= bq
->blocks_tail
= NULL
;
78 bq
->current_read
= bq
->current_write
= NULL
;
82 bq
->read_index
= bq
->write_index
= idx
;
84 pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
85 (unsigned long) maxlength
, (unsigned long) tlength
, (unsigned long) base
, (unsigned long) prebuf
, (unsigned long) minreq
, (unsigned long) maxrewind
);
88 bq
->requested
= bq
->maxlength
= bq
->tlength
= bq
->prebuf
= bq
->minreq
= bq
->maxrewind
= 0;
91 pa_memblockq_set_maxlength(bq
, maxlength
);
92 pa_memblockq_set_tlength(bq
, tlength
);
93 pa_memblockq_set_prebuf(bq
, prebuf
);
94 pa_memblockq_set_minreq(bq
, minreq
);
95 pa_memblockq_set_maxrewind(bq
, maxrewind
);
97 pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
98 (unsigned long) bq
->maxlength
, (unsigned long) bq
->tlength
, (unsigned long) bq
->base
, (unsigned long) bq
->prebuf
, (unsigned long) bq
->minreq
, (unsigned long) bq
->maxrewind
);
101 bq
->silence
= *silence
;
102 pa_memblock_ref(bq
->silence
.memblock
);
104 pa_memchunk_reset(&bq
->silence
);
106 bq
->mcalign
= pa_mcalign_new(bq
->base
);
111 void pa_memblockq_free(pa_memblockq
* bq
) {
114 pa_memblockq_silence(bq
);
116 if (bq
->silence
.memblock
)
117 pa_memblock_unref(bq
->silence
.memblock
);
120 pa_mcalign_free(bq
->mcalign
);
125 static void fix_current_read(pa_memblockq
*bq
) {
128 if (PA_UNLIKELY(!bq
->blocks
)) {
129 bq
->current_read
= NULL
;
133 if (PA_UNLIKELY(!bq
->current_read
))
134 bq
->current_read
= bq
->blocks
;
137 while (PA_UNLIKELY(bq
->current_read
->index
> bq
->read_index
))
139 if (bq
->current_read
->prev
)
140 bq
->current_read
= bq
->current_read
->prev
;
145 while (PA_LIKELY(bq
->current_read
!= NULL
) && PA_UNLIKELY(bq
->current_read
->index
+ (int64_t) bq
->current_read
->chunk
.length
<= bq
->read_index
))
146 bq
->current_read
= bq
->current_read
->next
;
148 /* At this point current_read will either point at or left of the
149 next block to play. It may be NULL in case everything in
150 the queue was already played */
153 static void fix_current_write(pa_memblockq
*bq
) {
156 if (PA_UNLIKELY(!bq
->blocks
)) {
157 bq
->current_write
= NULL
;
161 if (PA_UNLIKELY(!bq
->current_write
))
162 bq
->current_write
= bq
->blocks_tail
;
165 while (PA_UNLIKELY(bq
->current_write
->index
+ (int64_t) bq
->current_write
->chunk
.length
<= bq
->write_index
))
167 if (bq
->current_write
->next
)
168 bq
->current_write
= bq
->current_write
->next
;
173 while (PA_LIKELY(bq
->current_write
!= NULL
) && PA_UNLIKELY(bq
->current_write
->index
> bq
->write_index
))
174 bq
->current_write
= bq
->current_write
->prev
;
176 /* At this point current_write will either point at or right of
177 the next block to write data to. It may be NULL in case
178 everything in the queue is still to be played */
181 static void drop_block(pa_memblockq
*bq
, struct list_item
*q
) {
185 pa_assert(bq
->n_blocks
>= 1);
188 q
->prev
->next
= q
->next
;
190 pa_assert(bq
->blocks
== q
);
191 bq
->blocks
= q
->next
;
195 q
->next
->prev
= q
->prev
;
197 pa_assert(bq
->blocks_tail
== q
);
198 bq
->blocks_tail
= q
->prev
;
201 if (bq
->current_write
== q
)
202 bq
->current_write
= q
->prev
;
204 if (bq
->current_read
== q
)
205 bq
->current_read
= q
->next
;
207 pa_memblock_unref(q
->chunk
.memblock
);
209 if (pa_flist_push(PA_STATIC_FLIST_GET(list_items
), q
) < 0)
215 static void drop_backlog(pa_memblockq
*bq
) {
219 boundary
= bq
->read_index
- (int64_t) bq
->maxrewind
;
221 while (bq
->blocks
&& (bq
->blocks
->index
+ (int64_t) bq
->blocks
->chunk
.length
<= boundary
))
222 drop_block(bq
, bq
->blocks
);
225 static pa_bool_t
can_push(pa_memblockq
*bq
, size_t l
) {
230 if (bq
->read_index
> bq
->write_index
) {
231 int64_t d
= bq
->read_index
- bq
->write_index
;
239 end
= bq
->blocks_tail
? bq
->blocks_tail
->index
+ (int64_t) bq
->blocks_tail
->chunk
.length
: bq
->write_index
;
241 /* Make sure that the list doesn't get too long */
242 if (bq
->write_index
+ (int64_t) l
> end
)
243 if (bq
->write_index
+ (int64_t) l
- bq
->read_index
> (int64_t) bq
->maxlength
)
249 int pa_memblockq_push(pa_memblockq
* bq
, const pa_memchunk
*uchunk
) {
250 struct list_item
*q
, *n
;
256 pa_assert(uchunk
->memblock
);
257 pa_assert(uchunk
->length
> 0);
258 pa_assert(uchunk
->index
+ uchunk
->length
<= pa_memblock_get_length(uchunk
->memblock
));
260 if (uchunk
->length
% bq
->base
)
263 if (!can_push(bq
, uchunk
->length
))
266 old
= bq
->write_index
;
269 fix_current_write(bq
);
270 q
= bq
->current_write
;
272 /* First we advance the q pointer right of where we want to
276 while (bq
->write_index
+ (int64_t) chunk
.length
> q
->index
)
286 /* We go from back to front to look for the right place to add
287 * this new entry. Drop data we will overwrite on the way */
291 if (bq
->write_index
>= q
->index
+ (int64_t) q
->chunk
.length
)
292 /* We found the entry where we need to place the new entry immediately after */
294 else if (bq
->write_index
+ (int64_t) chunk
.length
<= q
->index
) {
295 /* This entry isn't touched at all, let's skip it */
297 } else if (bq
->write_index
<= q
->index
&&
298 bq
->write_index
+ (int64_t) chunk
.length
>= q
->index
+ (int64_t) q
->chunk
.length
) {
300 /* This entry is fully replaced by the new entry, so let's drop it */
306 } else if (bq
->write_index
>= q
->index
) {
307 /* The write index points into this memblock, so let's
308 * truncate or split it */
310 if (bq
->write_index
+ (int64_t) chunk
.length
< q
->index
+ (int64_t) q
->chunk
.length
) {
312 /* We need to save the end of this memchunk */
316 /* Create a new list entry for the end of thie memchunk */
317 if (!(p
= pa_flist_pop(PA_STATIC_FLIST_GET(list_items
))))
318 p
= pa_xnew(struct list_item
, 1);
321 pa_memblock_ref(p
->chunk
.memblock
);
323 /* Calculate offset */
324 d
= (size_t) (bq
->write_index
+ (int64_t) chunk
.length
- q
->index
);
327 /* Drop it from the new entry */
328 p
->index
= q
->index
+ (int64_t) d
;
329 p
->chunk
.length
-= d
;
331 /* Add it to the list */
333 if ((p
->next
= q
->next
))
342 /* Truncate the chunk */
343 if (!(q
->chunk
.length
= (size_t) (bq
->write_index
- q
->index
))) {
350 /* We had to truncate this block, hence we're now at the right position */
355 pa_assert(bq
->write_index
+ (int64_t)chunk
.length
> q
->index
&&
356 bq
->write_index
+ (int64_t)chunk
.length
< q
->index
+ (int64_t)q
->chunk
.length
&&
357 bq
->write_index
< q
->index
);
359 /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */
361 d
= (size_t) (bq
->write_index
+ (int64_t) chunk
.length
- q
->index
);
362 q
->index
+= (int64_t) d
;
364 q
->chunk
.length
-= d
;
371 pa_assert(bq
->write_index
>= q
->index
+ (int64_t)q
->chunk
.length
);
372 pa_assert(!q
->next
|| (bq
->write_index
+ (int64_t)chunk
.length
<= q
->next
->index
));
374 /* Try to merge memory blocks */
376 if (q
->chunk
.memblock
== chunk
.memblock
&&
377 q
->chunk
.index
+ q
->chunk
.length
== chunk
.index
&&
378 bq
->write_index
== q
->index
+ (int64_t) q
->chunk
.length
) {
380 q
->chunk
.length
+= chunk
.length
;
381 bq
->write_index
+= (int64_t) chunk
.length
;
385 pa_assert(!bq
->blocks
|| (bq
->write_index
+ (int64_t)chunk
.length
<= bq
->blocks
->index
));
387 if (!(n
= pa_flist_pop(PA_STATIC_FLIST_GET(list_items
))))
388 n
= pa_xnew(struct list_item
, 1);
391 pa_memblock_ref(n
->chunk
.memblock
);
392 n
->index
= bq
->write_index
;
393 bq
->write_index
+= (int64_t) n
->chunk
.length
;
395 n
->next
= q
? q
->next
: bq
->blocks
;
412 delta
= bq
->write_index
- old
;
414 if (delta
>= (int64_t) bq
->requested
) {
415 delta
-= (int64_t) bq
->requested
;
418 bq
->requested
-= (size_t) delta
;
422 bq
->missing
-= delta
;
427 pa_bool_t
pa_memblockq_prebuf_active(pa_memblockq
*bq
) {
431 return pa_memblockq_get_length(bq
) < bq
->prebuf
;
433 return bq
->prebuf
> 0 && bq
->read_index
>= bq
->write_index
;
436 static pa_bool_t
update_prebuf(pa_memblockq
*bq
) {
441 if (pa_memblockq_get_length(bq
) < bq
->prebuf
)
444 bq
->in_prebuf
= FALSE
;
448 if (bq
->prebuf
> 0 && bq
->read_index
>= bq
->write_index
) {
449 bq
->in_prebuf
= TRUE
;
457 int pa_memblockq_peek(pa_memblockq
* bq
, pa_memchunk
*chunk
) {
462 /* We need to pre-buffer */
463 if (update_prebuf(bq
))
466 fix_current_read(bq
);
468 /* Do we need to spit out silence? */
469 if (!bq
->current_read
|| bq
->current_read
->index
> bq
->read_index
) {
473 /* How much silence shall we return? */
474 if (bq
->current_read
)
475 length
= (size_t) (bq
->current_read
->index
- bq
->read_index
);
476 else if (bq
->write_index
> bq
->read_index
)
477 length
= (size_t) (bq
->write_index
- bq
->read_index
);
481 /* We need to return silence, since no data is yet available */
482 if (bq
->silence
.memblock
) {
483 *chunk
= bq
->silence
;
484 pa_memblock_ref(chunk
->memblock
);
486 if (length
> 0 && length
< chunk
->length
)
487 chunk
->length
= length
;
491 /* If the memblockq is empty, return -1, otherwise return
492 * the time to sleep */
496 chunk
->memblock
= NULL
;
497 chunk
->length
= length
;
504 /* Ok, let's pass real data to the caller */
505 *chunk
= bq
->current_read
->chunk
;
506 pa_memblock_ref(chunk
->memblock
);
508 pa_assert(bq
->read_index
>= bq
->current_read
->index
);
509 d
= bq
->read_index
- bq
->current_read
->index
;
510 chunk
->index
+= (size_t) d
;
511 chunk
->length
-= (size_t) d
;
516 void pa_memblockq_drop(pa_memblockq
*bq
, size_t length
) {
519 pa_assert(length
% bq
->base
== 0);
521 old
= bq
->read_index
;
525 /* Do not drop any data when we are in prebuffering mode */
526 if (update_prebuf(bq
))
529 fix_current_read(bq
);
531 if (bq
->current_read
) {
534 /* We go through this piece by piece to make sure we don't
535 * drop more than allowed by prebuf */
537 p
= bq
->current_read
->index
+ (int64_t) bq
->current_read
->chunk
.length
;
538 pa_assert(p
>= bq
->read_index
);
539 d
= p
- bq
->read_index
;
541 if (d
> (int64_t) length
)
542 d
= (int64_t) length
;
545 length
-= (size_t) d
;
549 /* The list is empty, there's nothing we could drop */
550 bq
->read_index
+= (int64_t) length
;
557 delta
= bq
->read_index
- old
;
558 bq
->missing
+= delta
;
561 void pa_memblockq_rewind(pa_memblockq
*bq
, size_t length
) {
563 pa_assert(length
% bq
->base
== 0);
565 /* This is kind of the inverse of pa_memblockq_drop() */
567 bq
->read_index
-= (int64_t) length
;
568 bq
->missing
-= (int64_t) length
;
571 pa_bool_t
pa_memblockq_is_readable(pa_memblockq
*bq
) {
574 if (pa_memblockq_prebuf_active(bq
))
577 if (pa_memblockq_get_length(bq
) <= 0)
583 size_t pa_memblockq_get_length(pa_memblockq
*bq
) {
586 if (bq
->write_index
<= bq
->read_index
)
589 return (size_t) (bq
->write_index
- bq
->read_index
);
592 size_t pa_memblockq_missing(pa_memblockq
*bq
) {
596 if ((l
= pa_memblockq_get_length(bq
)) >= bq
->tlength
)
601 return l
>= bq
->minreq
? l
: 0;
604 void pa_memblockq_seek(pa_memblockq
*bq
, int64_t offset
, pa_seek_mode_t seek
) {
608 old
= bq
->write_index
;
611 case PA_SEEK_RELATIVE
:
612 bq
->write_index
+= offset
;
614 case PA_SEEK_ABSOLUTE
:
615 bq
->write_index
= offset
;
617 case PA_SEEK_RELATIVE_ON_READ
:
618 bq
->write_index
= bq
->read_index
+ offset
;
620 case PA_SEEK_RELATIVE_END
:
621 bq
->write_index
= (bq
->blocks_tail
? bq
->blocks_tail
->index
+ (int64_t) bq
->blocks_tail
->chunk
.length
: bq
->read_index
) + offset
;
624 pa_assert_not_reached();
629 delta
= bq
->write_index
- old
;
631 if (delta
>= (int64_t) bq
->requested
) {
632 delta
-= (int64_t) bq
->requested
;
634 } else if (delta
>= 0) {
635 bq
->requested
-= (size_t) delta
;
639 bq
->missing
-= delta
;
642 void pa_memblockq_flush_write(pa_memblockq
*bq
) {
646 pa_memblockq_silence(bq
);
648 old
= bq
->write_index
;
649 bq
->write_index
= bq
->read_index
;
651 pa_memblockq_prebuf_force(bq
);
653 delta
= bq
->write_index
- old
;
655 if (delta
>= (int64_t) bq
->requested
) {
656 delta
-= (int64_t) bq
->requested
;
658 } else if (delta
>= 0) {
659 bq
->requested
-= (size_t) delta
;
663 bq
->missing
-= delta
;
666 void pa_memblockq_flush_read(pa_memblockq
*bq
) {
670 pa_memblockq_silence(bq
);
672 old
= bq
->read_index
;
673 bq
->read_index
= bq
->write_index
;
675 pa_memblockq_prebuf_force(bq
);
677 delta
= bq
->read_index
- old
;
678 bq
->missing
+= delta
;
681 size_t pa_memblockq_get_tlength(pa_memblockq
*bq
) {
687 size_t pa_memblockq_get_minreq(pa_memblockq
*bq
) {
693 int64_t pa_memblockq_get_read_index(pa_memblockq
*bq
) {
696 return bq
->read_index
;
699 int64_t pa_memblockq_get_write_index(pa_memblockq
*bq
) {
702 return bq
->write_index
;
705 int pa_memblockq_push_align(pa_memblockq
* bq
, const pa_memchunk
*chunk
) {
712 return pa_memblockq_push(bq
, chunk
);
714 if (!can_push(bq
, pa_mcalign_csize(bq
->mcalign
, chunk
->length
)))
717 pa_mcalign_push(bq
->mcalign
, chunk
);
719 while (pa_mcalign_pop(bq
->mcalign
, &rchunk
) >= 0) {
721 r
= pa_memblockq_push(bq
, &rchunk
);
722 pa_memblock_unref(rchunk
.memblock
);
725 pa_mcalign_flush(bq
->mcalign
);
733 void pa_memblockq_prebuf_disable(pa_memblockq
*bq
) {
736 bq
->in_prebuf
= FALSE
;
739 void pa_memblockq_prebuf_force(pa_memblockq
*bq
) {
743 bq
->in_prebuf
= TRUE
;
746 size_t pa_memblockq_get_maxlength(pa_memblockq
*bq
) {
749 return bq
->maxlength
;
752 size_t pa_memblockq_get_prebuf(pa_memblockq
*bq
) {
758 size_t pa_memblockq_pop_missing(pa_memblockq
*bq
) {
763 /* pa_log("pop: %lli", bq->missing); */
765 if (bq
->missing
<= 0)
768 l
= (size_t) bq
->missing
;
775 void pa_memblockq_set_maxlength(pa_memblockq
*bq
, size_t maxlength
) {
778 bq
->maxlength
= ((maxlength
+bq
->base
-1)/bq
->base
)*bq
->base
;
780 if (bq
->maxlength
< bq
->base
)
781 bq
->maxlength
= bq
->base
;
783 if (bq
->tlength
> bq
->maxlength
)
784 pa_memblockq_set_tlength(bq
, bq
->maxlength
);
786 if (bq
->prebuf
> bq
->maxlength
)
787 pa_memblockq_set_prebuf(bq
, bq
->maxlength
);
790 void pa_memblockq_set_tlength(pa_memblockq
*bq
, size_t tlength
) {
795 tlength
= bq
->maxlength
;
797 old_tlength
= bq
->tlength
;
798 bq
->tlength
= ((tlength
+bq
->base
-1)/bq
->base
)*bq
->base
;
800 if (bq
->tlength
> bq
->maxlength
)
801 bq
->tlength
= bq
->maxlength
;
803 if (bq
->prebuf
> bq
->tlength
)
804 pa_memblockq_set_prebuf(bq
, bq
->tlength
);
806 if (bq
->minreq
> bq
->tlength
)
807 pa_memblockq_set_minreq(bq
, bq
->tlength
);
809 bq
->missing
+= (int64_t) bq
->tlength
- (int64_t) old_tlength
;
812 void pa_memblockq_set_prebuf(pa_memblockq
*bq
, size_t prebuf
) {
815 if (prebuf
== (size_t) -1)
816 prebuf
= bq
->tlength
;
818 bq
->prebuf
= ((prebuf
+bq
->base
-1)/bq
->base
)*bq
->base
;
820 if (prebuf
> 0 && bq
->prebuf
< bq
->base
)
821 bq
->prebuf
= bq
->base
;
823 if (bq
->prebuf
> bq
->tlength
)
824 bq
->prebuf
= bq
->tlength
;
826 if (bq
->prebuf
<= 0 || pa_memblockq_get_length(bq
) >= bq
->prebuf
)
827 bq
->in_prebuf
= FALSE
;
829 if (bq
->minreq
> bq
->prebuf
)
830 pa_memblockq_set_minreq(bq
, bq
->prebuf
);
833 void pa_memblockq_set_minreq(pa_memblockq
*bq
, size_t minreq
) {
836 bq
->minreq
= (minreq
/bq
->base
)*bq
->base
;
838 if (bq
->minreq
> bq
->tlength
)
839 bq
->minreq
= bq
->tlength
;
841 if (bq
->minreq
> bq
->prebuf
)
842 bq
->minreq
= bq
->prebuf
;
844 if (bq
->minreq
< bq
->base
)
845 bq
->minreq
= bq
->base
;
848 void pa_memblockq_set_maxrewind(pa_memblockq
*bq
, size_t maxrewind
) {
851 bq
->maxrewind
= (maxrewind
/bq
->base
)*bq
->base
;
854 int pa_memblockq_splice(pa_memblockq
*bq
, pa_memblockq
*source
) {
859 pa_memblockq_prebuf_disable(bq
);
864 if (pa_memblockq_peek(source
, &chunk
) < 0)
867 pa_assert(chunk
.length
> 0);
869 if (chunk
.memblock
) {
871 if (pa_memblockq_push_align(bq
, &chunk
) < 0) {
872 pa_memblock_unref(chunk
.memblock
);
876 pa_memblock_unref(chunk
.memblock
);
878 pa_memblockq_seek(bq
, (int64_t) chunk
.length
, PA_SEEK_RELATIVE
);
880 pa_memblockq_drop(bq
, chunk
.length
);
884 void pa_memblockq_willneed(pa_memblockq
*bq
) {
889 fix_current_read(bq
);
891 for (q
= bq
->current_read
; q
; q
= q
->next
)
892 pa_memchunk_will_need(&q
->chunk
);
895 void pa_memblockq_set_silence(pa_memblockq
*bq
, pa_memchunk
*silence
) {
898 if (bq
->silence
.memblock
)
899 pa_memblock_unref(bq
->silence
.memblock
);
902 bq
->silence
= *silence
;
903 pa_memblock_ref(bq
->silence
.memblock
);
905 pa_memchunk_reset(&bq
->silence
);
908 pa_bool_t
pa_memblockq_is_empty(pa_memblockq
*bq
) {
914 void pa_memblockq_silence(pa_memblockq
*bq
) {
918 drop_block(bq
, bq
->blocks
);
920 pa_assert(bq
->n_blocks
== 0);
923 unsigned pa_memblockq_get_nblocks(pa_memblockq
*bq
) {
929 size_t pa_memblockq_get_base(pa_memblockq
*bq
) {