/* src/pulsecore/memblockq.c */
/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"
struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);

struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;
    struct list_item *current_read, *current_write;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;
    pa_bool_t in_prebuf;
    pa_memchunk silence;
    pa_mcalign *mcalign;
    int64_t missing;
    size_t requested;
};
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = 0;
    bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
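/*
 * Illustrative sketch (not part of the original file): creating a queue for
 * S16LE stereo data, where one frame is 4 bytes. The concrete sizes below are
 * made-up assumptions; only the call itself and the meaning of each parameter
 * are taken from this file.
 *
 *     pa_memblockq *q = pa_memblockq_new(
 *             0,          // idx: initial read/write index
 *             1024 * 64,  // maxlength: hard limit on queued bytes
 *             1024 * 16,  // tlength: target fill level
 *             4,          // base: frame size; all limits are rounded to it
 *             1024 * 4,   // prebuf: bytes required before reading may start
 *             1024,       // minreq: don't request refills smaller than this
 *             0,          // maxrewind: no backlog kept for rewinding
 *             NULL);      // no silence chunk; peek() may return memblock=NULL
 */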
void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_silence(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
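/*
 * Note (added for clarity, not in the original file): current_read and
 * current_write cache the list node each index last pointed into, so the two
 * fix_current_*() helpers normally move only a node or two per call instead
 * of rescanning the whole list; sequential reads and writes therefore stay
 * cheap even when many blocks are queued.
 */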
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}
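/*
 * Note (added for clarity, not in the original file): up to maxrewind bytes
 * behind the read index are kept alive as a backlog so that
 * pa_memblockq_rewind() can step the read pointer backwards; drop_backlog()
 * trims everything older than that boundary whenever it is called from
 * pa_memblockq_drop() or pa_memblockq_seek().
 */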
static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return TRUE;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return FALSE;

    return TRUE;
}
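/*
 * Worked example for can_push() (made-up numbers): with read_index=1000,
 * write_index=5000, maxlength=8000 and the tail block ending at 5000, pushing
 * l=4000 gives write_index + l - read_index = 8000, which is not > maxlength,
 * so the push is allowed; l=4004 would be refused. A write that ends at or
 * before read_index is always allowed, since it only rewrites data that has
 * already been read.
 */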
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old, delta;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)

            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= (int64_t) bq->requested;
        bq->requested = 0;
    } else {
        bq->requested -= (size_t) delta;
        delta = 0;
    }

    bq->missing -= delta;

    return 0;
}
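/*
 * Illustrative sketch (not from the original file): pushing data into the
 * queue. The chunk length must be a multiple of the queue's base, and a push
 * that would overflow maxlength fails, so callers should check the return
 * value:
 *
 *     pa_memchunk c;
 *     c.memblock = pa_memblock_new_fixed(pool, data, size, TRUE);
 *     c.index = 0;
 *     c.length = size;
 *
 *     if (pa_memblockq_push(q, &c) < 0)
 *         pa_log_warn("memblockq full, chunk not queued");
 *
 *     pa_memblock_unref(c.memblock);  // the queue holds its own reference
 *
 * pa_memblock_new_fixed() and its arguments are assumptions for this sketch;
 * any pa_memchunk pointing at valid memory behaves the same way.
 */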
pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf)
        return pa_memblockq_get_length(bq) < bq->prebuf;
    else
        return bq->prebuf > 0 && bq->read_index >= bq->write_index;
}
static pa_bool_t update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}
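/*
 * Worked example of the prebuf state machine (made-up numbers): with
 * prebuf=4000, a fresh queue starts with in_prebuf=TRUE, so peek() returns -1
 * until at least 4000 bytes are queued. Once that level is reached, reading
 * proceeds even if the fill level later falls below 4000 again; only a full
 * underrun (the read index catching up with the write index) re-arms
 * prebuffering.
 */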
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
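/*
 * Illustrative consumer loop (not from the original file): peek at the next
 * chunk, process it, then drop what was consumed. When no silence memblock
 * was configured, chunk.memblock may come back NULL to describe a gap that
 * the caller has to render as silence itself.
 *
 *     pa_memchunk c;
 *
 *     while (pa_memblockq_get_length(q) > 0 && pa_memblockq_peek(q, &c) >= 0) {
 *         if (c.memblock) {
 *             // ... render c.length bytes starting at offset c.index ...
 *             pa_memblock_unref(c.memblock);
 *         } else {
 *             // ... render c.length bytes of silence ...
 *         }
 *         pa_memblockq_drop(q, c.length);  // advance the read index
 *     }
 */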
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old, delta;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);

    delta = bq->read_index - old;
    bq->missing += delta;
}
void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    /* This is kind of the inverse of pa_memblockq_drop() */

    bq->read_index -= (int64_t) length;
    bq->missing -= (int64_t) length;
}
pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (pa_memblockq_prebuf_active(bq))
        return FALSE;

    if (pa_memblockq_get_length(bq) <= 0)
        return FALSE;

    return TRUE;
}
size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}
size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}
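/*
 * Worked example (made-up numbers): with tlength=16000 and minreq=2000, a
 * queue currently holding 5000 bytes is missing 11000 bytes, so 11000 is
 * returned. Holding 15000 bytes it is only missing 1000 < minreq, so 0 is
 * returned and no refill is requested yet; this keeps refill requests from
 * degenerating into tiny fragments.
 */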
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
    int64_t old, delta;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= (int64_t) bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= (size_t) delta;
        delta = 0;
    }

    bq->missing -= delta;
}
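/*
 * Illustrative sketch (not from the original file): a writer that wants to
 * leave a gap, e.g. to start a new segment one second past whatever is
 * queued, can seek relative to the end before pushing; peek() later reports
 * the untouched region as silence.
 *
 *     pa_memblockq_seek(q, (int64_t) bytes_per_second, PA_SEEK_RELATIVE_END);
 *     pa_memblockq_push(q, &first_chunk);  // lands after the gap
 *
 * bytes_per_second and first_chunk are assumptions for the example.
 */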
void pa_memblockq_flush_write(pa_memblockq *bq) {
    int64_t old, delta;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= (int64_t) bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= (size_t) delta;
        delta = 0;
    }

    bq->missing -= delta;
}
void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old, delta;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);

    delta = bq->read_index - old;
    bq->missing += delta;
}
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
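/*
 * Illustrative sketch (not from the original file): push_align lets callers
 * feed chunks whose length is not a multiple of the base; the pa_mcalign
 * helper buffers the remainder until enough bytes arrive to form whole
 * frames. For example, with base=4, pushing 6 bytes queues 4 and retains 2,
 * and pushing 6 more queues the remaining 8:
 *
 *     pa_memblockq_push_align(q, &six_byte_chunk);   // 4 bytes enter the queue
 *     pa_memblockq_push_align(q, &six_byte_chunk2);  // remaining 8 bytes enter
 *
 * six_byte_chunk and six_byte_chunk2 are assumptions for the example.
 */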
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

    /* pa_log("pop: %lli", bq->missing); */

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;
    bq->missing = 0;
    bq->requested += l;

    return l;
}
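/*
 * Worked example of the missing/requested accounting (made-up numbers): after
 * pa_memblockq_drop(q, 4000), missing grows by 4000. A subsequent
 * pa_memblockq_pop_missing(q) returns 4000 once, moves it into requested and
 * resets missing, so the caller can ask the client for exactly that much
 * data. When those 4000 bytes are eventually pushed, pa_memblockq_push()
 * cancels them against requested instead of decrementing missing again, so
 * nothing is requested twice.
 */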
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);

    if (bq->prebuf > bq->maxlength)
        pa_memblockq_set_prebuf(bq, bq->maxlength);
}
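/*
 * Worked example of the rounding (made-up numbers): with base=4 (one S16LE
 * stereo frame), maxlength=1001 is rounded up to 1004, the next multiple of
 * the base, via ((1001+4-1)/4)*4. The dependent limits are then re-clamped so
 * that tlength and prebuf never exceed maxlength.
 */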
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->prebuf > bq->tlength)
        pa_memblockq_set_prebuf(bq, bq->tlength);

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength)
        bq->prebuf = bq->tlength;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;

    if (bq->minreq > bq->prebuf)
        pa_memblockq_set_minreq(bq, bq->prebuf);
}
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq > bq->prebuf)
        bq->minreq = bq->prebuf;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;
}
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE);

        /* Consume the spliced data from the source queue; dropping from bq
         * here would discard the data just pushed and loop forever, since
         * peek() never advances the source */
        pa_memblockq_drop(source, chunk.length);
    }
}
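/*
 * Illustrative note (not from the original file): splicing moves everything
 * currently readable from one queue into another; gaps in the source (NULL
 * memblocks) are reproduced in the target by seeking the write pointer
 * forward rather than by copying silence.
 *
 *     if (pa_memblockq_splice(render_q, recv_q) < 0)
 *         pa_log_warn("target queue overflow during splice");
 *
 * render_q and recv_q are hypothetical names for the example.
 */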
void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}
void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}
pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}

size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}