/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"

struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};
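
/* Retired list items are recycled through this lock-free free list
 * rather than handed straight back to the allocator, which keeps
 * frequent drop/push cycles cheap. */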
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
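
/* A pa_memblockq is a doubly linked list of memchunks, kept sorted by
 * the absolute byte index each chunk occupies in the stream. read_index
 * and write_index are absolute positions as well, so the list may
 * contain holes (which read back as silence) and the write position can
 * be moved freely with pa_memblockq_seek(). current_read/current_write
 * only cache list positions; fix_current_read()/fix_current_write()
 * revalidate them lazily. */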
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;
    struct list_item *current_read, *current_write;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;
    pa_bool_t in_prebuf;
    pa_memchunk silence;
    pa_mcalign *mcalign;
    int64_t missing, requested;
};

pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = 0;
    bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
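
/*
 * A minimal usage sketch (illustrative only, not upstream documentation;
 * the 4-byte base assumes e.g. 16-bit stereo frames, and "chunk"/"out"
 * stand in for real pa_memchunks):
 *
 *     pa_memblockq *q = pa_memblockq_new(0, 1024*1024, 0, 4, 4, 4, 0, NULL);
 *
 *     pa_memblockq_push(q, &chunk);        // chunk.length must be a multiple of 4
 *
 *     pa_memchunk out;
 *     if (pa_memblockq_peek(q, &out) >= 0) {
 *         // ...consume out; out.memblock may be NULL for a silence hole...
 *         if (out.memblock)
 *             pa_memblock_unref(out.memblock);
 *         pa_memblockq_drop(q, out.length);
 *     }
 *
 *     pa_memblockq_free(q);
 */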

void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_silence(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}

static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
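
/* Unlink an item from the block list: keep the cached read/write
 * cursors valid, drop the reference to the chunk's memblock and recycle
 * the item through the free list (falling back to pa_xfree() if the
 * free list is full). */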
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}

static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}
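
/* Check whether l bytes can be written at the current write index
 * without the queue growing beyond maxlength. Bytes that merely fill
 * the gap up to the read index (e.g. after a backward seek) don't count
 * against the limit. */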
static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return TRUE;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return FALSE;

    return TRUE;
}

static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, pa_bool_t account) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->write_index - old_write_index;

    if (account)
        bq->requested -= delta;
    else
        bq->missing -= delta;

    /* pa_log("pushed/seeked %lli: requested counter at %lli, account=%i", (long long) delta, (long long) bq->requested, account); */
}

static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->read_index - old_read_index;
    bq->missing += delta;

    /* pa_log("popped %lli: missing counter at %lli", (long long) delta, (long long) bq->missing); */
}
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, TRUE);
    return 0;
}

pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf)
        return pa_memblockq_get_length(bq) < bq->prebuf;
    else
        return bq->prebuf > 0 && bq->read_index >= bq->write_index;
}
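
/* Advance the prebuf state machine: prebuffering ends once at least
 * prebuf bytes are queued, and is entered again when the queue runs
 * empty. Returns TRUE as long as the reader should keep waiting. */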
static pa_bool_t update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}
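
/* Return the chunk at the read index without consuming it. If the read
 * position lies in a hole, a (possibly shortened) reference to the
 * configured silence chunk is handed out instead; when no silence chunk
 * is set, chunk->memblock stays NULL and chunk->length tells how much
 * data is missing. Returns -1 while prebuffering or when nothing is
 * available at all. */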
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
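
/* Like pa_memblockq_peek(), but always returns exactly block_size bytes,
 * copied into a freshly allocated memblock, with holes and the tail
 * filled from the silence chunk (which therefore must be set). */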
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    if (tchunk.length >= block_size) {
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblockq_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
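
/* Advance the read index by length bytes. This walks the queue chunk by
 * chunk so that prebuffering can kick back in part way through, and
 * finally releases everything that fell behind the maxrewind window. */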
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}

void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    /* This is kind of the inverse of pa_memblockq_drop() */

    bq->read_index -= (int64_t) length;

    read_index_changed(bq, old);
}

pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (pa_memblockq_prebuf_active(bq))
        return FALSE;

    if (pa_memblockq_get_length(bq) <= 0)
        return FALSE;

    return TRUE;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}
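
/* Reposition the write index. offset is interpreted according to the
 * seek mode: relative to the current write index, absolute, relative to
 * the read index, or relative to the end of the queued data. If account
 * is TRUE the move is billed to the "requested" counter, otherwise to
 * "missing" (see pa_memblockq_pop_missing() below). */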
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}

void pa_memblockq_flush_write(pa_memblockq *bq, pa_bool_t account) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, old, account);
}

void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, old);
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}

int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
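
/* Flow-control bookkeeping: read_index_changed() grows "missing" as
 * data is consumed, pa_memblockq_pop_missing() hands that amount to the
 * caller (typically to be turned into a request for more data) and
 * moves it over to "requested", and write_index_changed() with
 * account=TRUE pays "requested" off again once the data arrives. */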
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

    /* pa_log("pop: %lli", bq->missing); */

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;

    bq->requested += bq->missing;
    bq->missing = 0;

    /* pa_log("sent %lli: request counter is at %lli", (long long) l, (long long) bq->requested); */

    return l;
}
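
/* The setters below keep the metrics consistent as a group: every value
 * is rounded to a multiple of base, tlength is capped by maxlength,
 * minreq by tlength, and prebuf by tlength+base-minreq, so changing one
 * value may adjust the others. */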
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}

void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}

void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
}

void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;
}

void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}

void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_prebuf(bq, a->prebuf);
    pa_memblockq_set_minreq(bq, a->minreq);
}

void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}
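
/* Move as much data as possible from source into bq: real chunks are
 * pushed (re-aligned if necessary), holes are translated into relative
 * seeks, and the source is drained as we go. Returns -1 if bq
 * overflows. */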
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);

        /* Consume what we just copied from the source, otherwise the
         * next iteration would peek the very same data again */
        pa_memblockq_drop(source, chunk.length);
    }
}

void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}

void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}

pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}

void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}

unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}

size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}