alsa: Fix assertion on mmap_write (triggered via a52 plugin)
[pulseaudio-mirror.git] / src / modules / alsa / alsa-sink.c
blob2995c3cebd4d5a6cb520d5efa64c8f985058dff5
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdio.h>
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
86 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256) /* 1.33ms @48kHz, should work for most hardware */
/* Per-sink instance state, shared between main and IO thread. */
88 struct userdata {
/* Owning core/module and the sink object we implement. */
89 pa_core *core;
90 pa_module *module;
91 pa_sink *sink;
/* IO thread, its message queue and poll loop. */
93 pa_thread *thread;
94 pa_thread_mq thread_mq;
95 pa_rtpoll *rtpoll;
/* ALSA PCM handle; NULL while the sink is suspended (see suspend()/unsuspend()). */
97 snd_pcm_t *pcm_handle;
/* Mixer plumbing for hardware volume/mute control. */
99 pa_alsa_fdlist *mixer_fdl;
100 snd_mixer_t *mixer_handle;
101 pa_alsa_path_set *mixer_path_set;
102 pa_alsa_path *mixer_path;
/* Last volume written to/read from the hardware mixer. */
104 pa_cvolume hardware_volume;
/* All of the following sizes are in bytes. */
106 size_t
107 frame_size,
108 fragment_size,
109 hwbuf_size,
110 tsched_watermark,
111 hwbuf_unused,
112 min_sleep,
113 min_wakeup,
114 watermark_inc_step,
115 watermark_dec_step,
116 watermark_inc_threshold,
117 watermark_dec_threshold,
118 rewind_safeguard;
/* Earliest time at which decrease_watermark() may lower the watermark again. */
120 pa_usec_t watermark_dec_not_before;
/* Partially-written chunk carried across iterations of the writei path (unix_write). */
122 pa_memchunk memchunk;
124 char *device_name; /* name of the PCM device */
125 char *control_device; /* name of the control device */
127 pa_bool_t use_mmap:1, use_tsched:1;
/* first: set after (re)start until the first successful write;
 * after_rewind: suppresses underrun accounting right after a rewind. */
129 pa_bool_t first, after_rewind;
131 pa_rtpoll_item *alsa_rtpoll_item;
133 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
/* Latency smoothing: total bytes written and smoother update throttling state. */
135 pa_smoother *smoother;
136 uint64_t write_count;
137 uint64_t since_start;
138 pa_usec_t smoother_interval;
139 pa_usec_t last_smoother_update;
/* Session device-reservation integration (handover to other apps). */
141 pa_reserve_wrapper *reserve;
142 pa_hook_slot *reserve_slot;
143 pa_reserve_monitor_wrapper *monitor;
144 pa_hook_slot *monitor_slot;
147 static void userdata_free(struct userdata *u);
149 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
150 pa_assert(r);
151 pa_assert(u);
153 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
154 return PA_HOOK_CANCEL;
156 return PA_HOOK_OK;
159 static void reserve_done(struct userdata *u) {
160 pa_assert(u);
162 if (u->reserve_slot) {
163 pa_hook_slot_free(u->reserve_slot);
164 u->reserve_slot = NULL;
167 if (u->reserve) {
168 pa_reserve_wrapper_unref(u->reserve);
169 u->reserve = NULL;
173 static void reserve_update(struct userdata *u) {
174 const char *description;
175 pa_assert(u);
177 if (!u->sink || !u->reserve)
178 return;
180 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
181 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
184 static int reserve_init(struct userdata *u, const char *dname) {
185 char *rname;
187 pa_assert(u);
188 pa_assert(dname);
190 if (u->reserve)
191 return 0;
193 if (pa_in_system_mode())
194 return 0;
196 if (!(rname = pa_alsa_get_reserve_name(dname)))
197 return 0;
199 /* We are resuming, try to lock the device */
200 u->reserve = pa_reserve_wrapper_get(u->core, rname);
201 pa_xfree(rname);
203 if (!(u->reserve))
204 return -1;
206 reserve_update(u);
208 pa_assert(!u->reserve_slot);
209 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211 return 0;
214 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
215 pa_bool_t b;
217 pa_assert(w);
218 pa_assert(u);
220 b = PA_PTR_TO_UINT(busy) && !u->reserve;
222 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
223 return PA_HOOK_OK;
226 static void monitor_done(struct userdata *u) {
227 pa_assert(u);
229 if (u->monitor_slot) {
230 pa_hook_slot_free(u->monitor_slot);
231 u->monitor_slot = NULL;
234 if (u->monitor) {
235 pa_reserve_monitor_wrapper_unref(u->monitor);
236 u->monitor = NULL;
240 static int reserve_monitor_init(struct userdata *u, const char *dname) {
241 char *rname;
243 pa_assert(u);
244 pa_assert(dname);
246 if (pa_in_system_mode())
247 return 0;
249 if (!(rname = pa_alsa_get_reserve_name(dname)))
250 return 0;
252 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
253 pa_xfree(rname);
255 if (!(u->monitor))
256 return -1;
258 pa_assert(!u->monitor_slot);
259 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
261 return 0;
264 static void fix_min_sleep_wakeup(struct userdata *u) {
265 size_t max_use, max_use_2;
267 pa_assert(u);
268 pa_assert(u->use_tsched);
270 max_use = u->hwbuf_size - u->hwbuf_unused;
271 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
273 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
274 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
276 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
277 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
280 static void fix_tsched_watermark(struct userdata *u) {
281 size_t max_use;
282 pa_assert(u);
283 pa_assert(u->use_tsched);
285 max_use = u->hwbuf_size - u->hwbuf_unused;
287 if (u->tsched_watermark > max_use - u->min_sleep)
288 u->tsched_watermark = max_use - u->min_sleep;
290 if (u->tsched_watermark < u->min_wakeup)
291 u->tsched_watermark = u->min_wakeup;
294 static void increase_watermark(struct userdata *u) {
295 size_t old_watermark;
296 pa_usec_t old_min_latency, new_min_latency;
298 pa_assert(u);
299 pa_assert(u->use_tsched);
301 /* First, just try to increase the watermark */
302 old_watermark = u->tsched_watermark;
303 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
304 fix_tsched_watermark(u);
306 if (old_watermark != u->tsched_watermark) {
307 pa_log_info("Increasing wakeup watermark to %0.2f ms",
308 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
309 return;
312 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
313 old_min_latency = u->sink->thread_info.min_latency;
314 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
315 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
317 if (old_min_latency != new_min_latency) {
318 pa_log_info("Increasing minimal latency to %0.2f ms",
319 (double) new_min_latency / PA_USEC_PER_MSEC);
321 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
324 /* When we reach this we're officialy fucked! */
327 static void decrease_watermark(struct userdata *u) {
328 size_t old_watermark;
329 pa_usec_t now;
331 pa_assert(u);
332 pa_assert(u->use_tsched);
334 now = pa_rtclock_now();
336 if (u->watermark_dec_not_before <= 0)
337 goto restart;
339 if (u->watermark_dec_not_before > now)
340 return;
342 old_watermark = u->tsched_watermark;
344 if (u->tsched_watermark < u->watermark_dec_step)
345 u->tsched_watermark = u->tsched_watermark / 2;
346 else
347 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
349 fix_tsched_watermark(u);
351 if (old_watermark != u->tsched_watermark)
352 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
353 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
355 /* We don't change the latency range*/
357 restart:
358 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
361 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
362 pa_usec_t usec, wm;
364 pa_assert(sleep_usec);
365 pa_assert(process_usec);
367 pa_assert(u);
368 pa_assert(u->use_tsched);
370 usec = pa_sink_get_requested_latency_within_thread(u->sink);
372 if (usec == (pa_usec_t) -1)
373 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
375 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
377 if (wm > usec)
378 wm = usec/2;
380 *sleep_usec = usec - wm;
381 *process_usec = wm;
383 #ifdef DEBUG_TIMING
384 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385 (unsigned long) (usec / PA_USEC_PER_MSEC),
386 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
387 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
388 #endif
391 static int try_recover(struct userdata *u, const char *call, int err) {
392 pa_assert(u);
393 pa_assert(call);
394 pa_assert(err < 0);
396 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
398 pa_assert(err != -EAGAIN);
400 if (err == -EPIPE)
401 pa_log_debug("%s: Buffer underrun!", call);
403 if (err == -ESTRPIPE)
404 pa_log_debug("%s: System suspended!", call);
406 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
407 pa_log("%s: %s", call, pa_alsa_strerror(err));
408 return -1;
411 u->first = TRUE;
412 u->since_start = 0;
413 return 0;
/* Given the number of free bytes in the hw buffer ("n_bytes"), compute
 * how many bytes are still queued for playback, detect underruns, and
 * adapt the tsched watermark up or down accordingly. "on_timeout" is
 * TRUE only when this wakeup was caused by the timer (required before
 * we are allowed to lower the watermark). Returns bytes left to play. */
416 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
417 size_t left_to_play;
418 pa_bool_t underrun = FALSE;
420 /* We use <= instead of < for this check here because an underrun
421 * only happens after the last sample was processed, not already when
422 * it is removed from the buffer. This is particularly important
423 * when block transfer is used. */
425 if (n_bytes <= u->hwbuf_size)
426 left_to_play = u->hwbuf_size - n_bytes;
427 else {
429 /* We got a dropout. What a mess! */
430 left_to_play = 0;
431 underrun = TRUE;
433 #ifdef DEBUG_TIMING
434 PA_DEBUG_TRAP;
435 #endif
/* Don't spam the log right after a (re)start or a rewind, where a
 * short dropout is expected. */
437 if (!u->first && !u->after_rewind)
438 if (pa_log_ratelimit())
439 pa_log_info("Underrun!");
442 #ifdef DEBUG_TIMING
443 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
444 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
445 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
446 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
447 #endif
449 if (u->use_tsched) {
450 pa_bool_t reset_not_before = TRUE;
452 if (!u->first && !u->after_rewind) {
/* Too little queued (or a real underrun): react immediately. */
453 if (underrun || left_to_play < u->watermark_inc_threshold)
454 increase_watermark(u);
455 else if (left_to_play > u->watermark_dec_threshold) {
456 reset_not_before = FALSE;
458 /* We decrease the watermark only if have actually
459 * been woken up by a timeout. If something else woke
460 * us up it's too easy to fulfill the deadlines... */
462 if (on_timeout)
463 decrease_watermark(u);
/* Buffer level dipped into the middle zone: restart the
 * verification period for future decreases. */
467 if (reset_not_before)
468 u->watermark_dec_not_before = 0;
471 return left_to_play;
/* Fill the hardware buffer via the ALSA mmap interface: render sink
 * data directly into the DMA area (zero copy via fixed memblocks).
 * "polled" is TRUE when we were woken by POLLOUT; "on_timeout" when a
 * timer fired. On success *sleep_usec is set to how long we may sleep.
 * Returns 1 if something was written, 0 if not, negative on fatal error. */
474 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
475 pa_bool_t work_done = TRUE;
476 pa_usec_t max_sleep_usec = 0, process_usec = 0;
477 size_t left_to_play;
478 unsigned j = 0;
480 pa_assert(u);
481 pa_sink_assert_ref(u->sink);
483 if (u->use_tsched)
484 hw_sleep_time(u, &max_sleep_usec, &process_usec);
486 for (;;) {
487 snd_pcm_sframes_t n;
488 size_t n_bytes;
489 int r;
/* TRUE until the first successful mmap_begin/commit of this round;
 * afterwards -EAGAIN or 0 frames just means "buffer filled up in the
 * meantime" and must NOT trip the frames > 0 assertion below. */
490 pa_bool_t after_avail = TRUE;
492 /* First we determine how many samples are missing to fill the
493 * buffer up to 100% */
495 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
497 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
498 continue;
500 return r;
503 n_bytes = (size_t) n * u->frame_size;
505 #ifdef DEBUG_TIMING
506 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
507 #endif
509 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
510 on_timeout = FALSE;
512 if (u->use_tsched)
514 /* We won't fill up the playback buffer before at least
515 * half the sleep time is over because otherwise we might
516 * ask for more data from the clients then they expect. We
517 * need to guarantee that clients only have to keep around
518 * a single hw buffer length. */
520 if (!polled &&
521 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
522 #ifdef DEBUG_TIMING
523 pa_log_debug("Not filling up, because too early.");
524 #endif
525 break;
528 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
530 if (polled)
531 PA_ONCE_BEGIN {
532 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
533 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
534 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
535 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
536 pa_strnull(dn));
537 pa_xfree(dn);
538 } PA_ONCE_END;
540 #ifdef DEBUG_TIMING
541 pa_log_debug("Not filling up, because not necessary.");
542 #endif
543 break;
/* Safety valve: never loop forever in a single dispatch. */
547 if (++j > 10) {
548 #ifdef DEBUG_TIMING
549 pa_log_debug("Not filling up, because already too many iterations.");
550 #endif
552 break;
555 n_bytes -= u->hwbuf_unused;
556 polled = FALSE;
558 #ifdef DEBUG_TIMING
559 pa_log_debug("Filling up");
560 #endif
/* Inner loop: map a chunk of the DMA buffer, render into it, commit. */
562 for (;;) {
563 pa_memchunk chunk;
564 void *p;
565 int err;
566 const snd_pcm_channel_area_t *areas;
567 snd_pcm_uframes_t offset, frames;
568 snd_pcm_sframes_t sframes;
570 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
571 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
573 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
/* Not the first write of this round: EAGAIN is benign, stop here. */
575 if (!after_avail && err == -EAGAIN)
576 break;
578 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
579 continue;
581 return r;
584 /* Make sure that if these memblocks need to be copied they will fit into one slot */
585 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
586 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
/* Zero frames after a successful earlier write just means the buffer
 * is full -- this check is the fix for the a52-plugin assertion. */
588 if (!after_avail && frames == 0)
589 break;
591 pa_assert(frames > 0);
592 after_avail = FALSE;
594 /* Check these are multiples of 8 bit */
595 pa_assert((areas[0].first & 7) == 0);
596 pa_assert((areas[0].step & 7)== 0);
598 /* We assume a single interleaved memory buffer */
599 pa_assert((areas[0].first >> 3) == 0);
600 pa_assert((areas[0].step >> 3) == u->frame_size);
602 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
/* Wrap the DMA area in a fixed (non-owned) memblock so the sink can
 * render straight into it, then release the wrapper again. */
604 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
605 chunk.length = pa_memblock_get_length(chunk.memblock);
606 chunk.index = 0;
608 pa_sink_render_into_full(u->sink, &chunk);
609 pa_memblock_unref_fixed(chunk.memblock);
611 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
613 if (!after_avail && (int) sframes == -EAGAIN)
614 break;
616 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
617 continue;
619 return r;
622 work_done = TRUE;
624 u->write_count += frames * u->frame_size;
625 u->since_start += frames * u->frame_size;
627 #ifdef DEBUG_TIMING
628 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
629 #endif
631 if ((size_t) frames * u->frame_size >= n_bytes)
632 break;
634 n_bytes -= (size_t) frames * u->frame_size;
/* Convert the bytes still queued into a sleep time, leaving the
 * process_usec headroom for the next render pass. */
638 if (u->use_tsched) {
639 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
641 if (*sleep_usec > process_usec)
642 *sleep_usec -= process_usec;
643 else
644 *sleep_usec = 0;
645 } else
646 *sleep_usec = 0;
648 return work_done ? 1 : 0;
/* Fill the hardware buffer via snd_pcm_writei() (non-mmap fallback):
 * render into u->memchunk and copy it to the device, carrying partial
 * chunks across calls. Same contract as mmap_write(): sets *sleep_usec,
 * returns 1 if something was written, 0 if not, negative on fatal error. */
651 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
652 pa_bool_t work_done = FALSE;
653 pa_usec_t max_sleep_usec = 0, process_usec = 0;
654 size_t left_to_play;
655 unsigned j = 0;
657 pa_assert(u);
658 pa_sink_assert_ref(u->sink);
660 if (u->use_tsched)
661 hw_sleep_time(u, &max_sleep_usec, &process_usec);
663 for (;;) {
664 snd_pcm_sframes_t n;
665 size_t n_bytes;
666 int r;
/* TRUE until the first successful writei of this round; afterwards
 * -EAGAIN / 0 frames just mean the buffer filled up meanwhile. */
667 pa_bool_t after_avail = TRUE;
669 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
671 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
672 continue;
674 return r;
677 n_bytes = (size_t) n * u->frame_size;
678 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
679 on_timeout = FALSE;
681 if (u->use_tsched)
683 /* We won't fill up the playback buffer before at least
684 * half the sleep time is over because otherwise we might
685 * ask for more data from the clients then they expect. We
686 * need to guarantee that clients only have to keep around
687 * a single hw buffer length. */
689 if (!polled &&
690 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
691 break;
693 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
695 if (polled)
696 PA_ONCE_BEGIN {
697 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
698 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
699 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
700 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
701 pa_strnull(dn));
702 pa_xfree(dn);
703 } PA_ONCE_END;
705 break;
/* Safety valve: never loop forever in a single dispatch. */
708 if (++j > 10) {
709 #ifdef DEBUG_TIMING
710 pa_log_debug("Not filling up, because already too many iterations.");
711 #endif
713 break;
716 n_bytes -= u->hwbuf_unused;
717 polled = FALSE;
/* Inner loop: render (or reuse a leftover chunk) and writei it out. */
719 for (;;) {
720 snd_pcm_sframes_t frames;
721 void *p;
723 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
725 if (u->memchunk.length <= 0)
726 pa_sink_render(u->sink, n_bytes, &u->memchunk);
728 pa_assert(u->memchunk.length > 0);
730 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
732 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
733 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
735 p = pa_memblock_acquire(u->memchunk.memblock);
736 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
737 pa_memblock_release(u->memchunk.memblock);
739 if (PA_UNLIKELY(frames < 0)) {
/* Not the first write of this round: EAGAIN is benign, stop here. */
741 if (!after_avail && (int) frames == -EAGAIN)
742 break;
744 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
745 continue;
747 return r;
750 if (!after_avail && frames == 0)
751 break;
753 pa_assert(frames > 0);
754 after_avail = FALSE;
/* Advance past the consumed part; free the chunk once exhausted. */
756 u->memchunk.index += (size_t) frames * u->frame_size;
757 u->memchunk.length -= (size_t) frames * u->frame_size;
759 if (u->memchunk.length <= 0) {
760 pa_memblock_unref(u->memchunk.memblock);
761 pa_memchunk_reset(&u->memchunk);
764 work_done = TRUE;
766 u->write_count += frames * u->frame_size;
767 u->since_start += frames * u->frame_size;
769 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
771 if ((size_t) frames * u->frame_size >= n_bytes)
772 break;
774 n_bytes -= (size_t) frames * u->frame_size;
/* Convert the bytes still queued into a sleep time, leaving the
 * process_usec headroom for the next render pass. */
778 if (u->use_tsched) {
779 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
781 if (*sleep_usec > process_usec)
782 *sleep_usec -= process_usec;
783 else
784 *sleep_usec = 0;
785 } else
786 *sleep_usec = 0;
788 return work_done ? 1 : 0;
/* Feed the latency smoother with a fresh (wallclock, playback position)
 * pair derived from the current ALSA delay. Updates are throttled by
 * smoother_interval, which grows exponentially up to SMOOTHER_MAX_INTERVAL. */
791 static void update_smoother(struct userdata *u) {
792 snd_pcm_sframes_t delay = 0;
793 int64_t position;
794 int err;
795 pa_usec_t now1 = 0, now2;
796 snd_pcm_status_t *status;
798 snd_pcm_status_alloca(&status);
800 pa_assert(u);
801 pa_assert(u->pcm_handle);
803 /* Let's update the time smoother */
805 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
806 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
807 return;
/* Prefer the driver-provided timestamp for the delay snapshot. */
810 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
811 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
812 else {
813 snd_htimestamp_t htstamp = { 0, 0 };
814 snd_pcm_status_get_htstamp(status, &htstamp);
815 now1 = pa_timespec_load(&htstamp);
818 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
819 if (now1 <= 0)
820 now1 = pa_rtclock_now();
822 /* check if the time since the last update is bigger than the interval */
823 if (u->last_smoother_update > 0)
824 if (u->last_smoother_update + u->smoother_interval > now1)
825 return;
/* Position = bytes handed to ALSA minus what is still queued. */
827 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
829 if (PA_UNLIKELY(position < 0))
830 position = 0;
832 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
834 pa_smoother_put(u->smoother, now1, now2);
836 u->last_smoother_update = now1;
837 /* exponentially increase the update interval up to the MAX limit */
838 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
841 static pa_usec_t sink_get_latency(struct userdata *u) {
842 pa_usec_t r;
843 int64_t delay;
844 pa_usec_t now1, now2;
846 pa_assert(u);
848 now1 = pa_rtclock_now();
849 now2 = pa_smoother_get(u->smoother, now1);
851 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
853 r = delay >= 0 ? (pa_usec_t) delay : 0;
855 if (u->memchunk.memblock)
856 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
858 return r;
861 static int build_pollfd(struct userdata *u) {
862 pa_assert(u);
863 pa_assert(u->pcm_handle);
865 if (u->alsa_rtpoll_item)
866 pa_rtpoll_item_free(u->alsa_rtpoll_item);
868 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
869 return -1;
871 return 0;
874 /* Called from IO context */
875 static int suspend(struct userdata *u) {
876 pa_assert(u);
877 pa_assert(u->pcm_handle);
879 pa_smoother_pause(u->smoother, pa_rtclock_now());
881 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
882 * take awfully long with our long buffer sizes today. */
883 snd_pcm_close(u->pcm_handle);
884 u->pcm_handle = NULL;
886 if (u->alsa_rtpoll_item) {
887 pa_rtpoll_item_free(u->alsa_rtpoll_item);
888 u->alsa_rtpoll_item = NULL;
891 /* We reset max_rewind/max_request here to make sure that while we
892 * are suspended the old max_request/max_rewind values set before
893 * the suspend can influence the per-stream buffer of newly
894 * created streams, without their requirements having any
895 * influence on them. */
896 pa_sink_set_max_rewind_within_thread(u->sink, 0);
897 pa_sink_set_max_request_within_thread(u->sink, 0);
899 pa_log_info("Device suspended...");
901 return 0;
904 /* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting sw params to ALSA. Also refreshes the
 * sink's max_request/max_rewind. Returns 0 on success, the (negative)
 * ALSA error otherwise. */
905 static int update_sw_params(struct userdata *u) {
906 snd_pcm_uframes_t avail_min;
907 int err;
909 pa_assert(u);
911 /* Use the full buffer if noone asked us for anything specific */
912 u->hwbuf_unused = 0;
914 if (u->use_tsched) {
915 pa_usec_t latency;
917 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
918 size_t b;
920 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
922 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
924 /* We need at least one sample in our buffer */
926 if (PA_UNLIKELY(b < u->frame_size))
927 b = u->frame_size;
/* Everything beyond the requested latency is left unused. */
929 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
932 fix_min_sleep_wakeup(u);
933 fix_tsched_watermark(u);
936 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
938 /* We need at last one frame in the used part of the buffer */
939 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
941 if (u->use_tsched) {
942 pa_usec_t sleep_usec, process_usec;
/* With timer-based scheduling, wake up only after the sleep window. */
944 hw_sleep_time(u, &sleep_usec, &process_usec);
945 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
948 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
950 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
951 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
952 return err;
955 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
956 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
958 return 0;
961 /* Called from IO context */
/* Resume from suspend: reopen the PCM device and restore the exact
 * previous configuration (access mode, sample spec, fragment/buffer
 * sizes). Any mismatch is treated as failure; on failure the handle is
 * closed again and -PA_ERR_IO is returned. */
962 static int unsuspend(struct userdata *u) {
963 pa_sample_spec ss;
964 int err;
965 pa_bool_t b, d;
966 snd_pcm_uframes_t period_size, buffer_size;
968 pa_assert(u);
969 pa_assert(!u->pcm_handle);
971 pa_log_info("Trying resume...");
973 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
974 SND_PCM_NONBLOCK|
975 SND_PCM_NO_AUTO_RESAMPLE|
976 SND_PCM_NO_AUTO_CHANNELS|
977 SND_PCM_NO_AUTO_FORMAT)) < 0) {
978 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
979 goto fail;
/* Ask for exactly the configuration we had before the suspend. */
982 ss = u->sink->sample_spec;
983 period_size = u->fragment_size / u->frame_size;
984 buffer_size = u->hwbuf_size / u->frame_size;
985 b = u->use_mmap;
986 d = u->use_tsched;
988 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
989 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
990 goto fail;
993 if (b != u->use_mmap || d != u->use_tsched) {
994 pa_log_warn("Resume failed, couldn't get original access mode.");
995 goto fail;
998 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
999 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1000 goto fail;
1003 if (period_size*u->frame_size != u->fragment_size ||
1004 buffer_size*u->frame_size != u->hwbuf_size) {
1005 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1006 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1007 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1008 goto fail;
1011 if (update_sw_params(u) < 0)
1012 goto fail;
1014 if (build_pollfd(u) < 0)
1015 goto fail;
/* Reset playback accounting and restart the smoother from scratch. */
1017 u->write_count = 0;
1018 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1019 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1020 u->last_smoother_update = 0;
1022 u->first = TRUE;
1023 u->since_start = 0;
1025 pa_log_info("Resumed successfully...");
1027 return 0;
1029 fail:
1030 if (u->pcm_handle) {
1031 snd_pcm_close(u->pcm_handle);
1032 u->pcm_handle = NULL;
1035 return -PA_ERR_IO;
1038 /* Called from IO context */
/* Message handler for the sink: answers GET_LATENCY from the smoother
 * and drives suspend()/unsuspend()/build_pollfd() on state changes.
 * Everything else is delegated to pa_sink_process_msg(). */
1039 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1040 struct userdata *u = PA_SINK(o)->userdata;
1042 switch (code) {
1044 case PA_SINK_MESSAGE_GET_LATENCY: {
1045 pa_usec_t r = 0;
/* While suspended (no PCM handle) report zero latency. */
1047 if (u->pcm_handle)
1048 r = sink_get_latency(u);
1050 *((pa_usec_t*) data) = r;
1052 return 0;
1055 case PA_SINK_MESSAGE_SET_STATE:
1057 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1059 case PA_SINK_SUSPENDED: {
1060 int r;
1062 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1064 if ((r = suspend(u)) < 0)
1065 return r;
1067 break;
1070 case PA_SINK_IDLE:
1071 case PA_SINK_RUNNING: {
1072 int r;
/* First transition out of INIT: set up the poll descriptors. */
1074 if (u->sink->thread_info.state == PA_SINK_INIT) {
1075 if (build_pollfd(u) < 0)
1076 return -PA_ERR_IO;
/* Coming back from suspend: reopen and reconfigure the device. */
1079 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1080 if ((r = unsuspend(u)) < 0)
1081 return r;
1084 break;
1087 case PA_SINK_UNLINKED:
1088 case PA_SINK_INIT:
1089 case PA_SINK_INVALID_STATE:
1093 break;
1096 return pa_sink_process_msg(o, code, data, offset, chunk);
1099 /* Called from main context */
1100 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1101 pa_sink_state_t old_state;
1102 struct userdata *u;
1104 pa_sink_assert_ref(s);
1105 pa_assert_se(u = s->userdata);
1107 old_state = pa_sink_get_state(u->sink);
1109 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1110 reserve_done(u);
1111 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1112 if (reserve_init(u, u->device_name) < 0)
1113 return -PA_ERR_BUSY;
1115 return 0;
1118 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1119 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1121 pa_assert(u);
1122 pa_assert(u->mixer_handle);
1124 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1125 return 0;
1127 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1128 return 0;
1130 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1131 pa_sink_get_volume(u->sink, TRUE);
1132 pa_sink_get_mute(u->sink, TRUE);
1135 return 0;
1138 static void sink_get_volume_cb(pa_sink *s) {
1139 struct userdata *u = s->userdata;
1140 pa_cvolume r;
1141 char t[PA_CVOLUME_SNPRINT_MAX];
1143 pa_assert(u);
1144 pa_assert(u->mixer_path);
1145 pa_assert(u->mixer_handle);
1147 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1148 return;
1150 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1151 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1153 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1155 if (pa_cvolume_equal(&u->hardware_volume, &r))
1156 return;
1158 s->real_volume = u->hardware_volume = r;
1160 /* Hmm, so the hardware volume changed, let's reset our software volume */
1161 if (u->mixer_path->has_dB)
1162 pa_sink_set_soft_volume(s, NULL);
/* Called from main context: push s->real_volume to the hardware mixer,
 * read back what the hardware actually applied, and compute the residual
 * software volume needed to match the user's request exactly. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* r now holds what the hardware actually applied, in PA terms. */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1217 static void sink_get_mute_cb(pa_sink *s) {
1218 struct userdata *u = s->userdata;
1219 pa_bool_t b;
1221 pa_assert(u);
1222 pa_assert(u->mixer_path);
1223 pa_assert(u->mixer_handle);
1225 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1226 return;
1228 s->muted = b;
1231 static void sink_set_mute_cb(pa_sink *s) {
1232 struct userdata *u = s->userdata;
1234 pa_assert(u);
1235 pa_assert(u->mixer_path);
1236 pa_assert(u->mixer_handle);
1238 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1241 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1242 struct userdata *u = s->userdata;
1243 pa_alsa_port_data *data;
1245 pa_assert(u);
1246 pa_assert(p);
1247 pa_assert(u->mixer_handle);
1249 data = PA_DEVICE_PORT_DATA(p);
1251 pa_assert_se(u->mixer_path = data->path);
1252 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1254 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1255 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1256 s->n_volume_steps = PA_VOLUME_NORM+1;
1258 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1259 } else {
1260 s->base_volume = PA_VOLUME_NORM;
1261 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1264 if (data->setting)
1265 pa_alsa_setting_select(data->setting, u->mixer_handle);
1267 if (s->set_mute)
1268 s->set_mute(s);
1269 if (s->set_volume)
1270 s->set_volume(s);
1272 return 0;
1275 static void sink_update_requested_latency_cb(pa_sink *s) {
1276 struct userdata *u = s->userdata;
1277 size_t before;
1278 pa_assert(u);
1279 pa_assert(u->use_tsched); /* only when timer scheduling is used
1280 * we can dynamically adjust the
1281 * latency */
1283 if (!u->pcm_handle)
1284 return;
1286 before = u->hwbuf_unused;
1287 update_sw_params(u);
1289 /* Let's check whether we now use only a smaller part of the
1290 buffer then before. If so, we need to make sure that subsequent
1291 rewinds are relative to the new maximum fill level and not to the
1292 current fill level. Thus, let's do a full rewind once, to clear
1293 things up. */
1295 if (u->hwbuf_unused > before) {
1296 pa_log_debug("Requesting rewind due to latency change.");
1297 pa_sink_request_rewind(s, (size_t) -1);
/* Called from IO context: rewind the hardware playback pointer as far as
 * is safely possible and tell the core how many bytes were actually
 * rewound. Returns 0 on success, -1 on unrecoverable ALSA error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    /* The rewindable region is what is queued minus the safeguard. */
    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            /* Recovered: treat the rewind as having moved nothing. */
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may have rewound fewer frames than we asked for. */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        /* NOTE(review): rewind_nbytes is size_t, so "<= 0" is effectively
         * "== 0" here -- presumably intentional; confirm before changing. */
        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    /* Nothing (or nothing effective) was rewound: still inform the core
     * so it can reset its rewind bookkeeping. */
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
/* The sink's IO thread: renders audio into the ALSA buffer, schedules
 * wakeups (timer-based or poll-based), and handles rewinds and device
 * recovery until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: kick off the actual playback. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* pa_rtpoll_run() returning 0 is the regular quit request. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                /* Error/hangup on the PCM fds: try to recover and restart
                 * playback from scratch. */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1499 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1500 const char *n;
1501 char *t;
1503 pa_assert(data);
1504 pa_assert(ma);
1505 pa_assert(device_name);
1507 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1508 pa_sink_new_data_set_name(data, n);
1509 data->namereg_fail = TRUE;
1510 return;
1513 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1514 data->namereg_fail = TRUE;
1515 else {
1516 n = device_id ? device_id : device_name;
1517 data->namereg_fail = FALSE;
1520 if (mapping)
1521 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1522 else
1523 t = pa_sprintf_malloc("alsa_output.%s", n);
1525 pa_sink_new_data_set_name(data, t);
1526 pa_xfree(t);
/* Locate and probe a mixer for the PCM: either a single, explicitly
 * named element, or the full path set of the given mapping. On failure
 * everything mixer-related in *u is torn down again, leaving the sink
 * without hardware volume control (which is non-fatal). */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {
        /* control= was given: synthesize and probe a path for just
         * that one element. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {
        /* Otherwise probe the whole path set belonging to the mapping. */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:
    /* Tear down in reverse order of acquisition; only one of path_set
     * and path can be set, matching the two branches above. */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
/* Wire the previously found mixer into the sink: select the active path,
 * publish hardware volume/mute callbacks and flags, and start watching
 * the mixer fds. Returns 0 on success (including "no mixer available"),
 * -1 only if fd monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No port chosen: fall back to the first path of the set, if any. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Anchor the base volume so that 0 dB maps to maximum volume. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB information: expose the raw mixer step range. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so external changes
     * reach mixer_callback(). */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
/* Create and publish an ALSA sink: parse module arguments, open the PCM
 * device (by mapping, device_id, or device string), probe the mixer,
 * create the pa_sink, configure latency/watermarks, and start the IO
 * thread. Returns the new sink, or NULL on failure (everything already
 * allocated is freed via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size, rewind_safeguard;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss during open. */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = DEFAULT_REWIND_SAFEGUARD_BYTES;
    /* NOTE(review): rewind_safeguard is declared size_t but its address is
     * passed to pa_modargs_get_value_u32(), which presumably writes through
     * a uint32_t*. On LP64 platforms that only updates half of the variable
     * (and is a strict-aliasing violation). Looks like the declaration
     * should be uint32_t -- confirm against pa_modargs and fix. */
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* device_id= takes precedence over device= for the reservation name. */
    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d report back whether mmap/tsched could actually be enabled. */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        /* Convert the watermark via usec so it survives a sample-spec change. */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
/* Tear down everything in *u and free it. The order matters: the sink
 * is unlinked first (so no new work arrives), then the IO thread is shut
 * down, and only then are the resources the thread was using released. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        /* Synchronously ask the IO thread to quit and reap it. */
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Drop any queued samples before closing the device. */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Only one of path_set/path is owned here, mirroring find_mixer(). */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2034 void pa_alsa_sink_free(pa_sink *s) {
2035 struct userdata *u;
2037 pa_sink_assert_ref(s);
2038 pa_assert_se(u = s->userdata);
2040 userdata_free(u);