alsa: Only set the 'first' flag to false when we actually call snd_pcm_start()
[pulseaudio-mirror.git] / src / modules / alsa / alsa-sink.c
blob1108a79742af8b91a1cf2630a5bbaea2ca3f658a
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdio.h>
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means tht we
76 * will increase the watermark only if we hit a real underrun. */
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
/* Per-sink instance state, shared between the main thread and the sink's
 * IO thread (thread-safety of individual fields follows PulseAudio's
 * usual main-context/IO-context split -- not documented per field here). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;            /* the sink's IO thread */
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;        /* NULL while suspended (see suspend()/unsuspend()) */

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;   /* last volume read from/written to the hw mixer */

    /* All of these are byte counts relative to the sink sample spec. */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,         /* fill-up threshold for timer-based scheduling */
        hwbuf_unused,             /* part of the hw buffer we deliberately leave empty */
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;  /* earliest time decrease_watermark() may act */

    pa_memchunk memchunk;         /* partially-written chunk kept across unix_write() calls */

    char *device_name;            /* name of the PCM device */
    char *control_device;         /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first, after_rewind;  /* suppress underrun accounting right after (re)start/rewind */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Time-smoother state used for latency interpolation. */
    pa_smoother *smoother;
    uint64_t write_count;         /* total bytes handed to ALSA since unsuspend */
    uint64_t since_start;         /* bytes written since the last (re)start */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation (session handover) machinery. */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
151 static void userdata_free(struct userdata *u);
153 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
154 pa_assert(r);
155 pa_assert(u);
157 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
158 return PA_HOOK_CANCEL;
160 return PA_HOOK_OK;
163 static void reserve_done(struct userdata *u) {
164 pa_assert(u);
166 if (u->reserve_slot) {
167 pa_hook_slot_free(u->reserve_slot);
168 u->reserve_slot = NULL;
171 if (u->reserve) {
172 pa_reserve_wrapper_unref(u->reserve);
173 u->reserve = NULL;
177 static void reserve_update(struct userdata *u) {
178 const char *description;
179 pa_assert(u);
181 if (!u->sink || !u->reserve)
182 return;
184 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
185 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
188 static int reserve_init(struct userdata *u, const char *dname) {
189 char *rname;
191 pa_assert(u);
192 pa_assert(dname);
194 if (u->reserve)
195 return 0;
197 if (pa_in_system_mode())
198 return 0;
200 if (!(rname = pa_alsa_get_reserve_name(dname)))
201 return 0;
203 /* We are resuming, try to lock the device */
204 u->reserve = pa_reserve_wrapper_get(u->core, rname);
205 pa_xfree(rname);
207 if (!(u->reserve))
208 return -1;
210 reserve_update(u);
212 pa_assert(!u->reserve_slot);
213 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
215 return 0;
218 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
219 pa_bool_t b;
221 pa_assert(w);
222 pa_assert(u);
224 b = PA_PTR_TO_UINT(busy) && !u->reserve;
226 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
227 return PA_HOOK_OK;
230 static void monitor_done(struct userdata *u) {
231 pa_assert(u);
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
238 if (u->monitor) {
239 pa_reserve_monitor_wrapper_unref(u->monitor);
240 u->monitor = NULL;
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245 char *rname;
247 pa_assert(u);
248 pa_assert(dname);
250 if (pa_in_system_mode())
251 return 0;
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
254 return 0;
256 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
257 pa_xfree(rname);
259 if (!(u->monitor))
260 return -1;
262 pa_assert(!u->monitor_slot);
263 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265 return 0;
268 static void fix_min_sleep_wakeup(struct userdata *u) {
269 size_t max_use, max_use_2;
271 pa_assert(u);
272 pa_assert(u->use_tsched);
274 max_use = u->hwbuf_size - u->hwbuf_unused;
275 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
277 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
278 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
281 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
284 static void fix_tsched_watermark(struct userdata *u) {
285 size_t max_use;
286 pa_assert(u);
287 pa_assert(u->use_tsched);
289 max_use = u->hwbuf_size - u->hwbuf_unused;
291 if (u->tsched_watermark > max_use - u->min_sleep)
292 u->tsched_watermark = max_use - u->min_sleep;
294 if (u->tsched_watermark < u->min_wakeup)
295 u->tsched_watermark = u->min_wakeup;
/* React to an underrun (or near-underrun): first try to raise the wakeup
 * watermark; if it is already at its ceiling, fall back to raising the
 * sink's minimal latency instead. If neither can grow we are out of
 * options. Called from the IO thread. */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Double it, but never jump by more than one inc_step at a time */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* When we reach this we're officialy fucked! */
}
/* Lower the wakeup watermark again once playback has been trouble-free
 * for TSCHED_WATERMARK_VERIFY_AFTER_USEC. The 'watermark_dec_not_before'
 * timestamp rate-limits how often we shrink it; it is (re)armed via the
 * 'restart' label. Called from the IO thread. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* First call after a reset: just arm the timer, don't shrink yet */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Verification period not over yet */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Halve it, but never shrink by more than one dec_step at a time */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
365 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
366 pa_usec_t usec, wm;
368 pa_assert(sleep_usec);
369 pa_assert(process_usec);
371 pa_assert(u);
372 pa_assert(u->use_tsched);
374 usec = pa_sink_get_requested_latency_within_thread(u->sink);
376 if (usec == (pa_usec_t) -1)
377 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
379 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
381 if (wm > usec)
382 wm = usec/2;
384 *sleep_usec = usec - wm;
385 *process_usec = wm;
387 #ifdef DEBUG_TIMING
388 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
389 (unsigned long) (usec / PA_USEC_PER_MSEC),
390 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
391 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
392 #endif
395 static int try_recover(struct userdata *u, const char *call, int err) {
396 pa_assert(u);
397 pa_assert(call);
398 pa_assert(err < 0);
400 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
402 pa_assert(err != -EAGAIN);
404 if (err == -EPIPE)
405 pa_log_debug("%s: Buffer underrun!", call);
407 if (err == -ESTRPIPE)
408 pa_log_debug("%s: System suspended!", call);
410 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411 pa_log("%s: %s", call, pa_alsa_strerror(err));
412 return -1;
415 u->first = TRUE;
416 u->since_start = 0;
417 return 0;
/* Given @n_bytes of free space reported by ALSA, compute how many bytes
 * are still queued for playback, detect underruns, and drive the
 * watermark up/down heuristics. @on_timeout tells us whether this wakeup
 * came from the scheduling timer (only then may we lower the watermark).
 * Returns the number of bytes left to play. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after a start or rewind an underrun is expected; only
         * log (rate-limited) when it happens in steady state */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Skip the heuristics right after start/rewind, fill levels are
         * not meaningful then */
        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Buffer level dropped out of the comfortable zone: restart the
         * "all good since" timer used by decrease_watermark() */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
478 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
479 pa_bool_t work_done = TRUE;
480 pa_usec_t max_sleep_usec = 0, process_usec = 0;
481 size_t left_to_play;
482 unsigned j = 0;
484 pa_assert(u);
485 pa_sink_assert_ref(u->sink);
487 if (u->use_tsched)
488 hw_sleep_time(u, &max_sleep_usec, &process_usec);
490 for (;;) {
491 snd_pcm_sframes_t n;
492 size_t n_bytes;
493 int r;
494 pa_bool_t after_avail = TRUE;
496 /* First we determine how many samples are missing to fill the
497 * buffer up to 100% */
499 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
501 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
502 continue;
504 return r;
507 n_bytes = (size_t) n * u->frame_size;
509 #ifdef DEBUG_TIMING
510 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
511 #endif
513 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
514 on_timeout = FALSE;
516 if (u->use_tsched)
518 /* We won't fill up the playback buffer before at least
519 * half the sleep time is over because otherwise we might
520 * ask for more data from the clients then they expect. We
521 * need to guarantee that clients only have to keep around
522 * a single hw buffer length. */
524 if (!polled &&
525 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
526 #ifdef DEBUG_TIMING
527 pa_log_debug("Not filling up, because too early.");
528 #endif
529 break;
532 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
534 if (polled)
535 PA_ONCE_BEGIN {
536 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
537 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
538 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
539 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
540 pa_strnull(dn));
541 pa_xfree(dn);
542 } PA_ONCE_END;
544 #ifdef DEBUG_TIMING
545 pa_log_debug("Not filling up, because not necessary.");
546 #endif
547 break;
551 if (++j > 10) {
552 #ifdef DEBUG_TIMING
553 pa_log_debug("Not filling up, because already too many iterations.");
554 #endif
556 break;
559 n_bytes -= u->hwbuf_unused;
560 polled = FALSE;
562 #ifdef DEBUG_TIMING
563 pa_log_debug("Filling up");
564 #endif
566 for (;;) {
567 pa_memchunk chunk;
568 void *p;
569 int err;
570 const snd_pcm_channel_area_t *areas;
571 snd_pcm_uframes_t offset, frames;
572 snd_pcm_sframes_t sframes;
574 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
575 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
577 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
579 if (!after_avail && err == -EAGAIN)
580 break;
582 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
583 continue;
585 return r;
588 /* Make sure that if these memblocks need to be copied they will fit into one slot */
589 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
590 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
592 if (!after_avail && frames == 0)
593 break;
595 pa_assert(frames > 0);
596 after_avail = FALSE;
598 /* Check these are multiples of 8 bit */
599 pa_assert((areas[0].first & 7) == 0);
600 pa_assert((areas[0].step & 7)== 0);
602 /* We assume a single interleaved memory buffer */
603 pa_assert((areas[0].first >> 3) == 0);
604 pa_assert((areas[0].step >> 3) == u->frame_size);
606 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
608 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
609 chunk.length = pa_memblock_get_length(chunk.memblock);
610 chunk.index = 0;
612 pa_sink_render_into_full(u->sink, &chunk);
613 pa_memblock_unref_fixed(chunk.memblock);
615 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
617 if (!after_avail && (int) sframes == -EAGAIN)
618 break;
620 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
621 continue;
623 return r;
626 work_done = TRUE;
628 u->write_count += frames * u->frame_size;
629 u->since_start += frames * u->frame_size;
631 #ifdef DEBUG_TIMING
632 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
633 #endif
635 if ((size_t) frames * u->frame_size >= n_bytes)
636 break;
638 n_bytes -= (size_t) frames * u->frame_size;
642 if (u->use_tsched) {
643 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
645 if (*sleep_usec > process_usec)
646 *sleep_usec -= process_usec;
647 else
648 *sleep_usec = 0;
649 } else
650 *sleep_usec = 0;
652 return work_done ? 1 : 0;
/* Fill the hw buffer via snd_pcm_writei() (non-mmap path): query free
 * space, render sink data into u->memchunk, and push it to ALSA. The
 * memchunk persists across calls so short writes are resumed later.
 * Parameters and return semantics mirror mmap_write(): returns 1 if any
 * data was written, 0 if not, negative on fatal error. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT without real room: driver bug, complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against livelocking in this loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one is drained */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                /* EAGAIN after we already wrote something is fine */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance the memchunk past what was accepted */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
/* Feed the time smoother a fresh (system time, playback position) pair
 * derived from the ALSA delay and status timestamp. Updates are
 * rate-limited by u->smoother_interval, which backs off exponentially. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Playback position = bytes handed to ALSA minus what still sits in
     * the hw buffer (the delay) */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
845 static pa_usec_t sink_get_latency(struct userdata *u) {
846 pa_usec_t r;
847 int64_t delay;
848 pa_usec_t now1, now2;
850 pa_assert(u);
852 now1 = pa_rtclock_now();
853 now2 = pa_smoother_get(u->smoother, now1);
855 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
857 r = delay >= 0 ? (pa_usec_t) delay : 0;
859 if (u->memchunk.memblock)
860 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
862 return r;
865 static int build_pollfd(struct userdata *u) {
866 pa_assert(u);
867 pa_assert(u->pcm_handle);
869 if (u->alsa_rtpoll_item)
870 pa_rtpoll_item_free(u->alsa_rtpoll_item);
872 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
873 return -1;
875 return 0;
878 /* Called from IO context */
/* Called from IO context */
/* Suspend the sink: pause the smoother, close the PCM handle (set to
 * NULL so other paths can detect the suspended state), drop the poll
 * item, and clear max_rewind/max_request. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}
908 /* Called from IO context */
/* Called from IO context */
/* Recompute hwbuf_unused from the requested latency, refresh the tsched
 * limits, derive avail_min, and push the software parameters to ALSA.
 * Also updates the sink's max_request/max_rewind accordingly.
 * Returns 0 on success, the (negative) ALSA error otherwise. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Whatever the requested latency does not need of the hw
             * buffer is declared unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    /* Rewinding is only reliable on real hardware PCMs */
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
970 /* Called from IO context */
/* Called from IO context */
/* Resume from suspend: reopen the PCM device, reapply the hw parameters,
 * and verify that the access mode, sample spec and buffer geometry are
 * exactly what we had before the suspend -- if anything differs, fail
 * (callers elsewhere can't cope with a changed configuration). On
 * success all playback counters are reset and the stream is re-armed
 * via u->first. Returns 0 on success, -PA_ERR_IO on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Fresh stream: reset write accounting and smoother state */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1047 /* Called from IO context */
/* Called from IO context */
/* Sink message handler: answers latency queries and performs the actual
 * suspend/resume work on state changes; everything else (including
 * SET_STATE itself after our extra work) falls through to the generic
 * pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* pcm_handle is NULL while suspended; report 0 then */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* nothing extra to do for these targets */
                    ;
            }

            break;
    }

    /* Hand over to the generic sink message handling */
    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1108 /* Called from main context */
1109 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1110 pa_sink_state_t old_state;
1111 struct userdata *u;
1113 pa_sink_assert_ref(s);
1114 pa_assert_se(u = s->userdata);
1116 old_state = pa_sink_get_state(u->sink);
1118 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1119 reserve_done(u);
1120 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1121 if (reserve_init(u, u->device_name) < 0)
1122 return -PA_ERR_BUSY;
1124 return 0;
/* snd_mixer element callback: when the hardware mixer value changes
 * behind our back (e.g. alsamixer), re-read volume and mute so the
 * sink's state stays in sync. Ignores element-removal events and does
 * nothing while the session is suspended. Always returns 0. */
1127 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1128 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1130 pa_assert(u);
1131 pa_assert(u->mixer_handle);
1133 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1134 return 0;
/* Session is inactive; don't touch the device. */
1136 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1137 return 0;
1139 if (mask & SND_CTL_EVENT_MASK_VALUE) {
/* force_refresh=TRUE: bypass any cached values. */
1140 pa_sink_get_volume(u->sink, TRUE);
1141 pa_sink_get_mute(u->sink, TRUE);
1144 return 0;
/* Sink get_volume hook: reads the current hardware volume from the
 * active mixer path, normalizes it by base_volume, and updates
 * s->real_volume if it changed. On a dB-capable path the software
 * volume is reset since hardware now reflects the user's setting. */
1147 static void sink_get_volume_cb(pa_sink *s) {
1148 struct userdata *u = s->userdata;
1149 pa_cvolume r;
1150 char t[PA_CVOLUME_SNPRINT_MAX];
1152 pa_assert(u);
1153 pa_assert(u->mixer_path);
1154 pa_assert(u->mixer_handle);
1156 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1157 return;
1159 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1160 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1162 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
/* Nothing changed since our last read; avoid needless updates. */
1164 if (pa_cvolume_equal(&u->hardware_volume, &r))
1165 return;
1167 s->real_volume = u->hardware_volume = r;
1169 /* Hmm, so the hardware volume changed, let's reset our software volume */
1170 if (u->mixer_path->has_dB)
1171 pa_sink_set_soft_volume(s, NULL);
/* Sink set_volume hook: writes the requested volume to hardware via the
 * mixer path, records what the hardware actually accepted in
 * u->hardware_volume, and — on dB-capable paths — computes a residual
 * software volume to make the effective volume match the request
 * exactly (skipped when the difference is within VOLUME_ACCURACY). On
 * non-dB paths the request is clamped to whatever hardware delivered. */
1174 static void sink_set_volume_cb(pa_sink *s) {
1175 struct userdata *u = s->userdata;
1176 pa_cvolume r;
1177 char t[PA_CVOLUME_SNPRINT_MAX];
1179 pa_assert(u);
1180 pa_assert(u->mixer_path);
1181 pa_assert(u->mixer_handle);
1183 /* Shift up by the base volume */
1184 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1186 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1187 return;
1189 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1190 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
/* r now holds the volume the hardware actually applied. */
1192 u->hardware_volume = r;
1194 if (u->mixer_path->has_dB) {
1195 pa_cvolume new_soft_volume;
1196 pa_bool_t accurate_enough;
1198 /* Match exactly what the user requested by software */
1199 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1201 /* If the adjustment to do in software is only minimal we
1202 * can skip it. That saves us CPU at the expense of a bit of
1203 * accuracy */
1204 accurate_enough =
1205 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1206 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1208 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1209 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1210 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1211 pa_yes_no(accurate_enough));
1213 if (!accurate_enough)
1214 s->soft_volume = new_soft_volume;
1216 } else {
1217 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1219 /* We can't match exactly what the user requested, hence let's
1220 * at least tell the user about it */
1222 s->real_volume = r;
/* Sink get_mute hook: reads the hardware mute switch from the active
 * mixer path into s->muted; leaves s->muted untouched on failure. */
1226 static void sink_get_mute_cb(pa_sink *s) {
1227 struct userdata *u = s->userdata;
1228 pa_bool_t b;
1230 pa_assert(u);
1231 pa_assert(u->mixer_path);
1232 pa_assert(u->mixer_handle);
1234 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1235 return;
1237 s->muted = b;
/* Sink set_mute hook: pushes s->muted to the hardware mute switch.
 * The return value of pa_alsa_path_set_mute() is intentionally not
 * checked here — a failed write simply leaves hardware as-is. */
1240 static void sink_set_mute_cb(pa_sink *s) {
1241 struct userdata *u = s->userdata;
1243 pa_assert(u);
1244 pa_assert(u->mixer_path);
1245 pa_assert(u->mixer_handle);
1247 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Sink set_port hook: activates the mixer path attached to the chosen
 * device port, recomputes base_volume/n_volume_steps for the new path,
 * applies its optional setting, and re-pushes mute and volume so the
 * new path reflects the sink's current state. Always returns 0. */
1250 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1251 struct userdata *u = s->userdata;
1252 pa_alsa_port_data *data;
1254 pa_assert(u);
1255 pa_assert(p);
1256 pa_assert(u->mixer_handle);
1258 data = PA_DEVICE_PORT_DATA(p);
/* The port's path becomes the active mixer path. */
1260 pa_assert_se(u->mixer_path = data->path);
1261 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1263 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
/* dB-capable: base volume is the attenuation that maps the
 * path's max_dB to 0 dB. */
1264 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1265 s->n_volume_steps = PA_VOLUME_NORM+1;
1267 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1268 } else {
1269 s->base_volume = PA_VOLUME_NORM;
1270 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1273 if (data->setting)
1274 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* Re-apply current mute/volume through the new path. */
1276 if (s->set_mute)
1277 s->set_mute(s);
1278 if (s->set_volume)
1279 s->set_volume(s);
1281 return 0;
/* Sink update_requested_latency hook (timer-scheduling mode only):
 * recalculates the software parameters for the new latency and, if the
 * usable part of the hardware buffer shrank, requests a full rewind so
 * subsequent rewinds are measured against the new fill ceiling. */
1284 static void sink_update_requested_latency_cb(pa_sink *s) {
1285 struct userdata *u = s->userdata;
1286 size_t before;
1287 pa_assert(u);
1288 pa_assert(u->use_tsched); /* only when timer scheduling is used
1289 * we can dynamically adjust the
1290 * latency */
/* Device is suspended; nothing to reconfigure right now. */
1292 if (!u->pcm_handle)
1293 return;
1295 before = u->hwbuf_unused;
1296 update_sw_params(u);
1298 /* Let's check whether we now use only a smaller part of the
1299 buffer then before. If so, we need to make sure that subsequent
1300 rewinds are relative to the new maximum fill level and not to the
1301 current fill level. Thus, let's do a full rewind once, to clear
1302 things up. */
1304 if (u->hwbuf_unused > before) {
1305 pa_log_debug("Requesting rewind due to latency change.");
1306 pa_sink_request_rewind(s, (size_t) -1);
/* Executes a pending rewind request from the sink core: clamps the
 * requested byte count so we never rewind into data the hardware may
 * already be transferring (plus a DMA safeguard), asks ALSA to rewind,
 * and reports the actually-rewound amount back via
 * pa_sink_process_rewind(). Returns 0 on success, -1 on an
 * unrecoverable PCM error. */
1310 static int process_rewind(struct userdata *u) {
1311 snd_pcm_sframes_t unused;
1312 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1313 pa_assert(u);
1315 /* Figure out how much we shall rewind and reset the counter */
1316 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1318 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1320 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1321 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1322 return -1;
1325 unused_nbytes = (size_t) unused * u->frame_size;
1327 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1328 unused_nbytes += u->rewind_safeguard;
/* limit_nbytes = bytes currently queued minus the safeguard; the
 * most we may safely take back. */
1330 if (u->hwbuf_size > unused_nbytes)
1331 limit_nbytes = u->hwbuf_size - unused_nbytes;
1332 else
1333 limit_nbytes = 0;
1335 if (rewind_nbytes > limit_nbytes)
1336 rewind_nbytes = limit_nbytes;
1338 if (rewind_nbytes > 0) {
1339 snd_pcm_sframes_t in_frames, out_frames;
1341 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1343 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1344 pa_log_debug("before: %lu", (unsigned long) in_frames);
1345 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1346 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
/* Try xrun/suspend recovery; give up only if that fails too. */
1347 if (try_recover(u, "process_rewind", out_frames) < 0)
1348 return -1;
1349 out_frames = 0;
1352 pa_log_debug("after: %lu", (unsigned long) out_frames);
1354 rewind_nbytes = (size_t) out_frames * u->frame_size;
/* NOTE(review): rewind_nbytes is size_t, so "<= 0" is effectively
 * "== 0" — harmless, but "== 0" would state the intent. */
1356 if (rewind_nbytes <= 0)
1357 pa_log_info("Tried rewind, but was apparently not possible.");
1358 else {
/* Keep the write counter consistent with what ALSA took back. */
1359 u->write_count -= rewind_nbytes;
1360 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1361 pa_sink_process_rewind(u->sink, rewind_nbytes);
1363 u->after_rewind = TRUE;
1364 return 0;
1366 } else
1367 pa_log_debug("Mhmm, actually there is nothing to rewind.");
/* Nothing rewound; still notify the core so it can reset state. */
1369 pa_sink_process_rewind(u->sink, 0);
1370 return 0;
/* IO-thread main loop: renders audio into the ALSA buffer (mmap or
 * unix write), starts the PCM on the first successful write, keeps the
 * smoother and timer-scheduling wakeups up to date, then sleeps in
 * pa_rtpoll_run() and processes the resulting ALSA poll events,
 * recovering from xruns/suspend as needed. Exits via `finish` on
 * regular shutdown or via `fail` (which asks the core to unload the
 * module) on unrecoverable errors. */
1373 static void thread_func(void *userdata) {
1374 struct userdata *u = userdata;
1375 unsigned short revents = 0;
1377 pa_assert(u);
1379 pa_log_debug("Thread starting up");
1381 if (u->core->realtime_scheduling)
1382 pa_make_realtime(u->core->realtime_priority);
1384 pa_thread_mq_install(&u->thread_mq);
1386 for (;;) {
1387 int ret;
1389 #ifdef DEBUG_TIMING
1390 pa_log_debug("Loop");
1391 #endif
1393 /* Render some data and write it to the dsp */
1394 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1395 int work_done;
1396 pa_usec_t sleep_usec = 0;
1397 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
/* A rewind request must be serviced before writing new data. */
1399 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1400 if (process_rewind(u) < 0)
1401 goto fail;
1403 if (u->use_mmap)
1404 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1405 else
1406 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1408 if (work_done < 0)
1409 goto fail;
1411 /* pa_log_debug("work_done = %i", work_done); */
1413 if (work_done) {
/* First write after (re)start: kick off playback and reset
 * the rate smoother. 'first' is cleared only here, i.e. only
 * when snd_pcm_start() has actually been called.
 * NOTE(review): snd_pcm_start()'s return value is ignored —
 * a start failure would go unnoticed here; confirm intended. */
1415 if (u->first) {
1416 pa_log_info("Starting playback.");
1417 snd_pcm_start(u->pcm_handle);
1419 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1421 u->first = FALSE;
1424 update_smoother(u);
1427 if (u->use_tsched) {
1428 pa_usec_t cusec;
1430 if (u->since_start <= u->hwbuf_size) {
1432 /* USB devices on ALSA seem to hit a buffer
1433 * underrun during the first iterations much
1434 * quicker then we calculate here, probably due to
1435 * the transport latency. To accommodate for that
1436 * we artificially decrease the sleep time until
1437 * we have filled the buffer at least once
1438 * completely.*/
1440 if (pa_log_ratelimit())
1441 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1442 sleep_usec /= 2;
1445 /* OK, the playback buffer is now full, let's
1446 * calculate when to wake up next */
1447 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1449 /* Convert from the sound card time domain to the
1450 * system time domain */
1451 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1453 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1455 /* We don't trust the conversion, so we wake up whatever comes first */
1456 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1459 u->after_rewind = FALSE;
1461 } else if (u->use_tsched)
1463 /* OK, we're in an invalid state, let's disable our timers */
1464 pa_rtpoll_set_timer_disabled(u->rtpoll);
1466 /* Hmm, nothing to do. Let's sleep */
1467 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1468 goto fail;
/* ret == 0 means a clean shutdown request. */
1470 if (ret == 0)
1471 goto finish;
1473 /* Tell ALSA about this and process its response */
1474 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1475 struct pollfd *pollfd;
1476 int err;
1477 unsigned n;
1479 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1481 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1482 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1483 goto fail;
/* Anything besides POLLOUT signals an error/xrun: recover and
 * restart playback from scratch on the next iteration. */
1486 if (revents & ~POLLOUT) {
1487 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1488 goto fail;
1490 u->first = TRUE;
1491 u->since_start = 0;
1492 } else if (revents && u->use_tsched && pa_log_ratelimit())
1493 pa_log_debug("Wakeup from ALSA!");
1495 } else
1496 revents = 0;
1499 fail:
1500 /* If this was no regular exit from the loop we have to continue
1501 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1502 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1503 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1505 finish:
1506 pa_log_debug("Thread shutting down");
/* Chooses the sink's registry name: an explicit "sink_name" modarg wins
 * outright; otherwise the name is synthesized as
 * "alsa_output.<id>[.<mapping>]" from device_id/device_name, with
 * namereg_fail set only when the user supplied the name explicitly. */
1509 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1510 const char *n;
1511 char *t;
1513 pa_assert(data);
1514 pa_assert(ma);
1515 pa_assert(device_name);
1517 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1518 pa_sink_new_data_set_name(data, n);
/* User picked this exact name; fail registration on collision. */
1519 data->namereg_fail = TRUE;
1520 return;
1523 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1524 data->namereg_fail = TRUE;
1525 else {
/* Auto-generated name: allow the registry to uniquify it. */
1526 n = device_id ? device_id : device_name;
1527 data->namereg_fail = FALSE;
1530 if (mapping)
1531 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1532 else
1533 t = pa_sprintf_malloc("alsa_output.%s", n);
1535 pa_sink_new_data_set_name(data, t);
1536 pa_xfree(t);
/* Opens the mixer belonging to the PCM and probes its control paths:
 * an explicit "control" element yields a single synthesized path
 * (u->mixer_path), otherwise the mapping's full path set is probed
 * (u->mixer_path_set). On any probe failure everything opened here is
 * torn down again, leaving the sink without hardware mixer control. */
1539 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Neither a mapping nor an explicit element: no mixer wanted. */
1541 if (!mapping && !element)
1542 return;
1544 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1545 pa_log_info("Failed to find a working mixer device.");
1546 return;
1549 if (element) {
1551 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1552 goto fail;
1554 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1555 goto fail;
1557 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1558 pa_alsa_path_dump(u->mixer_path);
1559 } else {
1561 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1562 goto fail;
1564 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1566 pa_log_debug("Probed mixer paths:");
1567 pa_alsa_path_set_dump(u->mixer_path_set);
1570 return;
1572 fail:
/* Undo partial setup; only one of path_set/path can be non-NULL. */
1574 if (u->mixer_path_set) {
1575 pa_alsa_path_set_free(u->mixer_path_set);
1576 u->mixer_path_set = NULL;
1577 } else if (u->mixer_path) {
1578 pa_alsa_path_free(u->mixer_path);
1579 u->mixer_path = NULL;
1582 if (u->mixer_handle) {
1583 snd_mixer_close(u->mixer_handle);
1584 u->mixer_handle = NULL;
/* Activates a mixer path (from the active port, or the single/first
 * probed path) and wires the sink up to hardware control: installs the
 * volume/mute callbacks, sets base_volume and n_volume_steps, sets the
 * PASSTHROUGH flag for the known passthrough path, and registers the
 * mixer fd-list plus change callbacks. Returns 0 on success (including
 * the "no mixer available" case) and -1 on fd-monitoring failure. */
1588 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1589 pa_assert(u);
/* No mixer was found by find_mixer(); software control only. */
1591 if (!u->mixer_handle)
1592 return 0;
1594 if (u->sink->active_port) {
1595 pa_alsa_port_data *data;
1597 /* We have a list of supported paths, so let's activate the
1598 * one that has been chosen as active */
1600 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1601 u->mixer_path = data->path;
1603 pa_alsa_path_select(data->path, u->mixer_handle);
1605 if (data->setting)
1606 pa_alsa_setting_select(data->setting, u->mixer_handle);
1608 } else {
1610 if (!u->mixer_path && u->mixer_path_set)
1611 u->mixer_path = u->mixer_path_set->paths;
1613 if (u->mixer_path) {
1614 /* Hmm, we have only a single path, then let's activate it */
1616 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1618 if (u->mixer_path->settings)
1619 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1620 } else
1621 return 0;
1624 /* FIXME: need automatic detection rather than hard-coded path */
1625 if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
1626 u->sink->flags |= PA_SINK_PASSTHROUGH;
1627 } else {
1628 u->sink->flags &= ~PA_SINK_PASSTHROUGH;
1631 if (!u->mixer_path->has_volume)
1632 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1633 else {
1635 if (u->mixer_path->has_dB) {
1636 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* Map the path's max_dB to 0 dB so PA_VOLUME_NORM = max. */
1638 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1639 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1641 if (u->mixer_path->max_dB > 0.0)
1642 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1643 else
1644 pa_log_info("No particular base volume set, fixing to 0 dB");
1646 } else {
1647 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1648 u->sink->base_volume = PA_VOLUME_NORM;
1649 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1652 u->sink->get_volume = sink_get_volume_cb;
1653 u->sink->set_volume = sink_set_volume_cb;
1655 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1656 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1659 if (!u->mixer_path->has_mute) {
1660 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1661 } else {
1662 u->sink->get_mute = sink_get_mute_cb;
1663 u->sink->set_mute = sink_set_mute_cb;
1664 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1665 pa_log_info("Using hardware mute control.");
/* Watch mixer fds on the main loop so external changes reach us. */
1668 u->mixer_fdl = pa_alsa_fdlist_new();
1670 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1671 pa_log("Failed to initialize file descriptor monitoring");
1672 return -1;
1675 if (u->mixer_path_set)
1676 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1677 else
1678 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1680 return 0;
/* Module entry point for creating an ALSA sink: parses module
 * arguments, reserves and opens the PCM device (by mapping, device_id
 * or device string), probes the mixer, creates the pa_sink, configures
 * buffer/latency metrics (tsched or fragment based), spawns the IO
 * thread, applies/reads the initial volume and mute, and publishes the
 * sink. Returns the new sink, or NULL after freeing everything on any
 * failure. */
1683 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1685 struct userdata *u = NULL;
1686 const char *dev_id = NULL;
1687 pa_sample_spec ss, requested_ss;
1688 pa_channel_map map;
1689 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1690 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1691 size_t frame_size;
1692 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE;
1693 pa_sink_new_data data;
1694 pa_alsa_profile_set *profile_set = NULL;
1696 pa_assert(m);
1697 pa_assert(ma);
/* Start from the core defaults; modargs may override both. */
1699 ss = m->core->default_sample_spec;
1700 map = m->core->default_channel_map;
1701 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1702 pa_log("Failed to parse sample specification and channel map");
1703 goto fail;
1706 requested_ss = ss;
1707 frame_size = pa_frame_size(&ss);
1709 nfrags = m->core->default_n_fragments;
1710 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1711 if (frag_size <= 0)
1712 frag_size = (uint32_t) frame_size;
1713 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1714 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1716 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1717 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1718 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1719 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1720 pa_log("Failed to parse buffer metrics");
1721 goto fail;
1724 buffer_size = nfrags * frag_size;
1726 period_frames = frag_size/frame_size;
1727 buffer_frames = buffer_size/frame_size;
1728 tsched_frames = tsched_size/frame_size;
1730 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1731 pa_log("Failed to parse mmap argument.");
1732 goto fail;
1735 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1736 pa_log("Failed to parse tsched argument.");
1737 goto fail;
1740 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1741 pa_log("Failed to parse ignore_dB argument.");
1742 goto fail;
1745 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1746 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1747 pa_log("Failed to parse rewind_safeguard argument");
1748 goto fail;
/* Tsched may be globally disabled (e.g. no high-res timers). */
1751 use_tsched = pa_alsa_may_tsched(use_tsched);
1753 u = pa_xnew0(struct userdata, 1);
1754 u->core = m->core;
1755 u->module = m;
1756 u->use_mmap = use_mmap;
1757 u->use_tsched = use_tsched;
/* 'first' makes the IO thread call snd_pcm_start() on its first
 * successful write. */
1758 u->first = TRUE;
1759 u->rewind_safeguard = rewind_safeguard;
1760 u->rtpoll = pa_rtpoll_new();
1761 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1763 u->smoother = pa_smoother_new(
1764 SMOOTHER_ADJUST_USEC,
1765 SMOOTHER_WINDOW_USEC,
1766 TRUE,
1767 TRUE,
1769 pa_rtclock_now(),
1770 TRUE);
1771 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1773 dev_id = pa_modargs_get_value(
1774 ma, "device_id",
1775 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
/* Claim the device via the reservation protocol before opening. */
1777 if (reserve_init(u, dev_id) < 0)
1778 goto fail;
1780 if (reserve_monitor_init(u, dev_id) < 0)
1781 goto fail;
/* b/d report back whether mmap/tsched were actually possible. */
1783 b = use_mmap;
1784 d = use_tsched;
1786 if (mapping) {
1788 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1789 pa_log("device_id= not set");
1790 goto fail;
1793 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1794 dev_id,
1795 &u->device_name,
1796 &ss, &map,
1797 SND_PCM_STREAM_PLAYBACK,
1798 &period_frames, &buffer_frames, tsched_frames,
1799 &b, &d, mapping)))
1801 goto fail;
1803 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1805 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1806 goto fail;
1808 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1809 dev_id,
1810 &u->device_name,
1811 &ss, &map,
1812 SND_PCM_STREAM_PLAYBACK,
1813 &period_frames, &buffer_frames, tsched_frames,
1814 &b, &d, profile_set, &mapping)))
1816 goto fail;
1818 } else {
1820 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1821 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1822 &u->device_name,
1823 &ss, &map,
1824 SND_PCM_STREAM_PLAYBACK,
1825 &period_frames, &buffer_frames, tsched_frames,
1826 &b, &d, FALSE)))
1827 goto fail;
1830 pa_assert(u->device_name);
1831 pa_log_info("Successfully opened device %s.", u->device_name);
1833 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1834 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1835 goto fail;
1838 if (mapping)
1839 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1841 if (use_mmap && !b) {
1842 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1843 u->use_mmap = use_mmap = FALSE;
1846 if (use_tsched && (!b || !d)) {
1847 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1848 u->use_tsched = use_tsched = FALSE;
1851 if (u->use_mmap)
1852 pa_log_info("Successfully enabled mmap() mode.");
1854 if (u->use_tsched)
1855 pa_log_info("Successfully enabled timer-based scheduling mode.");
1857 /* ALSA might tweak the sample spec, so recalculate the frame size */
1858 frame_size = pa_frame_size(&ss);
1860 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1862 pa_sink_new_data_init(&data);
1863 data.driver = driver;
1864 data.module = m;
1865 data.card = card;
1866 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1868 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1869 * variable instead of using &data.namereg_fail directly, because
1870 * data.namereg_fail is a bitfield and taking the address of a bitfield
1871 * variable is impossible. */
1872 namereg_fail = data.namereg_fail;
1873 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1874 pa_log("Failed to parse boolean argument namereg_fail.");
1875 pa_sink_new_data_done(&data);
1876 goto fail;
1878 data.namereg_fail = namereg_fail;
1880 pa_sink_new_data_set_sample_spec(&data, &ss);
1881 pa_sink_new_data_set_channel_map(&data, &map);
1883 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1884 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1885 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1886 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1887 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1889 if (mapping) {
1890 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1891 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1894 pa_alsa_init_description(data.proplist);
1896 if (u->control_device)
1897 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1899 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1900 pa_log("Invalid properties");
1901 pa_sink_new_data_done(&data);
1902 goto fail;
1905 if (u->mixer_path_set)
1906 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1908 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1909 pa_sink_new_data_done(&data);
1911 if (!u->sink) {
1912 pa_log("Failed to create sink object");
1913 goto fail;
1916 u->sink->parent.process_msg = sink_process_msg;
1917 if (u->use_tsched)
1918 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1919 u->sink->set_state = sink_set_state_cb;
1920 u->sink->set_port = sink_set_port_cb;
1921 u->sink->userdata = u;
1923 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1924 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1926 u->frame_size = frame_size;
1927 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1928 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1929 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1931 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1932 (double) u->hwbuf_size / (double) u->fragment_size,
1933 (long unsigned) u->fragment_size,
1934 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1935 (long unsigned) u->hwbuf_size,
1936 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1938 pa_sink_set_max_request(u->sink, u->hwbuf_size);
/* Rewinding only makes sense on real hardware buffers. */
1939 if (pa_alsa_pcm_is_hw(u->pcm_handle))
1940 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1941 else {
1942 pa_log_info("Disabling rewind for device %s", u->device_name);
1943 pa_sink_set_max_rewind(u->sink, 0);
1946 if (u->use_tsched) {
/* Re-express the watermark in the final sample spec: the device
 * may have altered rate/format from what was requested. */
1947 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1949 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1950 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1952 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1953 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1955 fix_min_sleep_wakeup(u);
1956 fix_tsched_watermark(u);
1958 pa_sink_set_latency_range(u->sink,
1960 pa_bytes_to_usec(u->hwbuf_size, &ss));
1962 pa_log_info("Time scheduling watermark is %0.2fms",
1963 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1964 } else
1965 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1967 reserve_update(u);
1969 if (update_sw_params(u) < 0)
1970 goto fail;
1972 if (setup_mixer(u, ignore_dB) < 0)
1973 goto fail;
1975 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1977 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
1978 pa_log("Failed to create thread.");
1979 goto fail;
1982 /* Get initial mixer settings */
1983 if (data.volume_is_set) {
1984 if (u->sink->set_volume)
1985 u->sink->set_volume(u->sink);
1986 } else {
1987 if (u->sink->get_volume)
1988 u->sink->get_volume(u->sink);
1991 if (data.muted_is_set) {
1992 if (u->sink->set_mute)
1993 u->sink->set_mute(u->sink);
1994 } else {
1995 if (u->sink->get_mute)
1996 u->sink->get_mute(u->sink);
/* Publish the fully-initialized sink to the core. */
1999 pa_sink_put(u->sink);
2001 if (profile_set)
2002 pa_alsa_profile_set_free(profile_set);
2004 return u->sink;
2006 fail:
/* userdata_free() tears down everything partially constructed. */
2008 if (u)
2009 userdata_free(u);
2011 if (profile_set)
2012 pa_alsa_profile_set_free(profile_set);
2014 return NULL;
/* Tears down the sink in dependency order: unlink the sink, shut down
 * and join the IO thread, then release queues, memory, poll items,
 * the PCM handle, mixer resources, the smoother, the device
 * reservation, and finally the userdata itself. Safe on partially
 * constructed userdata (every member is checked before freeing). */
2017 static void userdata_free(struct userdata *u) {
2018 pa_assert(u);
/* Unlink first so no new work is routed to the dying sink. */
2020 if (u->sink)
2021 pa_sink_unlink(u->sink);
/* Synchronously stop and join the IO thread before freeing
 * anything it might still touch. */
2023 if (u->thread) {
2024 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2025 pa_thread_free(u->thread);
2028 pa_thread_mq_done(&u->thread_mq);
2030 if (u->sink)
2031 pa_sink_unref(u->sink);
2033 if (u->memchunk.memblock)
2034 pa_memblock_unref(u->memchunk.memblock);
2036 if (u->alsa_rtpoll_item)
2037 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2039 if (u->rtpoll)
2040 pa_rtpoll_free(u->rtpoll);
/* Drop pending samples before closing the device. */
2042 if (u->pcm_handle) {
2043 snd_pcm_drop(u->pcm_handle);
2044 snd_pcm_close(u->pcm_handle);
2047 if (u->mixer_fdl)
2048 pa_alsa_fdlist_free(u->mixer_fdl);
/* path_set and path are mutually exclusive (see find_mixer). */
2050 if (u->mixer_path_set)
2051 pa_alsa_path_set_free(u->mixer_path_set);
2052 else if (u->mixer_path)
2053 pa_alsa_path_free(u->mixer_path);
2055 if (u->mixer_handle)
2056 snd_mixer_close(u->mixer_handle);
2058 if (u->smoother)
2059 pa_smoother_free(u->smoother);
2061 reserve_done(u);
2062 monitor_done(u);
2064 pa_xfree(u->device_name);
2065 pa_xfree(u->control_device);
2066 pa_xfree(u);
2069 void pa_alsa_sink_free(pa_sink *s) {
2070 struct userdata *u;
2072 pa_sink_assert_ref(s);
2073 pa_assert_se(u = s->userdata);
2075 userdata_free(u);