2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC)   /* 1ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wakeup at least this long before the buffer runs empty */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)                       /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
/* NOTE(review): the opening of this struct (the `struct userdata {` line and
 * the members preceding thread_mq, including the type of the watermark
 * threshold fields below -- presumably size_t) lies outside this chunk;
 * confirm against the full file. */
pa_thread_mq thread_mq;                       /* message queue linking main and IO thread */

snd_pcm_t *pcm_handle;                        /* ALSA PCM handle; set to NULL while suspended */

pa_alsa_fdlist *mixer_fdl;                    /* fd list used to poll mixer events */
snd_mixer_t *mixer_handle;                    /* ALSA mixer handle */
pa_alsa_path_set *mixer_path_set;             /* available mixer element paths */
pa_alsa_path *mixer_path;                     /* currently selected mixer path */

pa_cvolume hardware_volume;                   /* last volume read from/written to hardware */

watermark_inc_threshold,                      /* fill level (bytes) below which the watermark is raised */
watermark_dec_threshold;                      /* fill level (bytes) above which it may be lowered */

pa_usec_t watermark_dec_not_before;           /* earliest time a watermark decrease is permitted */

pa_memchunk memchunk;                         /* staging chunk used by the non-mmap write path */

char *device_name;                            /* name of the PCM device */
char *control_device;                         /* name of the control device */

pa_bool_t use_mmap:1, use_tsched:1;           /* mmap I/O and timer-based scheduling flags */

pa_bool_t first, after_rewind;                /* "just (re)started" / "just rewound" state flags */

pa_rtpoll_item *alsa_rtpoll_item;             /* rtpoll item carrying the PCM poll fds */

snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST]; /* channel position -> mixer channel mapping */

pa_smoother *smoother;                        /* time smoother used for latency interpolation */
uint64_t write_count;                         /* total bytes written to the PCM device */
uint64_t since_start;                         /* bytes written since last start/recovery */
pa_usec_t smoother_interval;                  /* current smoother update interval */
pa_usec_t last_smoother_update;               /* timestamp of the last smoother update */

pa_reserve_wrapper *reserve;                  /* session-level device reservation */
pa_hook_slot *reserve_slot;                   /* hook slot for reservation takeover events */
pa_reserve_monitor_wrapper *monitor;          /* watches whether someone else holds the device */
pa_hook_slot *monitor_slot;                   /* hook slot for monitor busy/free events */
/* Forward declaration: tears down a userdata instance and everything it
 * owns (definition elsewhere in this file, not visible in this chunk). */
static void userdata_free(struct userdata *u);
144 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
148 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
149 return PA_HOOK_CANCEL
;
154 static void reserve_done(struct userdata
*u
) {
157 if (u
->reserve_slot
) {
158 pa_hook_slot_free(u
->reserve_slot
);
159 u
->reserve_slot
= NULL
;
163 pa_reserve_wrapper_unref(u
->reserve
);
168 static void reserve_update(struct userdata
*u
) {
169 const char *description
;
172 if (!u
->sink
|| !u
->reserve
)
175 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
176 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
179 static int reserve_init(struct userdata
*u
, const char *dname
) {
188 if (pa_in_system_mode())
191 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
194 /* We are resuming, try to lock the device */
195 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
203 pa_assert(!u
->reserve_slot
);
204 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
209 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
215 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
217 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
221 static void monitor_done(struct userdata
*u
) {
224 if (u
->monitor_slot
) {
225 pa_hook_slot_free(u
->monitor_slot
);
226 u
->monitor_slot
= NULL
;
230 pa_reserve_monitor_wrapper_unref(u
->monitor
);
235 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
241 if (pa_in_system_mode())
244 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
247 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
253 pa_assert(!u
->monitor_slot
);
254 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
259 static void fix_min_sleep_wakeup(struct userdata
*u
) {
260 size_t max_use
, max_use_2
;
263 pa_assert(u
->use_tsched
);
265 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
266 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
268 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
269 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
271 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
272 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
275 static void fix_tsched_watermark(struct userdata
*u
) {
278 pa_assert(u
->use_tsched
);
280 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
282 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
283 u
->tsched_watermark
= max_use
- u
->min_sleep
;
285 if (u
->tsched_watermark
< u
->min_wakeup
)
286 u
->tsched_watermark
= u
->min_wakeup
;
289 static void increase_watermark(struct userdata
*u
) {
290 size_t old_watermark
;
291 pa_usec_t old_min_latency
, new_min_latency
;
294 pa_assert(u
->use_tsched
);
296 /* First, just try to increase the watermark */
297 old_watermark
= u
->tsched_watermark
;
298 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
299 fix_tsched_watermark(u
);
301 if (old_watermark
!= u
->tsched_watermark
) {
302 pa_log_info("Increasing wakeup watermark to %0.2f ms",
303 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
307 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
308 old_min_latency
= u
->sink
->thread_info
.min_latency
;
309 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
310 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
312 if (old_min_latency
!= new_min_latency
) {
313 pa_log_info("Increasing minimal latency to %0.2f ms",
314 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
316 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
319 /* When we reach this we're officialy fucked! */
322 static void decrease_watermark(struct userdata
*u
) {
323 size_t old_watermark
;
327 pa_assert(u
->use_tsched
);
329 now
= pa_rtclock_now();
331 if (u
->watermark_dec_not_before
<= 0)
334 if (u
->watermark_dec_not_before
> now
)
337 old_watermark
= u
->tsched_watermark
;
339 if (u
->tsched_watermark
< u
->watermark_dec_step
)
340 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
342 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
344 fix_tsched_watermark(u
);
346 if (old_watermark
!= u
->tsched_watermark
)
347 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
348 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
350 /* We don't change the latency range*/
353 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
356 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
359 pa_assert(sleep_usec
);
360 pa_assert(process_usec
);
363 pa_assert(u
->use_tsched
);
365 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
367 if (usec
== (pa_usec_t
) -1)
368 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
370 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
375 *sleep_usec
= usec
- wm
;
379 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
380 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
381 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
382 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
386 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
391 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
393 pa_assert(err
!= -EAGAIN
);
396 pa_log_debug("%s: Buffer underrun!", call
);
398 if (err
== -ESTRPIPE
)
399 pa_log_debug("%s: System suspended!", call
);
401 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
402 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
411 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
414 /* We use <= instead of < for this check here because an underrun
415 * only happens after the last sample was processed, not already when
416 * it is removed from the buffer. This is particularly important
417 * when block transfer is used. */
419 if (n_bytes
<= u
->hwbuf_size
)
420 left_to_play
= u
->hwbuf_size
- n_bytes
;
423 /* We got a dropout. What a mess! */
430 if (!u
->first
&& !u
->after_rewind
)
431 if (pa_log_ratelimit())
432 pa_log_info("Underrun!");
436 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
437 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
438 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
439 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
443 pa_bool_t reset_not_before
= TRUE
;
445 if (!u
->first
&& !u
->after_rewind
) {
446 if (left_to_play
< u
->watermark_inc_threshold
)
447 increase_watermark(u
);
448 else if (left_to_play
> u
->watermark_dec_threshold
) {
449 reset_not_before
= FALSE
;
451 /* We decrease the watermark only if have actually
452 * been woken up by a timeout. If something else woke
453 * us up it's too easy to fulfill the deadlines... */
456 decrease_watermark(u
);
460 if (reset_not_before
)
461 u
->watermark_dec_not_before
= 0;
/* Timer-scheduled/polled write path using ALSA mmap transfer: renders sink
 * data directly into the DMA buffer via mmap_begin/commit and computes the
 * next sleep time from the remaining fill level.
 *
 * NOTE(review): this chunk is a truncated extraction -- the outer and inner
 * loop headers, several if/else branches, break/continue statements and
 * closing braces of this function are missing from the visible text. Only
 * the visible statements are annotated below; the control flow must be
 * confirmed against the original file. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = TRUE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;

    pa_sink_assert_ref(u->sink);

    /* Split the configured latency into sleep and processing budget */
    hw_sleep_time(u, &max_sleep_usec, &process_usec);

    pa_bool_t after_avail = TRUE;

    /* First we determine how many samples are missing to fill the
     * buffer up to 100% */

    if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        /* avail failed: try to recover; 0 means recovered, retry */
        if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
    n_bytes = (size_t) n * u->frame_size;
    pa_log_debug("avail: %lu", (unsigned long) n_bytes);
    left_to_play = check_left_to_play(u, n_bytes, on_timeout);

    /* We won't fill up the playback buffer before at least
     * half the sleep time is over because otherwise we might
     * ask for more data from the clients then they expect. We
     * need to guarantee that clients only have to keep around
     * a single hw buffer length. */

    pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
        pa_log_debug("Not filling up, because too early.");

    if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
        /* Spurious POLLOUT wakeup: warn about the (likely) driver bug */
        char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
        pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
        pa_log_debug("Not filling up, because not necessary.");
        pa_log_debug("Not filling up, because already too many iterations.");

    /* Never touch the part of the buffer reserved as "unused" */
    n_bytes -= u->hwbuf_unused;
    pa_log_debug("Filling up");

    const snd_pcm_channel_area_t *areas;
    snd_pcm_uframes_t offset, frames;
    snd_pcm_sframes_t sframes;

    frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
    /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

    if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        if (!after_avail && err == -EAGAIN)
        if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)

    /* Make sure that if these memblocks need to be copied they will fit into one slot */
    if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
        frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

    if (!after_avail && frames == 0)
    pa_assert(frames > 0);

    /* Check these are multiples of 8 bit */
    pa_assert((areas[0].first & 7) == 0);
    pa_assert((areas[0].step & 7)== 0);

    /* We assume a single interleaved memory buffer */
    pa_assert((areas[0].first >> 3) == 0);
    pa_assert((areas[0].step >> 3) == u->frame_size);

    p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

    /* Wrap the DMA area in a fixed memblock and render straight into it */
    chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
    chunk.length = pa_memblock_get_length(chunk.memblock);

    pa_sink_render_into_full(u->sink, &chunk);
    pa_memblock_unref_fixed(chunk.memblock);

    if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
        if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)

    /* Account for what we just queued */
    u->write_count += frames * u->frame_size;
    u->since_start += frames * u->frame_size;

    pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);

    if ((size_t) frames * u->frame_size >= n_bytes)
    n_bytes -= (size_t) frames * u->frame_size;

    /* Sleep until the watermark would be reached, minus processing time */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;

    return work_done ? 1 : 0;
/* Timer-scheduled/polled write path using classic snd_pcm_writei():
 * renders into u->memchunk and copies it into the device, then computes
 * the next sleep time from the remaining fill level.
 *
 * NOTE(review): this chunk is a truncated extraction -- the loop headers,
 * several branches and closing braces of this function are missing from
 * the visible text. Only the visible statements are annotated below; the
 * control flow must be confirmed against the original file. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;

    pa_sink_assert_ref(u->sink);

    /* Split the configured latency into sleep and processing budget */
    hw_sleep_time(u, &max_sleep_usec, &process_usec);

    if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        /* avail failed: try to recover; 0 means recovered, retry */
        if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
    n_bytes = (size_t) n * u->frame_size;
    left_to_play = check_left_to_play(u, n_bytes, on_timeout);

    /* We won't fill up the playback buffer before at least
     * half the sleep time is over because otherwise we might
     * ask for more data from the clients then they expect. We
     * need to guarantee that clients only have to keep around
     * a single hw buffer length. */

    pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)

    if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
        /* Spurious POLLOUT wakeup: warn about the (likely) driver bug */
        char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
        pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
        pa_log_debug("Not filling up, because already too many iterations.");

    /* Never touch the part of the buffer reserved as "unused" */
    n_bytes -= u->hwbuf_unused;

    snd_pcm_sframes_t frames;
    pa_bool_t after_avail = TRUE;

    /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

    /* Refill the staging chunk if it is empty */
    if (u->memchunk.length <= 0)
        pa_sink_render(u->sink, n_bytes, &u->memchunk);

    pa_assert(u->memchunk.length > 0);

    frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

    if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
        frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

    p = pa_memblock_acquire(u->memchunk.memblock);
    frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
    pa_memblock_release(u->memchunk.memblock);

    if (PA_UNLIKELY(frames < 0)) {
        if (!after_avail && (int) frames == -EAGAIN)
        if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)

    if (!after_avail && frames == 0)
    pa_assert(frames > 0);

    /* Consume the written part of the staging chunk */
    u->memchunk.index += (size_t) frames * u->frame_size;
    u->memchunk.length -= (size_t) frames * u->frame_size;

    if (u->memchunk.length <= 0) {
        pa_memblock_unref(u->memchunk.memblock);
        pa_memchunk_reset(&u->memchunk);

    /* Account for what we just queued */
    u->write_count += frames * u->frame_size;
    u->since_start += frames * u->frame_size;

    /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

    if ((size_t) frames * u->frame_size >= n_bytes)
    n_bytes -= (size_t) frames * u->frame_size;

    /* Sleep until the watermark would be reached, minus processing time */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;

    return work_done ? 1 : 0;
775 static void update_smoother(struct userdata
*u
) {
776 snd_pcm_sframes_t delay
= 0;
779 pa_usec_t now1
= 0, now2
;
780 snd_pcm_status_t
*status
;
782 snd_pcm_status_alloca(&status
);
785 pa_assert(u
->pcm_handle
);
787 /* Let's update the time smoother */
789 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
790 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
794 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
795 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
797 snd_htimestamp_t htstamp
= { 0, 0 };
798 snd_pcm_status_get_htstamp(status
, &htstamp
);
799 now1
= pa_timespec_load(&htstamp
);
802 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
804 now1
= pa_rtclock_now();
806 /* check if the time since the last update is bigger than the interval */
807 if (u
->last_smoother_update
> 0)
808 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
811 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
813 if (PA_UNLIKELY(position
< 0))
816 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
818 pa_smoother_put(u
->smoother
, now1
, now2
);
820 u
->last_smoother_update
= now1
;
821 /* exponentially increase the update interval up to the MAX limit */
822 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
825 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
828 pa_usec_t now1
, now2
;
832 now1
= pa_rtclock_now();
833 now2
= pa_smoother_get(u
->smoother
, now1
);
835 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
837 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
839 if (u
->memchunk
.memblock
)
840 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
845 static int build_pollfd(struct userdata
*u
) {
847 pa_assert(u
->pcm_handle
);
849 if (u
->alsa_rtpoll_item
)
850 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
852 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
858 /* Called from IO context */
859 static int suspend(struct userdata
*u
) {
861 pa_assert(u
->pcm_handle
);
863 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
865 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
866 * take awfully long with our long buffer sizes today. */
867 snd_pcm_close(u
->pcm_handle
);
868 u
->pcm_handle
= NULL
;
870 if (u
->alsa_rtpoll_item
) {
871 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
872 u
->alsa_rtpoll_item
= NULL
;
875 pa_log_info("Device suspended...");
880 /* Called from IO context */
881 static int update_sw_params(struct userdata
*u
) {
882 snd_pcm_uframes_t avail_min
;
887 /* Use the full buffer if noone asked us for anything specific */
893 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
896 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
898 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
900 /* We need at least one sample in our buffer */
902 if (PA_UNLIKELY(b
< u
->frame_size
))
905 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
908 fix_min_sleep_wakeup(u
);
909 fix_tsched_watermark(u
);
912 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
914 /* We need at last one frame in the used part of the buffer */
915 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
918 pa_usec_t sleep_usec
, process_usec
;
920 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
921 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
924 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
926 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
927 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
931 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
936 /* Called from IO context */
937 static int unsuspend(struct userdata
*u
) {
942 snd_pcm_uframes_t period_size
;
945 pa_assert(!u
->pcm_handle
);
947 pa_log_info("Trying resume...");
949 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
950 /*SND_PCM_NONBLOCK|*/
951 SND_PCM_NO_AUTO_RESAMPLE
|
952 SND_PCM_NO_AUTO_CHANNELS
|
953 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
954 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
958 ss
= u
->sink
->sample_spec
;
959 nfrags
= u
->nfragments
;
960 period_size
= u
->fragment_size
/ u
->frame_size
;
964 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
965 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
969 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
970 pa_log_warn("Resume failed, couldn't get original access mode.");
974 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
975 pa_log_warn("Resume failed, couldn't restore original sample settings.");
979 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
980 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
981 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
982 (unsigned long) nfrags
, period_size
* u
->frame_size
);
986 if (update_sw_params(u
) < 0)
989 if (build_pollfd(u
) < 0)
993 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
994 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
995 u
->last_smoother_update
= 0;
1000 pa_log_info("Resumed successfully...");
1005 if (u
->pcm_handle
) {
1006 snd_pcm_close(u
->pcm_handle
);
1007 u
->pcm_handle
= NULL
;
1013 /* Called from IO context */
1014 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1015 struct userdata
*u
= PA_SINK(o
)->userdata
;
1019 case PA_SINK_MESSAGE_GET_LATENCY
: {
1023 r
= sink_get_latency(u
);
1025 *((pa_usec_t
*) data
) = r
;
1030 case PA_SINK_MESSAGE_SET_STATE
:
1032 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1034 case PA_SINK_SUSPENDED
:
1035 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1043 case PA_SINK_RUNNING
:
1045 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1046 if (build_pollfd(u
) < 0)
1050 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1051 if (unsuspend(u
) < 0)
1057 case PA_SINK_UNLINKED
:
1059 case PA_SINK_INVALID_STATE
:
1066 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1069 /* Called from main context */
1070 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1071 pa_sink_state_t old_state
;
1074 pa_sink_assert_ref(s
);
1075 pa_assert_se(u
= s
->userdata
);
1077 old_state
= pa_sink_get_state(u
->sink
);
1079 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1081 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1082 if (reserve_init(u
, u
->device_name
) < 0)
1088 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1089 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1092 pa_assert(u
->mixer_handle
);
1094 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1097 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1098 pa_sink_get_volume(u
->sink
, TRUE
);
1099 pa_sink_get_mute(u
->sink
, TRUE
);
1105 static void sink_get_volume_cb(pa_sink
*s
) {
1106 struct userdata
*u
= s
->userdata
;
1108 char t
[PA_CVOLUME_SNPRINT_MAX
];
1111 pa_assert(u
->mixer_path
);
1112 pa_assert(u
->mixer_handle
);
1114 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1117 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1118 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1120 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1122 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1125 s
->real_volume
= u
->hardware_volume
= r
;
1127 /* Hmm, so the hardware volume changed, let's reset our software volume */
1128 if (u
->mixer_path
->has_dB
)
1129 pa_sink_set_soft_volume(s
, NULL
);
1132 static void sink_set_volume_cb(pa_sink
*s
) {
1133 struct userdata
*u
= s
->userdata
;
1135 char t
[PA_CVOLUME_SNPRINT_MAX
];
1138 pa_assert(u
->mixer_path
);
1139 pa_assert(u
->mixer_handle
);
1141 /* Shift up by the base volume */
1142 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1144 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1147 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1148 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1150 u
->hardware_volume
= r
;
1152 if (u
->mixer_path
->has_dB
) {
1153 pa_cvolume new_soft_volume
;
1154 pa_bool_t accurate_enough
;
1156 /* Match exactly what the user requested by software */
1157 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1159 /* If the adjustment to do in software is only minimal we
1160 * can skip it. That saves us CPU at the expense of a bit of
1163 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1164 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1166 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->real_volume
));
1167 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1168 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t
, sizeof(t
), &new_soft_volume
),
1169 pa_yes_no(accurate_enough
));
1171 if (!accurate_enough
)
1172 s
->soft_volume
= new_soft_volume
;
1175 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1177 /* We can't match exactly what the user requested, hence let's
1178 * at least tell the user about it */
1184 static void sink_get_mute_cb(pa_sink
*s
) {
1185 struct userdata
*u
= s
->userdata
;
1189 pa_assert(u
->mixer_path
);
1190 pa_assert(u
->mixer_handle
);
1192 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1198 static void sink_set_mute_cb(pa_sink
*s
) {
1199 struct userdata
*u
= s
->userdata
;
1202 pa_assert(u
->mixer_path
);
1203 pa_assert(u
->mixer_handle
);
1205 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1208 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1209 struct userdata
*u
= s
->userdata
;
1210 pa_alsa_port_data
*data
;
1214 pa_assert(u
->mixer_handle
);
1216 data
= PA_DEVICE_PORT_DATA(p
);
1218 pa_assert_se(u
->mixer_path
= data
->path
);
1219 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1221 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1222 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1223 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1225 if (u
->mixer_path
->max_dB
> 0.0)
1226 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1228 pa_log_info("No particular base volume set, fixing to 0 dB");
1230 s
->base_volume
= PA_VOLUME_NORM
;
1231 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1235 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1245 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1246 struct userdata
*u
= s
->userdata
;
1253 before
= u
->hwbuf_unused
;
1254 update_sw_params(u
);
1256 /* Let's check whether we now use only a smaller part of the
1257 buffer then before. If so, we need to make sure that subsequent
1258 rewinds are relative to the new maximum fill level and not to the
1259 current fill level. Thus, let's do a full rewind once, to clear
1262 if (u
->hwbuf_unused
> before
) {
1263 pa_log_debug("Requesting rewind due to latency change.");
1264 pa_sink_request_rewind(s
, (size_t) -1);
/* Handle a rewind request from the core: work out how many bytes of
 * already-written audio may safely be taken back from the hardware buffer,
 * ask ALSA to rewind, then report the actually-rewound amount back to the
 * sink. Returns 0 on success, a negative value on unrecoverable PCM errors.
 * NOTE(review): the 'else limit_nbytes = 0;' branch and the returns were
 * dropped by extraction and are reconstructed — verify against upstream. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;

    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* How much of the buffer has the hardware not consumed yet? A negative
     * value is an ALSA error (e.g. an underrun we could not recover from). */
    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    /* Keep at least the watermark plus the free space out of reach of the
     * rewind, so we never rewind into data the DMA may already be reading. */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than requested; recompute in bytes. */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    /* Tell the core that no data was actually rewound. */
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
/* The sink's realtime IO thread. Loops forever: renders audio into the ALSA
 * buffer (mmap or plain write), programs the wakeup timer when timer-based
 * scheduling is active, sleeps in pa_rtpoll_run(), and processes poll
 * revents / recovers from xruns. On fatal error it asks the core to unload
 * the module and then drains messages until PA_MESSAGE_SHUTDOWN arrives.
 * NOTE(review): the for(;;) skeleton, 'u->first' handling, goto labels and
 * several guards were lost in extraction and are reconstructed from the
 * visible fragments — confirm against upstream before relying on them. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate to RT priority if the daemon is configured for it. */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A zero return means a clean shutdown was requested. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                /* Error/hangup bits set: try xrun/suspend recovery and
                 * restart playback from scratch. */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
/* Pick the sink's registry name. Priority: explicit "sink_name" modarg
 * (wins outright), then "name", then a synthesized "alsa_output.<id>[.<map>]"
 * built from the device id / device name and optional mapping. namereg_fail
 * is set TRUE only for user-chosen names, so auto-generated names may be
 * uniquified by the name registry instead of failing.
 * NOTE(review): local declarations, the early return and the else/free glue
 * were dropped by extraction and are reconstructed — confirm upstream. */
static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}
/* Locate and probe a mixer for the opened PCM device. With an explicit
 * "control" element we synthesize and probe a single output path; otherwise
 * we build and probe a whole path set from the mapping. Failure is not
 * fatal for the sink: on error all partially-created mixer state is freed
 * and the sink simply runs without hardware volume/mute control.
 * NOTE(review): the element/mapping branch structure, returns and 'fail:'
 * label were dropped by extraction and are reconstructed — confirm upstream. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:
    /* Tear down whatever was created; the two path containers are mutually
     * exclusive, so free whichever one exists. */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
/* Activate a mixer path and wire hardware volume/mute control into the sink.
 * Selects either the port-chosen path or the first probed path, publishes
 * base_volume / n_volume_steps according to dB support, installs the
 * get/set volume and mute callbacks, and registers mixer-event monitoring
 * on the mainloop. Returns 0 on success (including "no mixer, nothing to
 * do"), -1 if fd monitoring cannot be initialized.
 * NOTE(review): else-branches, returns and several guards were dropped by
 * extraction and are reconstructed — confirm against upstream. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* With a dB scale, place PA_VOLUME_NORM at 0 dB: base_volume is
             * the software attenuation equivalent of the path's max gain. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB information: expose the raw integer control range. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer's fds on the main loop so external volume changes
     * (e.g. alsamixer) are picked up via mixer_callback. */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
/* Create a new ALSA playback sink. Parses module arguments, opens the PCM
 * device (by mapping, by device id with auto-profile, or by device string),
 * negotiates mmap and timer-based scheduling, probes the mixer, creates the
 * pa_sink object and its IO thread, and applies/reads initial volume and
 * mute state. Returns the new sink, or NULL on failure (all partial state
 * is released via userdata_free()).
 * NOTE(review): this body is reconstructed from a lossy extraction; gotos,
 * locals ('map', 'frame_size'), the open-call argument lists and the fail:
 * epilogue were dropped and are restored from context — confirm upstream. */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* --- Argument parsing and buffer metric defaults --- */

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak 'ss' during open and the
     * tsched watermark must be converted relative to the requested spec. */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer scheduling only works with hrtimers in the kernel. */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    /* --- Userdata and infrastructure setup --- */

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* Reserve the device (and watch for other apps reserving it) before
     * actually opening it. */
    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* --- Open the PCM device, one of three ways --- */

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* 'b'/'d' report what the open call could actually deliver. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* --- Build the pa_sink --- */

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    /* --- Latency / watermark configuration --- */

    if (u->use_tsched) {
        /* Convert the watermark via the originally requested spec, since
         * ALSA may have changed the effective sample spec during open. */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
/* Tear down everything owned by the userdata, in dependency order: unlink
 * the sink, stop and join the IO thread, then release queues, buffers,
 * rtpoll items, the PCM (dropped before close), the mixer machinery, the
 * smoother, device reservations and finally the strings and struct itself.
 * Safe to call on a partially-constructed userdata (every member is
 * NULL-checked). NOTE(review): guards and trailing reserve/monitor/pa_xfree
 * calls were dropped by extraction and are reconstructed — confirm upstream. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        /* Synchronously ask the IO thread to shut down, then join it. */
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Drop pending samples so close() does not block draining. */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1995 void pa_alsa_sink_free(pa_sink
*s
) {
1998 pa_sink_assert_ref(s
);
1999 pa_assert_se(u
= s
->userdata
);