core: call pa_sink_get_latency_within_thread() instead of going directly via process_...
[pulseaudio-mirror.git] / src / pulsecore / sink.c
blobe82688929b32c60c91e2d19cddede1d5fb8f227b
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
47 #include "sink.h"
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
57 static void sink_free(pa_object *s);
/* Zero-initialize a pa_sink_new_data struct and attach a fresh, empty
 * property list. Returns its argument so the call can be chained. */
pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
    pa_assert(data);

    pa_zero(*data);
    data->proplist = pa_proplist_new();

    return data;
}
/* Set (or replace) the sink name in the new-data struct. The string is
 * copied; any previously set name is freed first. */
void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
    pa_assert(data);

    pa_xfree(data->name);
    data->name = pa_xstrdup(name);
}
/* Copy the sample spec into the new-data struct. Passing NULL clears the
 * "is set" flag instead of copying. */
void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
    pa_assert(data);

    if ((data->sample_spec_is_set = !!spec))
        data->sample_spec = *spec;
}
/* Copy the channel map into the new-data struct. Passing NULL clears the
 * "is set" flag instead of copying. */
void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
    pa_assert(data);

    if ((data->channel_map_is_set = !!map))
        data->channel_map = *map;
}
/* Copy the initial volume into the new-data struct. Passing NULL clears the
 * "is set" flag instead of copying. */
void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
    pa_assert(data);

    if ((data->volume_is_set = !!volume))
        data->volume = *volume;
}
/* Set the initial mute state; normalizes 'mute' to TRUE/FALSE and marks the
 * field as explicitly set. */
void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
    pa_assert(data);

    data->muted_is_set = TRUE;
    data->muted = !!mute;
}
/* Set (or replace) the name of the port to activate initially. The string is
 * copied; any previously set port name is freed first. */
void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
    pa_assert(data);

    pa_xfree(data->active_port);
    data->active_port = pa_xstrdup(port);
}
/* Release everything owned by a pa_sink_new_data struct: the property list,
 * any ports still in the hashmap (pa_sink_new() normally steals the map),
 * and the name/port strings. Does not free 'data' itself. */
void pa_sink_new_data_done(pa_sink_new_data *data) {
    pa_assert(data);

    pa_proplist_free(data->proplist);

    if (data->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(data->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(data->ports, NULL, NULL);
    }

    pa_xfree(data->name);
    pa_xfree(data->active_port);
}
/* Allocate a new device port with copies of 'name' and 'description'.
 * 'extra' additional bytes are reserved after the (aligned) struct so the
 * caller can keep implementation-specific data in the same allocation.
 * Priority starts at 0; other trailing bytes are left uninitialized. */
pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
    pa_device_port *p;

    pa_assert(name);

    p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
    p->name = pa_xstrdup(name);
    p->description = pa_xstrdup(description);

    p->priority = 0;

    return p;
}
/* Free a device port and the strings it owns. */
void pa_device_port_free(pa_device_port *p) {
    pa_assert(p);

    pa_xfree(p->name);
    pa_xfree(p->description);
    pa_xfree(p);
}
/* Called from main context */
/* Clear all implementation-provided callbacks; used on creation and again on
 * unlink so a dead sink can never call back into its (possibly unloaded)
 * implementation module. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
/* Called from main context */
/* Create a new sink from the (validated and fixated) new-data description.
 * Registers the name, fires the NEW and FIXATE hooks, fills in all main- and
 * thread-side fields, and creates the accompanying ".monitor" source.
 * Returns NULL on failure. The sink is left in PA_SINK_INIT state; the
 * caller must still set the asyncmsgq and call pa_sink_put(). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    /* Validate everything the hooks may have touched; pa_return_null_if_fail()
     * leaks 's' on failure (see FIXME above). */
    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);

    if (!data->muted_is_set)
        data->muted = FALSE;

    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->virtual_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No port explicitly requested: pick the highest-priority one. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source that mirrors what is played. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
/* Called from main context */
/* Transition the sink into 'state'. Notifies the implementation via the
 * set_state() callback, synchronously forwards the change to the IO thread
 * via the asyncmsgq (rolling back set_state() if that fails), fires the
 * STATE_CHANGED hook/subscription event, and tells all inputs and the
 * monitor source about suspend/resume transitions. Returns 0 on success or
 * a negative error code. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Undo the implementation-side change so both sides agree. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
/* Called from main context */
/* Finish sink construction: verify the implementation initialized all
 * mandatory fields, fix up the volume-related flags, sanity-check the
 * latency/monitor-source invariants, move the sink from INIT to IDLE, put
 * the monitor source, and announce the new sink to subscribers and hooks. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Software volume handling is always accurate, hence dB-capable. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
/* Called from main context */
/* Detach the sink from the core: fire the UNLINK hook, unregister the name,
 * remove it from the core/card idxsets, kill all remaining inputs, switch to
 * UNLINKED state, clear callbacks, unlink the monitor source and fire the
 * post-unlink events. Idempotent and also undoes pa_sink_new() registration. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j); /* kill must make progress; catch inputs that refuse to go away */
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
/* Called from main context */
/* Destructor invoked when the last reference is dropped. Unlinks first if
 * necessary, then releases the monitor source, input containers, silence
 * memblock, strings, proplist and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the IO-thread-side references to inputs as well. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
/* Called from main context */
/* Attach the message queue used to talk to the IO thread; propagated to the
 * monitor source so both share the same queue. */
void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    s->asyncmsgq = q;

    if (s->monitor_source)
        pa_source_set_asyncmsgq(s->monitor_source, q);
}
/* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object of the IO thread; propagated to the monitor
 * source so both poll in the same loop. */
void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    s->thread_info.rtpoll = p;

    if (s->monitor_source)
        pa_source_set_rtpoll(s->monitor_source, p);
}
/* Called from main context */
/* Recompute RUNNING vs. IDLE from the current number of users. A suspended
 * sink is left alone. Returns sink_set_state()'s result, or 0. */
int pa_sink_update_status(pa_sink*s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
/* Called from main context */
/* Add or remove one suspend cause bit (mirrored on the monitor source) and
 * enter/leave SUSPENDED accordingly: the sink stays suspended as long as any
 * cause remains. Returns sink_set_state()'s result, or 0 if nothing changed. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already in the state the causes imply? Then nothing to do. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
/* Called from main context */
/* Begin moving all inputs away from this sink: each input that accepts the
 * move is referenced and pushed onto 'q' (allocated here if NULL). The queue
 * is later consumed by pa_sink_move_all_finish() or _fail(). */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before starting the move, since a successful
     * start_move removes 'i' from s->inputs. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
/* Called from main context */
/* Complete a mass move started with pa_sink_move_all_start(): attach every
 * queued input to sink 's' (falling back to fail_move on error), drop the
 * queue references and free the queue. */
void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(q);

    while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
        if (pa_sink_input_finish_move(i, s, save) < 0)
            pa_sink_input_fail_move(i);

        pa_sink_input_unref(i);
    }

    pa_queue_free(q, NULL, NULL);
}
/* Called from main context */
/* Abort a mass move: notify every queued input that the move failed, drop
 * the queue references and free the queue. */
void pa_sink_move_all_fail(pa_queue *q) {
    pa_sink_input *i;

    pa_assert_ctl_context();
    pa_assert(q);

    while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
        pa_sink_input_fail_move(i);
        pa_sink_input_unref(i);
    }

    pa_queue_free(q, NULL, NULL);
}
674 /* Called from IO thread context */
675 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
676 pa_sink_input *i;
677 void *state = NULL;
679 pa_sink_assert_ref(s);
680 pa_sink_assert_io_context(s);
681 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
683 /* If nobody requested this and this is actually no real rewind
684 * then we can short cut this. Please note that this means that
685 * not all rewind requests triggered upstream will always be
686 * translated in actual requests! */
687 if (!s->thread_info.rewind_requested && nbytes <= 0)
688 return;
690 s->thread_info.rewind_nbytes = 0;
691 s->thread_info.rewind_requested = FALSE;
693 if (s->thread_info.state == PA_SINK_SUSPENDED)
694 return;
696 if (nbytes > 0)
697 pa_log_debug("Processing rewind...");
699 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
700 pa_sink_input_assert_ref(i);
701 pa_sink_input_process_rewind(i, nbytes);
704 if (nbytes > 0)
705 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
706 pa_source_process_rewind(s->monitor_source, nbytes);
/* Called from IO thread context */
/* Peek up to *length bytes from every connected input into 'info' (at most
 * 'maxinfo' entries). Silent chunks are dropped immediately; each kept entry
 * holds a reference to its input in ->userdata. On return *length is reduced
 * to the shortest chunk collected. Returns the number of entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silence contributes nothing to the mix; drop it right away. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
/* Called from IO thread context */
/* After a render pass: advance every input by result->length bytes, feed
 * direct outputs and the monitor source with the rendered/peeked data, and
 * release all references held by the pa_mix_info array ('n' entries filled
 * by fill_mix_info()). */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array.
         * 'p' is kept across iterations (mod n) so that in the common,
         * order-preserved case the match is found on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* Post this input's own data (with its volume applied)
                     * to its direct outputs. */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* Input contributed nothing (silent); post silence. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
846 /* Called from IO thread context */
847 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
848 pa_mix_info info[MAX_MIX_CHANNELS];
849 unsigned n;
850 size_t block_size_max;
852 pa_sink_assert_ref(s);
853 pa_sink_assert_io_context(s);
854 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
855 pa_assert(pa_frame_aligned(length, &s->sample_spec));
856 pa_assert(result);
858 pa_sink_ref(s);
860 pa_assert(!s->thread_info.rewind_requested);
861 pa_assert(s->thread_info.rewind_nbytes == 0);
863 if (s->thread_info.state == PA_SINK_SUSPENDED) {
864 result->memblock = pa_memblock_ref(s->silence.memblock);
865 result->index = s->silence.index;
866 result->length = PA_MIN(s->silence.length, length);
867 return;
870 if (length <= 0)
871 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
873 block_size_max = pa_mempool_block_size_max(s->core->mempool);
874 if (length > block_size_max)
875 length = pa_frame_align(block_size_max, &s->sample_spec);
877 pa_assert(length > 0);
879 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
881 if (n == 0) {
883 *result = s->silence;
884 pa_memblock_ref(result->memblock);
886 if (result->length > length)
887 result->length = length;
889 } else if (n == 1) {
890 pa_cvolume volume;
892 *result = info[0].chunk;
893 pa_memblock_ref(result->memblock);
895 if (result->length > length)
896 result->length = length;
898 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
900 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
901 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
902 pa_memblock_unref(result->memblock);
903 pa_silence_memchunk_get(&s->core->silence_cache,
904 s->core->mempool,
905 result,
906 &s->sample_spec,
907 result->length);
908 } else {
909 pa_memchunk_make_writable(result, 0);
910 pa_volume_memchunk(result, &s->sample_spec, &volume);
913 } else {
914 void *ptr;
915 result->memblock = pa_memblock_new(s->core->mempool, length);
917 ptr = pa_memblock_acquire(result->memblock);
918 result->length = pa_mix(info, n,
919 ptr, length,
920 &s->sample_spec,
921 &s->thread_info.soft_volume,
922 s->thread_info.soft_muted);
923 pa_memblock_release(result->memblock);
925 result->index = 0;
928 inputs_drop(s, info, n, result);
930 pa_sink_unref(s);
933 /* Called from IO thread context */
934 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
935 pa_mix_info info[MAX_MIX_CHANNELS];
936 unsigned n;
937 size_t length, block_size_max;
939 pa_sink_assert_ref(s);
940 pa_sink_assert_io_context(s);
941 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
942 pa_assert(target);
943 pa_assert(target->memblock);
944 pa_assert(target->length > 0);
945 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
947 pa_sink_ref(s);
949 pa_assert(!s->thread_info.rewind_requested);
950 pa_assert(s->thread_info.rewind_nbytes == 0);
952 if (s->thread_info.state == PA_SINK_SUSPENDED) {
953 pa_silence_memchunk(target, &s->sample_spec);
954 return;
957 length = target->length;
958 block_size_max = pa_mempool_block_size_max(s->core->mempool);
959 if (length > block_size_max)
960 length = pa_frame_align(block_size_max, &s->sample_spec);
962 pa_assert(length > 0);
964 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
966 if (n == 0) {
967 if (target->length > length)
968 target->length = length;
970 pa_silence_memchunk(target, &s->sample_spec);
971 } else if (n == 1) {
972 pa_cvolume volume;
974 if (target->length > length)
975 target->length = length;
977 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
979 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
980 pa_silence_memchunk(target, &s->sample_spec);
981 else {
982 pa_memchunk vchunk;
984 vchunk = info[0].chunk;
985 pa_memblock_ref(vchunk.memblock);
987 if (vchunk.length > length)
988 vchunk.length = length;
990 if (!pa_cvolume_is_norm(&volume)) {
991 pa_memchunk_make_writable(&vchunk, 0);
992 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
995 pa_memchunk_memcpy(target, &vchunk);
996 pa_memblock_unref(vchunk.memblock);
999 } else {
1000 void *ptr;
1002 ptr = pa_memblock_acquire(target->memblock);
1004 target->length = pa_mix(info, n,
1005 (uint8_t*) ptr + target->index, length,
1006 &s->sample_spec,
1007 &s->thread_info.soft_volume,
1008 s->thread_info.soft_muted);
1010 pa_memblock_release(target->memblock);
1013 inputs_drop(s, info, n, target);
1015 pa_sink_unref(s);
/* Called from IO thread context */
/* Fill 'target' completely: repeatedly call pa_sink_render_into() (which may
 * deliver short chunks) until every byte of target->length is rendered. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    l = target->length;   /* bytes still to render */
    d = 0;                /* offset already rendered */
    while (l > 0) {
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
/* Called from IO thread context */
/* Render exactly 'length' bytes into 'result': first a pa_sink_render()-style
 * pass (length1st may come out shorter), then top up the remainder with
 * pa_sink_render_into() passes until the full length is produced. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No contributors: start with silence of the first-pass length. */
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Single contributor: pass its chunk through, applying the combined
         * soft volume only when necessary. */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        /* Multiple contributors: mix the first pass into a new block. */
        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* First pass came up short: render the rest in place. */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }

        result->length = length;
    }

    pa_sink_unref(s);
}
/* Called from main thread */
/* Query the sink's current playback latency by sending a synchronous
 * GET_LATENCY message to the IO thread. Returns 0 when suspended or when the
 * sink does not report latency (PA_SINK_LATENCY unset). */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
1167 /* Called from IO thread */
1168 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1169 pa_usec_t usec = 0;
1170 pa_msgobject *o;
1172 pa_sink_assert_ref(s);
1173 pa_sink_assert_io_context(s);
1174 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1176 /* The returned value is supposed to be in the time domain of the sound card! */
1178 if (s->thread_info.state == PA_SINK_SUSPENDED)
1179 return 0;
1181 if (!(s->flags & PA_SINK_LATENCY))
1182 return 0;
1184 o = PA_MSGOBJECT(s);
1186 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1188 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1189 return -1;
1191 return usec;
/* Recompute a sink input's relative and soft volumes against a new
 * (already remapped) sink volume. */
static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
    unsigned c;

    pa_sink_input_assert_ref(i);
    pa_assert(new_volume->channels == i->sample_spec.channels);

    /*
     * This basically calculates:
     *
     * i->relative_volume := i->virtual_volume / new_volume
     * i->soft_volume := i->relative_volume * i->volume_factor
     */

    /* The new sink volume passed in here must already be remapped to
     * the sink input's channel map! */

    i->soft_volume.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++)

        if (new_volume->values[c] <= PA_VOLUME_MUTED)
            /* We leave i->relative_volume untouched */
            /* (this also avoids dividing by a zero linear volume below) */
            i->soft_volume.values[c] = PA_VOLUME_MUTED;
        else {
            i->relative_volume[c] =
                pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
                pa_sw_volume_to_linear(new_volume->values[c]);

            i->soft_volume.values[c] = pa_sw_volume_from_linear(
                    i->relative_volume[c] *
                    pa_sw_volume_to_linear(i->volume_factor.values[c]));
        }

    /* Hooks have the ability to play games with i->soft_volume */
    pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
}
1234 /* Called from main thread */
1235 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1236 pa_sink_input *i;
1237 uint32_t idx;
1239 pa_sink_assert_ref(s);
1240 pa_assert_ctl_context();
1241 pa_assert(new_volume);
1242 pa_assert(PA_SINK_IS_LINKED(s->state));
1243 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1245 /* This is called whenever a sink input volume changes or a sink
1246 * input is added/removed and we might need to fix up the sink
1247 * volume accordingly. Please note that we don't actually update
1248 * the sinks volume here, we only return how it needs to be
1249 * updated. The caller should then call pa_sink_set_volume().*/
1251 if (pa_idxset_isempty(s->inputs)) {
1252 /* In the special case that we have no sink input we leave the
1253 * volume unmodified. */
1254 *new_volume = s->reference_volume;
1255 return;
1258 pa_cvolume_mute(new_volume, s->channel_map.channels);
1260 /* First let's determine the new maximum volume of all inputs
1261 * connected to this sink */
1262 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1263 unsigned c;
1264 pa_cvolume remapped_volume;
1266 remapped_volume = i->virtual_volume;
1267 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1269 for (c = 0; c < new_volume->channels; c++)
1270 if (remapped_volume.values[c] > new_volume->values[c])
1271 new_volume->values[c] = remapped_volume.values[c];
1274 /* Then, let's update the soft volumes of all inputs connected
1275 * to this sink */
1276 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1277 pa_cvolume remapped_new_volume;
1279 remapped_new_volume = *new_volume;
1280 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1281 compute_new_soft_volume(i, &remapped_new_volume);
1283 /* We don't copy soft_volume to the thread_info data here
1284 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1285 * want the update to be atomically with the sink volume
1286 * update, hence we do it within the pa_sink_set_volume() call
1287 * below */
1291 /* Called from main thread */
1292 void pa_sink_propagate_flat_volume(pa_sink *s) {
1293 pa_sink_input *i;
1294 uint32_t idx;
1296 pa_sink_assert_ref(s);
1297 pa_assert_ctl_context();
1298 pa_assert(PA_SINK_IS_LINKED(s->state));
1299 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1301 /* This is called whenever the sink volume changes that is not
1302 * caused by a sink input volume change. We need to fix up the
1303 * sink input volumes accordingly */
1305 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1306 pa_cvolume sink_volume, new_virtual_volume;
1307 unsigned c;
1309 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1311 sink_volume = s->virtual_volume;
1312 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1314 for (c = 0; c < i->sample_spec.channels; c++)
1315 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1316 i->relative_volume[c] *
1317 pa_sw_volume_to_linear(sink_volume.values[c]));
1319 new_virtual_volume.channels = i->sample_spec.channels;
1321 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1322 i->virtual_volume = new_virtual_volume;
1324 /* Hmm, the soft volume might no longer actually match
1325 * what has been chosen as new virtual volume here,
1326 * especially when the old volume was
1327 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1328 * volumes here. */
1329 compute_new_soft_volume(i, &sink_volume);
1331 /* The virtual volume changed, let's tell people so */
1332 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1336 /* If the soft_volume of any of the sink inputs got changed, let's
1337 * make sure the thread copies are synced up. */
1338 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
/* Called from main thread */
/* Set the sink's virtual volume. propagate: push the change down to
 * inputs in flat-volume mode; sendmsg: notify the IO thread; 
 * become_reference: also overwrite the reference volume; save: mark
 * the volume for persistent storage. */
void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
    pa_bool_t virtual_volume_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(volume);
    pa_assert(pa_cvolume_valid(volume));
    pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));

    virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
    s->virtual_volume = *volume;
    /* Keep the save flag if nothing changed; force it on explicit request */
    s->save_volume = (!virtual_volume_changed && s->save_volume) || save;

    if (become_reference)
        s->reference_volume = s->virtual_volume;

    /* Propagate this volume change back to the inputs */
    if (virtual_volume_changed)
        if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
            pa_sink_propagate_flat_volume(s);

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->virtual_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (virtual_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1385 /* Called from main thread. Only to be called by sink implementor */
1386 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1387 pa_sink_assert_ref(s);
1388 pa_assert_ctl_context();
1389 pa_assert(volume);
1391 s->soft_volume = *volume;
1393 if (PA_SINK_IS_LINKED(s->state))
1394 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1395 else
1396 s->thread_info.soft_volume = *volume;
1399 /* Called from main thread */
1400 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1401 pa_sink_assert_ref(s);
1402 pa_assert_ctl_context();
1403 pa_assert(PA_SINK_IS_LINKED(s->state));
1405 if (s->refresh_volume || force_refresh) {
1406 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1408 if (s->get_volume)
1409 s->get_volume(s);
1411 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1413 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1415 s->reference_volume = s->virtual_volume;
1417 /* Something got changed in the hardware. It probably
1418 * makes sense to save changed hw settings given that hw
1419 * volume changes not triggered by PA are almost certainly
1420 * done by the user. */
1421 s->save_volume = TRUE;
1423 if (s->flags & PA_SINK_FLAT_VOLUME)
1424 pa_sink_propagate_flat_volume(s);
1426 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1430 return reference ? &s->reference_volume : &s->virtual_volume;
1433 /* Called from main thread */
1434 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1435 pa_sink_assert_ref(s);
1436 pa_assert_ctl_context();
1437 pa_assert(PA_SINK_IS_LINKED(s->state));
1439 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1440 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1441 return;
1443 s->reference_volume = s->virtual_volume = *new_volume;
1444 s->save_volume = TRUE;
1446 if (s->flags & PA_SINK_FLAT_VOLUME)
1447 pa_sink_propagate_flat_volume(s);
1449 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1452 /* Called from main thread */
1453 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1454 pa_bool_t old_muted;
1456 pa_sink_assert_ref(s);
1457 pa_assert_ctl_context();
1458 pa_assert(PA_SINK_IS_LINKED(s->state));
1460 old_muted = s->muted;
1461 s->muted = mute;
1462 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1464 if (s->set_mute)
1465 s->set_mute(s);
1467 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1469 if (old_muted != s->muted)
1470 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1473 /* Called from main thread */
1474 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1476 pa_sink_assert_ref(s);
1477 pa_assert_ctl_context();
1478 pa_assert(PA_SINK_IS_LINKED(s->state));
1480 if (s->refresh_muted || force_refresh) {
1481 pa_bool_t old_muted = s->muted;
1483 if (s->get_mute)
1484 s->get_mute(s);
1486 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1488 if (old_muted != s->muted) {
1489 s->save_muted = TRUE;
1491 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1493 /* Make sure the soft mute status stays in sync */
1494 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1499 return s->muted;
/* Called from main thread */
void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The sink implementor may call this if the mute state changed to make sure everyone is notified */

    if (s->muted == new_muted)
        return;

    s->muted = new_muted;
    /* A change reported by the implementor originates in hw/user action, so remember it */
    s->save_muted = TRUE;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
/* Called from main thread */
/* Merge p into the sink's property list (p may be NULL to just fire
 * the change notification) and notify hooks/subscribers if linked. */
pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (p)
        pa_proplist_update(s->proplist, mode, p);

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    return TRUE;
}
1535 /* Called from main thread */
1536 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1537 void pa_sink_set_description(pa_sink *s, const char *description) {
1538 const char *old;
1539 pa_sink_assert_ref(s);
1540 pa_assert_ctl_context();
1542 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1543 return;
1545 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1547 if (old && description && pa_streq(old, description))
1548 return;
1550 if (description)
1551 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1552 else
1553 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1555 if (s->monitor_source) {
1556 char *n;
1558 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1559 pa_source_set_description(s->monitor_source, n);
1560 pa_xfree(n);
1563 if (PA_SINK_IS_LINKED(s->state)) {
1564 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1565 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
/* Called from main thread */
/* Number of streams linked to this sink, including streams attached
 * to the monitor source. */
unsigned pa_sink_linked_by(pa_sink *s) {
    unsigned ret;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    ret = pa_idxset_size(s->inputs);

    /* We add in the number of streams connected to us here. Please
     * note the asymmetry to pa_sink_used_by()! */

    if (s->monitor_source)
        ret += pa_source_linked_by(s->monitor_source);

    return ret;
}
1588 /* Called from main thread */
1589 unsigned pa_sink_used_by(pa_sink *s) {
1590 unsigned ret;
1592 pa_sink_assert_ref(s);
1593 pa_assert_ctl_context();
1594 pa_assert(PA_SINK_IS_LINKED(s->state));
1596 ret = pa_idxset_size(s->inputs);
1597 pa_assert(ret >= s->n_corked);
1599 /* Streams connected to our monitor source do not matter for
1600 * pa_sink_used_by()!.*/
1602 return ret - s->n_corked;
1605 /* Called from main thread */
1606 unsigned pa_sink_check_suspend(pa_sink *s) {
1607 unsigned ret;
1608 pa_sink_input *i;
1609 uint32_t idx;
1611 pa_sink_assert_ref(s);
1612 pa_assert_ctl_context();
1614 if (!PA_SINK_IS_LINKED(s->state))
1615 return 0;
1617 ret = 0;
1619 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1620 pa_sink_input_state_t st;
1622 st = pa_sink_input_get_state(i);
1623 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1625 if (st == PA_SINK_INPUT_CORKED)
1626 continue;
1628 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1629 continue;
1631 ret ++;
1634 if (s->monitor_source)
1635 ret += pa_source_check_suspend(s->monitor_source);
1637 return ret;
/* Called from the IO thread */
/* Copy each input's main-thread soft volume into its thread copy and
 * request a rewind so the new volume takes effect as soon as possible. */
static void sync_input_volumes_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        /* Skip inputs whose thread copy is already up to date */
        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
            continue;

        i->thread_info.soft_volume = i->soft_volume;
        pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
    }
}
/* Called from IO thread, except when it is not */
/* Default message handler for sinks. Sink implementations chain up to
 * this from their own process_msg(). Returns 0 on success, -1 for
 * messages not handled here (notably GET_LATENCY, which the
 * implementor must provide). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);

    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the synchronized-stream links into the thread copies. */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unhook this input from any synchronized neighbours. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Rewrite what is still buffered for this stream so it
                 * can be replayed on the destination sink. */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip ahead by what is already queued in the device. */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        /* Nothing to do here by default; the caller reads the volume
         * fields directly after the synchronous message returns. */
        case PA_SINK_MESSAGE_GET_VOLUME:
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* When entering suspend, drop any pending rewind request. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            /* Tell the inputs when we cross the suspended/opened boundary. */
            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* (pa_usec_t) -1 means "no request from anyone". */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        /* Not handled by this default implementation. */
        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    return -1;
}
/* Called from main thread */
/* Suspend or resume every sink of the core. Returns 0, or the last
 * error; one failing sink does not stop the iteration. */
int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink *sink;
    uint32_t idx;
    int ret = 0;

    pa_core_assert_ref(c);
    pa_assert_ctl_context();
    pa_assert(cause != 0);

    PA_IDXSET_FOREACH(sink, c->sinks, idx) {
        int r;

        if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
            ret = r;
    }

    return ret;
}
/* Called from main thread */
/* Synchronously ask the IO thread to detach all streams (see
 * pa_sink_detach_within_thread()). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
/* Called from main thread */
/* Synchronously ask the IO thread to reattach all streams (see
 * pa_sink_attach_within_thread()). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2019 /* Called from IO thread */
2020 void pa_sink_detach_within_thread(pa_sink *s) {
2021 pa_sink_input *i;
2022 void *state = NULL;
2024 pa_sink_assert_ref(s);
2025 pa_sink_assert_io_context(s);
2026 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2028 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2029 if (i->detach)
2030 i->detach(i);
2032 if (s->monitor_source)
2033 pa_source_detach_within_thread(s->monitor_source);
2036 /* Called from IO thread */
2037 void pa_sink_attach_within_thread(pa_sink *s) {
2038 pa_sink_input *i;
2039 void *state = NULL;
2041 pa_sink_assert_ref(s);
2042 pa_sink_assert_io_context(s);
2043 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2045 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2046 if (i->attach)
2047 i->attach(i);
2049 if (s->monitor_source)
2050 pa_source_attach_within_thread(s->monitor_source);
2053 /* Called from IO thread */
2054 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2055 pa_sink_assert_ref(s);
2056 pa_sink_assert_io_context(s);
2057 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2059 if (s->thread_info.state == PA_SINK_SUSPENDED)
2060 return;
2062 if (nbytes == (size_t) -1)
2063 nbytes = s->thread_info.max_rewind;
2065 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2067 if (s->thread_info.rewind_requested &&
2068 nbytes <= s->thread_info.rewind_nbytes)
2069 return;
2071 s->thread_info.rewind_nbytes = nbytes;
2072 s->thread_info.rewind_requested = TRUE;
2074 if (s->request_rewind)
2075 s->request_rewind(s);
/* Called from IO thread */
/* Compute (and cache) the effective requested latency: the minimum of
 * all stream requests and the monitor source's request, clamped to the
 * configured range. (pa_usec_t) -1 means nobody requested anything. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency support, the fixed latency (clamped to
     * the configured range) is all we can offer. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Return the cached value if it is still valid */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the minimum over all stream requests ... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ... including what the monitor source asks for */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
/* Called from main thread */
/* Fetch the effective requested latency from the IO thread; a
 * suspended sink reports 0. */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
2132 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2133 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2134 pa_sink_input *i;
2135 void *state = NULL;
2137 pa_sink_assert_ref(s);
2138 pa_sink_assert_io_context(s);
2140 if (max_rewind == s->thread_info.max_rewind)
2141 return;
2143 s->thread_info.max_rewind = max_rewind;
2145 if (PA_SINK_IS_LINKED(s->thread_info.state))
2146 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2147 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2149 if (s->monitor_source)
2150 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
/* Called from main thread */
/* Once linked, the value belongs to the IO thread, so route the update
 * through the message queue; before that we may set it directly. */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
2164 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2165 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2166 void *state = NULL;
2168 pa_sink_assert_ref(s);
2169 pa_sink_assert_io_context(s);
2171 if (max_request == s->thread_info.max_request)
2172 return;
2174 s->thread_info.max_request = max_request;
2176 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2177 pa_sink_input *i;
2179 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2180 pa_sink_input_update_max_request(i, s->thread_info.max_request);
/* Called from main thread */
/* Once linked, the value belongs to the IO thread, so route the update
 * through the message queue; before that we may set it directly. */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
/* Called from IO thread */
/* Drop the cached requested latency and notify the sink and its
 * inputs. If dynamic is TRUE and the sink has no dynamic latency,
 * there is nothing to invalidate and we return early. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2219 /* Called from main thread */
2220 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2221 pa_sink_assert_ref(s);
2222 pa_assert_ctl_context();
2224 /* min_latency == 0: no limit
2225 * min_latency anything else: specified limit
2227 * Similar for max_latency */
2229 if (min_latency < ABSOLUTE_MIN_LATENCY)
2230 min_latency = ABSOLUTE_MIN_LATENCY;
2232 if (max_latency <= 0 ||
2233 max_latency > ABSOLUTE_MAX_LATENCY)
2234 max_latency = ABSOLUTE_MAX_LATENCY;
2236 pa_assert(min_latency <= max_latency);
2238 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2239 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2240 max_latency == ABSOLUTE_MAX_LATENCY) ||
2241 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2243 if (PA_SINK_IS_LINKED(s->state)) {
2244 pa_usec_t r[2];
2246 r[0] = min_latency;
2247 r[1] = max_latency;
2249 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2250 } else
2251 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2254 /* Called from main thread */
2255 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2256 pa_sink_assert_ref(s);
2257 pa_assert_ctl_context();
2258 pa_assert(min_latency);
2259 pa_assert(max_latency);
2261 if (PA_SINK_IS_LINKED(s->state)) {
2262 pa_usec_t r[2] = { 0, 0 };
2264 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2266 *min_latency = r[0];
2267 *max_latency = r[1];
2268 } else {
2269 *min_latency = s->thread_info.min_latency;
2270 *max_latency = s->thread_info.max_latency;
2274 /* Called from IO thread */
2275 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2276 void *state = NULL;
2278 pa_sink_assert_ref(s);
2279 pa_sink_assert_io_context(s);
2281 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2282 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2283 pa_assert(min_latency <= max_latency);
2285 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2286 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2287 max_latency == ABSOLUTE_MAX_LATENCY) ||
2288 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2290 s->thread_info.min_latency = min_latency;
2291 s->thread_info.max_latency = max_latency;
2293 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2294 pa_sink_input *i;
2296 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2297 if (i->update_sink_latency_range)
2298 i->update_sink_latency_range(i);
2301 pa_sink_invalidate_requested_latency(s, FALSE);
2303 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2306 /* Called from main thread */
2307 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2308 pa_sink_assert_ref(s);
2309 pa_assert_ctl_context();
2311 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2312 pa_assert(latency == 0);
2313 return;
2316 if (latency < ABSOLUTE_MIN_LATENCY)
2317 latency = ABSOLUTE_MIN_LATENCY;
2319 if (latency > ABSOLUTE_MAX_LATENCY)
2320 latency = ABSOLUTE_MAX_LATENCY;
2322 if (PA_SINK_IS_LINKED(s->state))
2323 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2324 else
2325 s->thread_info.fixed_latency = latency;
2327 pa_source_set_fixed_latency(s->monitor_source, latency);
2330 /* Called from main thread */
2331 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2332 pa_usec_t latency;
2334 pa_sink_assert_ref(s);
2335 pa_assert_ctl_context();
2337 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2338 return 0;
2340 if (PA_SINK_IS_LINKED(s->state))
2341 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2342 else
2343 latency = s->thread_info.fixed_latency;
2345 return latency;
2348 /* Called from IO thread */
2349 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2350 pa_sink_assert_ref(s);
2351 pa_sink_assert_io_context(s);
2353 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2354 pa_assert(latency == 0);
2355 return;
2358 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2359 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2361 if (s->thread_info.fixed_latency == latency)
2362 return;
2364 s->thread_info.fixed_latency = latency;
2366 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2367 pa_sink_input *i;
2368 void *state = NULL;
2370 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2371 if (i->update_sink_fixed_latency)
2372 i->update_sink_fixed_latency(i);
2375 pa_sink_invalidate_requested_latency(s, FALSE);
2377 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2380 /* Called from main context */
2381 size_t pa_sink_get_max_rewind(pa_sink *s) {
2382 size_t r;
2383 pa_sink_assert_ref(s);
2384 pa_assert_ctl_context();
2386 if (!PA_SINK_IS_LINKED(s->state))
2387 return s->thread_info.max_rewind;
2389 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2391 return r;
2394 /* Called from main context */
2395 size_t pa_sink_get_max_request(pa_sink *s) {
2396 size_t r;
2397 pa_sink_assert_ref(s);
2398 pa_assert_ctl_context();
2400 if (!PA_SINK_IS_LINKED(s->state))
2401 return s->thread_info.max_request;
2403 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2405 return r;
2408 /* Called from main context */
2409 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2410 pa_device_port *port;
2412 pa_sink_assert_ref(s);
2413 pa_assert_ctl_context();
2415 if (!s->set_port) {
2416 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2417 return -PA_ERR_NOTIMPLEMENTED;
2420 if (!s->ports)
2421 return -PA_ERR_NOENTITY;
2423 if (!(port = pa_hashmap_get(s->ports, name)))
2424 return -PA_ERR_NOENTITY;
2426 if (s->active_port == port) {
2427 s->save_port = s->save_port || save;
2428 return 0;
2431 if ((s->set_port(s, port)) < 0)
2432 return -PA_ERR_NOENTITY;
2434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2436 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2438 s->active_port = port;
2439 s->save_port = save;
2441 return 0;
2444 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2445 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2447 pa_assert(p);
2449 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2450 return TRUE;
2452 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2454 if (pa_streq(ff, "microphone"))
2455 t = "audio-input-microphone";
2456 else if (pa_streq(ff, "webcam"))
2457 t = "camera-web";
2458 else if (pa_streq(ff, "computer"))
2459 t = "computer";
2460 else if (pa_streq(ff, "handset"))
2461 t = "phone";
2462 else if (pa_streq(ff, "portable"))
2463 t = "multimedia-player";
2464 else if (pa_streq(ff, "tv"))
2465 t = "video-display";
2468 * The following icons are not part of the icon naming spec,
2469 * because Rodney Dawes sucks as the maintainer of that spec.
2471 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2473 else if (pa_streq(ff, "headset"))
2474 t = "audio-headset";
2475 else if (pa_streq(ff, "headphone"))
2476 t = "audio-headphones";
2477 else if (pa_streq(ff, "speaker"))
2478 t = "audio-speakers";
2479 else if (pa_streq(ff, "hands-free"))
2480 t = "audio-handsfree";
2483 if (!t)
2484 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2485 if (pa_streq(c, "modem"))
2486 t = "modem";
2488 if (!t) {
2489 if (is_sink)
2490 t = "audio-card";
2491 else
2492 t = "audio-input-microphone";
2495 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2496 if (strstr(profile, "analog"))
2497 s = "-analog";
2498 else if (strstr(profile, "iec958"))
2499 s = "-iec958";
2500 else if (strstr(profile, "hdmi"))
2501 s = "-hdmi";
2504 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2506 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2508 return TRUE;
2511 pa_bool_t pa_device_init_description(pa_proplist *p) {
2512 const char *s, *d = NULL, *k;
2513 pa_assert(p);
2515 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2516 return TRUE;
2518 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2519 if (pa_streq(s, "internal"))
2520 d = _("Internal Audio");
2522 if (!d)
2523 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2524 if (pa_streq(s, "modem"))
2525 d = _("Modem");
2527 if (!d)
2528 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2530 if (!d)
2531 return FALSE;
2533 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2535 if (d && k)
2536 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2537 else if (d)
2538 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2540 return TRUE;
2543 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2544 const char *s;
2545 pa_assert(p);
2547 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2548 return TRUE;
2550 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2551 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2552 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2553 return TRUE;
2556 return FALSE;