src/pulsecore/sink.c
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
47 #include "sink.h"
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
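/* The three latency limits above are pa_usec_t values, i.e. microseconds
 * (hence the PA_USEC_PER_SEC/PA_USEC_PER_MSEC factors); ABSOLUTE_MIN_LATENCY
 * is thus 500 us. */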
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
57 static void sink_free(pa_object *s);
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
65 return data;
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
113 pa_proplist_free(data->proplist);
115 if (data->ports) {
116 pa_device_port *p;
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
131 pa_assert(name);
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
137 p->priority = 0;
139 return p;
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
181 s = pa_msgobject_new(pa_sink);
183 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
184 pa_log_debug("Failed to register name %s.", data->name);
185 pa_xfree(s);
186 return NULL;
189 pa_sink_new_data_set_name(data, name);
191 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
192 pa_xfree(s);
193 pa_namereg_unregister(core, name);
194 return NULL;
197 /* FIXME, need to free s here on failure */
199 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
200 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
202 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
204 if (!data->channel_map_is_set)
205 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
207 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
208 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
210 if (!data->volume_is_set)
211 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
213 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
214 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
216 if (!data->muted_is_set)
217 data->muted = FALSE;
219 if (data->card)
220 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
222 pa_device_init_description(data->proplist);
223 pa_device_init_icon(data->proplist, TRUE);
224 pa_device_init_intended_roles(data->proplist);
226 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
227 pa_xfree(s);
228 pa_namereg_unregister(core, name);
229 return NULL;
232 s->parent.parent.free = sink_free;
233 s->parent.process_msg = pa_sink_process_msg;
235 s->core = core;
236 s->state = PA_SINK_INIT;
237 s->flags = flags;
238 s->suspend_cause = 0;
239 s->name = pa_xstrdup(name);
240 s->proplist = pa_proplist_copy(data->proplist);
241 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
242 s->module = data->module;
243 s->card = data->card;
245 s->sample_spec = data->sample_spec;
246 s->channel_map = data->channel_map;
248 s->inputs = pa_idxset_new(NULL, NULL);
249 s->n_corked = 0;
251 s->reference_volume = s->virtual_volume = data->volume;
252 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
253 s->base_volume = PA_VOLUME_NORM;
254 s->n_volume_steps = PA_VOLUME_NORM+1;
255 s->muted = data->muted;
256 s->refresh_volume = s->refresh_muted = FALSE;
258 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
260 reset_callbacks(s);
261 s->userdata = NULL;
263 s->asyncmsgq = NULL;
264 s->rtpoll = NULL;
266 /* As a minor optimization we just steal the list instead of
267 * copying it here */
268 s->ports = data->ports;
269 data->ports = NULL;
271 s->active_port = NULL;
272 s->save_port = FALSE;
274 if (data->active_port && s->ports)
275 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
276 s->save_port = data->save_port;
278 if (!s->active_port && s->ports) {
279 void *state;
280 pa_device_port *p;
282 PA_HASHMAP_FOREACH(p, s->ports, state)
283 if (!s->active_port || p->priority > s->active_port->priority)
284 s->active_port = p;
287 s->save_volume = data->save_volume;
288 s->save_muted = data->save_muted;
290 pa_silence_memchunk_get(
291 &core->silence_cache,
292 core->mempool,
293 &s->silence,
294 &s->sample_spec,
297 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
298 s->thread_info.soft_volume = s->soft_volume;
299 s->thread_info.soft_muted = s->muted;
300 s->thread_info.state = s->state;
301 s->thread_info.rewind_nbytes = 0;
302 s->thread_info.rewind_requested = FALSE;
303 s->thread_info.max_rewind = 0;
304 s->thread_info.max_request = 0;
305 s->thread_info.requested_latency_valid = FALSE;
306 s->thread_info.requested_latency = 0;
307 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
308 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
310 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
312 if (s->card)
313 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
315 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
316 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
317 s->index,
318 s->name,
319 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
320 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
321 pt);
322 pa_xfree(pt);
324 pa_source_new_data_init(&source_data);
325 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
326 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
327 source_data.name = pa_sprintf_malloc("%s.monitor", name);
328 source_data.driver = data->driver;
329 source_data.module = data->module;
330 source_data.card = data->card;
332 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
333 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
334 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
336 s->monitor_source = pa_source_new(core, &source_data,
337 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
338 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
340 pa_source_new_data_done(&source_data);
342 if (!s->monitor_source) {
343 pa_sink_unlink(s);
344 pa_sink_unref(s);
345 return NULL;
348 s->monitor_source->monitor_of = s;
350 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
351 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
353 return s;
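/* A rough usage sketch (illustrative only, not taken from this file) of how a
 * sink implementor typically drives the construction API above; 'core', 'ss',
 * 'map', 'msgq' and 'rtpoll' are placeholders and error handling is omitted:
 *
 *   pa_sink_new_data data;
 *   pa_sink *sink;
 *
 *   pa_sink_new_data_init(&data);
 *   pa_sink_new_data_set_name(&data, "my_sink");
 *   pa_sink_new_data_set_sample_spec(&data, &ss);
 *   pa_sink_new_data_set_channel_map(&data, &map);
 *   sink = pa_sink_new(core, &data, PA_SINK_LATENCY);
 *   pa_sink_new_data_done(&data);
 *
 *   if (sink) {
 *       pa_sink_set_asyncmsgq(sink, msgq);    (must be set before pa_sink_put())
 *       pa_sink_set_rtpoll(sink, rtpoll);     (also required before _put())
 *       pa_sink_put(sink);
 *   }
 */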
356 /* Called from main context */
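/* Ordering in sink_set_state() below: the driver's set_state() callback runs
 * first, then the new state is pushed synchronously to the IO thread via
 * PA_SINK_MESSAGE_SET_STATE; if that message fails, the callback is rolled
 * back to the original state. Only then are hooks and subscription events
 * fired and suspend/resume propagated to the sink inputs. */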
357 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
358 int ret;
359 pa_bool_t suspend_change;
360 pa_sink_state_t original_state;
362 pa_assert(s);
364 if (s->state == state)
365 return 0;
367 original_state = s->state;
369 suspend_change =
370 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
371 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
373 if (s->set_state)
374 if ((ret = s->set_state(s, state)) < 0)
375 return ret;
377 if (s->asyncmsgq)
378 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
380 if (s->set_state)
381 s->set_state(s, original_state);
383 return ret;
386 s->state = state;
388     if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
389 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
393 if (suspend_change) {
394 pa_sink_input *i;
395 uint32_t idx;
397 /* We're suspending or resuming, tell everyone about it */
399 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
400 if (s->state == PA_SINK_SUSPENDED &&
401 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
402 pa_sink_input_kill(i);
403 else if (i->suspend)
404 i->suspend(i, state == PA_SINK_SUSPENDED);
406 if (s->monitor_source)
407 pa_source_sync_suspend(s->monitor_source);
410 return 0;
413 /* Called from main context */
414 void pa_sink_put(pa_sink* s) {
415 pa_sink_assert_ref(s);
417 pa_assert(s->state == PA_SINK_INIT);
419 /* The following fields must be initialized properly when calling _put() */
420 pa_assert(s->asyncmsgq);
421 pa_assert(s->rtpoll);
422 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
424 /* Generally, flags should be initialized via pa_sink_new(). As a
425 * special exception we allow volume related flags to be set
426 * between _new() and _put(). */
428 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
429 s->flags |= PA_SINK_DECIBEL_VOLUME;
431 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
432 s->flags |= PA_SINK_FLAT_VOLUME;
434 s->thread_info.soft_volume = s->soft_volume;
435 s->thread_info.soft_muted = s->muted;
437 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
438 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
439 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
440 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
441 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
443 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
444 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
445 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
447 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
449 pa_source_put(s->monitor_source);
451 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
452 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
455 /* Called from main context */
456 void pa_sink_unlink(pa_sink* s) {
457 pa_bool_t linked;
458 pa_sink_input *i, *j = NULL;
460 pa_assert(s);
462 /* Please note that pa_sink_unlink() does more than simply
463 * reversing pa_sink_put(). It also undoes the registrations
464 * already done in pa_sink_new()! */
466 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
467 * may be called multiple times on the same sink without bad
468 * effects. */
470 linked = PA_SINK_IS_LINKED(s->state);
472 if (linked)
473 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
475 if (s->state != PA_SINK_UNLINKED)
476 pa_namereg_unregister(s->core, s->name);
477 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
479 if (s->card)
480 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
482 while ((i = pa_idxset_first(s->inputs, NULL))) {
483 pa_assert(i != j);
484 pa_sink_input_kill(i);
485 j = i;
488 if (linked)
489 sink_set_state(s, PA_SINK_UNLINKED);
490 else
491 s->state = PA_SINK_UNLINKED;
493 reset_callbacks(s);
495 if (s->monitor_source)
496 pa_source_unlink(s->monitor_source);
498 if (linked) {
499 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
500 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
504 /* Called from main context */
505 static void sink_free(pa_object *o) {
506 pa_sink *s = PA_SINK(o);
507 pa_sink_input *i;
509 pa_assert(s);
510 pa_assert(pa_sink_refcnt(s) == 0);
512 if (PA_SINK_IS_LINKED(s->state))
513 pa_sink_unlink(s);
515 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
517 if (s->monitor_source) {
518 pa_source_unref(s->monitor_source);
519 s->monitor_source = NULL;
522 pa_idxset_free(s->inputs, NULL, NULL);
524 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
525 pa_sink_input_unref(i);
527 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
529 if (s->silence.memblock)
530 pa_memblock_unref(s->silence.memblock);
532 pa_xfree(s->name);
533 pa_xfree(s->driver);
535 if (s->proplist)
536 pa_proplist_free(s->proplist);
538 if (s->ports) {
539 pa_device_port *p;
541 while ((p = pa_hashmap_steal_first(s->ports)))
542 pa_device_port_free(p);
544 pa_hashmap_free(s->ports, NULL, NULL);
547 pa_xfree(s);
550 /* Called from main context */
551 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
552 pa_sink_assert_ref(s);
554 s->asyncmsgq = q;
556 if (s->monitor_source)
557 pa_source_set_asyncmsgq(s->monitor_source, q);
560 /* Called from main context */
561 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
562 pa_sink_assert_ref(s);
564 s->rtpoll = p;
566 if (s->monitor_source)
567 pa_source_set_rtpoll(s->monitor_source, p);
570 /* Called from main context */
571 int pa_sink_update_status(pa_sink*s) {
572 pa_sink_assert_ref(s);
573 pa_assert(PA_SINK_IS_LINKED(s->state));
575 if (s->state == PA_SINK_SUSPENDED)
576 return 0;
578 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
581 /* Called from main context */
582 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
583 pa_sink_assert_ref(s);
584 pa_assert(PA_SINK_IS_LINKED(s->state));
585 pa_assert(cause != 0);
587 if (suspend) {
588 s->suspend_cause |= cause;
589 s->monitor_source->suspend_cause |= cause;
590 } else {
591 s->suspend_cause &= ~cause;
592 s->monitor_source->suspend_cause &= ~cause;
595 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
596 return 0;
598 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
600 if (s->suspend_cause)
601 return sink_set_state(s, PA_SINK_SUSPENDED);
602 else
603 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
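/* Note on pa_sink_suspend() above: 'cause' is a bitmask (pa_suspend_cause_t),
 * so several subsystems may hold a suspend at the same time and the sink only
 * resumes once the last cause has been cleared. A typical -- illustrative --
 * call would be pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE) from an idle
 * detector, later undone with pa_sink_suspend(s, FALSE, PA_SUSPEND_IDLE). */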
606 /* Called from main context */
607 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
608 pa_sink_input *i, *n;
609 uint32_t idx;
611 pa_sink_assert_ref(s);
612 pa_assert(PA_SINK_IS_LINKED(s->state));
614 if (!q)
615 q = pa_queue_new();
617 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
618 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
620 pa_sink_input_ref(i);
622 if (pa_sink_input_start_move(i) >= 0)
623 pa_queue_push(q, i);
624 else
625 pa_sink_input_unref(i);
628 return q;
631 /* Called from main context */
632 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
633 pa_sink_input *i;
635 pa_sink_assert_ref(s);
636 pa_assert(PA_SINK_IS_LINKED(s->state));
637 pa_assert(q);
639 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
640 if (pa_sink_input_finish_move(i, s, save) < 0)
641 pa_sink_input_kill(i);
643 pa_sink_input_unref(i);
646 pa_queue_free(q, NULL, NULL);
649 /* Called from main context */
650 void pa_sink_move_all_fail(pa_queue *q) {
651 pa_sink_input *i;
652 pa_assert(q);
654 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
655 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
656 pa_sink_input_kill(i);
657 pa_sink_input_unref(i);
661 pa_queue_free(q, NULL, NULL);
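/* Illustrative pattern (assumed, not from this file) for the move_all helpers
 * above -- detach all inputs, try to re-home them, fall back if that fails:
 *
 *   pa_queue *q = pa_sink_move_all_start(old_sink, NULL);
 *   if (new_sink)
 *       pa_sink_move_all_finish(new_sink, q, FALSE);
 *   else
 *       pa_sink_move_all_fail(q);
 */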
664 /* Called from IO thread context */
665 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
666 pa_sink_input *i;
667 void *state = NULL;
669 pa_sink_assert_ref(s);
670 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
672 /* If nobody requested this and this is actually no real rewind
673 * then we can short cut this. Please note that this means that
674 * not all rewind requests triggered upstream will always be
675      * translated into actual requests! */
676 if (!s->thread_info.rewind_requested && nbytes <= 0)
677 return;
679 s->thread_info.rewind_nbytes = 0;
680 s->thread_info.rewind_requested = FALSE;
682 if (s->thread_info.state == PA_SINK_SUSPENDED)
683 return;
685 if (nbytes > 0)
686 pa_log_debug("Processing rewind...");
688 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
689 pa_sink_input_assert_ref(i);
690 pa_sink_input_process_rewind(i, nbytes);
693 if (nbytes > 0)
694 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
695 pa_source_process_rewind(s->monitor_source, nbytes);
698 /* Called from IO thread context */
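/* fill_mix_info() peeks the next chunk of up to maxinfo connected inputs into
 * info[], recording each chunk and its volume and keeping a reference to the
 * input in info->userdata. Chunks that turn out to be pure silence are
 * released again and not counted. *length is shrunk to the shortest chunk
 * peeked, so the caller only mixes data every input can actually provide.
 * Returns the number of info[] entries filled in. */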
699 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
700 pa_sink_input *i;
701 unsigned n = 0;
702 void *state = NULL;
703 size_t mixlength = *length;
705 pa_sink_assert_ref(s);
706 pa_assert(info);
708 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
709 pa_sink_input_assert_ref(i);
711 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
713 if (mixlength == 0 || info->chunk.length < mixlength)
714 mixlength = info->chunk.length;
716 if (pa_memblock_is_silence(info->chunk.memblock)) {
717 pa_memblock_unref(info->chunk.memblock);
718 continue;
721 info->userdata = pa_sink_input_ref(i);
723 pa_assert(info->chunk.memblock);
724 pa_assert(info->chunk.length > 0);
726 info++;
727 n++;
728 maxinfo--;
731 if (mixlength > 0)
732 *length = mixlength;
734 return n;
737 /* Called from IO thread context */
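/* inputs_drop() is the counterpart of fill_mix_info(): after mixing it
 * advances every connected input by result->length bytes, forwards the
 * corresponding per-input (volume-adjusted) data or silence to any direct
 * outputs attached to the monitor source, releases the references and
 * memblocks stored in info[], and finally posts the mixed result to the
 * monitor source. */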
738 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
739 pa_sink_input *i;
740 void *state = NULL;
741 unsigned p = 0;
742 unsigned n_unreffed = 0;
744 pa_sink_assert_ref(s);
745 pa_assert(result);
746 pa_assert(result->memblock);
747 pa_assert(result->length > 0);
749 /* We optimize for the case where the order of the inputs has not changed */
751 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
752 unsigned j;
753 pa_mix_info* m = NULL;
755 pa_sink_input_assert_ref(i);
757         /* Let's try to find the matching entry in the pa_mix_info array */
758 for (j = 0; j < n; j ++) {
760 if (info[p].userdata == i) {
761 m = info + p;
762 break;
765 p++;
766 if (p >= n)
767 p = 0;
770 /* Drop read data */
771 pa_sink_input_drop(i, result->length);
773 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
775 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
776 void *ostate = NULL;
777 pa_source_output *o;
778 pa_memchunk c;
780 if (m && m->chunk.memblock) {
781 c = m->chunk;
782 pa_memblock_ref(c.memblock);
783 pa_assert(result->length <= c.length);
784 c.length = result->length;
786 pa_memchunk_make_writable(&c, 0);
787 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
788 } else {
789 c = s->silence;
790 pa_memblock_ref(c.memblock);
791 pa_assert(result->length <= c.length);
792 c.length = result->length;
795 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
796 pa_source_output_assert_ref(o);
797 pa_assert(o->direct_on_input == i);
798 pa_source_post_direct(s->monitor_source, o, &c);
801 pa_memblock_unref(c.memblock);
805 if (m) {
806 if (m->chunk.memblock)
807 pa_memblock_unref(m->chunk.memblock);
808 pa_memchunk_reset(&m->chunk);
810 pa_sink_input_unref(m->userdata);
811 m->userdata = NULL;
813 n_unreffed += 1;
817 /* Now drop references to entries that are included in the
818 * pa_mix_info array but don't exist anymore */
820 if (n_unreffed < n) {
821 for (; n > 0; info++, n--) {
822 if (info->userdata)
823 pa_sink_input_unref(info->userdata);
824 if (info->chunk.memblock)
825 pa_memblock_unref(info->chunk.memblock);
829 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
830 pa_source_post(s->monitor_source, result);
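/* The four render entry points below differ as follows: pa_sink_render()
 * allocates and returns up to 'length' bytes (possibly fewer);
 * pa_sink_render_into() fills the beginning of a caller-provided memchunk,
 * again possibly with fewer bytes than requested; pa_sink_render_into_full()
 * loops pa_sink_render_into() until the whole target chunk is filled; and
 * pa_sink_render_full() returns exactly 'length' bytes, topping up a short
 * first pass with further pa_sink_render_into() calls. */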
833 /* Called from IO thread context */
834 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
835 pa_mix_info info[MAX_MIX_CHANNELS];
836 unsigned n;
837 size_t block_size_max;
839 pa_sink_assert_ref(s);
840 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
841 pa_assert(pa_frame_aligned(length, &s->sample_spec));
842 pa_assert(result);
844 pa_sink_ref(s);
846 pa_assert(!s->thread_info.rewind_requested);
847 pa_assert(s->thread_info.rewind_nbytes == 0);
849 if (s->thread_info.state == PA_SINK_SUSPENDED) {
850 result->memblock = pa_memblock_ref(s->silence.memblock);
851 result->index = s->silence.index;
852 result->length = PA_MIN(s->silence.length, length);
853 return;
856 if (length <= 0)
857 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
859 block_size_max = pa_mempool_block_size_max(s->core->mempool);
860 if (length > block_size_max)
861 length = pa_frame_align(block_size_max, &s->sample_spec);
863 pa_assert(length > 0);
865 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
867 if (n == 0) {
869 *result = s->silence;
870 pa_memblock_ref(result->memblock);
872 if (result->length > length)
873 result->length = length;
875 } else if (n == 1) {
876 pa_cvolume volume;
878 *result = info[0].chunk;
879 pa_memblock_ref(result->memblock);
881 if (result->length > length)
882 result->length = length;
884 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
886 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
887 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
888 pa_memblock_unref(result->memblock);
889 pa_silence_memchunk_get(&s->core->silence_cache,
890 s->core->mempool,
891 result,
892 &s->sample_spec,
893 result->length);
894 } else {
895 pa_memchunk_make_writable(result, 0);
896 pa_volume_memchunk(result, &s->sample_spec, &volume);
899 } else {
900 void *ptr;
901 result->memblock = pa_memblock_new(s->core->mempool, length);
903 ptr = pa_memblock_acquire(result->memblock);
904 result->length = pa_mix(info, n,
905 ptr, length,
906 &s->sample_spec,
907 &s->thread_info.soft_volume,
908 s->thread_info.soft_muted);
909 pa_memblock_release(result->memblock);
911 result->index = 0;
914 inputs_drop(s, info, n, result);
916 pa_sink_unref(s);
919 /* Called from IO thread context */
920 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
921 pa_mix_info info[MAX_MIX_CHANNELS];
922 unsigned n;
923 size_t length, block_size_max;
925 pa_sink_assert_ref(s);
926 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
927 pa_assert(target);
928 pa_assert(target->memblock);
929 pa_assert(target->length > 0);
930 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
932 pa_sink_ref(s);
934 pa_assert(!s->thread_info.rewind_requested);
935 pa_assert(s->thread_info.rewind_nbytes == 0);
937 if (s->thread_info.state == PA_SINK_SUSPENDED) {
938 pa_silence_memchunk(target, &s->sample_spec);
939 return;
942 length = target->length;
943 block_size_max = pa_mempool_block_size_max(s->core->mempool);
944 if (length > block_size_max)
945 length = pa_frame_align(block_size_max, &s->sample_spec);
947 pa_assert(length > 0);
949 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
951 if (n == 0) {
952 if (target->length > length)
953 target->length = length;
955 pa_silence_memchunk(target, &s->sample_spec);
956 } else if (n == 1) {
957 pa_cvolume volume;
959 if (target->length > length)
960 target->length = length;
962 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
964 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
965 pa_silence_memchunk(target, &s->sample_spec);
966 else {
967 pa_memchunk vchunk;
969 vchunk = info[0].chunk;
970 pa_memblock_ref(vchunk.memblock);
972 if (vchunk.length > length)
973 vchunk.length = length;
975 if (!pa_cvolume_is_norm(&volume)) {
976 pa_memchunk_make_writable(&vchunk, 0);
977 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
980 pa_memchunk_memcpy(target, &vchunk);
981 pa_memblock_unref(vchunk.memblock);
984 } else {
985 void *ptr;
987 ptr = pa_memblock_acquire(target->memblock);
989 target->length = pa_mix(info, n,
990 (uint8_t*) ptr + target->index, length,
991 &s->sample_spec,
992 &s->thread_info.soft_volume,
993 s->thread_info.soft_muted);
995 pa_memblock_release(target->memblock);
998 inputs_drop(s, info, n, target);
1000 pa_sink_unref(s);
1003 /* Called from IO thread context */
1004 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1005 pa_memchunk chunk;
1006 size_t l, d;
1008 pa_sink_assert_ref(s);
1009 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1010 pa_assert(target);
1011 pa_assert(target->memblock);
1012 pa_assert(target->length > 0);
1013 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1015 pa_sink_ref(s);
1017 pa_assert(!s->thread_info.rewind_requested);
1018 pa_assert(s->thread_info.rewind_nbytes == 0);
1020 l = target->length;
1021 d = 0;
1022 while (l > 0) {
1023 chunk = *target;
1024 chunk.index += d;
1025 chunk.length -= d;
1027 pa_sink_render_into(s, &chunk);
1029 d += chunk.length;
1030 l -= chunk.length;
1033 pa_sink_unref(s);
1036 /* Called from IO thread context */
1037 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1038 pa_mix_info info[MAX_MIX_CHANNELS];
1039 size_t length1st = length;
1040 unsigned n;
1042 pa_sink_assert_ref(s);
1043 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1044 pa_assert(length > 0);
1045 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1046 pa_assert(result);
1048 pa_sink_ref(s);
1050 pa_assert(!s->thread_info.rewind_requested);
1051 pa_assert(s->thread_info.rewind_nbytes == 0);
1053 pa_assert(length > 0);
1055 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1057 if (n == 0) {
1058 pa_silence_memchunk_get(&s->core->silence_cache,
1059 s->core->mempool,
1060 result,
1061 &s->sample_spec,
1062 length1st);
1063 } else if (n == 1) {
1064 pa_cvolume volume;
1066 *result = info[0].chunk;
1067 pa_memblock_ref(result->memblock);
1069 if (result->length > length)
1070 result->length = length;
1072 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1074 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1075 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1076 pa_memblock_unref(result->memblock);
1077 pa_silence_memchunk_get(&s->core->silence_cache,
1078 s->core->mempool,
1079 result,
1080 &s->sample_spec,
1081 result->length);
1082 } else {
1083 pa_memchunk_make_writable(result, length);
1084 pa_volume_memchunk(result, &s->sample_spec, &volume);
1087 } else {
1088 void *ptr;
1090 result->index = 0;
1091 result->memblock = pa_memblock_new(s->core->mempool, length);
1093 ptr = pa_memblock_acquire(result->memblock);
1095 result->length = pa_mix(info, n,
1096 (uint8_t*) ptr + result->index, length1st,
1097 &s->sample_spec,
1098 &s->thread_info.soft_volume,
1099 s->thread_info.soft_muted);
1101 pa_memblock_release(result->memblock);
1104 inputs_drop(s, info, n, result);
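/* The first mixing pass above may have produced fewer than 'length' bytes
 * (it is limited by the shortest input chunk), so make the result writable
 * at full size and keep rendering into the remaining tail until exactly
 * 'length' bytes are available. */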
1106 if (result->length < length) {
1107 pa_memchunk chunk;
1108 size_t l, d;
1109 pa_memchunk_make_writable(result, length);
1111 l = length - result->length;
1112 d = result->index + result->length;
1113 while (l > 0) {
1114 chunk = *result;
1115 chunk.index = d;
1116 chunk.length = l;
1118 pa_sink_render_into(s, &chunk);
1120 d += chunk.length;
1121 l -= chunk.length;
1123 result->length = length;
1126 pa_sink_unref(s);
1129 /* Called from main thread */
1130 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1131 pa_usec_t usec = 0;
1133 pa_sink_assert_ref(s);
1134 pa_assert(PA_SINK_IS_LINKED(s->state));
1136 /* The returned value is supposed to be in the time domain of the sound card! */
1138 if (s->state == PA_SINK_SUSPENDED)
1139 return 0;
1141 if (!(s->flags & PA_SINK_LATENCY))
1142 return 0;
1144 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1146 return usec;
1149 /* Called from IO thread */
1150 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1151 pa_usec_t usec = 0;
1152 pa_msgobject *o;
1154 pa_sink_assert_ref(s);
1155 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1157 /* The returned value is supposed to be in the time domain of the sound card! */
1159 if (s->thread_info.state == PA_SINK_SUSPENDED)
1160 return 0;
1162 if (!(s->flags & PA_SINK_LATENCY))
1163 return 0;
1165 o = PA_MSGOBJECT(s);
1167 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1169 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1170 return -1;
1172 return usec;
1175 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1176 unsigned c;
1178 pa_sink_input_assert_ref(i);
1179 pa_assert(new_volume->channels == i->sample_spec.channels);
1182 * This basically calculates:
1184 * i->relative_volume := i->virtual_volume / new_volume
1185 * i->soft_volume := i->relative_volume * i->volume_factor
1188 /* The new sink volume passed in here must already be remapped to
1189 * the sink input's channel map! */
1191 i->soft_volume.channels = i->sample_spec.channels;
1193 for (c = 0; c < i->sample_spec.channels; c++)
1195 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1196 /* We leave i->relative_volume untouched */
1197 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1198 else {
1199 i->relative_volume[c] =
1200 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1201 pa_sw_volume_to_linear(new_volume->values[c]);
1203 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1204 i->relative_volume[c] *
1205 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1208 /* Hooks have the ability to play games with i->soft_volume */
1209 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1211 /* We don't copy the soft_volume to the thread_info data
1212 * here. That must be done by the caller */
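/* Illustrative numbers (not from the source): if, in linear terms, the
 * input's virtual volume is 0.5, the new (remapped) sink volume is 1.0 and
 * the volume factor is 1.0, then relative_volume becomes 0.5/1.0 = 0.5 and
 * the soft volume ends up as pa_sw_volume_from_linear(0.5 * 1.0). */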
1215 /* Called from main thread */
1216 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1217 pa_sink_input *i;
1218 uint32_t idx;
1220 pa_sink_assert_ref(s);
1221 pa_assert(new_volume);
1222 pa_assert(PA_SINK_IS_LINKED(s->state));
1223 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1225 /* This is called whenever a sink input volume changes or a sink
1226 * input is added/removed and we might need to fix up the sink
1227 * volume accordingly. Please note that we don't actually update
1228 * the sinks volume here, we only return how it needs to be
1229 * updated. The caller should then call pa_sink_set_volume().*/
1231 if (pa_idxset_isempty(s->inputs)) {
1232 /* In the special case that we have no sink input we leave the
1233 * volume unmodified. */
1234 *new_volume = s->reference_volume;
1235 return;
1238 pa_cvolume_mute(new_volume, s->channel_map.channels);
1240 /* First let's determine the new maximum volume of all inputs
1241 * connected to this sink */
1242 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1243 unsigned c;
1244 pa_cvolume remapped_volume;
1246 remapped_volume = i->virtual_volume;
1247 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1249 for (c = 0; c < new_volume->channels; c++)
1250 if (remapped_volume.values[c] > new_volume->values[c])
1251 new_volume->values[c] = remapped_volume.values[c];
1254 /* Then, let's update the soft volumes of all inputs connected
1255 * to this sink */
1256 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1257 pa_cvolume remapped_new_volume;
1259 remapped_new_volume = *new_volume;
1260 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1261 compute_new_soft_volume(i, &remapped_new_volume);
1263 /* We don't copy soft_volume to the thread_info data here
1264 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1265 * want the update to be atomically with the sink volume
1266 * update, hence we do it within the pa_sink_set_volume() call
1267 * below */
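/* Illustrative example (assumed values): with two inputs whose remapped
 * virtual volumes are 0.4 and 0.8 in linear terms, the new sink volume
 * becomes 0.8 per channel (the maximum), and compute_new_soft_volume() then
 * gives the quieter input a relative volume of 0.4/0.8 = 0.5 while the
 * louder one plays at 1.0. */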
1271 /* Called from main thread */
1272 void pa_sink_propagate_flat_volume(pa_sink *s) {
1273 pa_sink_input *i;
1274 uint32_t idx;
1276 pa_sink_assert_ref(s);
1277 pa_assert(PA_SINK_IS_LINKED(s->state));
1278 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1280 /* This is called whenever the sink volume changes that is not
1281 * caused by a sink input volume change. We need to fix up the
1282 * sink input volumes accordingly */
1284 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1285 pa_cvolume sink_volume, new_virtual_volume;
1286 unsigned c;
1288 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1290 sink_volume = s->virtual_volume;
1291 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1293 for (c = 0; c < i->sample_spec.channels; c++)
1294 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1295 i->relative_volume[c] *
1296 pa_sw_volume_to_linear(sink_volume.values[c]));
1298 new_virtual_volume.channels = i->sample_spec.channels;
1300 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1301 i->virtual_volume = new_virtual_volume;
1303 /* Hmm, the soft volume might no longer actually match
1304 * what has been chosen as new virtual volume here,
1305 * especially when the old volume was
1306 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1307 * volumes here. */
1308 compute_new_soft_volume(i, &sink_volume);
1310 /* The virtual volume changed, let's tell people so */
1311 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1315 /* If the soft_volume of any of the sink inputs got changed, let's
1316 * make sure the thread copies are synced up. */
1317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1320 /* Called from main thread */
1321 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1322 pa_bool_t virtual_volume_changed;
1324 pa_sink_assert_ref(s);
1325 pa_assert(PA_SINK_IS_LINKED(s->state));
1326 pa_assert(volume);
1327 pa_assert(pa_cvolume_valid(volume));
1328 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1330 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1331 s->virtual_volume = *volume;
1332 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1334 if (become_reference)
1335 s->reference_volume = s->virtual_volume;
1337 /* Propagate this volume change back to the inputs */
1338 if (virtual_volume_changed)
1339 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1340 pa_sink_propagate_flat_volume(s);
1342 if (s->set_volume) {
1343 /* If we have a function set_volume(), then we do not apply a
1344 * soft volume by default. However, set_volume() is free to
1345 * apply one to s->soft_volume */
1347 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1348 s->set_volume(s);
1350 } else
1351 /* If we have no function set_volume(), then the soft volume
1352 * becomes the virtual volume */
1353 s->soft_volume = s->virtual_volume;
1355 /* This tells the sink that soft and/or virtual volume changed */
1356 if (sendmsg)
1357 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1359 if (virtual_volume_changed)
1360 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
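/* Parameters of pa_sink_set_volume() above: 'propagate' pushes a changed
 * virtual volume down to the inputs in flat volume mode, 'sendmsg' posts
 * PA_SINK_MESSAGE_SET_VOLUME so the IO thread picks up the new soft volume,
 * 'become_reference' additionally copies the new virtual volume into
 * reference_volume, and 'save' marks the volume as worth remembering (e.g.
 * by a restore module). */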
1363 /* Called from main thread. Only to be called by sink implementor */
1364 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1365 pa_sink_assert_ref(s);
1366 pa_assert(volume);
1368 s->soft_volume = *volume;
1370 if (PA_SINK_IS_LINKED(s->state))
1371 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1372 else
1373 s->thread_info.soft_volume = *volume;
1376 /* Called from main thread */
1377 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1378 pa_sink_assert_ref(s);
1380 if (s->refresh_volume || force_refresh) {
1381 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1383 if (s->get_volume)
1384 s->get_volume(s);
1386 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1388 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1390 s->reference_volume = s->virtual_volume;
1392 /* Something got changed in the hardware. It probably
1393 * makes sense to save changed hw settings given that hw
1394 * volume changes not triggered by PA are almost certainly
1395 * done by the user. */
1396 s->save_volume = TRUE;
1398 if (s->flags & PA_SINK_FLAT_VOLUME)
1399 pa_sink_propagate_flat_volume(s);
1401 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1405 return reference ? &s->reference_volume : &s->virtual_volume;
1408 /* Called from main thread */
1409 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1410 pa_sink_assert_ref(s);
1412 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1413 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1414 return;
1416 s->reference_volume = s->virtual_volume = *new_volume;
1417 s->save_volume = TRUE;
1419 if (s->flags & PA_SINK_FLAT_VOLUME)
1420 pa_sink_propagate_flat_volume(s);
1422 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1425 /* Called from main thread */
1426 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1427 pa_bool_t old_muted;
1429 pa_sink_assert_ref(s);
1430 pa_assert(PA_SINK_IS_LINKED(s->state));
1432 old_muted = s->muted;
1433 s->muted = mute;
1434 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1436 if (s->set_mute)
1437 s->set_mute(s);
1439 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1441 if (old_muted != s->muted)
1442 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1445 /* Called from main thread */
1446 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1448 pa_sink_assert_ref(s);
1450 if (s->refresh_muted || force_refresh) {
1451 pa_bool_t old_muted = s->muted;
1453 if (s->get_mute)
1454 s->get_mute(s);
1456 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1458 if (old_muted != s->muted) {
1459 s->save_muted = TRUE;
1461 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1463 /* Make sure the soft mute status stays in sync */
1464 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1469 return s->muted;
1472 /* Called from main thread */
1473 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1474 pa_sink_assert_ref(s);
1476     /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1478 if (s->muted == new_muted)
1479 return;
1481 s->muted = new_muted;
1482 s->save_muted = TRUE;
1484 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1487 /* Called from main thread */
1488 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1489 pa_sink_assert_ref(s);
1491 if (p)
1492 pa_proplist_update(s->proplist, mode, p);
1494 if (PA_SINK_IS_LINKED(s->state)) {
1495 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1496 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1499 return TRUE;
1502 /* Called from main thread */
1503 void pa_sink_set_description(pa_sink *s, const char *description) {
1504 const char *old;
1505 pa_sink_assert_ref(s);
1507 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1508 return;
1510 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1512 if (old && description && !strcmp(old, description))
1513 return;
1515 if (description)
1516 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1517 else
1518 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1520 if (s->monitor_source) {
1521 char *n;
1523 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1524 pa_source_set_description(s->monitor_source, n);
1525 pa_xfree(n);
1528 if (PA_SINK_IS_LINKED(s->state)) {
1529 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1530 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1534 /* Called from main thread */
1535 unsigned pa_sink_linked_by(pa_sink *s) {
1536 unsigned ret;
1538 pa_sink_assert_ref(s);
1539 pa_assert(PA_SINK_IS_LINKED(s->state));
1541 ret = pa_idxset_size(s->inputs);
1543 /* We add in the number of streams connected to us here. Please
1544      * note the asymmetry to pa_sink_used_by()! */
1546 if (s->monitor_source)
1547 ret += pa_source_linked_by(s->monitor_source);
1549 return ret;
1552 /* Called from main thread */
1553 unsigned pa_sink_used_by(pa_sink *s) {
1554 unsigned ret;
1556 pa_sink_assert_ref(s);
1557 pa_assert(PA_SINK_IS_LINKED(s->state));
1559 ret = pa_idxset_size(s->inputs);
1560 pa_assert(ret >= s->n_corked);
1562 /* Streams connected to our monitor source do not matter for
1563      * pa_sink_used_by()! */
1565 return ret - s->n_corked;
1568 /* Called from main thread */
1569 unsigned pa_sink_check_suspend(pa_sink *s) {
1570 unsigned ret;
1571 pa_sink_input *i;
1572 uint32_t idx;
1574 pa_sink_assert_ref(s);
1576 if (!PA_SINK_IS_LINKED(s->state))
1577 return 0;
1579 ret = 0;
1581 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1582 pa_sink_input_state_t st;
1584 st = pa_sink_input_get_state(i);
1585 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1587 if (st == PA_SINK_INPUT_CORKED)
1588 continue;
1590 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1591 continue;
1593 ret ++;
1596 if (s->monitor_source)
1597 ret += pa_source_check_suspend(s->monitor_source);
1599 return ret;
1602 /* Called from the IO thread */
1603 static void sync_input_volumes_within_thread(pa_sink *s) {
1604 pa_sink_input *i;
1605 void *state = NULL;
1607 pa_sink_assert_ref(s);
1609 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1610 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1611 continue;
1613 i->thread_info.soft_volume = i->soft_volume;
1614 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
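/* pa_sink_process_msg() below is the generic message handler shared by all
 * sinks: ADD_INPUT/REMOVE_INPUT attach and detach inputs inside the IO
 * thread, START_MOVE/FINISH_MOVE additionally compensate for the data still
 * buffered in the sink while a stream is being moved, SET_VOLUME/SYNC_VOLUMES
 * and SET_MUTE copy the soft volume/mute state into thread_info and request
 * rewinds where needed, SET_STATE propagates suspend/resume to the inputs,
 * and the remaining messages query or set latency, max_rewind and
 * max_request. */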
1618 /* Called from IO thread, except when it is not */
1619 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1620 pa_sink *s = PA_SINK(o);
1621 pa_sink_assert_ref(s);
1623 switch ((pa_sink_message_t) code) {
1625 case PA_SINK_MESSAGE_ADD_INPUT: {
1626 pa_sink_input *i = PA_SINK_INPUT(userdata);
1628 /* If you change anything here, make sure to change the
1629 * sink input handling a few lines down at
1630 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1632 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1634 /* Since the caller sleeps in pa_sink_input_put(), we can
1635 * safely access data outside of thread_info even though
1636 * it is mutable */
1638 if ((i->thread_info.sync_prev = i->sync_prev)) {
1639 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1640 pa_assert(i->sync_prev->sync_next == i);
1641 i->thread_info.sync_prev->thread_info.sync_next = i;
1644 if ((i->thread_info.sync_next = i->sync_next)) {
1645 pa_assert(i->sink == i->thread_info.sync_next->sink);
1646 pa_assert(i->sync_next->sync_prev == i);
1647 i->thread_info.sync_next->thread_info.sync_prev = i;
1650 pa_assert(!i->thread_info.attached);
1651 i->thread_info.attached = TRUE;
1653 if (i->attach)
1654 i->attach(i);
1656 pa_sink_input_set_state_within_thread(i, i->state);
1658 /* The requested latency of the sink input needs to be
1659 * fixed up and then configured on the sink */
1661 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1662 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1664 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1665 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1667 /* We don't rewind here automatically. This is left to the
1668 * sink input implementor because some sink inputs need a
1669 * slow start, i.e. need some time to buffer client
1670 * samples before beginning streaming. */
1672 /* In flat volume mode we need to update the volume as
1673 * well */
1674 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1677 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1678 pa_sink_input *i = PA_SINK_INPUT(userdata);
1680 /* If you change anything here, make sure to change the
1681 * sink input handling a few lines down at
1682              * PA_SINK_MESSAGE_START_MOVE, too. */
1684 if (i->detach)
1685 i->detach(i);
1687 pa_sink_input_set_state_within_thread(i, i->state);
1689 pa_assert(i->thread_info.attached);
1690 i->thread_info.attached = FALSE;
1692 /* Since the caller sleeps in pa_sink_input_unlink(),
1693 * we can safely access data outside of thread_info even
1694 * though it is mutable */
1696 pa_assert(!i->sync_prev);
1697 pa_assert(!i->sync_next);
1699 if (i->thread_info.sync_prev) {
1700 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1701 i->thread_info.sync_prev = NULL;
1704 if (i->thread_info.sync_next) {
1705 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1706 i->thread_info.sync_next = NULL;
1709 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1710 pa_sink_input_unref(i);
1712 pa_sink_invalidate_requested_latency(s);
1713 pa_sink_request_rewind(s, (size_t) -1);
1715 /* In flat volume mode we need to update the volume as
1716 * well */
1717 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1720 case PA_SINK_MESSAGE_START_MOVE: {
1721 pa_sink_input *i = PA_SINK_INPUT(userdata);
1723 /* We don't support moving synchronized streams. */
1724 pa_assert(!i->sync_prev);
1725 pa_assert(!i->sync_next);
1726 pa_assert(!i->thread_info.sync_next);
1727 pa_assert(!i->thread_info.sync_prev);
1729 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1730 pa_usec_t usec = 0;
1731 size_t sink_nbytes, total_nbytes;
1733 /* Get the latency of the sink */
1734 if (!(s->flags & PA_SINK_LATENCY) ||
1735 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1736 usec = 0;
1738 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1739 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1741 if (total_nbytes > 0) {
1742 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1743 i->thread_info.rewrite_flush = TRUE;
1744 pa_sink_input_process_rewind(i, sink_nbytes);
1748 if (i->detach)
1749 i->detach(i);
1751 pa_assert(i->thread_info.attached);
1752 i->thread_info.attached = FALSE;
1754 /* Let's remove the sink input ...*/
1755 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1756 pa_sink_input_unref(i);
1758 pa_sink_invalidate_requested_latency(s);
1760 pa_log_debug("Requesting rewind due to started move");
1761 pa_sink_request_rewind(s, (size_t) -1);
1763 /* In flat volume mode we need to update the volume as
1764 * well */
1765 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1768 case PA_SINK_MESSAGE_FINISH_MOVE: {
1769 pa_sink_input *i = PA_SINK_INPUT(userdata);
1771 /* We don't support moving synchronized streams. */
1772 pa_assert(!i->sync_prev);
1773 pa_assert(!i->sync_next);
1774 pa_assert(!i->thread_info.sync_next);
1775 pa_assert(!i->thread_info.sync_prev);
1777 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1779 pa_assert(!i->thread_info.attached);
1780 i->thread_info.attached = TRUE;
1782 if (i->attach)
1783 i->attach(i);
1785 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1786 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1788 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1789 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1791 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1792 pa_usec_t usec = 0;
1793 size_t nbytes;
1795 /* Get the latency of the sink */
1796 if (!(s->flags & PA_SINK_LATENCY) ||
1797 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1798 usec = 0;
1800 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1802 if (nbytes > 0)
1803 pa_sink_input_drop(i, nbytes);
1805 pa_log_debug("Requesting rewind due to finished move");
1806 pa_sink_request_rewind(s, nbytes);
1809 /* In flat volume mode we need to update the volume as
1810 * well */
1811 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1814 case PA_SINK_MESSAGE_SET_VOLUME:
1816 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1817 s->thread_info.soft_volume = s->soft_volume;
1818 pa_sink_request_rewind(s, (size_t) -1);
1821 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1822 return 0;
1824 /* Fall through ... */
1826 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1827 sync_input_volumes_within_thread(s);
1828 return 0;
1830 case PA_SINK_MESSAGE_GET_VOLUME:
1831 return 0;
1833 case PA_SINK_MESSAGE_SET_MUTE:
1835 if (s->thread_info.soft_muted != s->muted) {
1836 s->thread_info.soft_muted = s->muted;
1837 pa_sink_request_rewind(s, (size_t) -1);
1840 return 0;
1842 case PA_SINK_MESSAGE_GET_MUTE:
1843 return 0;
1845 case PA_SINK_MESSAGE_SET_STATE: {
1847 pa_bool_t suspend_change =
1848 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1849 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1851 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1853 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1854 s->thread_info.rewind_nbytes = 0;
1855 s->thread_info.rewind_requested = FALSE;
1858 if (suspend_change) {
1859 pa_sink_input *i;
1860 void *state = NULL;
1862 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1863 if (i->suspend_within_thread)
1864 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1867 return 0;
1870 case PA_SINK_MESSAGE_DETACH:
1872 /* Detach all streams */
1873 pa_sink_detach_within_thread(s);
1874 return 0;
1876 case PA_SINK_MESSAGE_ATTACH:
1878 /* Reattach all streams */
1879 pa_sink_attach_within_thread(s);
1880 return 0;
1882 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1884 pa_usec_t *usec = userdata;
1885 *usec = pa_sink_get_requested_latency_within_thread(s);
1887 if (*usec == (pa_usec_t) -1)
1888 *usec = s->thread_info.max_latency;
1890 return 0;
1893 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1894 pa_usec_t *r = userdata;
1896 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1898 return 0;
1901 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1902 pa_usec_t *r = userdata;
1904 r[0] = s->thread_info.min_latency;
1905 r[1] = s->thread_info.max_latency;
1907 return 0;
1910 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1912 *((size_t*) userdata) = s->thread_info.max_rewind;
1913 return 0;
1915 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1917 *((size_t*) userdata) = s->thread_info.max_request;
1918 return 0;
1920 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1922 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1923 return 0;
1925 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1927 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1928 return 0;
1930 case PA_SINK_MESSAGE_GET_LATENCY:
1931 case PA_SINK_MESSAGE_MAX:
1935 return -1;
1938 /* Called from main thread */
1939 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1940 pa_sink *sink;
1941 uint32_t idx;
1942 int ret = 0;
1944 pa_core_assert_ref(c);
1945 pa_assert(cause != 0);
1947 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1948 int r;
1950 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1951 ret = r;
1954 return ret;
1957 /* Called from main thread */
1958 void pa_sink_detach(pa_sink *s) {
1959 pa_sink_assert_ref(s);
1960 pa_assert(PA_SINK_IS_LINKED(s->state));
1962 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1965 /* Called from main thread */
1966 void pa_sink_attach(pa_sink *s) {
1967 pa_sink_assert_ref(s);
1968 pa_assert(PA_SINK_IS_LINKED(s->state));
1970 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
/* Called from IO thread */
void pa_sink_detach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
        if (i->detach)
            i->detach(i);

    if (s->monitor_source)
        pa_source_detach_within_thread(s->monitor_source);
}

/* Called from IO thread */
void pa_sink_attach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
        if (i->attach)
            i->attach(i);

    if (s->monitor_source)
        pa_source_attach_within_thread(s->monitor_source);
}
/* Called from IO thread */
void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes == (size_t) -1)
        nbytes = s->thread_info.max_rewind;

    nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);

    if (s->thread_info.rewind_requested &&
        nbytes <= s->thread_info.rewind_nbytes)
        return;

    s->thread_info.rewind_nbytes = nbytes;
    s->thread_info.rewind_requested = TRUE;

    if (s->request_rewind)
        s->request_rewind(s);
}
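/*
 * Example of the coalescing behaviour above (byte counts are illustrative):
 * if a rewind of 4096 bytes is already pending, a subsequent
 * pa_sink_request_rewind(s, 1024) is a no-op, while
 * pa_sink_request_rewind(s, 8192) grows the pending rewind, still capped at
 * s->thread_info.max_rewind. Passing (size_t) -1 asks for the largest
 * possible rewind, and nothing is requested while the sink is suspended.
 */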
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);

    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
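/*
 * Worked example for the aggregation above (numbers are illustrative): with
 * two inputs requesting 25 ms and 40 ms and a monitor source requesting
 * nothing ((pa_usec_t) -1), the smallest valid request wins, so the result is
 * 25 ms, clamped into [thread_info.min_latency, thread_info.max_latency]. If
 * no stream requests anything, (pa_usec_t) -1 is returned and the
 * PA_SINK_MESSAGE_GET_REQUESTED_LATENCY handler above substitutes
 * max_latency.
 */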
/* Called from main thread */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
    return usec;
}
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);

    if (max_rewind == s->thread_info.max_rewind)
        return;

    s->thread_info.max_rewind = max_rewind;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
    }

    if (s->monitor_source)
        pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
}

/* Called from main thread */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
    void *state = NULL;

    pa_sink_assert_ref(s);

    if (max_request == s->thread_info.max_request)
        return;

    s->thread_info.max_request = max_request;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;

        while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
            pa_sink_input_update_max_request(i, s->thread_info.max_request);
    }
}

/* Called from main thread */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
/* Called from IO thread */
void pa_sink_invalidate_requested_latency(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);

    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return;

    s->thread_info.requested_latency_valid = FALSE;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
/* Called from main thread */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);

    /* min_latency == 0:          no limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else
        pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
}
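/*
 * Usage sketch (the call site is hypothetical): passing 0 for either bound
 * selects the corresponding absolute default, i.e.
 *
 *     pa_sink_set_latency_range(s, 0, 0);
 *
 * ends up as [ABSOLUTE_MIN_LATENCY, ABSOLUTE_MAX_LATENCY]. Any narrower range
 * is only valid for sinks that advertise PA_SINK_DYNAMIC_LATENCY; otherwise
 * the assertion above fires.
 */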
/* Called from main thread */
void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
    pa_sink_assert_ref(s);
    pa_assert(min_latency);
    pa_assert(max_latency);

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2] = { 0, 0 };

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);

        *min_latency = r[0];
        *max_latency = r[1];
    } else {
        *min_latency = s->thread_info.min_latency;
        *max_latency = s->thread_info.max_latency;
    }
}
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    void *state = NULL;

    pa_sink_assert_ref(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;

        while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    pa_sink_invalidate_requested_latency(s);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
/* Called from main thread, before the sink is put */
void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);

    pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);

    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    s->fixed_latency = latency;
    pa_source_set_fixed_latency(s->monitor_source, latency);
}
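/*
 * In other words: a requested fixed latency of 0 is raised to
 * ABSOLUTE_MIN_LATENCY and an excessive value is capped at
 * ABSOLUTE_MAX_LATENCY (both defined at the top of this file) before being
 * stored and propagated to the monitor source.
 */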
/* Called from main context */
size_t pa_sink_get_max_rewind(pa_sink *s) {
    size_t r;
    pa_sink_assert_ref(s);

    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_rewind;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);

    return r;
}

/* Called from main context */
size_t pa_sink_get_max_request(pa_sink *s) {
    size_t r;
    pa_sink_assert_ref(s);

    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_request;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);

    return r;
}
/* Called from main context */
int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
    pa_device_port *port;

    pa_assert(s);

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!s->ports)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    if ((s->set_port(s, port)) < 0)
        return -PA_ERR_NOENTITY;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    return 0;
}
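/*
 * Caller-side sketch (the port name is hypothetical and depends on the card
 * profile set):
 *
 *     if (pa_sink_set_port(s, "analog-output-headphones", TRUE) < 0)
 *         pa_log_warn("Failed to switch sink port");
 *
 * Possible failures as implemented above: -PA_ERR_NOTIMPLEMENTED when the
 * sink has no set_port() callback, and -PA_ERR_NOENTITY when the sink has no
 * ports, the name is unknown, or the callback itself fails.
 */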
/* Called from main context */
pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
    const char *ff, *c, *t = NULL, *s = "", *profile, *bus;

    pa_assert(p);

    if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
        return TRUE;

    if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {

        if (pa_streq(ff, "microphone"))
            t = "audio-input-microphone";
        else if (pa_streq(ff, "webcam"))
            t = "camera-web";
        else if (pa_streq(ff, "computer"))
            t = "computer";
        else if (pa_streq(ff, "handset"))
            t = "phone";
        else if (pa_streq(ff, "portable"))
            t = "multimedia-player";
        else if (pa_streq(ff, "tv"))
            t = "video-display";

        /*
         * The following icons are not part of the icon naming spec,
         * because Rodney Dawes sucks as the maintainer of that spec.
         *
         * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
         */
        else if (pa_streq(ff, "headset"))
            t = "audio-headset";
        else if (pa_streq(ff, "headphone"))
            t = "audio-headphones";
        else if (pa_streq(ff, "speaker"))
            t = "audio-speakers";
        else if (pa_streq(ff, "hands-free"))
            t = "audio-handsfree";
    }

    if (!t)
        if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
            if (pa_streq(c, "modem"))
                t = "modem";

    if (!t) {
        if (is_sink)
            t = "audio-card";
        else
            t = "audio-input-microphone";
    }

    if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
        if (strstr(profile, "analog"))
            s = "-analog";
        else if (strstr(profile, "iec958"))
            s = "-iec958";
        else if (strstr(profile, "hdmi"))
            s = "-hdmi";
    }

    bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);

    pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));

    return TRUE;
}
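/*
 * Example of the icon-name composition above (property values hypothetical):
 * form factor "headset", a profile name containing "analog" and bus "usb"
 * yield PA_PROP_DEVICE_ICON_NAME = "audio-headset-analog-usb"; without a bus
 * property the trailing "-usb" part is simply omitted.
 */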
pa_bool_t pa_device_init_description(pa_proplist *p) {
    const char *s, *d = NULL, *k;
    pa_assert(p);

    if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
        return TRUE;

    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
        if (pa_streq(s, "internal"))
            d = _("Internal Audio");

    if (!d)
        if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
            if (pa_streq(s, "modem"))
                d = _("Modem");

    if (!d)
        d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);

    if (!d)
        return FALSE;

    k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);

    if (d && k)
        pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
    else if (d)
        pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);

    return TRUE;
}
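/*
 * Example (property values hypothetical): a device with
 * PA_PROP_DEVICE_PRODUCT_NAME "HDA Intel" and
 * PA_PROP_DEVICE_PROFILE_DESCRIPTION "Analog Stereo" ends up with the
 * description "HDA Intel Analog Stereo"; without a profile description the
 * product name alone is used.
 */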
pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
    const char *s;
    pa_assert(p);

    if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
        return TRUE;

    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
        if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
            pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
            return TRUE;
        }

    return FALSE;