core: rework how stream volumes affect sink volumes
[pulseaudio-mirror.git] / src / pulsecore / sink.c
blobd69f03882201b07604b30bb3049cf823a289bd51
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
47 #include "sink.h"
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Boilerplate: generates the public reference-counting/class helpers
 * (pa_sink_ref(), pa_sink_unref(), PA_SINK() cast, ...) for pa_sink,
 * derived from pa_msgobject. */
PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);

/* Destructor invoked when the refcount drops to zero; defined below. */
static void sink_free(pa_object *s);
/* Initialize a pa_sink_new_data structure in-place: zero all fields and
 * allocate a fresh, empty property list. Returns its argument so calls
 * can be chained. Pair with pa_sink_new_data_done() to release it. */
pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
    pa_assert(data);

    pa_zero(*data);
    data->proplist = pa_proplist_new();

    return data;
}
/* Set (or replace) the sink name in the new-data struct. Takes a copy of
 * 'name'; any previously set name is freed. */
void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
    pa_assert(data);

    pa_xfree(data->name);
    data->name = pa_xstrdup(name);
}
/* Set the sample spec in the new-data struct. Passing NULL clears the
 * "is set" flag so pa_sink_new() will reject the data (the spec is
 * mandatory there). */
void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
    pa_assert(data);

    if ((data->sample_spec_is_set = !!spec))
        data->sample_spec = *spec;
}
/* Set the channel map in the new-data struct. Passing NULL clears the
 * "is set" flag; pa_sink_new() then derives a default map from the
 * sample spec. */
void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
    pa_assert(data);

    if ((data->channel_map_is_set = !!map))
        data->channel_map = *map;
}
/* Set the initial volume in the new-data struct. Passing NULL clears the
 * "is set" flag; pa_sink_new() then resets the volume to norm. */
void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
    pa_assert(data);

    if ((data->volume_is_set = !!volume))
        data->volume = *volume;
}
/* Set the initial mute state in the new-data struct. Unlike the other
 * setters there is no "unset" form; calling this always marks the field
 * as explicitly set. '!!' normalizes any non-zero value to TRUE. */
void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
    pa_assert(data);

    data->muted_is_set = TRUE;
    data->muted = !!mute;
}
/* Set (or replace) the requested active port name in the new-data
 * struct. Takes a copy of 'port'; any previously set name is freed. */
void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
    pa_assert(data);

    pa_xfree(data->active_port);
    data->active_port = pa_xstrdup(port);
}
/* Release everything owned by a pa_sink_new_data structure: the
 * property list, the port hashmap and its entries (unless pa_sink_new()
 * already stole it), and the name/port strings. Does not free 'data'
 * itself — it is typically stack-allocated by the caller. */
void pa_sink_new_data_done(pa_sink_new_data *data) {
    pa_assert(data);

    pa_proplist_free(data->proplist);

    if (data->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(data->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(data->ports, NULL, NULL);
    }

    pa_xfree(data->name);
    pa_xfree(data->active_port);
}
/* Allocate a new device port with the given name and description.
 * 'extra' bytes are allocated past the (aligned) struct so callers can
 * append implementation-private data. Priority starts at 0; the caller
 * may raise it afterwards. Free with pa_device_port_free(). */
pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
    pa_device_port *p;

    pa_assert(name);

    p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
    p->name = pa_xstrdup(name);
    p->description = pa_xstrdup(description);

    p->priority = 0;

    return p;
}
/* Free a device port created with pa_device_port_new(), including its
 * owned name and description strings. */
void pa_device_port_free(pa_device_port *p) {
    pa_assert(p);

    pa_xfree(p->name);
    pa_xfree(p->description);
    pa_xfree(p);
}
/* Called from main context */
/* Clear all implementor-provided callbacks on the sink. Used during
 * construction and again on unlink so a half-torn-down sink can never
 * call back into a module that is going away. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
/* Called from main context */
/* Create a new sink object from the (validated and defaulted) new-data
 * description. Registers the name, fires the NEW and FIXATE hooks,
 * initializes all fields and thread_info defaults, inserts the sink into
 * the core's idxset, and creates the matching ".monitor" source.
 * Returns NULL on failure. On success the sink is in PA_SINK_INIT state;
 * the caller must still set up asyncmsgq/latency and call
 * pa_sink_put(). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the name first; the registry may rewrite it (e.g. append a
     * suffix) depending on data->namereg_fail. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the sample spec if none was given. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, then fill in standard
     * device description/icon/role properties. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    /* reference_volume and real_volume start out identical; they diverge
     * once flat volumes / hardware volume handling come into play. */
    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No port was requested (or it didn't exist): fall back to the
     * highest-priority port available. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the monitor source mirroring this sink's format, latency
     * flags and description. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
/* Called from main context */
/* Transition the sink to 'state'. First asks the implementor's
 * set_state() callback, then synchronously informs the IO thread; if
 * the IO thread rejects the change the implementor callback is rolled
 * back. On success, fires the state-changed hook/subscription event and
 * notifies sink inputs about suspend/resume transitions. Returns 0 on
 * success or a negative error code. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Only transitions into or out of SUSPENDED count as suspend changes. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Roll back the implementor callback since the IO thread refused. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
/* Called from main context */
/* Complete sink construction after the implementor has filled in
 * asyncmsgq, latency bounds and any volume-related flags. Derives the
 * DECIBEL/FLAT volume flags, snapshots volume state into thread_info,
 * validates a set of construction invariants, moves the sink (and its
 * monitor source) into IDLE state, and announces the new sink via
 * subscription event and PUT hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Software volume is always applied in dB. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* We assume that if the sink implementor changed the default
     * volume he did so in real_volume, because that is the usual
     * place where he is supposed to place his changes. */
    s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
/* Called from main context */
/* Detach the sink from the core: unregister the name, remove it from
 * the core's and card's sink sets, kill all remaining inputs, move the
 * sink into UNLINKED state, reset implementor callbacks and unlink the
 * monitor source. Fires the UNLINK hook before and UNLINK_POST after,
 * but only if the sink was actually linked. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    while ((i = pa_idxset_first(s->inputs, NULL))) {
        /* Each kill must actually remove the input; seeing the same
         * input twice would mean an endless loop. */
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
/* Called from main context */
/* Destructor, invoked via the pa_object free hook once the refcount
 * hits zero. Unlinks first if still linked, then releases the monitor
 * source, both input containers, the cached silence block, strings,
 * property list and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* The IO thread's hashmap holds its own references on the inputs;
     * drop them before freeing the map. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
/* Called from main context, and not while the IO thread is active, please */
/* Attach the message queue used to talk to the IO thread, and propagate
 * it to the monitor source so both share the same queue. */
void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    s->asyncmsgq = q;

    if (s->monitor_source)
        pa_source_set_asyncmsgq(s->monitor_source, q);
}
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
579 if (mask == 0)
580 return;
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
585 s->flags = (s->flags & ~mask) | (value & mask);
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
/* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object driving the IO thread, and share it with the
 * monitor source. */
void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    s->thread_info.rtpoll = p;

    if (s->monitor_source)
        pa_source_set_rtpoll(s->monitor_source, p);
}
/* Called from main context */
/* Recompute the RUNNING/IDLE state from the current number of users.
 * A suspended sink is left alone — resuming is pa_sink_suspend()'s job.
 * Returns 0 on success or a negative error from sink_set_state(). */
int pa_sink_update_status(pa_sink*s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
/* Called from main context */
/* Add or remove a suspend cause bit (mirrored on the monitor source).
 * The sink is suspended while any cause bit is set and resumed to
 * RUNNING/IDLE once the last one clears. Returns 0 if the effective
 * state is already correct, otherwise the result of sink_set_state(). */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already in the state the causes demand? Then nothing to do. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
/* Called from main context */
/* Begin moving every input away from this sink. Each input that accepts
 * the move is referenced and pushed onto queue 'q' (allocated here if
 * NULL); inputs that refuse are dropped again. The caller later passes
 * the queue to pa_sink_move_all_finish() or pa_sink_move_all_fail(). */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next entry before acting on the current one, since
     * starting a move removes the input from s->inputs. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
/* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): attach each
 * queued input to sink 's' ('save' marks the routing as user-chosen for
 * restore modules). Inputs that cannot be attached are failed. The
 * queue's references are dropped and the queue itself is freed. */
void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(q);

    while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
        if (pa_sink_input_finish_move(i, s, save) < 0)
            pa_sink_input_fail_move(i);

        pa_sink_input_unref(i);
    }

    pa_queue_free(q, NULL, NULL);
}
/* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): fail every queued
 * input, drop the queue's references and free the queue. */
void pa_sink_move_all_fail(pa_queue *q) {
    pa_sink_input *i;

    pa_assert_ctl_context();
    pa_assert(q);

    while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
        pa_sink_input_fail_move(i);
        pa_sink_input_unref(i);
    }

    pa_queue_free(q, NULL, NULL);
}
/* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes: clear the pending request, then
 * forward the rewind to every attached input and (for real rewinds) to
 * the monitor source. A zero-byte call with no pending request is a
 * no-op; while suspended the request is simply discarded. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
/* Called from IO thread context */
/* Peek up to *length bytes from each attached input into the 'info'
 * array (at most 'maxinfo' entries). *length is shrunk to the shortest
 * chunk seen so all entries can be mixed over a common span. Pure
 * silence chunks are dropped immediately and do not occupy an entry,
 * though they still bound *length. Each stored entry holds a reference
 * on its input (via info->userdata) and on its memblock; inputs_drop()
 * releases them. Returns the number of entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silence contributes nothing to the mix; skip the entry but
         * keep its length constraint (applied above). */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
/* Called from IO thread context */
/* Counterpart to fill_mix_info(): after 'result' has been rendered,
 * advance every input by result->length, feed per-input data (with the
 * input's volume applied) to any direct outputs on the monitor source,
 * post the mixed result to the monitor source, and release all
 * references held by the 'info' array. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array.
         * 'p' persists across iterations so that when the hashmap order
         * matches the array order this lookup is O(1) per input. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* Direct outputs get the input's own data with its
                     * volume applied, not the mixed result. */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* No data from this input (e.g. it peeked silence):
                     * feed silence of the same span instead. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
/* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into 'result', allocating
 * (or referencing) the memblock as needed. length == 0 picks a default
 * mix-buffer size. Fast paths: a suspended sink or zero inputs return
 * (a slice of) the cached silence block; a single input returns its
 * chunk with soft volume/mute applied; multiple inputs go through
 * pa_mix(). Finishes via inputs_drop(), which advances the inputs and
 * feeds the monitor source. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
        return;
    }

    pa_sink_ref(s);

    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never ask for more than one mempool block can hold. */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {

        /* Nothing to mix: hand out the shared silence block. */
        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: reuse its chunk directly, then apply the
         * combined sink-soft x stream volume in place if needed. */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
                                    s->core->mempool,
                                    result,
                                    &s->sample_spec,
                                    result->length);
        } else if (!pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        void *ptr;

        /* Multiple inputs: mix into a freshly allocated block. */
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
/* Called from IO thread context */
/* Like pa_sink_render(), but mixes into the caller-provided 'target'
 * chunk instead of allocating one. May shorten target->length if less
 * data is available. Suspended sinks and the zero-input case fill the
 * target with silence; a single input is volume-adjusted and copied;
 * multiple inputs are mixed directly into the target's memory. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            /* Work on a reference of the input's chunk so the volume
             * scaling does not clobber shared data. */
            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
/* Called from IO thread context */
/* Fill the target chunk completely: repeatedly call
 * pa_sink_render_into() on the unfilled remainder until target->length
 * bytes have been produced (a single call may return less, e.g. when it
 * is clamped to the mempool block size). */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    l = target->length;
    d = 0;
    while (l > 0) {
        /* 'd' tracks how far into the target we already are. */
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
/* Called from IO thread context */
/* Render exactly 'length' bytes into 'result'. Starts with a normal
 * pa_sink_render(); if that produced less than requested, the chunk is
 * made writable at full size and the remainder is filled in with
 * pa_sink_render_into_full(). */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_sink_ref(s);

    pa_sink_render(s, length, result);

    if (result->length < length) {
        pa_memchunk chunk;

        /* Grow the block (copying if shared) so we can append in place. */
        pa_memchunk_make_writable(result, length);

        chunk.memblock = result->memblock;
        chunk.index = result->index + result->length;
        chunk.length = length - result->length;

        pa_sink_render_into_full(s, &chunk);

        result->length = length;
    }

    pa_sink_unref(s);
}
/* Called from main thread */
/* Query the sink's current playback latency by sending a synchronous
 * GET_LATENCY message to the IO thread. Returns 0 when the sink is
 * suspended or does not advertise PA_SINK_LATENCY. */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
/* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): dispatches GET_LATENCY
 * directly through process_msg() rather than the message queue.
 * Returns 0 when suspended or when PA_SINK_LATENCY is not set. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): pa_usec_t is unsigned, so this -1 wraps to the
     * maximum value rather than a negative error — callers presumably
     * treat it as "invalid"; confirm against call sites. */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
/* Much like pa_cvolume_remap(), but tries to minimize impact when
 * mapping from sink input to sink volumes:
 *
 * If template is a possible remapping from v it is used instead of
 * remapping anew.
 *
 * If the channel maps don't match we set an all-channel volume on the
 * sink to ensure that changing a volume on one stream has no effect
 * that cannot be compensated for in another stream that does not have
 * the same channel map as the sink.
 *
 * Returns v (modified in place), or NULL if either volume is not
 * compatible with its channel map. */
static pa_cvolume* cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);

    pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(v, from), NULL);
    pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(template, to), NULL);

    /* Identical maps need no remapping at all */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back to 'from' reproduces v, then the
     * template is a valid remapping of v -- use it verbatim. */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Fall back: flatten to the maximum of v on every target channel */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
/* Called from main context.
 *
 * Recomputes i->reference_ratio for every input from the sink's
 * reference volume, i.e. the per-stream factor such that
 * i->volume == s->reference_volume * i->reference_ratio. */
static void compute_reference_ratios(pa_sink *s) {
    uint32_t idx;
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * Calculates the reference ratio from the sink's reference
         * volume. This basically calculates:
         *
         * i->reference_ratio = i->volume / s->reference_volume
         */

        /* Bring the sink volume into the stream's channel map first */
        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->reference_ratio.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            /* We don't update when the sink volume is 0 anyway */
            if (remapped.values[c] <= PA_VOLUME_MUTED)
                continue;

            /* Don't update the reference ratio unless necessary
             * (i.e. unless multiplying back does not reproduce the
             * stream volume exactly) */
            if (pa_sw_volume_multiply(
                        i->reference_ratio.values[c],
                        remapped.values[c]) == i->volume.values[c])
                continue;

            i->reference_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
        }
    }
}
/* Called from main context.
 *
 * Recomputes i->real_ratio and i->soft_volume for every input from the
 * sink's real (hardware) volume. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink volume into the stream's channel map first */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }
    }

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
}
/* Called from main thread.
 *
 * Determines the maximum volume of all streams and sets s->real_volume
 * accordingly, then refreshes the per-stream real ratios/soft volumes. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    if (pa_idxset_isempty(s->inputs)) {
        /* In the special case that we have no sink input we leave the
         * volume unmodified. */
        s->real_volume = s->reference_volume;
        return;
    }

    /* Start from silence and merge every input's volume in */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, &s->real_volume, &i->channel_map, &s->channel_map);
        pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
    }

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
/* Called from main thread.
 *
 * This is called whenever the sink volume changes that is not caused
 * by a sink input volume change. We need to fix up the sink input
 * volumes accordingly, keeping each stream's reference_ratio fixed. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume, remapped;

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
/* Called from main thread.
 *
 * Sets the sink's volume.
 *
 * volume:   new reference volume, or NULL to resynchronize the sink's
 *           real/reference volumes from the stream volumes (flat
 *           volume mode only).
 * send_msg: if TRUE, push the resulting soft volume to the IO thread.
 * save:     if TRUE, mark the volume for saving to persistent storage. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        if (pa_cvolume_compatible(volume, &s->sample_spec))
            s->reference_volume = *volume;
        else
            /* Mono special case: scale all channels to the given level */
            pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1459 /* Called from main thread. Only to be called by sink implementor */
1460 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1461 pa_sink_assert_ref(s);
1462 pa_assert_ctl_context();
1464 if (!volume)
1465 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1466 else
1467 s->soft_volume = *volume;
1469 if (PA_SINK_IS_LINKED(s->state))
1470 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1471 else
1472 s->thread_info.soft_volume = s->soft_volume;
/* Called from main thread.
 *
 * This is called when the hardware's real volume changes due to some
 * external event. We copy the real volume into our reference volume
 * and then rebuild the stream volumes based on i->real_ratio which
 * should stay fixed. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Nothing to do if the hardware volume did not actually move */
    if (pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1540 /* Called from main thread */
1541 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1542 pa_sink_assert_ref(s);
1543 pa_assert_ctl_context();
1544 pa_assert(PA_SINK_IS_LINKED(s->state));
1546 if (s->refresh_volume || force_refresh) {
1547 struct pa_cvolume old_real_volume;
1549 old_real_volume = s->real_volume;
1551 if (s->get_volume)
1552 s->get_volume(s);
1554 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1556 propagate_real_volume(s, &old_real_volume);
1559 return &s->reference_volume;
1562 /* Called from main thread */
1563 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1564 pa_cvolume old_real_volume;
1566 pa_sink_assert_ref(s);
1567 pa_assert_ctl_context();
1568 pa_assert(PA_SINK_IS_LINKED(s->state));
1570 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1572 old_real_volume = s->real_volume;
1573 s->real_volume = *new_real_volume;
1575 propagate_real_volume(s, &old_real_volume);
1578 /* Called from main thread */
1579 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1580 pa_bool_t old_muted;
1582 pa_sink_assert_ref(s);
1583 pa_assert_ctl_context();
1584 pa_assert(PA_SINK_IS_LINKED(s->state));
1586 old_muted = s->muted;
1587 s->muted = mute;
1588 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1590 if (s->set_mute)
1591 s->set_mute(s);
1593 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1595 if (old_muted != s->muted)
1596 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from main thread.
 *
 * Returns the sink's mute state, optionally re-querying the hardware
 * first (get_mute callback plus a synchronous GET_MUTE message). If
 * the hardware state differs from what we had, the change is treated
 * as user-initiated (saved) and the IO thread's soft mute is resynced. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        if (s->get_mute)
            s->get_mute(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
1627 /* Called from main thread */
1628 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1629 pa_sink_assert_ref(s);
1630 pa_assert_ctl_context();
1631 pa_assert(PA_SINK_IS_LINKED(s->state));
1633 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1635 if (s->muted == new_muted)
1636 return;
1638 s->muted = new_muted;
1639 s->save_muted = TRUE;
1641 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1644 /* Called from main thread */
1645 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1646 pa_sink_assert_ref(s);
1647 pa_assert_ctl_context();
1649 if (p)
1650 pa_proplist_update(s->proplist, mode, p);
1652 if (PA_SINK_IS_LINKED(s->state)) {
1653 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1654 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1657 return TRUE;
/* Called from main thread.
 *
 * Sets (or, when description is NULL, removes) the human-readable
 * device description property, renames the monitor source to match,
 * and notifies hooks/subscribers. No-op if nothing actually changes. */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
void pa_sink_set_description(pa_sink *s, const char *description) {
    const char *old;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Removing a description that is not set is a no-op */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Unchanged description is a no-op too */
    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (s->monitor_source) {
        char *n;

        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);
        pa_xfree(n);
    }

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
    }
}
1694 /* Called from main thread */
1695 unsigned pa_sink_linked_by(pa_sink *s) {
1696 unsigned ret;
1698 pa_sink_assert_ref(s);
1699 pa_assert_ctl_context();
1700 pa_assert(PA_SINK_IS_LINKED(s->state));
1702 ret = pa_idxset_size(s->inputs);
1704 /* We add in the number of streams connected to us here. Please
1705 * note the asymmmetry to pa_sink_used_by()! */
1707 if (s->monitor_source)
1708 ret += pa_source_linked_by(s->monitor_source);
1710 return ret;
1713 /* Called from main thread */
1714 unsigned pa_sink_used_by(pa_sink *s) {
1715 unsigned ret;
1717 pa_sink_assert_ref(s);
1718 pa_assert_ctl_context();
1719 pa_assert(PA_SINK_IS_LINKED(s->state));
1721 ret = pa_idxset_size(s->inputs);
1722 pa_assert(ret >= s->n_corked);
1724 /* Streams connected to our monitor source do not matter for
1725 * pa_sink_used_by()!.*/
1727 return ret - s->n_corked;
1730 /* Called from main thread */
1731 unsigned pa_sink_check_suspend(pa_sink *s) {
1732 unsigned ret;
1733 pa_sink_input *i;
1734 uint32_t idx;
1736 pa_sink_assert_ref(s);
1737 pa_assert_ctl_context();
1739 if (!PA_SINK_IS_LINKED(s->state))
1740 return 0;
1742 ret = 0;
1744 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1745 pa_sink_input_state_t st;
1747 st = pa_sink_input_get_state(i);
1748 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1750 if (st == PA_SINK_INPUT_CORKED)
1751 continue;
1753 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1754 continue;
1756 ret ++;
1759 if (s->monitor_source)
1760 ret += pa_source_check_suspend(s->monitor_source);
1762 return ret;
/* Called from the IO thread.
 *
 * Copies each input's main-thread soft_volume into its thread_info
 * copy and requests a rewind so the new volume takes effect on already
 * rendered data. If a volume ramp is pending (before_ramping_v set),
 * the new value is parked in future_soft_volume instead so the ramp
 * code can apply it. */
static void sync_input_volumes_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        /* Ramp pending: remember the target volume for later */
        if (pa_atomic_load(&i->before_ramping_v))
            i->thread_info.future_soft_volume = i->soft_volume;

        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
            continue;

        /* Apply immediately only when no ramp is in flight */
        if (!pa_atomic_load(&i->before_ramping_v))
            i->thread_info.soft_volume = i->soft_volume;
        pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
    }
}
/* Called from IO thread, except when it is not.
 *
 * Central asyncmsgq message handler for the sink. Handles stream
 * attach/detach, stream moves, volume/mute synchronization, state
 * changes and latency queries. GET_LATENCY (and MAX) deliberately fall
 * out of the switch and return -1: a real implementation must be
 * provided by the sink implementor's own process_msg(). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unhook this input from its synchronization group */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    /* Ask the input to regenerate all data still in
                     * flight, so none of it is lost by the move */
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Drop what the new sink already has queued, so the
                 * stream does not play the overlap twice */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Did we cross the suspended/opened boundary in either
             * direction? */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                /* A pending rewind is meaningless while suspended */
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    return -1;
}
2113 /* Called from main thread */
2114 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2115 pa_sink *sink;
2116 uint32_t idx;
2117 int ret = 0;
2119 pa_core_assert_ref(c);
2120 pa_assert_ctl_context();
2121 pa_assert(cause != 0);
2123 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2124 int r;
2126 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2127 ret = r;
2130 return ret;
/* Called from main thread.
 *
 * Synchronously asks the IO thread to detach all streams; see
 * PA_SINK_MESSAGE_DETACH / pa_sink_detach_within_thread(). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
/* Called from main thread.
 *
 * Synchronously asks the IO thread to reattach all streams; see
 * PA_SINK_MESSAGE_ATTACH / pa_sink_attach_within_thread(). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2151 /* Called from IO thread */
2152 void pa_sink_detach_within_thread(pa_sink *s) {
2153 pa_sink_input *i;
2154 void *state = NULL;
2156 pa_sink_assert_ref(s);
2157 pa_sink_assert_io_context(s);
2158 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2160 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2161 if (i->detach)
2162 i->detach(i);
2164 if (s->monitor_source)
2165 pa_source_detach_within_thread(s->monitor_source);
2168 /* Called from IO thread */
2169 void pa_sink_attach_within_thread(pa_sink *s) {
2170 pa_sink_input *i;
2171 void *state = NULL;
2173 pa_sink_assert_ref(s);
2174 pa_sink_assert_io_context(s);
2175 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2177 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2178 if (i->attach)
2179 i->attach(i);
2181 if (s->monitor_source)
2182 pa_source_attach_within_thread(s->monitor_source);
/* Called from IO thread.
 *
 * Asks the sink implementor to rewind its playback buffer by 'nbytes'
 * ((size_t) -1 means "as much as possible"). The request is clamped to
 * max_rewind, ignored while suspended, and only escalated if it asks
 * for more than an already pending request. */
void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes == (size_t) -1)
        nbytes = s->thread_info.max_rewind;

    nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);

    /* A smaller pending request is already covered by this one */
    if (s->thread_info.rewind_requested &&
        nbytes <= s->thread_info.rewind_nbytes)
        return;

    s->thread_info.rewind_nbytes = nbytes;
    s->thread_info.rewind_requested = TRUE;

    if (s->request_rewind)
        s->request_rewind(s);
}
/* Called from IO thread.
 *
 * Computes the latency currently requested of this sink: the minimum
 * of all stream requests and the monitor source's request, clamped to
 * the sink's latency range. Returns (pa_usec_t) -1 when nothing
 * requested an explicit latency. The result is cached while the sink
 * is linked. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency the fixed latency (clamped) always wins */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Minimum over all stream requests ((pa_usec_t) -1 = no request) */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
/* Called from main thread.
 *
 * Returns the latency currently requested of this sink by its streams
 * (queried synchronously from the IO thread), or 0 while suspended.
 * Note: the IO thread maps "no explicit request" to max_latency before
 * answering (see PA_SINK_MESSAGE_GET_REQUESTED_LATENCY). */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
2264 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2265 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2266 pa_sink_input *i;
2267 void *state = NULL;
2269 pa_sink_assert_ref(s);
2270 pa_sink_assert_io_context(s);
2272 if (max_rewind == s->thread_info.max_rewind)
2273 return;
2275 s->thread_info.max_rewind = max_rewind;
2277 if (PA_SINK_IS_LINKED(s->thread_info.state))
2278 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2279 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2281 if (s->monitor_source)
2282 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2285 /* Called from main thread */
2286 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2287 pa_sink_assert_ref(s);
2288 pa_assert_ctl_context();
2290 if (PA_SINK_IS_LINKED(s->state))
2291 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2292 else
2293 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2296 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2297 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2298 void *state = NULL;
2300 pa_sink_assert_ref(s);
2301 pa_sink_assert_io_context(s);
2303 if (max_request == s->thread_info.max_request)
2304 return;
2306 s->thread_info.max_request = max_request;
2308 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2309 pa_sink_input *i;
2311 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2312 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2316 /* Called from main thread */
2317 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2318 pa_sink_assert_ref(s);
2319 pa_assert_ctl_context();
2321 if (PA_SINK_IS_LINKED(s->state))
2322 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2323 else
2324 pa_sink_set_max_request_within_thread(s, max_request);
2327 /* Called from IO thread */
2328 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2329 pa_sink_input *i;
2330 void *state = NULL;
2332 pa_sink_assert_ref(s);
2333 pa_sink_assert_io_context(s);
2335 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2336 s->thread_info.requested_latency_valid = FALSE;
2337 else if (dynamic)
2338 return;
2340 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2342 if (s->update_requested_latency)
2343 s->update_requested_latency(s);
2345 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2346 if (i->update_sink_requested_latency)
2347 i->update_sink_requested_latency(i);
2351 /* Called from main thread */
2352 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2353 pa_sink_assert_ref(s);
2354 pa_assert_ctl_context();
2356 /* min_latency == 0: no limit
2357 * min_latency anything else: specified limit
2359 * Similar for max_latency */
2361 if (min_latency < ABSOLUTE_MIN_LATENCY)
2362 min_latency = ABSOLUTE_MIN_LATENCY;
2364 if (max_latency <= 0 ||
2365 max_latency > ABSOLUTE_MAX_LATENCY)
2366 max_latency = ABSOLUTE_MAX_LATENCY;
2368 pa_assert(min_latency <= max_latency);
2370 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2371 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2372 max_latency == ABSOLUTE_MAX_LATENCY) ||
2373 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2375 if (PA_SINK_IS_LINKED(s->state)) {
2376 pa_usec_t r[2];
2378 r[0] = min_latency;
2379 r[1] = max_latency;
2381 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2382 } else
2383 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2386 /* Called from main thread */
2387 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2388 pa_sink_assert_ref(s);
2389 pa_assert_ctl_context();
2390 pa_assert(min_latency);
2391 pa_assert(max_latency);
2393 if (PA_SINK_IS_LINKED(s->state)) {
2394 pa_usec_t r[2] = { 0, 0 };
2396 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2398 *min_latency = r[0];
2399 *max_latency = r[1];
2400 } else {
2401 *min_latency = s->thread_info.min_latency;
2402 *max_latency = s->thread_info.max_latency;
2406 /* Called from IO thread */
2407 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2408 pa_sink_assert_ref(s);
2409 pa_sink_assert_io_context(s);
2411 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2412 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2413 pa_assert(min_latency <= max_latency);
2415 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2416 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2417 max_latency == ABSOLUTE_MAX_LATENCY) ||
2418 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2420 if (s->thread_info.min_latency == min_latency &&
2421 s->thread_info.max_latency == max_latency)
2422 return;
2424 s->thread_info.min_latency = min_latency;
2425 s->thread_info.max_latency = max_latency;
2427 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2428 pa_sink_input *i;
2429 void *state = NULL;
2431 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2432 if (i->update_sink_latency_range)
2433 i->update_sink_latency_range(i);
2436 pa_sink_invalidate_requested_latency(s, FALSE);
2438 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2441 /* Called from main thread */
2442 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2443 pa_sink_assert_ref(s);
2444 pa_assert_ctl_context();
2446 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2447 pa_assert(latency == 0);
2448 return;
2451 if (latency < ABSOLUTE_MIN_LATENCY)
2452 latency = ABSOLUTE_MIN_LATENCY;
2454 if (latency > ABSOLUTE_MAX_LATENCY)
2455 latency = ABSOLUTE_MAX_LATENCY;
2457 if (PA_SINK_IS_LINKED(s->state))
2458 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2459 else
2460 s->thread_info.fixed_latency = latency;
2462 pa_source_set_fixed_latency(s->monitor_source, latency);
2465 /* Called from main thread */
2466 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2467 pa_usec_t latency;
2469 pa_sink_assert_ref(s);
2470 pa_assert_ctl_context();
2472 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2473 return 0;
2475 if (PA_SINK_IS_LINKED(s->state))
2476 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2477 else
2478 latency = s->thread_info.fixed_latency;
2480 return latency;
2483 /* Called from IO thread */
2484 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2485 pa_sink_assert_ref(s);
2486 pa_sink_assert_io_context(s);
2488 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2489 pa_assert(latency == 0);
2490 return;
2493 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2494 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2496 if (s->thread_info.fixed_latency == latency)
2497 return;
2499 s->thread_info.fixed_latency = latency;
2501 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2502 pa_sink_input *i;
2503 void *state = NULL;
2505 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2506 if (i->update_sink_fixed_latency)
2507 i->update_sink_fixed_latency(i);
2510 pa_sink_invalidate_requested_latency(s, FALSE);
2512 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2515 /* Called from main context */
2516 size_t pa_sink_get_max_rewind(pa_sink *s) {
2517 size_t r;
2518 pa_sink_assert_ref(s);
2519 pa_assert_ctl_context();
2521 if (!PA_SINK_IS_LINKED(s->state))
2522 return s->thread_info.max_rewind;
2524 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2526 return r;
2529 /* Called from main context */
2530 size_t pa_sink_get_max_request(pa_sink *s) {
2531 size_t r;
2532 pa_sink_assert_ref(s);
2533 pa_assert_ctl_context();
2535 if (!PA_SINK_IS_LINKED(s->state))
2536 return s->thread_info.max_request;
2538 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2540 return r;
2543 /* Called from main context */
2544 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2545 pa_device_port *port;
2547 pa_sink_assert_ref(s);
2548 pa_assert_ctl_context();
2550 if (!s->set_port) {
2551 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2552 return -PA_ERR_NOTIMPLEMENTED;
2555 if (!s->ports)
2556 return -PA_ERR_NOENTITY;
2558 if (!(port = pa_hashmap_get(s->ports, name)))
2559 return -PA_ERR_NOENTITY;
2561 if (s->active_port == port) {
2562 s->save_port = s->save_port || save;
2563 return 0;
2566 if ((s->set_port(s, port)) < 0)
2567 return -PA_ERR_NOENTITY;
2569 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2571 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2573 s->active_port = port;
2574 s->save_port = save;
2576 return 0;
2579 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2580 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2582 pa_assert(p);
2584 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2585 return TRUE;
2587 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2589 if (pa_streq(ff, "microphone"))
2590 t = "audio-input-microphone";
2591 else if (pa_streq(ff, "webcam"))
2592 t = "camera-web";
2593 else if (pa_streq(ff, "computer"))
2594 t = "computer";
2595 else if (pa_streq(ff, "handset"))
2596 t = "phone";
2597 else if (pa_streq(ff, "portable"))
2598 t = "multimedia-player";
2599 else if (pa_streq(ff, "tv"))
2600 t = "video-display";
2603 * The following icons are not part of the icon naming spec,
2604 * because Rodney Dawes sucks as the maintainer of that spec.
2606 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2608 else if (pa_streq(ff, "headset"))
2609 t = "audio-headset";
2610 else if (pa_streq(ff, "headphone"))
2611 t = "audio-headphones";
2612 else if (pa_streq(ff, "speaker"))
2613 t = "audio-speakers";
2614 else if (pa_streq(ff, "hands-free"))
2615 t = "audio-handsfree";
2618 if (!t)
2619 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2620 if (pa_streq(c, "modem"))
2621 t = "modem";
2623 if (!t) {
2624 if (is_sink)
2625 t = "audio-card";
2626 else
2627 t = "audio-input-microphone";
2630 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2631 if (strstr(profile, "analog"))
2632 s = "-analog";
2633 else if (strstr(profile, "iec958"))
2634 s = "-iec958";
2635 else if (strstr(profile, "hdmi"))
2636 s = "-hdmi";
2639 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2641 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2643 return TRUE;
2646 pa_bool_t pa_device_init_description(pa_proplist *p) {
2647 const char *s, *d = NULL, *k;
2648 pa_assert(p);
2650 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2651 return TRUE;
2653 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2654 if (pa_streq(s, "internal"))
2655 d = _("Internal Audio");
2657 if (!d)
2658 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2659 if (pa_streq(s, "modem"))
2660 d = _("Modem");
2662 if (!d)
2663 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2665 if (!d)
2666 return FALSE;
2668 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2670 if (d && k)
2671 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2672 else if (d)
2673 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2675 return TRUE;
2678 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2679 const char *s;
2680 pa_assert(p);
2682 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2683 return TRUE;
2685 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2686 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2687 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2688 return TRUE;
2691 return FALSE;
2694 unsigned pa_device_init_priority(pa_proplist *p) {
2695 const char *s;
2696 unsigned priority = 0;
2698 pa_assert(p);
2700 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2702 if (pa_streq(s, "sound"))
2703 priority += 9000;
2704 else if (!pa_streq(s, "modem"))
2705 priority += 1000;
2708 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2710 if (pa_streq(s, "internal"))
2711 priority += 900;
2712 else if (pa_streq(s, "speaker"))
2713 priority += 500;
2714 else if (pa_streq(s, "headphone"))
2715 priority += 400;
2718 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2720 if (pa_streq(s, "pci"))
2721 priority += 50;
2722 else if (pa_streq(s, "usb"))
2723 priority += 40;
2724 else if (pa_streq(s, "bluetooth"))
2725 priority += 30;
2728 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2730 if (pa_startswith(s, "analog-"))
2731 priority += 9;
2732 else if (pa_startswith(s, "iec958-"))
2733 priority += 8;
2736 return priority;