core: fill up memblock with pa_sink_render_into_full() in pa_sink_render_full() inste...
[pulseaudio-mirror.git] / src / pulsecore / sink.c
blob9388d3095d4c3764ed01129ac1af9e8338c56e1f
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
47 #include "sink.h"
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
57 static void sink_free(pa_object *s);
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
65 return data;
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
113 pa_proplist_free(data->proplist);
115 if (data->ports) {
116 pa_device_port *p;
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
131 pa_assert(name);
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
137 p->priority = 0;
139 return p;
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180 pa_assert_ctl_context();
182 s = pa_msgobject_new(pa_sink);
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
186 pa_xfree(s);
187 return NULL;
190 pa_sink_new_data_set_name(data, name);
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
193 pa_xfree(s);
194 pa_namereg_unregister(core, name);
195 return NULL;
198 /* FIXME, need to free s here on failure */
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
217 if (!data->muted_is_set)
218 data->muted = FALSE;
220 if (data->card)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
228 pa_xfree(s);
229 pa_namereg_unregister(core, name);
230 return NULL;
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
236 s->core = core;
237 s->state = PA_SINK_INIT;
238 s->flags = flags;
239 s->priority = 0;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
247 s->priority = pa_device_init_priority(s->proplist);
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
252 s->inputs = pa_idxset_new(NULL, NULL);
253 s->n_corked = 0;
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
262 reset_callbacks(s);
263 s->userdata = NULL;
265 s->asyncmsgq = NULL;
267 /* As a minor optimization we just steal the list instead of
268 * copying it here */
269 s->ports = data->ports;
270 data->ports = NULL;
272 s->active_port = NULL;
273 s->save_port = FALSE;
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
279 if (!s->active_port && s->ports) {
280 void *state;
281 pa_device_port *p;
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
285 s->active_port = p;
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
291 pa_silence_memchunk_get(
292 &core->silence_cache,
293 core->mempool,
294 &s->silence,
295 &s->sample_spec,
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
316 if (s->card)
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
321 s->index,
322 s->name,
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
325 pt);
326 pa_xfree(pt);
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
344 pa_source_new_data_done(&source_data);
346 if (!s->monitor_source) {
347 pa_sink_unlink(s);
348 pa_sink_unref(s);
349 return NULL;
352 s->monitor_source->monitor_of = s;
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
358 return s;
361 /* Called from main context */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
363 int ret;
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
367 pa_assert(s);
368 pa_assert_ctl_context();
370 if (s->state == state)
371 return 0;
373 original_state = s->state;
375 suspend_change =
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
379 if (s->set_state)
380 if ((ret = s->set_state(s, state)) < 0)
381 return ret;
383 if (s->asyncmsgq)
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
386 if (s->set_state)
387 s->set_state(s, original_state);
389 return ret;
392 s->state = state;
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
399 if (suspend_change) {
400 pa_sink_input *i;
401 uint32_t idx;
403 /* We're suspending or resuming, tell everyone about it */
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
409 else if (i->suspend)
410 i->suspend(i, state == PA_SINK_SUSPENDED);
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
416 return 0;
419 /* Called from main context */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
424 pa_assert(s->state == PA_SINK_INIT);
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
460 pa_source_put(s->monitor_source);
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
466 /* Called from main context */
467 void pa_sink_unlink(pa_sink* s) {
468 pa_bool_t linked;
469 pa_sink_input *i, *j = NULL;
471 pa_assert(s);
472 pa_assert_ctl_context();
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
480 * effects. */
482 linked = PA_SINK_IS_LINKED(s->state);
484 if (linked)
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
491 if (s->card)
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
495 pa_assert(i != j);
496 pa_sink_input_kill(i);
497 j = i;
500 if (linked)
501 sink_set_state(s, PA_SINK_UNLINKED);
502 else
503 s->state = PA_SINK_UNLINKED;
505 reset_callbacks(s);
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
510 if (linked) {
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
516 /* Called from main context */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
519 pa_sink_input *i;
521 pa_assert(s);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
525 if (PA_SINK_IS_LINKED(s->state))
526 pa_sink_unlink(s);
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
535 pa_idxset_free(s->inputs, NULL, NULL);
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
545 pa_xfree(s->name);
546 pa_xfree(s->driver);
548 if (s->proplist)
549 pa_proplist_free(s->proplist);
551 if (s->ports) {
552 pa_device_port *p;
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
557 pa_hashmap_free(s->ports, NULL, NULL);
560 pa_xfree(s);
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
568 s->asyncmsgq = q;
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
579 if (mask == 0)
580 return;
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
585 s->flags = (s->flags & ~mask) | (value & mask);
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
599 s->thread_info.rtpoll = p;
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
617 /* Called from main context */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
624 if (suspend) {
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
627 } else {
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
633 return 0;
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
639 else
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
643 /* Called from main context */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
646 uint32_t idx;
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
652 if (!q)
653 q = pa_queue_new();
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
658 pa_sink_input_ref(i);
660 if (pa_sink_input_start_move(i) >= 0)
661 pa_queue_push(q, i);
662 else
663 pa_sink_input_unref(i);
666 return q;
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
682 pa_sink_input_unref(i);
685 pa_queue_free(q, NULL, NULL);
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
692 pa_assert_ctl_context();
693 pa_assert(q);
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
700 pa_queue_free(q, NULL, NULL);
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_input *i;
706 void *state = NULL;
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
712 /* If nobody requested this and this is actually no real rewind
713 * then we can short cut this. Please note that this means that
714 * not all rewind requests triggered upstream will always be
715 * translated in actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
717 return;
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 return;
725 if (nbytes > 0)
726 pa_log_debug("Processing rewind...");
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
733 if (nbytes > 0)
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
738 /* Called from IO thread context */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
740 pa_sink_input *i;
741 unsigned n = 0;
742 void *state = NULL;
743 size_t mixlength = *length;
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
747 pa_assert(info);
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
759 continue;
762 info->userdata = pa_sink_input_ref(i);
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
767 info++;
768 n++;
769 maxinfo--;
772 if (mixlength > 0)
773 *length = mixlength;
775 return n;
778 /* Called from IO thread context */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
780 pa_sink_input *i;
781 void *state = NULL;
782 unsigned p = 0;
783 unsigned n_unreffed = 0;
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
787 pa_assert(result);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
791 /* We optimize for the case where the order of the inputs has not changed */
793 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
794 unsigned j;
795 pa_mix_info* m = NULL;
797 pa_sink_input_assert_ref(i);
799 /* Let's try to find the matching entry info the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
802 if (info[p].userdata == i) {
803 m = info + p;
804 break;
807 p++;
808 if (p >= n)
809 p = 0;
812 /* Drop read data */
813 pa_sink_input_drop(i, result->length);
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
818 void *ostate = NULL;
819 pa_source_output *o;
820 pa_memchunk c;
822 if (m && m->chunk.memblock) {
823 c = m->chunk;
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
830 } else {
831 c = s->silence;
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
843 pa_memblock_unref(c.memblock);
847 if (m) {
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
852 pa_sink_input_unref(m->userdata);
853 m->userdata = NULL;
855 n_unreffed += 1;
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
864 if (info->userdata)
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
875 /* Called from IO thread context */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
878 unsigned n;
879 size_t block_size_max;
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
885 pa_assert(result);
887 pa_sink_ref(s);
889 pa_assert(!s->thread_info.rewind_requested);
890 pa_assert(s->thread_info.rewind_nbytes == 0);
892 if (s->thread_info.state == PA_SINK_SUSPENDED) {
893 result->memblock = pa_memblock_ref(s->silence.memblock);
894 result->index = s->silence.index;
895 result->length = PA_MIN(s->silence.length, length);
897 pa_sink_unref(s);
898 return;
901 if (length <= 0)
902 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
904 block_size_max = pa_mempool_block_size_max(s->core->mempool);
905 if (length > block_size_max)
906 length = pa_frame_align(block_size_max, &s->sample_spec);
908 pa_assert(length > 0);
910 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
912 if (n == 0) {
914 *result = s->silence;
915 pa_memblock_ref(result->memblock);
917 if (result->length > length)
918 result->length = length;
920 } else if (n == 1) {
921 pa_cvolume volume;
923 *result = info[0].chunk;
924 pa_memblock_ref(result->memblock);
926 if (result->length > length)
927 result->length = length;
929 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
931 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
932 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
933 pa_memblock_unref(result->memblock);
934 pa_silence_memchunk_get(&s->core->silence_cache,
935 s->core->mempool,
936 result,
937 &s->sample_spec,
938 result->length);
939 } else {
940 pa_memchunk_make_writable(result, 0);
941 pa_volume_memchunk(result, &s->sample_spec, &volume);
944 } else {
945 void *ptr;
946 result->memblock = pa_memblock_new(s->core->mempool, length);
948 ptr = pa_memblock_acquire(result->memblock);
949 result->length = pa_mix(info, n,
950 ptr, length,
951 &s->sample_spec,
952 &s->thread_info.soft_volume,
953 s->thread_info.soft_muted);
954 pa_memblock_release(result->memblock);
956 result->index = 0;
959 inputs_drop(s, info, n, result);
961 pa_sink_unref(s);
964 /* Called from IO thread context */
965 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
966 pa_mix_info info[MAX_MIX_CHANNELS];
967 unsigned n;
968 size_t length, block_size_max;
970 pa_sink_assert_ref(s);
971 pa_sink_assert_io_context(s);
972 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
973 pa_assert(target);
974 pa_assert(target->memblock);
975 pa_assert(target->length > 0);
976 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
978 pa_sink_ref(s);
980 pa_assert(!s->thread_info.rewind_requested);
981 pa_assert(s->thread_info.rewind_nbytes == 0);
983 if (s->thread_info.state == PA_SINK_SUSPENDED) {
984 pa_silence_memchunk(target, &s->sample_spec);
985 pa_sink_unref(s);
986 return;
989 length = target->length;
990 block_size_max = pa_mempool_block_size_max(s->core->mempool);
991 if (length > block_size_max)
992 length = pa_frame_align(block_size_max, &s->sample_spec);
994 pa_assert(length > 0);
996 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
998 if (n == 0) {
999 if (target->length > length)
1000 target->length = length;
1002 pa_silence_memchunk(target, &s->sample_spec);
1003 } else if (n == 1) {
1004 pa_cvolume volume;
1006 if (target->length > length)
1007 target->length = length;
1009 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1011 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1012 pa_silence_memchunk(target, &s->sample_spec);
1013 else {
1014 pa_memchunk vchunk;
1016 vchunk = info[0].chunk;
1017 pa_memblock_ref(vchunk.memblock);
1019 if (vchunk.length > length)
1020 vchunk.length = length;
1022 if (!pa_cvolume_is_norm(&volume)) {
1023 pa_memchunk_make_writable(&vchunk, 0);
1024 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1027 pa_memchunk_memcpy(target, &vchunk);
1028 pa_memblock_unref(vchunk.memblock);
1031 } else {
1032 void *ptr;
1034 ptr = pa_memblock_acquire(target->memblock);
1036 target->length = pa_mix(info, n,
1037 (uint8_t*) ptr + target->index, length,
1038 &s->sample_spec,
1039 &s->thread_info.soft_volume,
1040 s->thread_info.soft_muted);
1042 pa_memblock_release(target->memblock);
1045 inputs_drop(s, info, n, target);
1047 pa_sink_unref(s);
1050 /* Called from IO thread context */
1051 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1052 pa_memchunk chunk;
1053 size_t l, d;
1055 pa_sink_assert_ref(s);
1056 pa_sink_assert_io_context(s);
1057 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1058 pa_assert(target);
1059 pa_assert(target->memblock);
1060 pa_assert(target->length > 0);
1061 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1063 pa_sink_ref(s);
1065 pa_assert(!s->thread_info.rewind_requested);
1066 pa_assert(s->thread_info.rewind_nbytes == 0);
1068 l = target->length;
1069 d = 0;
1070 while (l > 0) {
1071 chunk = *target;
1072 chunk.index += d;
1073 chunk.length -= d;
1075 pa_sink_render_into(s, &chunk);
1077 d += chunk.length;
1078 l -= chunk.length;
1081 pa_sink_unref(s);
/* Called from IO thread context.
 *
 * Renders exactly 'length' bytes of audio into *result by mixing all
 * connected sink inputs. Unlike pa_sink_render(), the returned chunk is
 * always exactly 'length' bytes long: if the inputs could only provide
 * part of it, the remainder is filled by pa_sink_render_into_full().
 * 'length' must be > 0 and frame-aligned. The caller owns the reference
 * on result->memblock. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;  /* bytes covered by the first mixing pass */
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    /* Rendering while a rewind is pending would produce audio that is
     * about to be thrown away; the caller must process rewinds first. */
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        /* Suspended sinks produce silence only */
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);

        pa_sink_unref(s);
        return;
    }

    /* Collect the next chunk of every playing input; length1st is
     * shrunk to the largest length all of them can provide. */
    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs at all: silence */
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        /* Exactly one input: pass its chunk through, applying sink
         * softvol/mute without a full mixing pass. */
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                /* Fully muted: replace the chunk by cached silence */
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                /* Copy-on-write before scaling the samples in place */
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        /* Multiple inputs: mix into a fresh memblock sized for the
         * full request, even though the first pass may fill less. */
        void *ptr;

        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    /* Advance all inputs past the data we consumed and release the
     * references fill_mix_info() took. */
    inputs_drop(s, info, n, result);

    if (result->length < length) {
        /* The first pass came up short; render the missing tail
         * directly into the remainder of the block. */
        pa_memchunk chunk;

        pa_memchunk_make_writable(result, length);

        chunk.memblock = result->memblock;
        chunk.index = result->index + result->length;
        chunk.length = length - result->length;

        pa_sink_render_into_full(s, &chunk);

        result->length = length;
    }

    pa_sink_unref(s);
}
1181 /* Called from main thread */
1182 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1183 pa_usec_t usec = 0;
1185 pa_sink_assert_ref(s);
1186 pa_assert_ctl_context();
1187 pa_assert(PA_SINK_IS_LINKED(s->state));
1189 /* The returned value is supposed to be in the time domain of the sound card! */
1191 if (s->state == PA_SINK_SUSPENDED)
1192 return 0;
1194 if (!(s->flags & PA_SINK_LATENCY))
1195 return 0;
1197 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1199 return usec;
1202 /* Called from IO thread */
1203 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1204 pa_usec_t usec = 0;
1205 pa_msgobject *o;
1207 pa_sink_assert_ref(s);
1208 pa_sink_assert_io_context(s);
1209 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1211 /* The returned value is supposed to be in the time domain of the sound card! */
1213 if (s->thread_info.state == PA_SINK_SUSPENDED)
1214 return 0;
1216 if (!(s->flags & PA_SINK_LATENCY))
1217 return 0;
1219 o = PA_MSGOBJECT(s);
1221 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1223 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1224 return -1;
1226 return usec;
/* Called from main context.
 *
 * Recomputes i->reference_ratio for every input of a flat-volume sink
 * so that i->volume == s->reference_volume * i->reference_ratio holds
 * again after s->reference_volume changed. */
static void compute_reference_ratios(pa_sink *s) {
    uint32_t idx;
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * Calculates the reference ratio from the sink's reference
         * volume. This basically calculates:
         *
         * i->reference_ratio = i->volume / s->reference_volume
         */

        /* Bring the sink volume into the input's channel map first */
        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->reference_ratio.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            /* We don't update when the sink volume is 0 anyway */
            if (remapped.values[c] <= PA_VOLUME_MUTED)
                continue;

            /* Don't update the reference ratio unless necessary --
             * avoids losing precision when multiply/divide would
             * round-trip to the same value. */
            if (pa_sw_volume_multiply(
                        i->reference_ratio.values[c],
                        remapped.values[c]) == i->volume.values[c])
                continue;

            i->reference_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
        }
    }
}
/* Called from main context.
 *
 * Recomputes, for every input of a flat-volume sink, the ratio between
 * the input's volume and the sink's real (hardware) volume, and from
 * that the per-input software volume that the IO thread will apply. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink's real volume into the input's channel map */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary: only recompute the
             * ratio when the old one no longer round-trips to the
             * input's volume. */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
/* Called from main thread.
 *
 * Sets s->real_volume to the per-channel maximum over all input
 * volumes (flat volume: the hardware runs at the loudest stream's
 * level, quieter streams are attenuated in software), then refreshes
 * the per-input ratios/soft volumes via compute_real_ratios(). */
static void compute_real_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (pa_idxset_isempty(s->inputs)) {
        /* In the special case that we have no sink input we leave the
         * volume unmodified. */
        s->real_volume = s->reference_volume;
        return;
    }

    /* Start from silence so merging yields the per-channel maximum */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        remapped = i->volume;
        pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
        pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
    }

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
/* Called from main thread.
 *
 * After s->reference_volume changed for a reason other than an input
 * volume change, rebuild every input's volume from its (unchanged)
 * reference ratio and notify listeners about inputs whose volume
 * actually moved. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume, remapped;

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
/* Called from main thread.
 *
 * Sets the sink's volume.
 *
 * volume:  new reference volume, or NULL (flat-volume sinks only) to
 *          re-synchronize the sink's real/reference volumes with the
 *          current stream volumes.
 * sendmsg: if TRUE, push the resulting soft volume to the IO thread.
 * save:    if TRUE, mark the volume worth persisting.
 */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t sendmsg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        if (pa_cvolume_compatible(volume, &s->sample_spec))
            s->reference_volume = *volume;
        else
            /* Mono special case: scale all channels to the given level */
            pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary: it must
         * never be lower than the real volume */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    /* Keep the save flag if nothing changed; otherwise take the
     * caller's wish */
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1483 /* Called from main thread. Only to be called by sink implementor */
1484 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1485 pa_sink_assert_ref(s);
1486 pa_assert_ctl_context();
1488 if (!volume)
1489 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1490 else
1491 s->soft_volume = *volume;
1493 if (PA_SINK_IS_LINKED(s->state))
1494 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1495 else
1496 s->thread_info.soft_volume = s->soft_volume;
/* Called from main thread.
 *
 * Reacts to an externally caused change of the hardware (real) volume:
 * adopts it as the new reference volume, rebuilds the stream volumes
 * from the (fixed) real ratios, and fires change notifications. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1564 /* Called from main thread */
1565 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1566 pa_sink_assert_ref(s);
1567 pa_assert_ctl_context();
1568 pa_assert(PA_SINK_IS_LINKED(s->state));
1570 if (s->refresh_volume || force_refresh) {
1571 struct pa_cvolume old_real_volume;
1573 old_real_volume = s->real_volume;
1575 if (s->get_volume)
1576 s->get_volume(s);
1578 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1580 propagate_real_volume(s, &old_real_volume);
1583 return &s->reference_volume;
1586 /* Called from main thread */
1587 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1588 pa_cvolume old_real_volume;
1590 pa_sink_assert_ref(s);
1591 pa_assert_ctl_context();
1592 pa_assert(PA_SINK_IS_LINKED(s->state));
1594 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1596 old_real_volume = s->real_volume;
1597 s->real_volume = *new_real_volume;
1599 propagate_real_volume(s, &old_real_volume);
1602 /* Called from main thread */
1603 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1604 pa_bool_t old_muted;
1606 pa_sink_assert_ref(s);
1607 pa_assert_ctl_context();
1608 pa_assert(PA_SINK_IS_LINKED(s->state));
1610 old_muted = s->muted;
1611 s->muted = mute;
1612 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1614 if (s->set_mute)
1615 s->set_mute(s);
1617 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1619 if (old_muted != s->muted)
1620 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from main thread.
 *
 * Returns the sink's mute state, re-reading it from the hardware first
 * if requested. A hardware-side change is treated as a user action:
 * it is marked for saving, announced, and the soft mute re-synced. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Let the implementor re-read the hardware mute state ... */
        if (s->get_mute)
            s->get_mute(s);

        /* ... and give the IO thread a chance to do the same. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
1651 /* Called from main thread */
1652 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1653 pa_sink_assert_ref(s);
1654 pa_assert_ctl_context();
1655 pa_assert(PA_SINK_IS_LINKED(s->state));
1657 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1659 if (s->muted == new_muted)
1660 return;
1662 s->muted = new_muted;
1663 s->save_muted = TRUE;
1665 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1668 /* Called from main thread */
1669 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1670 pa_sink_assert_ref(s);
1671 pa_assert_ctl_context();
1673 if (p)
1674 pa_proplist_update(s->proplist, mode, p);
1676 if (PA_SINK_IS_LINKED(s->state)) {
1677 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1678 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1681 return TRUE;
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Sets (or, with NULL, removes) the sink's human-readable description
 * property, keeps the monitor source's description in step, and
 * announces the change for linked sinks. No-ops when nothing would
 * change. */
void pa_sink_set_description(pa_sink *s, const char *description) {
    const char *old;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Nothing to remove and nothing to set: bail out early */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Unchanged description: bail out early */
    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (s->monitor_source) {
        char *n;

        /* Derive the monitor source's description from ours; fall back
         * to the sink name if the description was removed. */
        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);
        pa_xfree(n);
    }

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
    }
}
1718 /* Called from main thread */
1719 unsigned pa_sink_linked_by(pa_sink *s) {
1720 unsigned ret;
1722 pa_sink_assert_ref(s);
1723 pa_assert_ctl_context();
1724 pa_assert(PA_SINK_IS_LINKED(s->state));
1726 ret = pa_idxset_size(s->inputs);
1728 /* We add in the number of streams connected to us here. Please
1729 * note the asymmmetry to pa_sink_used_by()! */
1731 if (s->monitor_source)
1732 ret += pa_source_linked_by(s->monitor_source);
1734 return ret;
1737 /* Called from main thread */
1738 unsigned pa_sink_used_by(pa_sink *s) {
1739 unsigned ret;
1741 pa_sink_assert_ref(s);
1742 pa_assert_ctl_context();
1743 pa_assert(PA_SINK_IS_LINKED(s->state));
1745 ret = pa_idxset_size(s->inputs);
1746 pa_assert(ret >= s->n_corked);
1748 /* Streams connected to our monitor source do not matter for
1749 * pa_sink_used_by()!.*/
1751 return ret - s->n_corked;
1754 /* Called from main thread */
1755 unsigned pa_sink_check_suspend(pa_sink *s) {
1756 unsigned ret;
1757 pa_sink_input *i;
1758 uint32_t idx;
1760 pa_sink_assert_ref(s);
1761 pa_assert_ctl_context();
1763 if (!PA_SINK_IS_LINKED(s->state))
1764 return 0;
1766 ret = 0;
1768 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1769 pa_sink_input_state_t st;
1771 st = pa_sink_input_get_state(i);
1772 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1774 if (st == PA_SINK_INPUT_CORKED)
1775 continue;
1777 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1778 continue;
1780 ret ++;
1783 if (s->monitor_source)
1784 ret += pa_source_check_suspend(s->monitor_source);
1786 return ret;
1789 /* Called from the IO thread */
1790 static void sync_input_volumes_within_thread(pa_sink *s) {
1791 pa_sink_input *i;
1792 void *state = NULL;
1794 pa_sink_assert_ref(s);
1795 pa_sink_assert_io_context(s);
1797 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1798 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1799 continue;
1801 i->thread_info.soft_volume = i->soft_volume;
1802 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
/* Called from IO thread, except when it is not.
 *
 * Central message handler for the sink object: stream attach/detach,
 * stream moves, volume/mute synchronization, state changes and
 * latency/rewind parameter queries. Returns 0 on success, a negative
 * value for unhandled codes (PA_SINK_MESSAGE_GET_LATENCY is expected
 * to be handled by the implementor before delegating here). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Wire the IO-thread view of the sync chain in both
             * directions */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink the IO-thread view of the sync chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink: audio still buffered in
                 * the device plus in the input's render queue must be
                 * re-rendered on the new sink. */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink: skip the data that was
                 * already played on the old sink. */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            /* Nothing to do by default; implementors intercept this */
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            /* Nothing to do by default; implementors intercept this */
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                /* A pending rewind is meaningless while suspended */
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Tell the inputs we crossed the suspend boundary */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    return -1;
}
2133 /* Called from main thread */
2134 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2135 pa_sink *sink;
2136 uint32_t idx;
2137 int ret = 0;
2139 pa_core_assert_ref(c);
2140 pa_assert_ctl_context();
2141 pa_assert(cause != 0);
2143 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2144 int r;
2146 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2147 ret = r;
2150 return ret;
/* Called from main thread.
 *
 * Synchronously asks the IO thread to detach all streams from this
 * sink (see pa_sink_detach_within_thread()). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
/* Called from main thread.
 *
 * Synchronously asks the IO thread to reattach all streams to this
 * sink (see pa_sink_attach_within_thread()). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2171 /* Called from IO thread */
2172 void pa_sink_detach_within_thread(pa_sink *s) {
2173 pa_sink_input *i;
2174 void *state = NULL;
2176 pa_sink_assert_ref(s);
2177 pa_sink_assert_io_context(s);
2178 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2180 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2181 if (i->detach)
2182 i->detach(i);
2184 if (s->monitor_source)
2185 pa_source_detach_within_thread(s->monitor_source);
2188 /* Called from IO thread */
2189 void pa_sink_attach_within_thread(pa_sink *s) {
2190 pa_sink_input *i;
2191 void *state = NULL;
2193 pa_sink_assert_ref(s);
2194 pa_sink_assert_io_context(s);
2195 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2197 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2198 if (i->attach)
2199 i->attach(i);
2201 if (s->monitor_source)
2202 pa_source_attach_within_thread(s->monitor_source);
/* Called from IO thread.
 *
 * Requests that 'nbytes' of already rendered audio be rewritten.
 * (size_t) -1 means "as much as possible". The request is clamped to
 * max_rewind, coalesced with any larger pending request, ignored while
 * suspended, and forwarded to the implementor's request_rewind()
 * callback. */
void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    /* (size_t) -1 requests the maximum possible rewind */
    if (nbytes == (size_t) -1)
        nbytes = s->thread_info.max_rewind;

    nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);

    /* An already pending, equal-or-larger rewind covers this request */
    if (s->thread_info.rewind_requested &&
        nbytes <= s->thread_info.rewind_nbytes)
        return;

    s->thread_info.rewind_nbytes = nbytes;
    s->thread_info.rewind_requested = TRUE;

    if (s->request_rewind)
        s->request_rewind(s);
}
/* Called from IO thread.
 *
 * Computes the effective requested latency: the minimum over all input
 * requests and the monitor source's request, clamped to the sink's
 * latency range. Returns (pa_usec_t) -1 when nobody requested
 * anything. For fixed-latency sinks the clamped fixed latency is
 * returned instead. The result is cached once the sink is linked. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Smallest of all input requests wins */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* The monitor source's streams count too */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2269 /* Called from main thread */
2270 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2271 pa_usec_t usec = 0;
2273 pa_sink_assert_ref(s);
2274 pa_assert_ctl_context();
2275 pa_assert(PA_SINK_IS_LINKED(s->state));
2277 if (s->state == PA_SINK_SUSPENDED)
2278 return 0;
2280 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2281 return usec;
2284 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2285 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2286 pa_sink_input *i;
2287 void *state = NULL;
2289 pa_sink_assert_ref(s);
2290 pa_sink_assert_io_context(s);
2292 if (max_rewind == s->thread_info.max_rewind)
2293 return;
2295 s->thread_info.max_rewind = max_rewind;
2297 if (PA_SINK_IS_LINKED(s->thread_info.state))
2298 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2299 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2301 if (s->monitor_source)
2302 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2305 /* Called from main thread */
2306 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2307 pa_sink_assert_ref(s);
2308 pa_assert_ctl_context();
2310 if (PA_SINK_IS_LINKED(s->state))
2311 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2312 else
2313 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2316 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2317 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2318 void *state = NULL;
2320 pa_sink_assert_ref(s);
2321 pa_sink_assert_io_context(s);
2323 if (max_request == s->thread_info.max_request)
2324 return;
2326 s->thread_info.max_request = max_request;
2328 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2329 pa_sink_input *i;
2331 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2332 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2336 /* Called from main thread */
2337 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2338 pa_sink_assert_ref(s);
2339 pa_assert_ctl_context();
2341 if (PA_SINK_IS_LINKED(s->state))
2342 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2343 else
2344 pa_sink_set_max_request_within_thread(s, max_request);
/* Called from IO thread */
/* Drop the cached requested-latency value and notify the sink
 * implementation and all inputs that it may have changed.
 *
 * NOTE(review): 'dynamic' appears to mark invalidations caused by a
 * change of dynamic-latency parameters; for sinks without
 * PA_SINK_DYNAMIC_LATENCY such a change is irrelevant, so we return
 * early without notifying anyone -- confirm against callers. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks cache a requested latency to drop. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2371 /* Called from main thread */
2372 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2373 pa_sink_assert_ref(s);
2374 pa_assert_ctl_context();
2376 /* min_latency == 0: no limit
2377 * min_latency anything else: specified limit
2379 * Similar for max_latency */
2381 if (min_latency < ABSOLUTE_MIN_LATENCY)
2382 min_latency = ABSOLUTE_MIN_LATENCY;
2384 if (max_latency <= 0 ||
2385 max_latency > ABSOLUTE_MAX_LATENCY)
2386 max_latency = ABSOLUTE_MAX_LATENCY;
2388 pa_assert(min_latency <= max_latency);
2390 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2391 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2392 max_latency == ABSOLUTE_MAX_LATENCY) ||
2393 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2395 if (PA_SINK_IS_LINKED(s->state)) {
2396 pa_usec_t r[2];
2398 r[0] = min_latency;
2399 r[1] = max_latency;
2401 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2402 } else
2403 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2406 /* Called from main thread */
2407 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2408 pa_sink_assert_ref(s);
2409 pa_assert_ctl_context();
2410 pa_assert(min_latency);
2411 pa_assert(max_latency);
2413 if (PA_SINK_IS_LINKED(s->state)) {
2414 pa_usec_t r[2] = { 0, 0 };
2416 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2418 *min_latency = r[0];
2419 *max_latency = r[1];
2420 } else {
2421 *min_latency = s->thread_info.min_latency;
2422 *max_latency = s->thread_info.max_latency;
/* Called from IO thread */
/* IO-thread part of pa_sink_set_latency_range(): store the new range
 * and notify everyone who depends on it. Callers are expected to have
 * clamped the bounds to the absolute limits already. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* Give every input a chance to react to the new range. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency was clamped against the old range,
     * so it must be recomputed. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source's range in sync with ours. */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2461 /* Called from main thread */
2462 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2463 pa_sink_assert_ref(s);
2464 pa_assert_ctl_context();
2466 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2467 pa_assert(latency == 0);
2468 return;
2471 if (latency < ABSOLUTE_MIN_LATENCY)
2472 latency = ABSOLUTE_MIN_LATENCY;
2474 if (latency > ABSOLUTE_MAX_LATENCY)
2475 latency = ABSOLUTE_MAX_LATENCY;
2477 if (PA_SINK_IS_LINKED(s->state))
2478 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2479 else
2480 s->thread_info.fixed_latency = latency;
2482 pa_source_set_fixed_latency(s->monitor_source, latency);
2485 /* Called from main thread */
2486 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2487 pa_usec_t latency;
2489 pa_sink_assert_ref(s);
2490 pa_assert_ctl_context();
2492 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2493 return 0;
2495 if (PA_SINK_IS_LINKED(s->state))
2496 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2497 else
2498 latency = s->thread_info.fixed_latency;
2500 return latency;
/* Called from IO thread */
/* IO-thread part of pa_sink_set_fixed_latency(): store the new fixed
 * latency and notify inputs and the monitor source. Only meaningful for
 * sinks without PA_SINK_DYNAMIC_LATENCY. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks have no fixed latency; callers must pass 0. */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Callers are expected to have clamped the value already. */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* Let the inputs react to the changed latency. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* For non-dynamic sinks the requested latency is derived from the
     * fixed latency, so it must be recomputed. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2535 /* Called from main context */
2536 size_t pa_sink_get_max_rewind(pa_sink *s) {
2537 size_t r;
2538 pa_sink_assert_ref(s);
2539 pa_assert_ctl_context();
2541 if (!PA_SINK_IS_LINKED(s->state))
2542 return s->thread_info.max_rewind;
2544 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2546 return r;
2549 /* Called from main context */
2550 size_t pa_sink_get_max_request(pa_sink *s) {
2551 size_t r;
2552 pa_sink_assert_ref(s);
2553 pa_assert_ctl_context();
2555 if (!PA_SINK_IS_LINKED(s->state))
2556 return s->thread_info.max_request;
2558 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2560 return r;
2563 /* Called from main context */
2564 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2565 pa_device_port *port;
2567 pa_sink_assert_ref(s);
2568 pa_assert_ctl_context();
2570 if (!s->set_port) {
2571 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2572 return -PA_ERR_NOTIMPLEMENTED;
2575 if (!s->ports)
2576 return -PA_ERR_NOENTITY;
2578 if (!(port = pa_hashmap_get(s->ports, name)))
2579 return -PA_ERR_NOENTITY;
2581 if (s->active_port == port) {
2582 s->save_port = s->save_port || save;
2583 return 0;
2586 if ((s->set_port(s, port)) < 0)
2587 return -PA_ERR_NOENTITY;
2589 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2591 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2593 s->active_port = port;
2594 s->save_port = save;
2596 return 0;
2599 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2600 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2602 pa_assert(p);
2604 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2605 return TRUE;
2607 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2609 if (pa_streq(ff, "microphone"))
2610 t = "audio-input-microphone";
2611 else if (pa_streq(ff, "webcam"))
2612 t = "camera-web";
2613 else if (pa_streq(ff, "computer"))
2614 t = "computer";
2615 else if (pa_streq(ff, "handset"))
2616 t = "phone";
2617 else if (pa_streq(ff, "portable"))
2618 t = "multimedia-player";
2619 else if (pa_streq(ff, "tv"))
2620 t = "video-display";
2623 * The following icons are not part of the icon naming spec,
2624 * because Rodney Dawes sucks as the maintainer of that spec.
2626 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2628 else if (pa_streq(ff, "headset"))
2629 t = "audio-headset";
2630 else if (pa_streq(ff, "headphone"))
2631 t = "audio-headphones";
2632 else if (pa_streq(ff, "speaker"))
2633 t = "audio-speakers";
2634 else if (pa_streq(ff, "hands-free"))
2635 t = "audio-handsfree";
2638 if (!t)
2639 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2640 if (pa_streq(c, "modem"))
2641 t = "modem";
2643 if (!t) {
2644 if (is_sink)
2645 t = "audio-card";
2646 else
2647 t = "audio-input-microphone";
2650 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2651 if (strstr(profile, "analog"))
2652 s = "-analog";
2653 else if (strstr(profile, "iec958"))
2654 s = "-iec958";
2655 else if (strstr(profile, "hdmi"))
2656 s = "-hdmi";
2659 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2661 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2663 return TRUE;
2666 pa_bool_t pa_device_init_description(pa_proplist *p) {
2667 const char *s, *d = NULL, *k;
2668 pa_assert(p);
2670 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2671 return TRUE;
2673 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2674 if (pa_streq(s, "internal"))
2675 d = _("Internal Audio");
2677 if (!d)
2678 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2679 if (pa_streq(s, "modem"))
2680 d = _("Modem");
2682 if (!d)
2683 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2685 if (!d)
2686 return FALSE;
2688 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2690 if (d && k)
2691 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2692 else if (d)
2693 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2695 return TRUE;
2698 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2699 const char *s;
2700 pa_assert(p);
2702 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2703 return TRUE;
2705 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2706 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2707 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2708 return TRUE;
2711 return FALSE;
2714 unsigned pa_device_init_priority(pa_proplist *p) {
2715 const char *s;
2716 unsigned priority = 0;
2718 pa_assert(p);
2720 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2722 if (pa_streq(s, "sound"))
2723 priority += 9000;
2724 else if (!pa_streq(s, "modem"))
2725 priority += 1000;
2728 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2730 if (pa_streq(s, "internal"))
2731 priority += 900;
2732 else if (pa_streq(s, "speaker"))
2733 priority += 500;
2734 else if (pa_streq(s, "headphone"))
2735 priority += 400;
2738 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2740 if (pa_streq(s, "pci"))
2741 priority += 50;
2742 else if (pa_streq(s, "usb"))
2743 priority += 40;
2744 else if (pa_streq(s, "bluetooth"))
2745 priority += 30;
2748 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2750 if (pa_startswith(s, "analog-"))
2751 priority += 9;
2752 else if (pa_startswith(s, "iec958-"))
2753 priority += 8;
2756 return priority;