src/pulsecore/source.c
1 /***
2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
27 #include <stdio.h>
28 #include <stdlib.h>
30 #include <pulse/utf8.h>
31 #include <pulse/xmalloc.h>
32 #include <pulse/timeval.h>
33 #include <pulse/util.h>
34 #include <pulse/rtclock.h>
35 #include <pulse/internal.h>
37 #include <pulsecore/core-util.h>
38 #include <pulsecore/source-output.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-subscribe.h>
41 #include <pulsecore/log.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/flist.h>
45 #include "source.h"
47 #define ABSOLUTE_MIN_LATENCY (500)
48 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
49 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
51 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
53 struct pa_source_volume_change {
54 pa_usec_t at;
55 pa_cvolume hw_volume;
57 PA_LLIST_FIELDS(pa_source_volume_change);
60 struct source_message_set_port {
61 pa_device_port *port;
62 int ret;
65 static void source_free(pa_object *o);
67 static void pa_source_volume_change_push(pa_source *s);
68 static void pa_source_volume_change_flush(pa_source *s);
70 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
71 pa_assert(data);
73 pa_zero(*data);
74 data->proplist = pa_proplist_new();
76 return data;
79 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
80 pa_assert(data);
82 pa_xfree(data->name);
83 data->name = pa_xstrdup(name);
86 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
87 pa_assert(data);
89 if ((data->sample_spec_is_set = !!spec))
90 data->sample_spec = *spec;
93 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
94 pa_assert(data);
96 if ((data->channel_map_is_set = !!map))
97 data->channel_map = *map;
100 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
101 pa_assert(data);
103 if ((data->volume_is_set = !!volume))
104 data->volume = *volume;
107 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
108 pa_assert(data);
110 data->muted_is_set = TRUE;
111 data->muted = !!mute;
114 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
115 pa_assert(data);
117 pa_xfree(data->active_port);
118 data->active_port = pa_xstrdup(port);
121 void pa_source_new_data_done(pa_source_new_data *data) {
122 pa_assert(data);
124 pa_proplist_free(data->proplist);
126 if (data->ports) {
127 pa_device_port *p;
129 while ((p = pa_hashmap_steal_first(data->ports)))
130 pa_device_port_free(p);
132 pa_hashmap_free(data->ports, NULL, NULL);
135 pa_xfree(data->name);
136 pa_xfree(data->active_port);
139 /* Called from main context */
140 static void reset_callbacks(pa_source *s) {
141 pa_assert(s);
143 s->set_state = NULL;
144 s->get_volume = NULL;
145 s->set_volume = NULL;
146 s->get_mute = NULL;
147 s->set_mute = NULL;
148 s->update_requested_latency = NULL;
149 s->set_port = NULL;
150 s->get_formats = NULL;
153 /* Called from main context */
154 pa_source* pa_source_new(
155 pa_core *core,
156 pa_source_new_data *data,
157 pa_source_flags_t flags) {
159 pa_source *s;
160 const char *name;
161 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
162 char *pt;
164 pa_assert(core);
165 pa_assert(data);
166 pa_assert(data->name);
167 pa_assert_ctl_context();
169 s = pa_msgobject_new(pa_source);
171 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
172 pa_log_debug("Failed to register name %s.", data->name);
173 pa_xfree(s);
174 return NULL;
177 pa_source_new_data_set_name(data, name);
179 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
180 pa_xfree(s);
181 pa_namereg_unregister(core, name);
182 return NULL;
185 /* FIXME, need to free s here on failure */
187 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
188 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
190 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
192 if (!data->channel_map_is_set)
193 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
195 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
196 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
198 /* FIXME: There should probably be a general function for checking whether
199 * the source volume is allowed to be set, like there is for source outputs. */
200 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
202 if (!data->volume_is_set) {
203 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
204 data->save_volume = FALSE;
207 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
208 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
210 if (!data->muted_is_set)
211 data->muted = FALSE;
213 if (data->card)
214 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
216 pa_device_init_description(data->proplist);
217 pa_device_init_icon(data->proplist, FALSE);
218 pa_device_init_intended_roles(data->proplist);
220 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
221 pa_xfree(s);
222 pa_namereg_unregister(core, name);
223 return NULL;
226 s->parent.parent.free = source_free;
227 s->parent.process_msg = pa_source_process_msg;
229 s->core = core;
230 s->state = PA_SOURCE_INIT;
231 s->flags = flags;
232 s->priority = 0;
233 s->suspend_cause = 0;
234 s->name = pa_xstrdup(name);
235 s->proplist = pa_proplist_copy(data->proplist);
236 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
237 s->module = data->module;
238 s->card = data->card;
240 s->priority = pa_device_init_priority(s->proplist);
242 s->sample_spec = data->sample_spec;
243 s->channel_map = data->channel_map;
245 s->outputs = pa_idxset_new(NULL, NULL);
246 s->n_corked = 0;
247 s->monitor_of = NULL;
248 s->output_from_master = NULL;
250 s->reference_volume = s->real_volume = data->volume;
251 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
252 s->base_volume = PA_VOLUME_NORM;
253 s->n_volume_steps = PA_VOLUME_NORM+1;
254 s->muted = data->muted;
255 s->refresh_volume = s->refresh_muted = FALSE;
257 reset_callbacks(s);
258 s->userdata = NULL;
260 s->asyncmsgq = NULL;
262 /* As a minor optimization we just steal the list instead of
263 * copying it here */
264 s->ports = data->ports;
265 data->ports = NULL;
267 s->active_port = NULL;
268 s->save_port = FALSE;
270 if (data->active_port && s->ports)
271 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
272 s->save_port = data->save_port;
274 if (!s->active_port && s->ports) {
275 void *state;
276 pa_device_port *p;
278 PA_HASHMAP_FOREACH(p, s->ports, state)
279 if (!s->active_port || p->priority > s->active_port->priority)
280 s->active_port = p;
283 s->save_volume = data->save_volume;
284 s->save_muted = data->save_muted;
286 pa_silence_memchunk_get(
287 &core->silence_cache,
288 core->mempool,
289 &s->silence,
290 &s->sample_spec,
293 s->thread_info.rtpoll = NULL;
294 s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
295 s->thread_info.soft_volume = s->soft_volume;
296 s->thread_info.soft_muted = s->muted;
297 s->thread_info.state = s->state;
298 s->thread_info.max_rewind = 0;
299 s->thread_info.requested_latency_valid = FALSE;
300 s->thread_info.requested_latency = 0;
301 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
302 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
303 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
305 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
306 s->thread_info.volume_changes_tail = NULL;
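/* Initialise current_hw_volume to the product of the soft and real volumes. */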
307 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
308 s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
309 s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;
311 /* FIXME: This should probably be moved to pa_source_put() */
312 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
314 if (s->card)
315 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
317 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
318 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
319 s->index,
320 s->name,
321 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
322 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
323 pt);
324 pa_xfree(pt);
326 return s;
329 /* Called from main context */
330 static int source_set_state(pa_source *s, pa_source_state_t state) {
331 int ret;
332 pa_bool_t suspend_change;
333 pa_source_state_t original_state;
335 pa_assert(s);
336 pa_assert_ctl_context();
338 if (s->state == state)
339 return 0;
341 original_state = s->state;
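/* Does this transition cross the suspended/opened boundary in either direction? */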
343 suspend_change =
344 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
345 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
347 if (s->set_state)
348 if ((ret = s->set_state(s, state)) < 0)
349 return ret;
351 if (s->asyncmsgq)
352 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
354 if (s->set_state)
355 s->set_state(s, original_state);
357 return ret;
360 s->state = state;
362         if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
363 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
364 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
367 if (suspend_change) {
368 pa_source_output *o;
369 uint32_t idx;
371 /* We're suspending or resuming, tell everyone about it */
373 PA_IDXSET_FOREACH(o, s->outputs, idx)
374 if (s->state == PA_SOURCE_SUSPENDED &&
375 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
376 pa_source_output_kill(o);
377 else if (o->suspend)
378 o->suspend(o, state == PA_SOURCE_SUSPENDED);
381 return 0;
384 /* Called from main context */
385 void pa_source_put(pa_source *s) {
386 pa_source_assert_ref(s);
387 pa_assert_ctl_context();
389 pa_assert(s->state == PA_SOURCE_INIT);
390 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);
392 /* The following fields must be initialized properly when calling _put() */
393 pa_assert(s->asyncmsgq);
394 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
396 /* Generally, flags should be initialized via pa_source_new(). As a
397 * special exception we allow volume related flags to be set
398 * between _new() and _put(). */
400 /* XXX: Currently decibel volume is disabled for all sources that use volume
401 * sharing. When the master source supports decibel volume, it would be good
402 * to have the flag also in the filter source, but currently we don't do that
403 * so that the flags of the filter source never change when it's moved from
404 * a master source to another. One solution for this problem would be to
405 * remove user-visible volume altogether from filter sources when volume
406 * sharing is used, but the current approach was easier to implement... */
407 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
408 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
410 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME) && s->core->flat_volumes)
411 s->flags |= PA_SOURCE_FLAT_VOLUME;
413 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
414 pa_source *root_source = s->output_from_master->source;
416 while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
417 root_source = root_source->output_from_master->source;
419 s->reference_volume = root_source->reference_volume;
420 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
422 s->real_volume = root_source->real_volume;
423 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
424 } else
425         /* We assume that if the source implementor changed the default
426 * volume he did so in real_volume, because that is the usual
427 * place where he is supposed to place his changes. */
428 s->reference_volume = s->real_volume;
430 s->thread_info.soft_volume = s->soft_volume;
431 s->thread_info.soft_muted = s->muted;
432 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
434 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
435 || (s->base_volume == PA_VOLUME_NORM
436 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
437 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
438 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
439 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
440 pa_assert(!(s->flags & PA_SOURCE_SYNC_VOLUME) || (s->flags & PA_SOURCE_HW_VOLUME_CTRL));
441 pa_assert(!(s->flags & PA_SOURCE_SYNC_VOLUME) || s->write_volume);
442 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
444 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
446 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
447 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
450 /* Called from main context */
451 void pa_source_unlink(pa_source *s) {
452 pa_bool_t linked;
453 pa_source_output *o, *j = NULL;
455 pa_assert(s);
456 pa_assert_ctl_context();
458     /* See pa_sink_unlink() for a couple of comments on how this function
459 * works. */
461 linked = PA_SOURCE_IS_LINKED(s->state);
463 if (linked)
464 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
466 if (s->state != PA_SOURCE_UNLINKED)
467 pa_namereg_unregister(s->core, s->name);
468 pa_idxset_remove_by_data(s->core->sources, s, NULL);
470 if (s->card)
471 pa_idxset_remove_by_data(s->card->sources, s, NULL);
473 while ((o = pa_idxset_first(s->outputs, NULL))) {
474 pa_assert(o != j);
475 pa_source_output_kill(o);
476 j = o;
479 if (linked)
480 source_set_state(s, PA_SOURCE_UNLINKED);
481 else
482 s->state = PA_SOURCE_UNLINKED;
484 reset_callbacks(s);
486 if (linked) {
487 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
488 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
492 /* Called from main context */
493 static void source_free(pa_object *o) {
494 pa_source_output *so;
495 pa_source *s = PA_SOURCE(o);
497 pa_assert(s);
498 pa_assert_ctl_context();
499 pa_assert(pa_source_refcnt(s) == 0);
501 if (PA_SOURCE_IS_LINKED(s->state))
502 pa_source_unlink(s);
504 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
506 pa_idxset_free(s->outputs, NULL, NULL);
508 while ((so = pa_hashmap_steal_first(s->thread_info.outputs)))
509 pa_source_output_unref(so);
511 pa_hashmap_free(s->thread_info.outputs, NULL, NULL);
513 if (s->silence.memblock)
514 pa_memblock_unref(s->silence.memblock);
516 pa_xfree(s->name);
517 pa_xfree(s->driver);
519 if (s->proplist)
520 pa_proplist_free(s->proplist);
522 if (s->ports) {
523 pa_device_port *p;
525 while ((p = pa_hashmap_steal_first(s->ports)))
526 pa_device_port_free(p);
528 pa_hashmap_free(s->ports, NULL, NULL);
531 pa_xfree(s);
534 /* Called from main context, and not while the IO thread is active, please */
535 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
536 pa_source_assert_ref(s);
537 pa_assert_ctl_context();
539 s->asyncmsgq = q;
542 /* Called from main context, and not while the IO thread is active, please */
543 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
544 pa_source_assert_ref(s);
545 pa_assert_ctl_context();
547 if (mask == 0)
548 return;
550 /* For now, allow only a minimal set of flags to be changed. */
551 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
553 s->flags = (s->flags & ~mask) | (value & mask);
556 /* Called from IO context, or before _put() from main context */
557 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
558 pa_source_assert_ref(s);
559 pa_source_assert_io_context(s);
561 s->thread_info.rtpoll = p;
564 /* Called from main context */
565 int pa_source_update_status(pa_source*s) {
566 pa_source_assert_ref(s);
567 pa_assert_ctl_context();
568 pa_assert(PA_SOURCE_IS_LINKED(s->state));
570 if (s->state == PA_SOURCE_SUSPENDED)
571 return 0;
573 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
576 /* Called from main context */
577 int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
578 pa_source_assert_ref(s);
579 pa_assert_ctl_context();
580 pa_assert(PA_SOURCE_IS_LINKED(s->state));
581 pa_assert(cause != 0);
583 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
584 return -PA_ERR_NOTSUPPORTED;
586 if (suspend)
587 s->suspend_cause |= cause;
588 else
589 s->suspend_cause &= ~cause;
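/* Nothing to do if the suspend causes already match the current state. */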
591 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
592 return 0;
594 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
596 if (s->suspend_cause)
597 return source_set_state(s, PA_SOURCE_SUSPENDED);
598 else
599 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
602 /* Called from main context */
603 int pa_source_sync_suspend(pa_source *s) {
604 pa_sink_state_t state;
606 pa_source_assert_ref(s);
607 pa_assert_ctl_context();
608 pa_assert(PA_SOURCE_IS_LINKED(s->state));
609 pa_assert(s->monitor_of);
611 state = pa_sink_get_state(s->monitor_of);
613 if (state == PA_SINK_SUSPENDED)
614 return source_set_state(s, PA_SOURCE_SUSPENDED);
616 pa_assert(PA_SINK_IS_OPENED(state));
618 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
621 /* Called from main context */
622 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
623 pa_source_output *o, *n;
624 uint32_t idx;
626 pa_source_assert_ref(s);
627 pa_assert_ctl_context();
628 pa_assert(PA_SOURCE_IS_LINKED(s->state));
630 if (!q)
631 q = pa_queue_new();
633 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
634 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
636 pa_source_output_ref(o);
638 if (pa_source_output_start_move(o) >= 0)
639 pa_queue_push(q, o);
640 else
641 pa_source_output_unref(o);
644 return q;
647 /* Called from main context */
648 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
649 pa_source_output *o;
651 pa_source_assert_ref(s);
652 pa_assert_ctl_context();
653 pa_assert(PA_SOURCE_IS_LINKED(s->state));
654 pa_assert(q);
656 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
657 if (pa_source_output_finish_move(o, s, save) < 0)
658 pa_source_output_fail_move(o);
660 pa_source_output_unref(o);
663 pa_queue_free(q, NULL, NULL);
666 /* Called from main context */
667 void pa_source_move_all_fail(pa_queue *q) {
668 pa_source_output *o;
670 pa_assert_ctl_context();
671 pa_assert(q);
673 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
674 pa_source_output_fail_move(o);
675 pa_source_output_unref(o);
678 pa_queue_free(q, NULL, NULL);
681 /* Called from IO thread context */
682 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
683 pa_source_output *o;
684 void *state = NULL;
686 pa_source_assert_ref(s);
687 pa_source_assert_io_context(s);
688 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
690 if (nbytes <= 0)
691 return;
693 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
694 return;
696 pa_log_debug("Processing rewind...");
698 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
699 pa_source_output_assert_ref(o);
700 pa_source_output_process_rewind(o, nbytes);
704 /* Called from IO thread context */
705 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
706 pa_source_output *o;
707 void *state = NULL;
709 pa_source_assert_ref(s);
710 pa_source_assert_io_context(s);
711 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
712 pa_assert(chunk);
714 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
715 return;
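/* Apply any software volume or mute to a writable copy of the chunk before pushing it to the outputs. */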
717 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
718 pa_memchunk vchunk = *chunk;
720 pa_memblock_ref(vchunk.memblock);
721 pa_memchunk_make_writable(&vchunk, 0);
723 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
724 pa_silence_memchunk(&vchunk, &s->sample_spec);
725 else
726 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
728 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
729 pa_source_output_assert_ref(o);
731 if (!o->thread_info.direct_on_input)
732 pa_source_output_push(o, &vchunk);
735 pa_memblock_unref(vchunk.memblock);
736 } else {
738 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
739 pa_source_output_assert_ref(o);
741 if (!o->thread_info.direct_on_input)
742 pa_source_output_push(o, chunk);
747 /* Called from IO thread context */
748 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
749 pa_source_assert_ref(s);
750 pa_source_assert_io_context(s);
751 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
752 pa_source_output_assert_ref(o);
753 pa_assert(o->thread_info.direct_on_input);
754 pa_assert(chunk);
756 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
757 return;
759 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
760 pa_memchunk vchunk = *chunk;
762 pa_memblock_ref(vchunk.memblock);
763 pa_memchunk_make_writable(&vchunk, 0);
765 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
766 pa_silence_memchunk(&vchunk, &s->sample_spec);
767 else
768 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
770 pa_source_output_push(o, &vchunk);
772 pa_memblock_unref(vchunk.memblock);
773 } else
774 pa_source_output_push(o, chunk);
777 /* Called from main thread */
778 pa_usec_t pa_source_get_latency(pa_source *s) {
779 pa_usec_t usec;
781 pa_source_assert_ref(s);
782 pa_assert_ctl_context();
783 pa_assert(PA_SOURCE_IS_LINKED(s->state));
785 if (s->state == PA_SOURCE_SUSPENDED)
786 return 0;
788 if (!(s->flags & PA_SOURCE_LATENCY))
789 return 0;
791 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
793 return usec;
796 /* Called from IO thread */
797 pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
798 pa_usec_t usec = 0;
799 pa_msgobject *o;
801 pa_source_assert_ref(s);
802 pa_source_assert_io_context(s);
803 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
805 /* The returned value is supposed to be in the time domain of the sound card! */
807 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
808 return 0;
810 if (!(s->flags & PA_SOURCE_LATENCY))
811 return 0;
813 o = PA_MSGOBJECT(s);
815 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
817 if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
818 return -1;
820 return usec;
823 /* Called from the main thread (and also from the IO thread while the main
824 * thread is waiting).
826 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
827 * set. Instead, flat volume mode is detected by checking whether the root source
828 * has the flag set. */
829 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
830 pa_source_assert_ref(s);
832 while (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
833 s = s->output_from_master->source;
835 return (s->flags & PA_SOURCE_FLAT_VOLUME);
838 /* Called from main context */
839 pa_bool_t pa_source_is_passthrough(pa_source *s) {
841 pa_source_assert_ref(s);
843 /* NB Currently only monitor sources support passthrough mode */
844 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
847 /* Called from main context. */
848 static void compute_reference_ratio(pa_source_output *o) {
849 unsigned c = 0;
850 pa_cvolume remapped;
852 pa_assert(o);
853 pa_assert(pa_source_flat_volume_enabled(o->source));
856 * Calculates the reference ratio from the source's reference
857 * volume. This basically calculates:
859 * o->reference_ratio = o->volume / o->source->reference_volume
862 remapped = o->source->reference_volume;
863 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
865 o->reference_ratio.channels = o->sample_spec.channels;
867 for (c = 0; c < o->sample_spec.channels; c++) {
869 /* We don't update when the source volume is 0 anyway */
870 if (remapped.values[c] <= PA_VOLUME_MUTED)
871 continue;
873 /* Don't update the reference ratio unless necessary */
874 if (pa_sw_volume_multiply(
875 o->reference_ratio.values[c],
876 remapped.values[c]) == o->volume.values[c])
877 continue;
879 o->reference_ratio.values[c] = pa_sw_volume_divide(
880 o->volume.values[c],
881 remapped.values[c]);
885 /* Called from main context. Only called for the root source in volume sharing
886 * cases, except for internal recursive calls. */
887 static void compute_reference_ratios(pa_source *s) {
888 uint32_t idx;
889 pa_source_output *o;
891 pa_source_assert_ref(s);
892 pa_assert_ctl_context();
893 pa_assert(PA_SOURCE_IS_LINKED(s->state));
894 pa_assert(pa_source_flat_volume_enabled(s));
896 PA_IDXSET_FOREACH(o, s->outputs, idx) {
897 compute_reference_ratio(o);
899 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
900 compute_reference_ratios(o->destination_source);
904 /* Called from main context. Only called for the root source in volume sharing
905 * cases, except for internal recursive calls. */
906 static void compute_real_ratios(pa_source *s) {
907 pa_source_output *o;
908 uint32_t idx;
910 pa_source_assert_ref(s);
911 pa_assert_ctl_context();
912 pa_assert(PA_SOURCE_IS_LINKED(s->state));
913 pa_assert(pa_source_flat_volume_enabled(s));
915 PA_IDXSET_FOREACH(o, s->outputs, idx) {
916 unsigned c;
917 pa_cvolume remapped;
919 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
920             /* The origin source uses volume sharing, so this output's real ratio
921 * is handled as a special case - the real ratio must be 0 dB, and
922              * as a result o->soft_volume must equal o->volume_factor. */
923 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
924 o->soft_volume = o->volume_factor;
926 compute_real_ratios(o->destination_source);
928 continue;
932 * This basically calculates:
934          * o->real_ratio := o->volume / s->real_volume
935          * o->soft_volume := o->real_ratio * o->volume_factor
938 remapped = s->real_volume;
939 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
941 o->real_ratio.channels = o->sample_spec.channels;
942 o->soft_volume.channels = o->sample_spec.channels;
944 for (c = 0; c < o->sample_spec.channels; c++) {
946 if (remapped.values[c] <= PA_VOLUME_MUTED) {
947 /* We leave o->real_ratio untouched */
948 o->soft_volume.values[c] = PA_VOLUME_MUTED;
949 continue;
952 /* Don't lose accuracy unless necessary */
953 if (pa_sw_volume_multiply(
954 o->real_ratio.values[c],
955 remapped.values[c]) != o->volume.values[c])
957 o->real_ratio.values[c] = pa_sw_volume_divide(
958 o->volume.values[c],
959 remapped.values[c]);
961 o->soft_volume.values[c] = pa_sw_volume_multiply(
962 o->real_ratio.values[c],
963 o->volume_factor.values[c]);
966 /* We don't copy the soft_volume to the thread_info data
967 * here. That must be done by the caller */
971 static pa_cvolume *cvolume_remap_minimal_impact(
972 pa_cvolume *v,
973 const pa_cvolume *template,
974 const pa_channel_map *from,
975 const pa_channel_map *to) {
977 pa_cvolume t;
979 pa_assert(v);
980 pa_assert(template);
981 pa_assert(from);
982 pa_assert(to);
983 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
984 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
986 /* Much like pa_cvolume_remap(), but tries to minimize impact when
987 * mapping from source output to source volumes:
989 * If template is a possible remapping from v it is used instead
990 * of remapping anew.
992 * If the channel maps don't match we set an all-channel volume on
993 * the source to ensure that changing a volume on one stream has no
994 * effect that cannot be compensated for in another stream that
995 * does not have the same channel map as the source. */
997 if (pa_channel_map_equal(from, to))
998 return v;
1000 t = *template;
1001 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1002 *v = *template;
1003 return v;
1006 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1007 return v;
1010 /* Called from main thread. Only called for the root source in volume sharing
1011 * cases, except for internal recursive calls. */
1012 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1013 pa_source_output *o;
1014 uint32_t idx;
1016 pa_source_assert_ref(s);
1017 pa_assert(max_volume);
1018 pa_assert(channel_map);
1019 pa_assert(pa_source_flat_volume_enabled(s));
1021 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1022 pa_cvolume remapped;
1024 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1025 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1027 /* Ignore this output. The origin source uses volume sharing, so this
1028 * output's volume will be set to be equal to the root source's real
1029              * volume. Obviously this output's current volume must not then
1030 * affect what the root source's real volume will be. */
1031 continue;
1034 remapped = o->volume;
1035 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1036 pa_cvolume_merge(max_volume, max_volume, &remapped);
1040 /* Called from main thread. Only called for the root source in volume sharing
1041 * cases, except for internal recursive calls. */
1042 static pa_bool_t has_outputs(pa_source *s) {
1043 pa_source_output *o;
1044 uint32_t idx;
1046 pa_source_assert_ref(s);
1048 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1049 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1050 return TRUE;
1053 return FALSE;
1056 /* Called from main thread. Only called for the root source in volume sharing
1057 * cases, except for internal recursive calls. */
1058 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1059 pa_source_output *o;
1060 uint32_t idx;
1062 pa_source_assert_ref(s);
1063 pa_assert(new_volume);
1064 pa_assert(channel_map);
1066 s->real_volume = *new_volume;
1067 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1069 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1070 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1071 if (pa_source_flat_volume_enabled(s)) {
1072 pa_cvolume old_volume = o->volume;
1074 /* Follow the root source's real volume. */
1075 o->volume = *new_volume;
1076 pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
1077 compute_reference_ratio(o);
1079 /* The volume changed, let's tell people so */
1080 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1081 if (o->volume_changed)
1082 o->volume_changed(o);
1084 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1088 update_real_volume(o->destination_source, new_volume, channel_map);
1093 /* Called from main thread. Only called for the root source in shared volume
1094 * cases. */
1095 static void compute_real_volume(pa_source *s) {
1096 pa_source_assert_ref(s);
1097 pa_assert_ctl_context();
1098 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1099 pa_assert(pa_source_flat_volume_enabled(s));
1100 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1102 /* This determines the maximum volume of all streams and sets
1103 * s->real_volume accordingly. */
1105 if (!has_outputs(s)) {
1106 /* In the special case that we have no source outputs we leave the
1107 * volume unmodified. */
1108 update_real_volume(s, &s->reference_volume, &s->channel_map);
1109 return;
1112 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1114 /* First let's determine the new maximum volume of all outputs
1115 * connected to this source */
1116 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1117 update_real_volume(s, &s->real_volume, &s->channel_map);
1119 /* Then, let's update the real ratios/soft volumes of all outputs
1120 * connected to this source */
1121 compute_real_ratios(s);
1124 /* Called from main thread. Only called for the root source in shared volume
1125 * cases, except for internal recursive calls. */
1126 static void propagate_reference_volume(pa_source *s) {
1127 pa_source_output *o;
1128 uint32_t idx;
1130 pa_source_assert_ref(s);
1131 pa_assert_ctl_context();
1132 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1133 pa_assert(pa_source_flat_volume_enabled(s));
1135     /* This is called whenever the source volume changes for a reason
1136      * other than a source output volume change. We need to fix up the
1137 * source output volumes accordingly */
1139 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1140 pa_cvolume old_volume;
1142 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1143 propagate_reference_volume(o->destination_source);
1145 /* Since the origin source uses volume sharing, this output's volume
1146 * needs to be updated to match the root source's real volume, but
1147 * that will be done later in update_shared_real_volume(). */
1148 continue;
1151 old_volume = o->volume;
1153 /* This basically calculates:
1155 * o->volume := o->reference_volume * o->reference_ratio */
1157 o->volume = s->reference_volume;
1158 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1159 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1161 /* The volume changed, let's tell people so */
1162 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1164 if (o->volume_changed)
1165 o->volume_changed(o);
1167 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1172 /* Called from main thread. Only called for the root source in volume sharing
1173 * cases, except for internal recursive calls. The return value indicates
1174 * whether any reference volume actually changed. */
1175 static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
1176 pa_cvolume volume;
1177 pa_bool_t reference_volume_changed;
1178 pa_source_output *o;
1179 uint32_t idx;
1181 pa_source_assert_ref(s);
1182 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1183 pa_assert(v);
1184 pa_assert(channel_map);
1185 pa_assert(pa_cvolume_valid(v));
1187 volume = *v;
1188 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1190 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1191 s->reference_volume = volume;
1193 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1195 if (reference_volume_changed)
1196 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1197 else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1198 /* If the root source's volume doesn't change, then there can't be any
1199          * changes in the other sources in the source tree either.
1201 * It's probably theoretically possible that even if the root source's
1202 * volume changes slightly, some filter source doesn't change its volume
1203 * due to rounding errors. If that happens, we still want to propagate
1204 * the changed root source volume to the sources connected to the
1205 * intermediate source that didn't change its volume. This theoretical
1206          * possibility is the reason why we have that !(s->flags &
1207 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1208          * notice even if we always returned FALSE here when
1209 * reference_volume_changed is FALSE. */
1210 return FALSE;
1212 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1213 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1214 update_reference_volume(o->destination_source, v, channel_map, FALSE);
1217 return TRUE;
1220 /* Called from main thread */
1221 void pa_source_set_volume(
1222 pa_source *s,
1223 const pa_cvolume *volume,
1224 pa_bool_t send_msg,
1225 pa_bool_t save) {
1227 pa_cvolume new_reference_volume;
1228 pa_source *root_source = s;
1230 pa_source_assert_ref(s);
1231 pa_assert_ctl_context();
1232 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1233 pa_assert(!volume || pa_cvolume_valid(volume));
1234 pa_assert(volume || pa_source_flat_volume_enabled(s));
1235 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1237 /* make sure we don't change the volume when a PASSTHROUGH output is connected */
1238 if (pa_source_is_passthrough(s)) {
1239 /* FIXME: Need to notify client that volume control is disabled */
1240 pa_log_warn("Cannot change volume, Source is monitor of a PASSTHROUGH sink");
1241 return;
1244 /* In case of volume sharing, the volume is set for the root source first,
1245 * from which it's then propagated to the sharing sources. */
1246 while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1247 root_source = root_source->output_from_master->source;
1249 /* As a special exception we accept mono volumes on all sources --
1250 * even on those with more complex channel maps */
1252 if (volume) {
1253 if (pa_cvolume_compatible(volume, &s->sample_spec))
1254 new_reference_volume = *volume;
1255 else {
1256 new_reference_volume = s->reference_volume;
1257 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1260 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1263 /* If volume is NULL we synchronize the source's real and reference
1264 * volumes with the stream volumes. If it is not NULL we update
1265 * the reference_volume with it. */
1267 if (volume) {
1268 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1269 if (pa_source_flat_volume_enabled(root_source)) {
1270 /* OK, propagate this volume change back to the outputs */
1271 propagate_reference_volume(root_source);
1273 /* And now recalculate the real volume */
1274 compute_real_volume(root_source);
1275 } else
1276 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1279 } else {
1280 pa_assert(pa_source_flat_volume_enabled(root_source));
1282 /* Ok, let's determine the new real volume */
1283 compute_real_volume(root_source);
1285 /* Let's 'push' the reference volume if necessary */
1286 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
1287 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1289 /* Now that the reference volume is updated, we can update the streams'
1290 * reference ratios. */
1291 compute_reference_ratios(root_source);
1294 if (root_source->set_volume) {
1295 /* If we have a function set_volume(), then we do not apply a
1296 * soft volume by default. However, set_volume() is free to
1297 * apply one to root_source->soft_volume */
1299 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1300 if (!(root_source->flags & PA_SOURCE_SYNC_VOLUME))
1301 root_source->set_volume(root_source);
1303 } else
1304 /* If we have no function set_volume(), then the soft volume
1305 * becomes the real volume */
1306 root_source->soft_volume = root_source->real_volume;
1308 /* This tells the source that soft volume and/or real volume changed */
1309 if (send_msg)
1310 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
1313 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1314 * Only to be called by source implementor */
1315 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1317 pa_source_assert_ref(s);
1318 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1320 if (s->flags & PA_SOURCE_SYNC_VOLUME)
1321 pa_source_assert_io_context(s);
1322 else
1323 pa_assert_ctl_context();
1325 if (!volume)
1326 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1327 else
1328 s->soft_volume = *volume;
1330 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_SYNC_VOLUME))
1331 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1332 else
1333 s->thread_info.soft_volume = s->soft_volume;
1336 /* Called from the main thread. Only called for the root source in volume sharing
1337 * cases, except for internal recursive calls. */
1338 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1339 pa_source_output *o;
1340 uint32_t idx;
1342 pa_source_assert_ref(s);
1343 pa_assert(old_real_volume);
1344 pa_assert_ctl_context();
1345 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1347 /* This is called when the hardware's real volume changes due to
1348 * some external event. We copy the real volume into our
1349 * reference volume and then rebuild the stream volumes based on
1350      * o->real_ratio which should stay fixed. */
1352 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1353 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1354 return;
1356 /* 1. Make the real volume the reference volume */
1357 update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
1360 if (pa_source_flat_volume_enabled(s)) {
1362 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1363 pa_cvolume old_volume = o->volume;
1365 /* 2. Since the source's reference and real volumes are equal
1366 * now our ratios should be too. */
1367 o->reference_ratio = o->real_ratio;
1369 /* 3. Recalculate the new stream reference volume based on the
1370              * reference ratio and the source's reference volume.
1372 * This basically calculates:
1374 * o->volume = s->reference_volume * o->reference_ratio
1376 * This is identical to propagate_reference_volume() */
1377 o->volume = s->reference_volume;
1378 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1379 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1381 /* Notify if something changed */
1382 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1384 if (o->volume_changed)
1385 o->volume_changed(o);
1387 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1390 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1391 propagate_real_volume(o->destination_source, old_real_volume);
1395 /* Something got changed in the hardware. It probably makes sense
1396 * to save changed hw settings given that hw volume changes not
1397 * triggered by PA are almost certainly done by the user. */
1398 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1399 s->save_volume = TRUE;
1402 /* Called from io thread */
1403 void pa_source_update_volume_and_mute(pa_source *s) {
1404 pa_assert(s);
1405 pa_source_assert_io_context(s);
1407 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1410 /* Called from main thread */
1411 const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
1412 pa_source_assert_ref(s);
1413 pa_assert_ctl_context();
1414 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1416 if (s->refresh_volume || force_refresh) {
1417 struct pa_cvolume old_real_volume;
1419 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1421 old_real_volume = s->real_volume;
1423 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume)
1424 s->get_volume(s);
1426 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1428 update_real_volume(s, &s->real_volume, &s->channel_map);
1429 propagate_real_volume(s, &old_real_volume);
1432 return &s->reference_volume;
1435 /* Called from main thread. In volume sharing cases, only the root source may
1436 * call this. */
1437 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1438 pa_cvolume old_real_volume;
1440 pa_source_assert_ref(s);
1441 pa_assert_ctl_context();
1442 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1443 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1445 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1447 old_real_volume = s->real_volume;
1448 update_real_volume(s, new_real_volume, &s->channel_map);
1449 propagate_real_volume(s, &old_real_volume);
1452 /* Called from main thread */
1453 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1454 pa_bool_t old_muted;
1456 pa_source_assert_ref(s);
1457 pa_assert_ctl_context();
1458 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1460 old_muted = s->muted;
1461 s->muted = mute;
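/* Keep the previous save flag only if the mute state is unchanged; an explicit save request always wins. */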
1462 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1464 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->set_mute)
1465 s->set_mute(s);
1467 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1469 if (old_muted != s->muted)
1470 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1473 /* Called from main thread */
1474 pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {
1476 pa_source_assert_ref(s);
1477 pa_assert_ctl_context();
1478 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1480 if (s->refresh_muted || force_refresh) {
1481 pa_bool_t old_muted = s->muted;
1483 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_mute)
1484 s->get_mute(s);
1486 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1488 if (old_muted != s->muted) {
1489 s->save_muted = TRUE;
1491 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1493 /* Make sure the soft mute status stays in sync */
1494 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1498 return s->muted;
1501 /* Called from main thread */
1502 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1503 pa_source_assert_ref(s);
1504 pa_assert_ctl_context();
1505 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1507 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1509 if (s->muted == new_muted)
1510 return;
1512 s->muted = new_muted;
1513 s->save_muted = TRUE;
1515 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1518 /* Called from main thread */
1519 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1520 pa_source_assert_ref(s);
1521 pa_assert_ctl_context();
1523 if (p)
1524 pa_proplist_update(s->proplist, mode, p);
1526 if (PA_SOURCE_IS_LINKED(s->state)) {
1527 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1528 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1531 return TRUE;
1534 /* Called from main thread */
1535 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1536 void pa_source_set_description(pa_source *s, const char *description) {
1537 const char *old;
1538 pa_source_assert_ref(s);
1539 pa_assert_ctl_context();
1541 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1542 return;
1544 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1546 if (old && description && pa_streq(old, description))
1547 return;
1549 if (description)
1550 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1551 else
1552 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1554 if (PA_SOURCE_IS_LINKED(s->state)) {
1555 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1556 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1560 /* Called from main thread */
1561 unsigned pa_source_linked_by(pa_source *s) {
1562 pa_source_assert_ref(s);
1563 pa_assert_ctl_context();
1564 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1566 return pa_idxset_size(s->outputs);
1569 /* Called from main thread */
1570 unsigned pa_source_used_by(pa_source *s) {
1571 unsigned ret;
1573 pa_source_assert_ref(s);
1574 pa_assert_ctl_context();
1575 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1577 ret = pa_idxset_size(s->outputs);
1578 pa_assert(ret >= s->n_corked);
1580 return ret - s->n_corked;
1583 /* Called from main thread */
1584 unsigned pa_source_check_suspend(pa_source *s) {
1585 unsigned ret;
1586 pa_source_output *o;
1587 uint32_t idx;
1589 pa_source_assert_ref(s);
1590 pa_assert_ctl_context();
1592 if (!PA_SOURCE_IS_LINKED(s->state))
1593 return 0;
1595 ret = 0;
1597 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1598 pa_source_output_state_t st;
1600 st = pa_source_output_get_state(o);
1602 /* We do not assert here. It is perfectly valid for a source output to
1603 * be in the INIT state (i.e. created, marked done but not yet put)
1604 * and we should not care if it's unlinked as it won't contribute
1605          * towards our busy status.
1607 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1608 continue;
1610 if (st == PA_SOURCE_OUTPUT_CORKED)
1611 continue;
1613 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1614 continue;
1616 ret ++;
1619 return ret;
1622 /* Called from the IO thread */
1623 static void sync_output_volumes_within_thread(pa_source *s) {
1624 pa_source_output *o;
1625 void *state = NULL;
1627 pa_source_assert_ref(s);
1628 pa_source_assert_io_context(s);
1630 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1631 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1632 continue;
1634 o->thread_info.soft_volume = o->soft_volume;
1635 //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
1639 /* Called from the IO thread. Only called for the root source in volume sharing
1640 * cases, except for internal recursive calls. */
1641 static void set_shared_volume_within_thread(pa_source *s) {
1642 pa_source_output *o;
1643 void *state = NULL;
1645 pa_source_assert_ref(s);
1647 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1649 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1650 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1651 set_shared_volume_within_thread(o->destination_source);
1655 /* Called from IO thread, except when it is not */
1656 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1657 pa_source *s = PA_SOURCE(object);
1658 pa_source_assert_ref(s);
1660 switch ((pa_source_message_t) code) {
1662 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
1663 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
1665 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
1667 if (o->direct_on_input) {
1668 o->thread_info.direct_on_input = o->direct_on_input;
1669 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
1672 pa_assert(!o->thread_info.attached);
1673 o->thread_info.attached = TRUE;
1675 if (o->attach)
1676 o->attach(o);
1678 pa_source_output_set_state_within_thread(o, o->state);
1680 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
1681 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
1683 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
1685 /* We don't just invalidate the requested latency here,
1686 * because if we are in a move we might need to fix up the
1687 * requested latency. */
1688 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
1690 /* In flat volume mode we need to update the volume as
1691 * well */
1692 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
1695 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
1696 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
1698 pa_source_output_set_state_within_thread(o, o->state);
1700 if (o->detach)
1701 o->detach(o);
1703 pa_assert(o->thread_info.attached);
1704 o->thread_info.attached = FALSE;
1706 if (o->thread_info.direct_on_input) {
1707 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
1708 o->thread_info.direct_on_input = NULL;
1711 if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
1712 pa_source_output_unref(o);
1714 pa_source_invalidate_requested_latency(s, TRUE);
1716 /* In flat volume mode we need to update the volume as
1717 * well */
1718 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
1721 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
1722 pa_source *root_source = s;
1724 while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1725 root_source = root_source->output_from_master->source;
1727 set_shared_volume_within_thread(root_source);
1728 return 0;
1731 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
1733 if (s->flags & PA_SOURCE_SYNC_VOLUME) {
1734 s->set_volume(s);
1735 pa_source_volume_change_push(s);
1737 /* Fall through ... */
1739 case PA_SOURCE_MESSAGE_SET_VOLUME:
1741 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1742 s->thread_info.soft_volume = s->soft_volume;
1745 /* Fall through ... */
1747 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
1748 sync_output_volumes_within_thread(s);
1749 return 0;
1751 case PA_SOURCE_MESSAGE_GET_VOLUME:
1753 if ((s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume) {
1754 s->get_volume(s);
1755 pa_source_volume_change_flush(s);
1756 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
1759 /* In case source implementor reset SW volume. */
1760 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1761 s->thread_info.soft_volume = s->soft_volume;
1764 return 0;
1766 case PA_SOURCE_MESSAGE_SET_MUTE:
1768 if (s->thread_info.soft_muted != s->muted) {
1769 s->thread_info.soft_muted = s->muted;
1772 if (s->flags & PA_SOURCE_SYNC_VOLUME && s->set_mute)
1773 s->set_mute(s);
1775 return 0;
1777 case PA_SOURCE_MESSAGE_GET_MUTE:
1779 if (s->flags & PA_SOURCE_SYNC_VOLUME && s->get_mute)
1780 s->get_mute(s);
1782 return 0;
1784 case PA_SOURCE_MESSAGE_SET_STATE: {
1786 pa_bool_t suspend_change =
1787 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1788 (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);
1790 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1792 if (suspend_change) {
1793 pa_source_output *o;
1794 void *state = NULL;
1796 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
1797 if (o->suspend_within_thread)
1798 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
1801 return 0;
1804 case PA_SOURCE_MESSAGE_DETACH:
1806 /* Detach all streams */
1807 pa_source_detach_within_thread(s);
1808 return 0;
1810 case PA_SOURCE_MESSAGE_ATTACH:
1812 /* Reattach all streams */
1813 pa_source_attach_within_thread(s);
1814 return 0;
1816 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
1818 pa_usec_t *usec = userdata;
1819 *usec = pa_source_get_requested_latency_within_thread(s);
1821 /* Yes, that's right: the IO thread will see -1 when no
1822 * explicit requested latency is configured, while the main
1823 * thread will see max_latency */
1824 if (*usec == (pa_usec_t) -1)
1825 *usec = s->thread_info.max_latency;
1827 return 0;
1830 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
1831 pa_usec_t *r = userdata;
1833 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
1835 return 0;
1838 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
1839 pa_usec_t *r = userdata;
1841 r[0] = s->thread_info.min_latency;
1842 r[1] = s->thread_info.max_latency;
1844 return 0;
1847 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
1849 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
1850 return 0;
1852 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
1854 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
1855 return 0;
1857 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
1859 *((size_t*) userdata) = s->thread_info.max_rewind;
1860 return 0;
1862 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
1864 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
1865 return 0;
1867 case PA_SOURCE_MESSAGE_GET_LATENCY:
1869 if (s->monitor_of) {
1870 *((pa_usec_t*) userdata) = 0;
1871 return 0;
1874 /* Implementors need to override this implementation! */
1875 return -1;
1877 case PA_SOURCE_MESSAGE_SET_PORT:
1879 pa_assert(userdata);
1880 if (s->set_port) {
1881 struct source_message_set_port *msg_data = userdata;
1882 msg_data->ret = s->set_port(s, msg_data->port);
1884 return 0;
1886 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
1887 /* This message is sent from the IO thread and handled in the main thread. */
1888 pa_assert_ctl_context();
1890 pa_source_get_volume(s, TRUE);
1891 pa_source_get_mute(s, TRUE);
1892 return 0;
1894 case PA_SOURCE_MESSAGE_MAX:
1898 return -1;
1901 /* Called from main thread */
1902 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1903 pa_source *source;
1904 uint32_t idx;
1905 int ret = 0;
1907 pa_core_assert_ref(c);
1908 pa_assert_ctl_context();
1909 pa_assert(cause != 0);
1911 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
1912 int r;
1914 if (source->monitor_of)
1915 continue;
1917 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
1918 ret = r;
1921 return ret;
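/* Usage sketch: a main-thread caller could suspend every capture device for a
 * user-requested suspend and later resume them with the same cause; the
 * specific cause value below is only an example:
 *
 *     if (pa_source_suspend_all(c, TRUE, PA_SUSPEND_USER) < 0)
 *         pa_log_warn("Not all sources could be suspended");
 *     ...
 *     pa_source_suspend_all(c, FALSE, PA_SUSPEND_USER);
 *
 * As the loop above shows, monitor sources are skipped. */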
1924 /* Called from main thread */
1925 void pa_source_detach(pa_source *s) {
1926 pa_source_assert_ref(s);
1927 pa_assert_ctl_context();
1928 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1930 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1933 /* Called from main thread */
1934 void pa_source_attach(pa_source *s) {
1935 pa_source_assert_ref(s);
1936 pa_assert_ctl_context();
1937 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1939 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1942 /* Called from IO thread */
1943 void pa_source_detach_within_thread(pa_source *s) {
1944 pa_source_output *o;
1945 void *state = NULL;
1947 pa_source_assert_ref(s);
1948 pa_source_assert_io_context(s);
1949 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1951 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
1952 if (o->detach)
1953 o->detach(o);
1956 /* Called from IO thread */
1957 void pa_source_attach_within_thread(pa_source *s) {
1958 pa_source_output *o;
1959 void *state = NULL;
1961 pa_source_assert_ref(s);
1962 pa_source_assert_io_context(s);
1963 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1965 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
1966 if (o->attach)
1967 o->attach(o);
1970 /* Called from IO thread */
1971 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
1972 pa_usec_t result = (pa_usec_t) -1;
1973 pa_source_output *o;
1974 void *state = NULL;
1976 pa_source_assert_ref(s);
1977 pa_source_assert_io_context(s);
1979 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
1980 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
1982 if (s->thread_info.requested_latency_valid)
1983 return s->thread_info.requested_latency;
1985 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
1986 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
1987 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
1988 result = o->thread_info.requested_source_latency;
1990 if (result != (pa_usec_t) -1)
1991 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
1993 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
1994 /* Only cache this if we are fully set up */
1995 s->thread_info.requested_latency = result;
1996 s->thread_info.requested_latency_valid = TRUE;
1999 return result;
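/* Example: with three outputs requesting 20 ms, 5 ms and (pa_usec_t) -1
 * (i.e. no preference), the loop above picks 5 ms and clamps it into
 * [min_latency, max_latency]. If no output expresses a preference the
 * function returns (pa_usec_t) -1, which PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY
 * above translates to max_latency for the main thread. */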
2002 /* Called from main thread */
2003 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2004 pa_usec_t usec = 0;
2006 pa_source_assert_ref(s);
2007 pa_assert_ctl_context();
2008 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2010 if (s->state == PA_SOURCE_SUSPENDED)
2011 return 0;
2013 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2015 return usec;
2018 /* Called from IO thread */
2019 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2020 pa_source_output *o;
2021 void *state = NULL;
2023 pa_source_assert_ref(s);
2024 pa_source_assert_io_context(s);
2026 if (max_rewind == s->thread_info.max_rewind)
2027 return;
2029 s->thread_info.max_rewind = max_rewind;
2031 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2032 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2033 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2036 /* Called from main thread */
2037 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2038 pa_source_assert_ref(s);
2039 pa_assert_ctl_context();
2041 if (PA_SOURCE_IS_LINKED(s->state))
2042 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2043 else
2044 pa_source_set_max_rewind_within_thread(s, max_rewind);
2047 /* Called from IO thread */
2048 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2049 pa_source_output *o;
2050 void *state = NULL;
2052 pa_source_assert_ref(s);
2053 pa_source_assert_io_context(s);
2055 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2056 s->thread_info.requested_latency_valid = FALSE;
2057 else if (dynamic)
2058 return;
2060 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2062 if (s->update_requested_latency)
2063 s->update_requested_latency(s);
2065 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2066 if (o->update_source_requested_latency)
2067 o->update_source_requested_latency(o);
2070 if (s->monitor_of)
2071 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2074 /* Called from main thread */
2075 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2076 pa_source_assert_ref(s);
2077 pa_assert_ctl_context();
2079 /* min_latency == 0: no limit
2080 * min_latency != 0: the specified limit
2082 * The same applies to max_latency */
2084 if (min_latency < ABSOLUTE_MIN_LATENCY)
2085 min_latency = ABSOLUTE_MIN_LATENCY;
2087 if (max_latency <= 0 ||
2088 max_latency > ABSOLUTE_MAX_LATENCY)
2089 max_latency = ABSOLUTE_MAX_LATENCY;
2091 pa_assert(min_latency <= max_latency);
2093 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2094 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2095 max_latency == ABSOLUTE_MAX_LATENCY) ||
2096 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2098 if (PA_SOURCE_IS_LINKED(s->state)) {
2099 pa_usec_t r[2];
2101 r[0] = min_latency;
2102 r[1] = max_latency;
2104 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2105 } else
2106 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
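/* Usage sketch: a driver that sets PA_SOURCE_DYNAMIC_LATENCY might narrow the
 * configurable range, either before the source is put or at runtime; the
 * bounds below are purely illustrative:
 *
 *     pa_source_set_latency_range(s, 5*PA_USEC_PER_MSEC, 2*PA_USEC_PER_SEC);
 *
 * Passing 0 for either bound falls back to ABSOLUTE_MIN_LATENCY or
 * ABSOLUTE_MAX_LATENCY, as handled above. */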
2109 /* Called from main thread */
2110 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2111 pa_source_assert_ref(s);
2112 pa_assert_ctl_context();
2113 pa_assert(min_latency);
2114 pa_assert(max_latency);
2116 if (PA_SOURCE_IS_LINKED(s->state)) {
2117 pa_usec_t r[2] = { 0, 0 };
2119 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2121 *min_latency = r[0];
2122 *max_latency = r[1];
2123 } else {
2124 *min_latency = s->thread_info.min_latency;
2125 *max_latency = s->thread_info.max_latency;
2129 /* Called from IO thread, and from main thread before pa_source_put() is called */
2130 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2131 pa_source_assert_ref(s);
2132 pa_source_assert_io_context(s);
2134 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2135 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2136 pa_assert(min_latency <= max_latency);
2138 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2139 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2140 max_latency == ABSOLUTE_MAX_LATENCY) ||
2141 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2142 s->monitor_of);
2144 if (s->thread_info.min_latency == min_latency &&
2145 s->thread_info.max_latency == max_latency)
2146 return;
2148 s->thread_info.min_latency = min_latency;
2149 s->thread_info.max_latency = max_latency;
2151 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2152 pa_source_output *o;
2153 void *state = NULL;
2155 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2156 if (o->update_source_latency_range)
2157 o->update_source_latency_range(o);
2160 pa_source_invalidate_requested_latency(s, FALSE);
2163 /* Called from main thread, before the source is put */
2164 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2165 pa_source_assert_ref(s);
2166 pa_assert_ctl_context();
2168 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2169 pa_assert(latency == 0);
2170 return;
2173 if (latency < ABSOLUTE_MIN_LATENCY)
2174 latency = ABSOLUTE_MIN_LATENCY;
2176 if (latency > ABSOLUTE_MAX_LATENCY)
2177 latency = ABSOLUTE_MAX_LATENCY;
2179 if (PA_SOURCE_IS_LINKED(s->state))
2180 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2181 else
2182 s->thread_info.fixed_latency = latency;
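/* Usage sketch: a driver without PA_SOURCE_DYNAMIC_LATENCY typically derives
 * its fixed latency from the size of the buffer it captures into; buffer_size
 * below is a hypothetical byte count:
 *
 *     pa_source_set_fixed_latency(s, pa_bytes_to_usec(buffer_size, &s->sample_spec));
 */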
2185 /* Called from main thread */
2186 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2187 pa_usec_t latency;
2189 pa_source_assert_ref(s);
2190 pa_assert_ctl_context();
2192 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2193 return 0;
2195 if (PA_SOURCE_IS_LINKED(s->state))
2196 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2197 else
2198 latency = s->thread_info.fixed_latency;
2200 return latency;
2203 /* Called from IO thread */
2204 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2205 pa_source_assert_ref(s);
2206 pa_source_assert_io_context(s);
2208 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2209 pa_assert(latency == 0);
2210 return;
2213 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2214 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2216 if (s->thread_info.fixed_latency == latency)
2217 return;
2219 s->thread_info.fixed_latency = latency;
2221 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2222 pa_source_output *o;
2223 void *state = NULL;
2225 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2226 if (o->update_source_fixed_latency)
2227 o->update_source_fixed_latency(o);
2230 pa_source_invalidate_requested_latency(s, FALSE);
2233 /* Called from main thread */
2234 size_t pa_source_get_max_rewind(pa_source *s) {
2235 size_t r;
2236 pa_assert_ctl_context();
2237 pa_source_assert_ref(s);
2239 if (!PA_SOURCE_IS_LINKED(s->state))
2240 return s->thread_info.max_rewind;
2242 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2244 return r;
2247 /* Called from main context */
2248 int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
2249 pa_device_port *port;
2250 int ret;
2252 pa_source_assert_ref(s);
2253 pa_assert_ctl_context();
2255 if (!s->set_port) {
2256 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2257 return -PA_ERR_NOTIMPLEMENTED;
2260 if (!s->ports)
2261 return -PA_ERR_NOENTITY;
2263 if (!(port = pa_hashmap_get(s->ports, name)))
2264 return -PA_ERR_NOENTITY;
2266 if (s->active_port == port) {
2267 s->save_port = s->save_port || save;
2268 return 0;
2271 if (s->flags & PA_SOURCE_SYNC_VOLUME) {
2272 struct source_message_set_port msg = { .port = port, .ret = 0 };
2273 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2274 ret = msg.ret;
2276 else
2277 ret = s->set_port(s, port);
2279 if (ret < 0)
2280 return -PA_ERR_NOENTITY;
2282 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2284 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2286 s->active_port = port;
2287 s->save_port = save;
2289 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2291 return 0;
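/* Usage sketch: switching a source to one of its ports by name and saving the
 * choice; "analog-input-microphone" is only an illustrative port name:
 *
 *     if (pa_source_set_port(s, "analog-input-microphone", TRUE) < 0)
 *         pa_log_warn("Failed to switch the port of source %s", s->name);
 */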
2294 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2296 /* Called from the IO thread. */
2297 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2298 pa_source_volume_change *c;
2299 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2300 c = pa_xnew(pa_source_volume_change, 1);
2302 PA_LLIST_INIT(pa_source_volume_change, c);
2303 c->at = 0;
2304 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2305 return c;
2308 /* Called from the IO thread. */
2309 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2310 pa_assert(c);
2311 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2312 pa_xfree(c);
2315 /* Called from the IO thread. */
2316 void pa_source_volume_change_push(pa_source *s) {
2317 pa_source_volume_change *c = NULL;
2318 pa_source_volume_change *nc = NULL;
2319 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2321 const char *direction = NULL;
2323 pa_assert(s);
2324 nc = pa_source_volume_change_new(s);
2326 /* NOTE: There are already more different volumes in pa_source than I can remember.
2327 * Adding one more volume for HW would let us get rid of this, but I am trying
2328 * to survive with the ones we already have. */
2329 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2331 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2332 pa_log_debug("Volume not changing");
2333 pa_source_volume_change_free(nc);
2334 return;
2337 nc->at = pa_source_get_latency_within_thread(s);
2338 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2340 if (s->thread_info.volume_changes_tail) {
2341 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2342 /* If the volume is going up, let's do it a bit late. If it is going
2343 * down, let's do it a bit early. */
2344 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2345 if (nc->at + safety_margin > c->at) {
2346 nc->at += safety_margin;
2347 direction = "up";
2348 break;
2351 else if (nc->at - safety_margin > c->at) {
2352 nc->at -= safety_margin;
2353 direction = "down";
2354 break;
2359 if (c == NULL) {
2360 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2361 nc->at += safety_margin;
2362 direction = "up";
2363 } else {
2364 nc->at -= safety_margin;
2365 direction = "down";
2367 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2369 else {
2370 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2373 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2375 /* We can drop volume changes that were queued earlier but are scheduled to happen later than this one. */
2376 PA_LLIST_FOREACH(c, nc->next) {
2377 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2378 pa_source_volume_change_free(c);
2380 nc->next = NULL;
2381 s->thread_info.volume_changes_tail = nc;
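/* Example: with a safety margin of, say, 10 ms, a change that raises the
 * volume and would nominally be written at time t gets queued at t + 10 ms,
 * while one that lowers it gets queued at t - 10 ms, so the hardware ramps
 * up late and down early relative to the captured audio. */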
2384 /* Called from the IO thread. */
2385 static void pa_source_volume_change_flush(pa_source *s) {
2386 pa_source_volume_change *c = s->thread_info.volume_changes;
2387 pa_assert(s);
2388 s->thread_info.volume_changes = NULL;
2389 s->thread_info.volume_changes_tail = NULL;
2390 while (c) {
2391 pa_source_volume_change *next = c->next;
2392 pa_source_volume_change_free(c);
2393 c = next;
2397 /* Called from the IO thread. */
2398 pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2399 pa_usec_t now = pa_rtclock_now();
2400 pa_bool_t ret = FALSE;
2402 pa_assert(s);
2403 pa_assert(s->write_volume);
2405 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2406 pa_source_volume_change *c = s->thread_info.volume_changes;
2407 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2408 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2409 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2410 ret = TRUE;
2411 s->thread_info.current_hw_volume = c->hw_volume;
2412 pa_source_volume_change_free(c);
2415 if (s->write_volume && ret)
2416 s->write_volume(s);
2418 if (s->thread_info.volume_changes) {
2419 if (usec_to_next)
2420 *usec_to_next = s->thread_info.volume_changes->at - now;
2421 if (pa_log_ratelimit(PA_LOG_DEBUG))
2422 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2424 else {
2425 if (usec_to_next)
2426 *usec_to_next = 0;
2427 s->thread_info.volume_changes_tail = NULL;
2429 return ret;
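/* Usage sketch: a backend that uses deferred volume (PA_SOURCE_SYNC_VOLUME
 * with a write_volume callback) would call this from its IO loop and use the
 * returned deadline to schedule its next wakeup; rearm_timer() stands in for
 * whatever timer mechanism the backend actually has:
 *
 *     pa_usec_t to_next;
 *     pa_source_volume_change_apply(s, &to_next);
 *     if (to_next > 0)
 *         rearm_timer(backend, to_next);
 */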
2433 /* Called from the main thread */
2434 /* Gets the list of formats supported by the source. The members and idxset must
2435 * be freed by the caller. */
2436 pa_idxset* pa_source_get_formats(pa_source *s) {
2437 pa_idxset *ret;
2439 pa_assert(s);
2441 if (s->get_formats) {
2442 /* Source supports format query, all is good */
2443 ret = s->get_formats(s);
2444 } else {
2445 /* Source doesn't support format query, so assume it does PCM */
2446 pa_format_info *f = pa_format_info_new();
2447 f->encoding = PA_ENCODING_PCM;
2449 ret = pa_idxset_new(NULL, NULL);
2450 pa_idxset_put(ret, f, NULL);
2453 return ret;
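/* Usage sketch: callers own both the returned idxset and its members, so a
 * typical consumer iterates and then frees them the same way
 * pa_source_check_format() below does:
 *
 *     uint32_t i;
 *     pa_format_info *f;
 *     pa_idxset *formats = pa_source_get_formats(s);
 *
 *     PA_IDXSET_FOREACH(f, formats, i)
 *         pa_log_debug("Supported encoding: %d", (int) f->encoding);
 *
 *     pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
 */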
2456 /* Called from the main thread */
2457 /* Checks if the source can accept this format */
2458 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f)
2460 pa_idxset *formats = NULL;
2461 pa_bool_t ret = FALSE;
2463 pa_assert(s);
2464 pa_assert(f);
2466 formats = pa_source_get_formats(s);
2468 if (formats) {
2469 pa_format_info *finfo_device;
2470 uint32_t i;
2472 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2473 if (pa_format_info_is_compatible(finfo_device, f)) {
2474 ret = TRUE;
2475 break;
2479 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2482 return ret;
2485 /* Called from the main thread */
2486 /* Calculates the intersection between formats supported by the source and
2487 * in_formats, and returns these, in the order of the source's formats. */
2488 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2489 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2490 pa_format_info *f_source, *f_in;
2491 uint32_t i, j;
2493 pa_assert(s);
2495 if (!in_formats || pa_idxset_isempty(in_formats))
2496 goto done;
2498 source_formats = pa_source_get_formats(s);
2500 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2501 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2502 if (pa_format_info_is_compatible(f_source, f_in))
2503 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2507 done:
2508 if (source_formats)
2509 pa_idxset_free(source_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2511 return out_formats;
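/* Usage sketch: given a set of formats requested by a stream, the
 * intersection computed above can drive format negotiation; req_formats is a
 * hypothetical pa_idxset of pa_format_info provided by the caller:
 *
 *     pa_idxset *compat = pa_source_check_formats(s, req_formats);
 *
 *     if (pa_idxset_isempty(compat))
 *         pa_log_info("Source %s supports none of the requested formats", s->name);
 *
 *     pa_idxset_free(compat, (pa_free2_cb_t) pa_format_info_free2, NULL);
 */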