/***
    This file is part of PulseAudio.

    Copyright 2010 Wim Taymans <wim.taymans@gmail.com>

    Based on module-virtual-sink.c
             module-virtual-source.c
             module-loopback.c

    Copyright 2010 Intel Corporation
    Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>

    PulseAudio is free software; you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as published
    by the Free Software Foundation; either version 2.1 of the License,
    or (at your option) any later version.

    PulseAudio is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with PulseAudio; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
    USA.
***/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <math.h>

#include "echo-cancel.h"

#include <pulse/xmalloc.h>
#include <pulse/timeval.h>
#include <pulse/rtclock.h>

#include <pulsecore/i18n.h>
#include <pulsecore/atomic.h>
#include <pulsecore/macro.h>
#include <pulsecore/namereg.h>
#include <pulsecore/sink.h>
#include <pulsecore/module.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/modargs.h>
#include <pulsecore/log.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/ltdl-helper.h>

#include "module-echo-cancel-symdef.h"
PA_MODULE_AUTHOR("Wim Taymans");
PA_MODULE_DESCRIPTION("Echo Cancellation");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        _("source_name=<name for the source> "
          "source_properties=<properties for the source> "
          "source_master=<name of source to filter> "
          "sink_name=<name for the sink> "
          "sink_properties=<properties for the sink> "
          "sink_master=<name of sink to filter> "
          "adjust_time=<how often to readjust rates in s> "
          "adjust_threshold=<how much drift to readjust after in ms> "
          "format=<sample format> "
          "rate=<sample rate> "
          "channels=<number of channels> "
          "channel_map=<channel map> "
          "aec_method=<implementation to use> "
          "aec_args=<parameters for the AEC engine> "
          "save_aec=<save AEC data in /tmp> "
          "autoloaded=<set if this module is being loaded automatically> "
          "use_volume_sharing=<yes or no> "
        ));
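
/* Purely illustrative example of how the arguments listed above might be
 * combined on a load-module line; the device names are placeholders and the
 * chosen aec_method depends on what this build was compiled with:
 *
 *   load-module module-echo-cancel source_master=<your-mic-source> \
 *       sink_master=<your-speaker-sink> aec_method=webrtc use_volume_sharing=yes
 */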
/* NOTE: Make sure the enum and ec_table are maintained in the correct order */
typedef enum {
    PA_ECHO_CANCELLER_INVALID = -1,
    PA_ECHO_CANCELLER_NULL,
#ifdef HAVE_SPEEX
    PA_ECHO_CANCELLER_SPEEX,
#endif
#ifdef HAVE_ADRIAN_EC
    PA_ECHO_CANCELLER_ADRIAN,
#endif
#ifdef HAVE_WEBRTC
    PA_ECHO_CANCELLER_WEBRTC,
#endif
} pa_echo_canceller_method_t;

#ifdef HAVE_WEBRTC
#define DEFAULT_ECHO_CANCELLER "webrtc"
#else
#define DEFAULT_ECHO_CANCELLER "speex"
#endif

static const pa_echo_canceller ec_table[] = {
    {
        /* Null, Dummy echo canceller (just copies data) */
        .init      = pa_null_ec_init,
        .run       = pa_null_ec_run,
        .done      = pa_null_ec_done,
    },
#ifdef HAVE_SPEEX
    {
        /* Speex */
        .init      = pa_speex_ec_init,
        .run       = pa_speex_ec_run,
        .done      = pa_speex_ec_done,
    },
#endif
#ifdef HAVE_ADRIAN_EC
    {
        /* Adrian Andre's NLMS implementation */
        .init      = pa_adrian_ec_init,
        .run       = pa_adrian_ec_run,
        .done      = pa_adrian_ec_done,
    },
#endif
#ifdef HAVE_WEBRTC
    {
        /* WebRTC's audio processing engine */
        .init      = pa_webrtc_ec_init,
        .play      = pa_webrtc_ec_play,
        .record    = pa_webrtc_ec_record,
        .set_drift = pa_webrtc_ec_set_drift,
        .run       = pa_webrtc_ec_run,
        .done      = pa_webrtc_ec_done,
    },
#endif
};

#define DEFAULT_RATE 32000
#define DEFAULT_CHANNELS 1
#define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
#define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)
#define DEFAULT_SAVE_AEC FALSE
#define DEFAULT_AUTOLOADED FALSE

#define MEMBLOCKQ_MAXLENGTH (16*1024*1024)

/* Can only be used in main context */
#define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
                      (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
/* This module creates a new (virtual) source and sink.
 *
 * The data sent to the new sink is kept in a memblockq before being
 * forwarded to the real sink_master.
 *
 * Data read from source_master is matched against the saved sink data and
 * echo canceled data is then pushed onto the new source.
 *
 * Both source and sink masters have their own threads to push/pull data
 * respectively. We however perform all our actions in the source IO thread.
 * To do this we send all played samples to the source IO thread where they
 * are then pushed into the memblockq.
 *
 * Alignment is performed in two steps:
 *
 * 1) when something happens that requires quick adjustment of the alignment of
 *    capture and playback samples, we perform a resync. This adjusts the
 *    position in the playback memblock to the requested sample. Quick
 *    adjustments include moving the playback samples before the capture
 *    samples (because otherwise the echo canceller does not work) or when the
 *    playback pointer drifts too far away.
 *
 * 2) periodically check the difference between capture and playback. We use a
 *    low and high watermark for adjusting the alignment. Playback should always
 *    be before capture and the difference should not be bigger than one frame
 *    size. We would ideally like to resample the sink_input but most drivers
 *    don't give enough accuracy to be able to do that right now.
 */
struct userdata;

struct pa_echo_canceller_msg {
    pa_msgobject parent;
    struct userdata *userdata;
};

PA_DEFINE_PRIVATE_CLASS(pa_echo_canceller_msg, pa_msgobject);
#define PA_ECHO_CANCELLER_MSG(o) (pa_echo_canceller_msg_cast(o))

struct snapshot {
    pa_usec_t sink_now;
    pa_usec_t sink_latency;
    size_t sink_delay;
    int64_t send_counter;

    pa_usec_t source_now;
    pa_usec_t source_latency;
    size_t source_delay;
    int64_t recv_counter;

    size_t rlen;
    size_t plen;
};

struct userdata {
    pa_core *core;
    pa_module *module;

    pa_bool_t autoloaded;
    pa_bool_t dead;
    pa_bool_t save_aec;

    pa_echo_canceller *ec;
    uint32_t blocksize;

    pa_bool_t need_realign;

    /* to wakeup the source I/O thread */
    pa_asyncmsgq *asyncmsgq;
    pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;

    pa_source *source;
    pa_bool_t source_auto_desc;
    pa_source_output *source_output;
    pa_memblockq *source_memblockq; /* echo canceller needs fixed sized chunks */
    size_t source_skip;

    pa_sink *sink;
    pa_bool_t sink_auto_desc;
    pa_sink_input *sink_input;
    pa_memblockq *sink_memblockq;
    int64_t send_counter;           /* updated in sink IO thread */
    int64_t recv_counter;
    size_t sink_skip;

    /* Bytes left over from previous iteration */
    size_t sink_rem;
    size_t source_rem;

    pa_atomic_t request_resync;

    pa_time_event *time_event;
    pa_usec_t adjust_time;
    int adjust_threshold;

    FILE *captured_file;
    FILE *played_file;
    FILE *canceled_file;
    FILE *drift_file;

    pa_bool_t use_volume_sharing;

    struct {
        pa_cvolume current_volume;
    } thread_info;
};

static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);

static const char* const valid_modargs[] = {
    "source_name",
    "source_properties",
    "source_master",
    "sink_name",
    "sink_properties",
    "sink_master",
    "adjust_time",
    "adjust_threshold",
    "format",
    "rate",
    "channels",
    "channel_map",
    "aec_method",
    "aec_args",
    "save_aec",
    "autoloaded",
    "use_volume_sharing",
    NULL
};

enum {
    SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
    SOURCE_OUTPUT_MESSAGE_REWIND,
    SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
    SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
};

enum {
    SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
};

enum {
    ECHO_CANCELLER_MESSAGE_SET_VOLUME,
};
static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
    int64_t buffer, diff_time, buffer_latency;

    /* get the number of samples between capture and playback */
    if (snapshot->plen > snapshot->rlen)
        buffer = snapshot->plen - snapshot->rlen;
    else
        buffer = 0;

    buffer += snapshot->source_delay + snapshot->sink_delay;

    /* add the amount of samples not yet transferred to the source context */
    if (snapshot->recv_counter <= snapshot->send_counter)
        buffer += (int64_t) (snapshot->send_counter - snapshot->recv_counter);
    else
        buffer += PA_CLIP_SUB(buffer, (int64_t) (snapshot->recv_counter - snapshot->send_counter));

    /* convert to time */
    buffer_latency = pa_bytes_to_usec(buffer, &u->source_output->sample_spec);

    /* capture and playback samples are perfectly aligned when diff_time is 0 */
    diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
          (snapshot->source_now - snapshot->source_latency);

    pa_log_debug("Diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
        (long long) snapshot->sink_latency,
        (long long) buffer_latency, (long long) snapshot->source_latency,
        (long long) snapshot->source_delay, (long long) snapshot->sink_delay,
        (long long) (snapshot->send_counter - snapshot->recv_counter),
        (long long) (snapshot->sink_now - snapshot->source_now));

    return diff_time;
}
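
/* Illustrative only, with made-up numbers: if both snapshots were taken at the
 * same instant (sink_now == source_now), sink_latency = 25 ms,
 * buffer_latency = 20 ms and source_latency = 10 ms, then
 * diff_time = (25 - 20) + 10 = 15 ms, i.e. playback leads capture by 15 ms.
 * A positive diff only triggers a quick adjustment when it exceeds
 * adjust_threshold (see time_callback() below). */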
/* Called from main context */
static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
    struct userdata *u = userdata;
    uint32_t old_rate, base_rate, new_rate;
    int64_t diff_time;
    /*size_t fs*/
    struct snapshot latency_snapshot;

    pa_assert(u);
    pa_assert(a);
    pa_assert(u->time_event == e);
    pa_assert_ctl_context();

    if (!IS_ACTIVE(u))
        return;

    /* update our snapshots */
    pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
    pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);

    /* calculate drift between capture and playback */
    diff_time = calc_diff(u, &latency_snapshot);

    /*fs = pa_frame_size(&u->source_output->sample_spec);*/
    old_rate = u->sink_input->sample_spec.rate;
    base_rate = u->source_output->sample_spec.rate;

    if (diff_time < 0) {
        /* recording before playback, we need to adjust quickly. The echo
         * canceller does not work in this case. */
        pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
            NULL, diff_time, NULL, NULL);
        /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
        new_rate = base_rate;
    }
    else {
        if (diff_time > u->adjust_threshold) {
            /* diff too big, quickly adjust */
            pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
                NULL, diff_time, NULL, NULL);
        }

        /* recording behind playback, we need to slowly adjust the rate to match */
        /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/

        /* assume equal samplerates for now */
        new_rate = base_rate;
    }

    /* make sure we don't make too big adjustments because that sounds horrible */
    if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
        new_rate = base_rate;

    if (new_rate != old_rate) {
        pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);

        pa_sink_input_set_rate(u->sink_input, new_rate);
    }

    pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
}
393 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
394 struct userdata *u = PA_SOURCE(o)->userdata;
396 switch (code) {
398 case PA_SOURCE_MESSAGE_GET_LATENCY:
400 /* The source is _put() before the source output is, so let's
401 * make sure we don't access it in that time. Also, the
402 * source output is first shut down, the source second. */
403 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
404 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
405 *((pa_usec_t*) data) = 0;
406 return 0;
409 *((pa_usec_t*) data) =
411 /* Get the latency of the master source */
412 pa_source_get_latency_within_thread(u->source_output->source) +
413 /* Add the latency internal to our source output on top */
414 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
415 /* and the buffering we do on the source */
416 pa_bytes_to_usec(u->blocksize, &u->source_output->source->sample_spec);
418 return 0;
420 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
421 u->thread_info.current_volume = u->source->reference_volume;
422 break;
425 return pa_source_process_msg(o, code, data, offset, chunk);
428 /* Called from sink I/O thread context */
429 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
430 struct userdata *u = PA_SINK(o)->userdata;
432 switch (code) {
434 case PA_SINK_MESSAGE_GET_LATENCY:
436 /* The sink is _put() before the sink input is, so let's
437 * make sure we don't access it in that time. Also, the
438 * sink input is first shut down, the sink second. */
439 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
440 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
441 *((pa_usec_t*) data) = 0;
442 return 0;
445 *((pa_usec_t*) data) =
447 /* Get the latency of the master sink */
448 pa_sink_get_latency_within_thread(u->sink_input->sink) +
450 /* Add the latency internal to our sink input on top */
451 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
453 return 0;
456 return pa_sink_process_msg(o, code, data, offset, chunk);
/* Called from main context */
static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(state) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return 0;

    if (state == PA_SOURCE_RUNNING) {
        /* restart timer when both sink and source are active */
        if (IS_ACTIVE(u) && u->adjust_time)
            pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);

        pa_atomic_store(&u->request_resync, 1);
        pa_source_output_cork(u->source_output, FALSE);
    } else if (state == PA_SOURCE_SUSPENDED) {
        pa_source_output_cork(u->source_output, TRUE);
    }

    return 0;
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(state) ||
        !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        return 0;

    if (state == PA_SINK_RUNNING) {
        /* restart timer when both sink and source are active */
        if (IS_ACTIVE(u) && u->adjust_time)
            pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);

        pa_atomic_store(&u->request_resync, 1);
        pa_sink_input_cork(u->sink_input, FALSE);
    } else if (state == PA_SINK_SUSPENDED) {
        pa_sink_input_cork(u->sink_input, TRUE);
    }

    return 0;
}

/* Called from source I/O thread context */
static void source_update_requested_latency_cb(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
        return;

    pa_log_debug("Source update requested latency");

    /* Just hand this one over to the master source */
    pa_source_output_set_requested_latency_within_thread(
            u->source_output,
            pa_source_get_requested_latency_within_thread(s));
}

/* Called from sink I/O thread context */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
        !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
        return;

    pa_log_debug("Sink update requested latency");

    /* Just hand this one over to the master sink */
    pa_sink_input_set_requested_latency_within_thread(
            u->sink_input,
            pa_sink_get_requested_latency_within_thread(s));
}

/* Called from sink I/O thread context */
static void sink_request_rewind_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
        !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
        return;

    pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);

    /* Just hand this one over to the master sink */
    pa_sink_input_request_rewind(u->sink_input,
                                 s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
}
/* Called from main context */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return;

    pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, TRUE);
}

/* Called from main context */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
        !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        return;

    pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
}

/* Called from main context. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u;
    pa_cvolume v;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return;

    pa_source_output_get_volume(u->source_output, &v, TRUE);

    if (pa_cvolume_equal(&s->real_volume, &v))
        /* no change */
        return;

    s->real_volume = v;
    pa_source_set_soft_volume(s, NULL);
}

/* Called from main context */
static void source_set_mute_cb(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return;

    pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
}

/* Called from main context */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
        !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        return;

    pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
}

/* Called from main context */
static void source_get_mute_cb(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return;

    pa_source_output_get_mute(u->source_output);
}
/* Called from source I/O thread context. */
static void apply_diff_time(struct userdata *u, int64_t diff_time) {
    int64_t diff;

    if (diff_time < 0) {
        diff = pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec);

        if (diff > 0) {
            /* add some extra safety samples to compensate for jitter in the
             * timings */
            diff += 10 * pa_frame_size (&u->source_output->sample_spec);

            pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);

            u->sink_skip = diff;
            u->source_skip = 0;
        }
    } else if (diff_time > 0) {
        diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);

        if (diff > 0) {
            pa_log("Playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);

            u->source_skip = diff;
            u->sink_skip = 0;
        }
    }
}

/* Called from source I/O thread context. */
static void do_resync(struct userdata *u) {
    int64_t diff_time;
    struct snapshot latency_snapshot;

    pa_log("Doing resync");

    /* update our snapshot */
    source_output_snapshot_within_thread(u, &latency_snapshot);
    pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);

    /* calculate drift between capture and playback */
    diff_time = calc_diff(u, &latency_snapshot);

    /* and adjust for the drift */
    apply_diff_time(u, diff_time);
}
/* 1. Calculate drift at this point, pass to canceller
 * 2. Push out playback samples in blocksize chunks
 * 3. Push out capture samples in blocksize chunks
 * 4. ???
 * 5. Profit
 *
 * Called from source I/O thread context.
 */
static void do_push_drift_comp(struct userdata *u) {
    size_t rlen, plen;
    pa_memchunk rchunk, pchunk, cchunk;
    uint8_t *rdata, *pdata, *cdata;
    float drift;
    int unused PA_GCC_UNUSED;

    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    /* Estimate snapshot drift as follows:
     *   pd: amount of played data consumed since last time
     *   rd: amount of recorded data consumed since last time
     *
     *   drift = (pd - rd) / rd;
     *
     * We calculate pd and rd as the memblockq length less the number of
     * samples left from the last iteration (to avoid double counting
     * those remainder samples). */
    drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
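    /* Worked example with hypothetical numbers: if, since the previous
     * iteration, the playback queue grew by 4804 bytes (plen - sink_rem) while
     * the capture queue grew by 4800 bytes (rlen - source_rem), then
     * drift = (4804 - 4800) / 4800 ~= +0.00083, i.e. playback is running
     * roughly 0.08% fast relative to capture. */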
    u->sink_rem = plen % u->blocksize;
    u->source_rem = rlen % u->blocksize;

    /* Now let the canceller work its drift compensation magic */
    u->ec->set_drift(u->ec, drift);

    if (u->save_aec) {
        if (u->drift_file)
            fprintf(u->drift_file, "d %a\n", drift);
    }

    /* Send in the playback samples first */
    while (plen >= u->blocksize) {
        pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
        pdata = pa_memblock_acquire(pchunk.memblock);
        pdata += pchunk.index;

        u->ec->play(u->ec, pdata);

        if (u->save_aec) {
            if (u->drift_file)
                fprintf(u->drift_file, "p %d\n", u->blocksize);
            if (u->played_file)
                unused = fwrite(pdata, 1, u->blocksize, u->played_file);
        }

        pa_memblock_release(pchunk.memblock);
        pa_memblockq_drop(u->sink_memblockq, u->blocksize);
        pa_memblock_unref(pchunk.memblock);

        plen -= u->blocksize;
    }

    /* And now the capture samples */
    while (rlen >= u->blocksize) {
        pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);

        rdata = pa_memblock_acquire(rchunk.memblock);
        rdata += rchunk.index;

        cchunk.index = 0;
        cchunk.length = u->blocksize;
        cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
        cdata = pa_memblock_acquire(cchunk.memblock);

        u->ec->record(u->ec, rdata, cdata);

        if (u->save_aec) {
            if (u->drift_file)
                fprintf(u->drift_file, "c %d\n", u->blocksize);
            if (u->captured_file)
                unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
            if (u->canceled_file)
                unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
        }

        pa_memblock_release(cchunk.memblock);
        pa_memblock_release(rchunk.memblock);

        pa_memblock_unref(rchunk.memblock);

        pa_source_post(u->source, &cchunk);
        pa_memblock_unref(cchunk.memblock);

        pa_memblockq_drop(u->source_memblockq, u->blocksize);
        rlen -= u->blocksize;
    }
}
/* This one's simpler than the drift compensation case -- we just iterate over
 * the capture buffer, and pass the canceller blocksize bytes of playback and
 * capture data.
 *
 * Called from source I/O thread context. */
static void do_push(struct userdata *u) {
    size_t rlen, plen;
    pa_memchunk rchunk, pchunk, cchunk;
    uint8_t *rdata, *pdata, *cdata;
    int unused PA_GCC_UNUSED;

    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    while (rlen >= u->blocksize) {
        /* take fixed block from recorded samples */
        pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);

        if (plen >= u->blocksize) {
            /* take fixed block from played samples */
            pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);

            rdata = pa_memblock_acquire(rchunk.memblock);
            rdata += rchunk.index;
            pdata = pa_memblock_acquire(pchunk.memblock);
            pdata += pchunk.index;

            cchunk.index = 0;
            cchunk.length = u->blocksize;
            cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
            cdata = pa_memblock_acquire(cchunk.memblock);

            if (u->save_aec) {
                if (u->captured_file)
                    unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
                if (u->played_file)
                    unused = fwrite(pdata, 1, u->blocksize, u->played_file);
            }

            /* perform echo cancellation */
            u->ec->run(u->ec, rdata, pdata, cdata);

            if (u->save_aec) {
                if (u->canceled_file)
                    unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
            }

            pa_memblock_release(cchunk.memblock);
            pa_memblock_release(pchunk.memblock);
            pa_memblock_release(rchunk.memblock);

            /* drop consumed sink samples */
            pa_memblockq_drop(u->sink_memblockq, u->blocksize);
            pa_memblock_unref(pchunk.memblock);

            pa_memblock_unref(rchunk.memblock);
            /* the filtered samples now become the samples from our
             * source */
            rchunk = cchunk;

            plen -= u->blocksize;
        }

        /* forward the (echo-canceled) data to the virtual source */
        pa_source_post(u->source, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        pa_memblockq_drop(u->source_memblockq, u->blocksize);
        rlen -= u->blocksize;
    }
}
/* Called from source I/O thread context. */
static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
    struct userdata *u;
    size_t rlen, plen, to_skip;
    pa_memchunk rchunk;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
        pa_log("Push when no link?");
        return;
    }

    if (PA_UNLIKELY(u->source->thread_info.state != PA_SOURCE_RUNNING ||
                    u->sink->thread_info.state != PA_SINK_RUNNING)) {
        pa_source_post(u->source, chunk);
        return;
    }

    /* handle queued messages, do any message sending of our own */
    while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
        ;

    pa_memblockq_push_align(u->source_memblockq, chunk);

    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    /* Let's not do anything else till we have enough data to process */
    if (rlen < u->blocksize)
        return;

    /* See if we need to drop samples in order to sync */
    if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
        do_resync(u);
    }

    /* Okay, skip cancellation for skipped source samples if needed. */
    if (PA_UNLIKELY(u->source_skip)) {
        /* The slightly tricky bit here is that we drop all but modulo
         * blocksize bytes and then adjust for that last bit on the sink side.
         * We do this because the source data is coming at a fixed rate, which
         * means the only way to try to catch up is drop sink samples and let
         * the canceller cope with this. */
        to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
        to_skip -= to_skip % u->blocksize;

        if (to_skip) {
            pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
            pa_source_post(u->source, &rchunk);

            pa_memblock_unref(rchunk.memblock);
            pa_memblockq_drop(u->source_memblockq, to_skip);

            rlen -= to_skip;
            u->source_skip -= to_skip;
        }

        if (rlen && u->source_skip % u->blocksize) {
            u->sink_skip += u->blocksize - (u->source_skip % u->blocksize);
            u->source_skip -= (u->source_skip % u->blocksize);
        }
    }
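    /* Hypothetical numbers to illustrate the block above: with blocksize = 320
     * bytes and source_skip = 1000 (and enough captured data), we post and
     * drop 960 bytes (the largest multiple of blocksize), leaving
     * source_skip = 40; that leftover then becomes sink_skip += 320 - 40 = 280
     * and source_skip is reset to 0, so the remaining catch-up happens on the
     * sink side. */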
    /* And for the sink, these samples have been played back already, so we can
     * just drop them and get on with it. */
    if (PA_UNLIKELY(u->sink_skip)) {
        to_skip = plen >= u->sink_skip ? u->sink_skip : plen;

        pa_memblockq_drop(u->sink_memblockq, to_skip);

        plen -= to_skip;
        u->sink_skip -= to_skip;
    }

    /* process and push out samples */
    if (u->ec->params.drift_compensation)
        do_push_drift_comp(u);
    else
        do_push(u);
}
/* Called from sink I/O thread context. */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_requested)
        pa_sink_process_rewind(u->sink, 0);

    pa_sink_render_full(u->sink, nbytes, chunk);

    if (i->thread_info.underrun_for > 0) {
        pa_log_debug("Handling end of underrun.");
        pa_atomic_store(&u->request_resync, 1);
    }

    /* let source thread handle the chunk. pass the sample count as well so that
     * the source IO thread can update the right variables. */
    pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
        NULL, 0, chunk, NULL);
    u->send_counter += chunk->length;

    return 0;
}

/* Called from source I/O thread context. */
static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_source_process_rewind(u->source, nbytes);

    /* go back on read side, we need to use older sink data for this */
    pa_memblockq_rewind(u->sink_memblockq, nbytes);

    /* manipulate write index */
    pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, TRUE);

    pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
        (long long) pa_memblockq_get_length (u->source_memblockq));
}

/* Called from sink I/O thread context. */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink process rewind %lld", (long long) nbytes);

    pa_sink_process_rewind(u->sink, nbytes);

    pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
    u->send_counter -= nbytes;
}
/* Called from source I/O thread context. */
static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
    size_t delay, rlen, plen;
    pa_usec_t now, latency;

    now = pa_rtclock_now();
    latency = pa_source_get_latency_within_thread(u->source_output->source);
    delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);

    delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    snapshot->source_now = now;
    snapshot->source_latency = latency;
    snapshot->source_delay = delay;
    snapshot->recv_counter = u->recv_counter;
    snapshot->rlen = rlen + u->sink_skip;
    snapshot->plen = plen + u->source_skip;
}
/* Called from source I/O thread context. */
static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;

    switch (code) {

        case SOURCE_OUTPUT_MESSAGE_POST:

            pa_source_output_assert_io_context(u->source_output);

            if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
                pa_memblockq_push_align(u->sink_memblockq, chunk);
            else
                pa_memblockq_flush_write(u->sink_memblockq, TRUE);

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SOURCE_OUTPUT_MESSAGE_REWIND:
            pa_source_output_assert_io_context(u->source_output);

            /* manipulate write index, never go past what we have */
            if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
                pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
            else
                pa_memblockq_flush_write(u->sink_memblockq, TRUE);

            pa_log_debug("Sink rewind (%lld)", (long long) offset);

            u->recv_counter -= offset;

            return 0;

        case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
            struct snapshot *snapshot = (struct snapshot *) data;

            source_output_snapshot_within_thread(u, snapshot);
            return 0;
        }

        case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
            apply_diff_time(u, offset);
            return 0;
    }

    return pa_source_output_process_msg(obj, code, data, offset, chunk);
}

/* Called from sink I/O thread context. */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t delay;
            pa_usec_t now, latency;
            struct snapshot *snapshot = (struct snapshot *) data;

            pa_sink_input_assert_io_context(u->sink_input);

            now = pa_rtclock_now();
            latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
            delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);

            snapshot->sink_now = now;
            snapshot->sink_latency = latency;
            snapshot->sink_delay = delay;
            snapshot->send_counter = u->send_counter;
            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
/* Called from sink I/O thread context. */
static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);

    /* FIXME: Too small max_rewind:
     * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
    pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
    pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
}

/* Called from source I/O thread context. */
static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output update max rewind %lld", (long long) nbytes);

    pa_source_set_max_rewind_within_thread(u->source, nbytes);
}

/* Called from sink I/O thread context. */
static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update max request %lld", (long long) nbytes);

    pa_sink_set_max_request_within_thread(u->sink, nbytes);
}

/* Called from sink I/O thread context. */
static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
    struct userdata *u;
    pa_usec_t latency;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    latency = pa_sink_get_requested_latency_within_thread(i->sink);

    pa_log_debug("Sink input update requested latency %lld", (long long) latency);
}

/* Called from source I/O thread context. */
static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
    struct userdata *u;
    pa_usec_t latency;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    latency = pa_source_get_requested_latency_within_thread(o->source);

    pa_log_debug("Source output update requested latency %lld", (long long) latency);
}
/* Called from sink I/O thread context. */
static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update latency range %lld %lld",
        (long long) i->sink->thread_info.min_latency,
        (long long) i->sink->thread_info.max_latency);

    pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
}

/* Called from source I/O thread context. */
static void source_output_update_source_latency_range_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output update latency range %lld %lld",
        (long long) o->source->thread_info.min_latency,
        (long long) o->source->thread_info.max_latency);

    pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
}

/* Called from sink I/O thread context. */
static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update fixed latency %lld",
        (long long) i->sink->thread_info.fixed_latency);

    pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
}

/* Called from source I/O thread context. */
static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output update fixed latency %lld",
        (long long) o->source->thread_info.fixed_latency);

    pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
}
/* Called from source I/O thread context. */
static void source_output_attach_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
    pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
    pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
    pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));

    pa_log_debug("Source output %d attach", o->index);

    pa_source_attach_within_thread(u->source);

    u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
            o->source->thread_info.rtpoll,
            PA_RTPOLL_LATE,
            u->asyncmsgq);
}

/* Called from sink I/O thread context. */
static void sink_input_attach_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
    pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);

    /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
     * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
    pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);

    /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
     * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
     * HERE. SEE (6) */
    pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));

    /* FIXME: Too small max_rewind:
     * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
    pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));

    pa_log_debug("Sink input %d attach", i->index);

    u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
            i->sink->thread_info.rtpoll,
            PA_RTPOLL_LATE,
            u->asyncmsgq);

    pa_sink_attach_within_thread(u->sink);
}

/* Called from source I/O thread context. */
static void source_output_detach_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_source_detach_within_thread(u->source);
    pa_source_set_rtpoll(u->source, NULL);

    pa_log_debug("Source output %d detach", o->index);

    if (u->rtpoll_item_read) {
        pa_rtpoll_item_free(u->rtpoll_item_read);
        u->rtpoll_item_read = NULL;
    }
}

/* Called from sink I/O thread context. */
static void sink_input_detach_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_detach_within_thread(u->sink);

    pa_sink_set_rtpoll(u->sink, NULL);

    pa_log_debug("Sink input %d detach", i->index);

    if (u->rtpoll_item_write) {
        pa_rtpoll_item_free(u->rtpoll_item_write);
        u->rtpoll_item_write = NULL;
    }
}
/* Called from source I/O thread context. */
static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output %d state %d", o->index, state);
}

/* Called from sink I/O thread context. */
static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input %d state %d", i->index, state);

    /* If we are added for the first time, ask for a rewinding so that
     * we are heard right-away. */
    if (PA_SINK_INPUT_IS_LINKED(state) &&
        i->thread_info.state == PA_SINK_INPUT_INIT) {
        pa_log_debug("Requesting rewind due to state change.");
        pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
    }
}

/* Called from main context. */
static void source_output_kill_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_ctl_context();
    pa_assert_se(u = o->userdata);

    u->dead = TRUE;

    /* The order here matters! We first kill the source output, followed
     * by the source. That means the source callbacks must be protected
     * against an unconnected source output! */
    pa_source_output_unlink(u->source_output);
    pa_source_unlink(u->source);

    pa_source_output_unref(u->source_output);
    u->source_output = NULL;

    pa_source_unref(u->source);
    u->source = NULL;

    pa_log_debug("Source output kill %d", o->index);

    pa_module_unload_request(u->module, TRUE);
}

/* Called from main context */
static void sink_input_kill_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    u->dead = TRUE;

    /* The order here matters! We first kill the sink input, followed
     * by the sink. That means the sink callbacks must be protected
     * against an unconnected sink input! */
    pa_sink_input_unlink(u->sink_input);
    pa_sink_unlink(u->sink);

    pa_sink_input_unref(u->sink_input);
    u->sink_input = NULL;

    pa_sink_unref(u->sink);
    u->sink = NULL;

    pa_log_debug("Sink input kill %d", i->index);

    pa_module_unload_request(u->module, TRUE);
}
/* Called from main context. */
static pa_bool_t source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_ctl_context();
    pa_assert_se(u = o->userdata);

    if (u->dead || u->autoloaded)
        return FALSE;

    return (u->source != dest) && (u->sink != dest->monitor_of);
}

/* Called from main context */
static pa_bool_t sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->dead || u->autoloaded)
        return FALSE;

    return u->sink != dest;
}

/* Called from main context. */
static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_ctl_context();
    pa_assert_se(u = o->userdata);

    if (dest) {
        pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
        pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
    } else
        pa_source_set_asyncmsgq(u->source, NULL);

    if (u->source_auto_desc && dest) {
        const char *y, *z;
        pa_proplist *pl;

        pl = pa_proplist_new();
        y = pa_proplist_gets(u->sink_input->sink->proplist, PA_PROP_DEVICE_DESCRIPTION);
        z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
        pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
                y ? y : u->sink_input->sink->name);

        pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
        pa_proplist_free(pl);
    }
}

/* Called from main context */
static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (dest) {
        pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
        pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
    } else
        pa_sink_set_asyncmsgq(u->sink, NULL);

    if (u->sink_auto_desc && dest) {
        const char *y, *z;
        pa_proplist *pl;

        pl = pa_proplist_new();
        y = pa_proplist_gets(u->source_output->source->proplist, PA_PROP_DEVICE_DESCRIPTION);
        z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
        pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
                y ? y : u->source_output->source->name);

        pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
        pa_proplist_free(pl);
    }
}
/* Called from main context */
static void sink_input_volume_changed_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_volume_changed(u->sink, &i->volume);
}

/* Called from main context */
static void sink_input_mute_changed_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_mute_changed(u->sink, i->muted);
}

/* Called from main context */
static int canceller_process_msg_cb(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    struct pa_echo_canceller_msg *msg;
    struct userdata *u;

    pa_assert(o);

    msg = PA_ECHO_CANCELLER_MSG(o);
    u = msg->userdata;

    switch (code) {
        case ECHO_CANCELLER_MESSAGE_SET_VOLUME: {
            pa_cvolume *v = (pa_cvolume *) userdata;

            if (u->use_volume_sharing)
                pa_source_set_volume(u->source, v, TRUE, FALSE);
            else
                pa_source_output_set_volume(u->source_output, v, FALSE, TRUE);

            break;
        }

        default:
            pa_assert_not_reached();
            break;
    }

    return 0;
}

/* Called by the canceller, so source I/O thread context. */
void pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
    *v = ec->msg->userdata->thread_info.current_volume;
}

/* Called by the canceller, so source I/O thread context. */
void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
    if (!pa_cvolume_equal(&ec->msg->userdata->thread_info.current_volume, v)) {
        pa_cvolume *vol = pa_xnewdup(pa_cvolume, v, 1);

        pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(ec->msg), ECHO_CANCELLER_MESSAGE_SET_VOLUME, vol, 0, NULL,
                pa_xfree);
    }
}
static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
    if (pa_streq(method, "null"))
        return PA_ECHO_CANCELLER_NULL;
#ifdef HAVE_SPEEX
    if (pa_streq(method, "speex"))
        return PA_ECHO_CANCELLER_SPEEX;
#endif
#ifdef HAVE_ADRIAN_EC
    if (pa_streq(method, "adrian"))
        return PA_ECHO_CANCELLER_ADRIAN;
#endif
#ifdef HAVE_WEBRTC
    if (pa_streq(method, "webrtc"))
        return PA_ECHO_CANCELLER_WEBRTC;
#endif
    return PA_ECHO_CANCELLER_INVALID;
}

/* Common initialisation bits between module-echo-cancel and the standalone
 * test program.
 *
 * Called from main context. */
static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
    const char *ec_string;
    pa_echo_canceller_method_t ec_method;

    if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
        pa_log("Invalid sample format specification or channel map");
        goto fail;
    }

    u->ec = pa_xnew0(pa_echo_canceller, 1);
    if (!u->ec) {
        pa_log("Failed to alloc echo canceller");
        goto fail;
    }

    ec_string = pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER);
    if ((ec_method = get_ec_method_from_string(ec_string)) < 0) {
        pa_log("Invalid echo canceller implementation");
        goto fail;
    }

    pa_log_info("Using AEC engine: %s", ec_string);

    u->ec->init = ec_table[ec_method].init;
    u->ec->play = ec_table[ec_method].play;
    u->ec->record = ec_table[ec_method].record;
    u->ec->set_drift = ec_table[ec_method].set_drift;
    u->ec->run = ec_table[ec_method].run;
    u->ec->done = ec_table[ec_method].done;

    return 0;

fail:
    return -1;
}
1625 /* Called from main context. */
1626 int pa__init(pa_module*m) {
1627 struct userdata *u;
1628 pa_sample_spec source_ss, sink_ss;
1629 pa_channel_map source_map, sink_map;
1630 pa_modargs *ma;
1631 pa_source *source_master=NULL;
1632 pa_sink *sink_master=NULL;
1633 pa_source_output_new_data source_output_data;
1634 pa_sink_input_new_data sink_input_data;
1635 pa_source_new_data source_data;
1636 pa_sink_new_data sink_data;
1637 pa_memchunk silence;
1638 uint32_t temp;
1640 pa_assert(m);
1642 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1643 pa_log("Failed to parse module arguments.");
1644 goto fail;
1647 if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1648 pa_log("Master source not found");
1649 goto fail;
1651 pa_assert(source_master);
1653 if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1654 pa_log("Master sink not found");
1655 goto fail;
1657 pa_assert(sink_master);
1659 if (source_master->monitor_of == sink_master) {
1660 pa_log("Can't cancel echo between a sink and its monitor");
1661 goto fail;
1664 source_ss = source_master->sample_spec;
1665 source_ss.rate = DEFAULT_RATE;
1666 source_ss.channels = DEFAULT_CHANNELS;
1667 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1669 sink_ss = sink_master->sample_spec;
1670 sink_map = sink_master->channel_map;
1672 u = pa_xnew0(struct userdata, 1);
1673 if (!u) {
1674 pa_log("Failed to alloc userdata");
1675 goto fail;
1677 u->core = m->core;
1678 u->module = m;
1679 m->userdata = u;
1680 u->dead = FALSE;
1682 u->use_volume_sharing = TRUE;
1683 if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &u->use_volume_sharing) < 0) {
1684 pa_log("use_volume_sharing= expects a boolean argument");
1685 goto fail;
1688 temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1689 if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
1690 pa_log("Failed to parse adjust_time value");
1691 goto fail;
1694 if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1695 u->adjust_time = temp * PA_USEC_PER_SEC;
1696 else
1697 u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1699 temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
1700 if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
1701 pa_log("Failed to parse adjust_threshold value");
1702 goto fail;
1705 if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
1706 u->adjust_threshold = temp * PA_USEC_PER_MSEC;
1707 else
1708 u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;
1710 u->save_aec = DEFAULT_SAVE_AEC;
1711 if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1712 pa_log("Failed to parse save_aec value");
1713 goto fail;
1716 u->autoloaded = DEFAULT_AUTOLOADED;
1717 if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
1718 pa_log("Failed to parse autoloaded value");
1719 goto fail;
1722 if (init_common(ma, u, &source_ss, &source_map) < 0)
1723 goto fail;
1725 u->asyncmsgq = pa_asyncmsgq_new(0);
1726 u->need_realign = TRUE;
1728 if (u->ec->init) {
1729 if (!u->ec->init(u->core, u->ec, &source_ss, &source_map, &sink_ss, &sink_map, &u->blocksize, pa_modargs_get_value(ma, "aec_args", NULL))) {
1730 pa_log("Failed to init AEC engine");
1731 goto fail;
1735 if (u->ec->params.drift_compensation)
1736 pa_assert(u->ec->set_drift);
1738 /* Create source */
1739 pa_source_new_data_init(&source_data);
1740 source_data.driver = __FILE__;
1741 source_data.module = m;
1742 if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1743 source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1744 pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1745 pa_source_new_data_set_channel_map(&source_data, &source_map);
1746 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1747 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1748 if (!u->autoloaded)
1749 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1751 if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1752 pa_log("Invalid properties");
1753 pa_source_new_data_done(&source_data);
1754 goto fail;
1757 if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1758 const char *y, *z;
1760 y = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1761 z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1762 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1763 z ? z : source_master->name, y ? y : sink_master->name);
1766 u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1767 | (u->use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1768 pa_source_new_data_done(&source_data);
1770 if (!u->source) {
1771 pa_log("Failed to create source.");
1772 goto fail;
1775 u->source->parent.process_msg = source_process_msg_cb;
1776 u->source->set_state = source_set_state_cb;
1777 u->source->update_requested_latency = source_update_requested_latency_cb;
1778 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1779 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1780 if (!u->use_volume_sharing) {
1781 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1782 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1783 pa_source_enable_decibel_volume(u->source, TRUE);
1785 u->source->userdata = u;
1787 pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1789 /* Create sink */
1790 pa_sink_new_data_init(&sink_data);
1791 sink_data.driver = __FILE__;
1792 sink_data.module = m;
1793 if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1794 sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1795 pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1796 pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1797 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1798 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1799 if (!u->autoloaded)
1800 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1802 if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1803 pa_log("Invalid properties");
1804 pa_sink_new_data_done(&sink_data);
1805 goto fail;
1806 }
1808 if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1809 const char *y, *z;
1811 y = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1812 z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1813 pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1814 z ? z : sink_master->name, y ? y : source_master->name);
1815 }
1817 u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1818 | (u->use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1819 pa_sink_new_data_done(&sink_data);
1821 if (!u->sink) {
1822 pa_log("Failed to create sink.");
1823 goto fail;
1824 }
1826 u->sink->parent.process_msg = sink_process_msg_cb;
1827 u->sink->set_state = sink_set_state_cb;
1828 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1829 u->sink->request_rewind = sink_request_rewind_cb;
1830 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1831 if (!u->use_volume_sharing) {
1832 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1833 pa_sink_enable_decibel_volume(u->sink, TRUE);
1834 }
1835 u->sink->userdata = u;
1837 pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1839 /* Create source output */
1840 pa_source_output_new_data_init(&source_output_data);
1841 source_output_data.driver = __FILE__;
1842 source_output_data.module = m;
1843 pa_source_output_new_data_set_source(&source_output_data, source_master, FALSE);
1844 source_output_data.destination_source = u->source;
1845 /* FIXME
1846 source_output_data.flags = PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND; */
1848 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1849 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1850 pa_source_output_new_data_set_sample_spec(&source_output_data, &source_ss);
1851 pa_source_output_new_data_set_channel_map(&source_output_data, &source_map);
1853 pa_source_output_new(&u->source_output, m->core, &source_output_data);
1854 pa_source_output_new_data_done(&source_output_data);
1856 if (!u->source_output)
1857 goto fail;
1859 u->source_output->parent.process_msg = source_output_process_msg_cb;
1860 u->source_output->push = source_output_push_cb;
1861 u->source_output->process_rewind = source_output_process_rewind_cb;
1862 u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1863 u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1864 u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1865 u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1866 u->source_output->kill = source_output_kill_cb;
1867 u->source_output->attach = source_output_attach_cb;
1868 u->source_output->detach = source_output_detach_cb;
1869 u->source_output->state_change = source_output_state_change_cb;
1870 u->source_output->may_move_to = source_output_may_move_to_cb;
1871 u->source_output->moving = source_output_moving_cb;
1872 u->source_output->userdata = u;
1874 u->source->output_from_master = u->source_output;
1876 /* Create sink input */
1877 pa_sink_input_new_data_init(&sink_input_data);
1878 sink_input_data.driver = __FILE__;
1879 sink_input_data.module = m;
1880 pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, FALSE);
1881 sink_input_data.origin_sink = u->sink;
1882 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1883 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1884 pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
1885 pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
1886 sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;
1888 pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
1889 pa_sink_input_new_data_done(&sink_input_data);
1891 if (!u->sink_input)
1892 goto fail;
1894 u->sink_input->parent.process_msg = sink_input_process_msg_cb;
1895 u->sink_input->pop = sink_input_pop_cb;
1896 u->sink_input->process_rewind = sink_input_process_rewind_cb;
1897 u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
1898 u->sink_input->update_max_request = sink_input_update_max_request_cb;
1899 u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
1900 u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
1901 u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
1902 u->sink_input->kill = sink_input_kill_cb;
1903 u->sink_input->attach = sink_input_attach_cb;
1904 u->sink_input->detach = sink_input_detach_cb;
1905 u->sink_input->state_change = sink_input_state_change_cb;
1906 u->sink_input->may_move_to = sink_input_may_move_to_cb;
1907 u->sink_input->moving = sink_input_moving_cb;
1908 if (!u->use_volume_sharing)
1909 u->sink_input->volume_changed = sink_input_volume_changed_cb;
1910 u->sink_input->mute_changed = sink_input_mute_changed_cb;
1911 u->sink_input->userdata = u;
1913 u->sink->input_to_master = u->sink_input;
1915 pa_sink_input_get_silence(u->sink_input, &silence);
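/* These memblockqs buffer the played (sink) and captured (source) streams so the canceller is always fed matching, time-aligned blocks of both. */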
1917 u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1918 &source_ss, 1, 1, 0, &silence);
1919 u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1920 &sink_ss, 1, 1, 0, &silence);
1922 pa_memblock_unref(silence.memblock);
1924 if (!u->source_memblockq || !u->sink_memblockq) {
1925 pa_log("Failed to create memblockq.");
1926 goto fail;
1927 }
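/* Unless the canceller compensates for clock drift itself, arm a timer that periodically re-aligns the playback and capture streams. */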
1929 if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
1930 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
1931 else if (u->ec->params.drift_compensation) {
1932 pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
1933 u->adjust_time = 0;
1934 /* Perform resync just once to give the canceller a leg up */
1935 pa_atomic_store(&u->request_resync, 1);
1936 }
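/* Optionally dump the played, captured and cancelled signals (and drift values) to /tmp so a run can be replayed offline. */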
1938 if (u->save_aec) {
1939 pa_log("Creating AEC files in /tmp");
1940 u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
1941 if (u->captured_file == NULL)
1942 perror ("fopen failed");
1943 u->played_file = fopen("/tmp/aec_play.sw", "wb");
1944 if (u->played_file == NULL)
1945 perror ("fopen failed");
1946 u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
1947 if (u->canceled_file == NULL)
1948 perror ("fopen failed");
1949 if (u->ec->params.drift_compensation) {
1950 u->drift_file = fopen("/tmp/aec_drift.txt", "w");
1951 if (u->drift_file == NULL)
1952 perror ("fopen failed");
1953 }
1954 }
1956 u->ec->msg = pa_msgobject_new(pa_echo_canceller_msg);
1957 u->ec->msg->parent.process_msg = canceller_process_msg_cb;
1958 u->ec->msg->userdata = u;
1960 u->thread_info.current_volume = u->source->reference_volume;
1962 pa_sink_put(u->sink);
1963 pa_source_put(u->source);
1965 pa_sink_input_put(u->sink_input);
1966 pa_source_output_put(u->source_output);
1967 pa_modargs_free(ma);
1969 return 0;
1971 fail:
1972 if (ma)
1973 pa_modargs_free(ma);
1975 pa__done(m);
1977 return -1;
1978 }
1980 /* Called from main context. */
1981 int pa__get_n_used(pa_module *m) {
1982 struct userdata *u;
1984 pa_assert(m);
1985 pa_assert_se(u = m->userdata);
1987 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
1988 }
1990 /* Called from main context. */
1991 void pa__done(pa_module*m) {
1992 struct userdata *u;
1994 pa_assert(m);
1996 if (!(u = m->userdata))
1997 return;
1999 u->dead = TRUE;
2001 /* See comments in source_output_kill_cb() above regarding
2002 * destruction order! */
2004 if (u->time_event)
2005 u->core->mainloop->time_free(u->time_event);
2007 if (u->source_output)
2008 pa_source_output_unlink(u->source_output);
2009 if (u->sink_input)
2010 pa_sink_input_unlink(u->sink_input);
2012 if (u->source)
2013 pa_source_unlink(u->source);
2014 if (u->sink)
2015 pa_sink_unlink(u->sink);
2017 if (u->source_output)
2018 pa_source_output_unref(u->source_output);
2019 if (u->sink_input)
2020 pa_sink_input_unref(u->sink_input);
2022 if (u->source)
2023 pa_source_unref(u->source);
2024 if (u->sink)
2025 pa_sink_unref(u->sink);
2027 if (u->source_memblockq)
2028 pa_memblockq_free(u->source_memblockq);
2029 if (u->sink_memblockq)
2030 pa_memblockq_free(u->sink_memblockq);
2032 if (u->ec) {
2033 if (u->ec->done)
2034 u->ec->done(u->ec);
2036 pa_xfree(u->ec);
2037 }
2039 if (u->asyncmsgq)
2040 pa_asyncmsgq_unref(u->asyncmsgq);
2042 if (u->save_aec) {
2043 if (u->played_file)
2044 fclose(u->played_file);
2045 if (u->captured_file)
2046 fclose(u->captured_file);
2047 if (u->canceled_file)
2048 fclose(u->canceled_file);
2049 if (u->drift_file)
2050 fclose(u->drift_file);
2051 }
2053 pa_xfree(u);
2054 }
2056 #ifdef ECHO_CANCEL_TEST
2057 /*
2058 * Stand-alone test program for running the canceller on pre-recorded files.
2059 */
2060 int main(int argc, char* argv[]) {
2061 struct userdata u;
2062 pa_sample_spec source_ss, sink_ss;
2063 pa_channel_map source_map, sink_map;
2064 pa_modargs *ma = NULL;
2065 uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
2066 int unused PA_GCC_UNUSED;
2067 int ret = 0, i;
2068 char c;
2069 float drift;
2071 pa_memzero(&u, sizeof(u));
2073 if (argc < 4 || argc > 7) {
2074 goto usage;
2075 }
2077 u.captured_file = fopen(argv[2], "rb");
2078 if (u.captured_file == NULL) {
2079 perror ("fopen failed");
2080 goto fail;
2081 }
2082 u.played_file = fopen(argv[1], "rb");
2083 if (u.played_file == NULL) {
2084 perror ("fopen failed");
2085 goto fail;
2086 }
2087 u.canceled_file = fopen(argv[3], "wb");
2088 if (u.canceled_file == NULL) {
2089 perror ("fopen failed");
2090 goto fail;
2091 }
2093 u.core = pa_xnew0(pa_core, 1);
2094 u.core->cpu_info.cpu_type = PA_CPU_X86;
2095 u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;
2097 if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
2098 pa_log("Failed to parse module arguments.");
2099 goto fail;
2100 }
2102 source_ss.format = PA_SAMPLE_S16LE;
2103 source_ss.rate = DEFAULT_RATE;
2104 source_ss.channels = DEFAULT_CHANNELS;
2105 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2107 if (init_common(ma, &u, &source_ss, &source_map) < 0)
2108 goto fail;
2110 if (!u.ec->init(u.core, u.ec, &source_ss, &source_map, &sink_ss, &sink_map, &u.blocksize,
2111 (argc > 4) ? argv[5] : NULL )) {
2112 pa_log("Failed to init AEC engine");
2113 goto fail;
2114 }
2116 if (u.ec->params.drift_compensation) {
2117 if (argc < 7) {
2118 pa_log("Drift compensation enabled but drift file not specified");
2119 goto fail;
2120 }
2122 u.drift_file = fopen(argv[6], "rt");
2124 if (u.drift_file == NULL) {
2125 perror ("fopen failed");
2126 goto fail;
2127 }
2128 }
2130 rdata = pa_xmalloc(u.blocksize);
2131 pdata = pa_xmalloc(u.blocksize);
2132 cdata = pa_xmalloc(u.blocksize);
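/* Without drift compensation, feed the canceller fixed-size blocks from the played and captured files in lock step. */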
2134 if (!u.ec->params.drift_compensation) {
2135 while (fread(rdata, u.blocksize, 1, u.captured_file) > 0) {
2136 if (fread(pdata, u.blocksize, 1, u.played_file) == 0) {
2137 perror("Played file ended before captured file");
2138 goto fail;
2139 }
2141 u.ec->run(u.ec, rdata, pdata, cdata);
2143 unused = fwrite(cdata, u.blocksize, 1, u.canceled_file);
2144 }
2145 } else {
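/* With drift compensation, the drift file drives the run: 'd' is followed by a drift value, 'c' by a count of captured bytes to process, 'p' by a count of played bytes to process. */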
2146 while (fscanf(u.drift_file, "%c", &c) > 0) {
2147 switch (c) {
2148 case 'd':
2149 if (!fscanf(u.drift_file, "%a", &drift)) {
2150 perror("Drift file incomplete");
2151 goto fail;
2152 }
2154 u.ec->set_drift(u.ec, drift);
2156 break;
2158 case 'c':
2159 if (!fscanf(u.drift_file, "%d", &i)) {
2160 perror("Drift file incomplete");
2161 goto fail;
2162 }
2164 if (fread(rdata, i, 1, u.captured_file) <= 0) {
2165 perror("Captured file ended prematurely");
2166 goto fail;
2167 }
2169 u.ec->record(u.ec, rdata, cdata);
2171 unused = fwrite(cdata, i, 1, u.canceled_file);
2173 break;
2175 case 'p':
2176 if (!fscanf(u.drift_file, "%d", &i)) {
2177 perror("Drift file incomplete");
2178 goto fail;
2179 }
2181 if (fread(pdata, i, 1, u.played_file) <= 0) {
2182 perror("Played file ended prematurely");
2183 goto fail;
2184 }
2186 u.ec->play(u.ec, pdata);
2188 break;
2189 }
2190 }
2192 if (fread(rdata, i, 1, u.captured_file) > 0)
2193 pa_log("Not all capture data was consumed");
2194 if (fread(pdata, i, 1, u.played_file) > 0)
2195 pa_log("Not all playback data was consumed");
2196 }
2198 u.ec->done(u.ec);
2200 out:
2201 if (u.captured_file)
2202 fclose(u.captured_file);
2203 if (u.played_file)
2204 fclose(u.played_file);
2205 if (u.canceled_file)
2206 fclose(u.canceled_file);
2207 if (u.drift_file)
2208 fclose(u.drift_file);
2210 pa_xfree(rdata);
2211 pa_xfree(pdata);
2212 pa_xfree(cdata);
2214 pa_xfree(u.ec);
2215 pa_xfree(u.core);
2217 if (ma)
2218 pa_modargs_free(ma);
2220 return ret;
2222 usage:
2223 pa_log("Usage: %s play_file rec_file out_file [module args] [aec_args] [drift_file]", argv[0]);
2225 fail:
2226 ret = -1;
2227 goto out;
2228 }
2229 #endif /* ECHO_CANCEL_TEST */