echo-cancel: Fix memblockq length check.
[pulseaudio-raopUDP/pulseaudio-raop-alac.git] / src / modules / echo-cancel / module-echo-cancel.c
blob297f19eedcd8ac0ecdc81254446ea43ae036d38d
1 /***
2 This file is part of PulseAudio.
4 Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
6 Based on module-virtual-sink.c
7 module-virtual-source.c
8 module-loopback.c
10 Copyright 2010 Intel Corporation
11 Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
13 PulseAudio is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published
15 by the Free Software Foundation; either version 2.1 of the License,
16 or (at your option) any later version.
18 PulseAudio is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU Lesser General Public License
24 along with PulseAudio; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA.
27 ***/
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
33 #include <stdio.h>
34 #include <math.h>
36 #include "echo-cancel.h"
38 #include <pulse/xmalloc.h>
39 #include <pulse/timeval.h>
40 #include <pulse/rtclock.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/atomic.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/namereg.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/module.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/modargs.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/sample-util.h>
54 #include <pulsecore/ltdl-helper.h>
56 #include "module-echo-cancel-symdef.h"
/* Module metadata and the user-visible argument list (must stay in sync
 * with valid_modargs[] below). */
PA_MODULE_AUTHOR("Wim Taymans");
PA_MODULE_DESCRIPTION("Echo Cancellation");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        _("source_name=<name for the source> "
          "source_properties=<properties for the source> "
          "source_master=<name of source to filter> "
          "sink_name=<name for the sink> "
          "sink_properties=<properties for the sink> "
          "sink_master=<name of sink to filter> "
          "adjust_time=<how often to readjust rates in s> "
          "adjust_threshold=<how much drift to readjust after in ms> "
          "format=<sample format> "
          "rate=<sample rate> "
          "channels=<number of channels> "
          "channel_map=<channel map> "
          "aec_method=<implementation to use> "
          "aec_args=<parameters for the AEC engine> "
          "save_aec=<save AEC data in /tmp> "
          "autoloaded=<set if this module is being loaded automatically> "
          "use_volume_sharing=<yes or no> "
        ));
/* NOTE: Make sure the enum and ec_table are maintained in the correct order */
/* Each enabled backend contributes one enumerator; the values double as
 * indices into ec_table[] below, so the #ifdef order must match. */
typedef enum {
    PA_ECHO_CANCELLER_INVALID = -1,
#ifdef HAVE_SPEEX
    PA_ECHO_CANCELLER_SPEEX,
#endif
#ifdef HAVE_ADRIAN_EC
    PA_ECHO_CANCELLER_ADRIAN,
#endif
#ifdef HAVE_WEBRTC
    PA_ECHO_CANCELLER_WEBRTC,
#endif
} pa_echo_canceller_method_t;
/* Prefer the WebRTC canceller when it was compiled in, else fall back to
 * speex for the "aec_method" default. */
#ifdef HAVE_WEBRTC
#define DEFAULT_ECHO_CANCELLER "webrtc"
#else
#define DEFAULT_ECHO_CANCELLER "speex"
#endif

/* Vtable of available echo-canceller backends, indexed by
 * pa_echo_canceller_method_t (keep the #ifdef order in sync with the enum). */
static const pa_echo_canceller ec_table[] = {
#ifdef HAVE_SPEEX
    {
        /* Speex */
        .init       = pa_speex_ec_init,
        .run        = pa_speex_ec_run,
        .done       = pa_speex_ec_done,
    },
#endif
#ifdef HAVE_ADRIAN_EC
    {
        /* Adrian Andre's NLMS implementation */
        .init       = pa_adrian_ec_init,
        .run        = pa_adrian_ec_run,
        .done       = pa_adrian_ec_done,
    },
#endif
#ifdef HAVE_WEBRTC
    {
        /* WebRTC's audio processing engine */
        .init       = pa_webrtc_ec_init,
        .play       = pa_webrtc_ec_play,
        .record     = pa_webrtc_ec_record,
        .set_drift  = pa_webrtc_ec_set_drift,
        .run        = pa_webrtc_ec_run,
        .done       = pa_webrtc_ec_done,
    },
#endif
};
/* Default stream parameters used when no modargs override them. */
#define DEFAULT_RATE 32000
#define DEFAULT_CHANNELS 1
#define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)   /* rate readjust period */
#define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)  /* drift before resync */
#define DEFAULT_SAVE_AEC FALSE
#define DEFAULT_AUTOLOADED FALSE

#define MEMBLOCKQ_MAXLENGTH (16*1024*1024)

/* Can only be used in main context */
#define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
                      (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
145 /* This module creates a new (virtual) source and sink.
147 * The data sent to the new sink is kept in a memblockq before being
148 * forwarded to the real sink_master.
150 * Data read from source_master is matched against the saved sink data and
151 * echo canceled data is then pushed onto the new source.
153 * Both source and sink masters have their own threads to push/pull data
154 * respectively. We however perform all our actions in the source IO thread.
155 * To do this we send all played samples to the source IO thread where they
156 * are then pushed into the memblockq.
158 * Alignment is performed in two steps:
160 * 1) when something happens that requires quick adjustment of the alignment of
161 * capture and playback samples, we perform a resync. This adjusts the
162 * position in the playback memblock to the requested sample. Quick
163 * adjustments include moving the playback samples before the capture
164 * samples (because else the echo canceler does not work) or when the
165 * playback pointer drifts too far away.
167 * 2) periodically check the difference between capture and playback. We use a
168 * low and high watermark for adjusting the alignment. Playback should always
169 * be before capture and the difference should not be bigger than one frame
170 * size. We would ideally like to resample the sink_input but most driver
171 * don't give enough accuracy to be able to do that right now.
struct userdata;

/* Message object used to send volume updates from the source I/O thread
 * back to the main thread (see ECHO_CANCELLER_MESSAGE_SET_VOLUME). */
struct pa_echo_canceller_msg {
    pa_msgobject parent;
    struct userdata *userdata;
};

PA_DEFINE_PRIVATE_CLASS(pa_echo_canceller_msg, pa_msgobject);
#define PA_ECHO_CANCELLER_MSG(o) (pa_echo_canceller_msg_cast(o))
/* A point-in-time capture of latency/queue state on both the sink and
 * source sides; filled in by the respective I/O threads and consumed by
 * calc_diff() to estimate capture/playback misalignment. */
struct snapshot {
    pa_usec_t sink_now;       /* rtclock timestamp of the sink-side snapshot */
    pa_usec_t sink_latency;
    size_t sink_delay;        /* bytes queued inside the sink input */
    int64_t send_counter;     /* bytes posted by the sink I/O thread so far */

    pa_usec_t source_now;     /* rtclock timestamp of the source-side snapshot */
    pa_usec_t source_latency;
    size_t source_delay;      /* bytes queued inside the source output */
    int64_t recv_counter;     /* bytes received in the source I/O thread so far */
    size_t rlen;              /* length of source_memblockq (plus pending skip) */
    size_t plen;              /* length of sink_memblockq (plus pending skip) */
};
/* Per-module instance state shared (carefully) between the main thread and
 * the sink/source I/O threads. */
struct userdata {
    pa_core *core;
    pa_module *module;

    pa_bool_t autoloaded;
    pa_bool_t dead;
    pa_bool_t save_aec;      /* when set, dump AEC streams to the FILEs below */

    pa_echo_canceller *ec;   /* selected backend from ec_table */
    uint32_t blocksize;      /* fixed chunk size (bytes) the canceller consumes */

    pa_bool_t need_realign;

    /* to wakeup the source I/O thread */
    pa_asyncmsgq *asyncmsgq;
    pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;

    pa_source *source;
    pa_bool_t source_auto_desc;
    pa_source_output *source_output;
    pa_memblockq *source_memblockq; /* echo canceler needs fixed sized chunks */
    size_t source_skip;             /* capture bytes to bypass cancellation for */

    pa_sink *sink;
    pa_bool_t sink_auto_desc;
    pa_sink_input *sink_input;
    pa_memblockq *sink_memblockq;
    int64_t send_counter;          /* updated in sink IO thread */
    int64_t recv_counter;          /* updated in source IO thread */
    size_t sink_skip;              /* playback bytes to drop for resync */

    /* Bytes left over from previous iteration */
    size_t sink_rem;
    size_t source_rem;

    pa_atomic_t request_resync;    /* flag set to 1 to force a resync */

    pa_time_event *time_event;
    pa_usec_t adjust_time;         /* period of the drift-adjust timer */
    int adjust_threshold;

    /* debug dumps, only opened when save_aec is set */
    FILE *captured_file;
    FILE *played_file;
    FILE *canceled_file;
    FILE *drift_file;

    pa_bool_t use_volume_sharing;

    struct {
        pa_cvolume current_volume; /* source volume as seen by the I/O thread */
    } thread_info;
};
static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);

/* Accepted module arguments; must match the PA_MODULE_USAGE string above. */
static const char* const valid_modargs[] = {
    "source_name",
    "source_properties",
    "source_master",
    "sink_name",
    "sink_properties",
    "sink_master",
    "adjust_time",
    "adjust_threshold",
    "format",
    "rate",
    "channels",
    "channel_map",
    "aec_method",
    "aec_args",
    "save_aec",
    "autoloaded",
    "use_volume_sharing",
    NULL
};
/* Messages handled by our source output in the source I/O thread. */
enum {
    SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
    SOURCE_OUTPUT_MESSAGE_REWIND,
    SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
    SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
};

/* Messages handled by our sink input in the sink I/O thread. */
enum {
    SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
};

/* Messages handled by the echo-canceller msgobject in the main thread. */
enum {
    ECHO_CANCELLER_MESSAGE_SET_VOLUME,
};
289 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
290 int64_t buffer, diff_time, buffer_latency;
292 /* get the number of samples between capture and playback */
293 if (snapshot->plen > snapshot->rlen)
294 buffer = snapshot->plen - snapshot->rlen;
295 else
296 buffer = 0;
298 buffer += snapshot->source_delay + snapshot->sink_delay;
300 /* add the amount of samples not yet transferred to the source context */
301 if (snapshot->recv_counter <= snapshot->send_counter)
302 buffer += (int64_t) (snapshot->send_counter - snapshot->recv_counter);
303 else
304 buffer += PA_CLIP_SUB(buffer, (int64_t) (snapshot->recv_counter - snapshot->send_counter));
306 /* convert to time */
307 buffer_latency = pa_bytes_to_usec(buffer, &u->source_output->sample_spec);
309 /* capture and playback samples are perfectly aligned when diff_time is 0 */
310 diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
311 (snapshot->source_now - snapshot->source_latency);
313 pa_log_debug("Diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
314 (long long) snapshot->sink_latency,
315 (long long) buffer_latency, (long long) snapshot->source_latency,
316 (long long) snapshot->source_delay, (long long) snapshot->sink_delay,
317 (long long) (snapshot->send_counter - snapshot->recv_counter),
318 (long long) (snapshot->sink_now - snapshot->source_now));
320 return diff_time;
/* Called from main context */
/* Periodic timer: snapshot latencies on both I/O threads, compute the
 * capture/playback drift and either request a quick skip-based resync or
 * (in principle) a slow rate adjustment. The rate math is currently
 * commented out, so new_rate always equals base_rate. */
static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
    struct userdata *u = userdata;
    uint32_t old_rate, base_rate, new_rate;
    int64_t diff_time;
    /*size_t fs*/
    struct snapshot latency_snapshot;

    pa_assert(u);
    pa_assert(a);
    pa_assert(u->time_event == e);
    pa_assert_ctl_context();

    if (!IS_ACTIVE(u))
        return;

    /* update our snapshots */
    pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
    pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);

    /* calculate drift between capture and playback */
    diff_time = calc_diff(u, &latency_snapshot);

    /*fs = pa_frame_size(&u->source_output->sample_spec);*/
    old_rate = u->sink_input->sample_spec.rate;
    base_rate = u->source_output->sample_spec.rate;

    if (diff_time < 0) {
        /* recording before playback, we need to adjust quickly. The echo
         * canceler does not work in this case. */
        pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
            NULL, diff_time, NULL, NULL);
        /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
        new_rate = base_rate;
    }
    else {
        if (diff_time > u->adjust_threshold) {
            /* diff too big, quickly adjust */
            pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
                NULL, diff_time, NULL, NULL);
        }

        /* recording behind playback, we need to slowly adjust the rate to match */
        /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/

        /* assume equal samplerates for now */
        new_rate = base_rate;
    }

    /* make sure we don't make too big adjustments because that sounds horrible */
    if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
        new_rate = base_rate;

    if (new_rate != old_rate) {
        pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);

        pa_sink_input_set_rate(u->sink_input, new_rate);
    }

    /* re-arm ourselves for the next period */
    pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
}
/* Called from source I/O thread context */
/* Message handler for our virtual source: reports latency (master source
 * latency + internal queueing + one blocksize of buffering) and tracks
 * volume changes for the I/O thread. */
static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* The source is _put() before the source output is, so let's
             * make sure we don't access it in that time. Also, the
             * source output is first shut down, the source second. */
            if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
                !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
                *((pa_usec_t*) data) = 0;
                return 0;
            }

            *((pa_usec_t*) data) =

                /* Get the latency of the master source */
                pa_source_get_latency_within_thread(u->source_output->source) +
                /* Add the latency internal to our source output on top */
                pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
                /* and the buffering we do on the source */
                pa_bytes_to_usec(u->blocksize, &u->source_output->source->sample_spec);

            return 0;

        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
            u->thread_info.current_volume = u->source->reference_volume;
            break;

    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Called from sink I/O thread context */
/* Message handler for our virtual sink: reports latency as the master
 * sink's latency plus our sink input's internal render queue. */
static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY:

            /* The sink is _put() before the sink input is, so let's
             * make sure we don't access it in that time. Also, the
             * sink input is first shut down, the sink second. */
            if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
                !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
                *((pa_usec_t*) data) = 0;
                return 0;
            }

            *((pa_usec_t*) data) =

                /* Get the latency of the master sink */
                pa_sink_get_latency_within_thread(u->sink_input->sink) +

                /* Add the latency internal to our sink input on top */
                pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);

            return 0;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from main context */
/* State change on our virtual source: uncork the master-side source output
 * when running (and restart the drift timer + force a resync), cork it
 * when suspended. */
static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(state) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return 0;

    if (state == PA_SOURCE_RUNNING) {
        /* restart timer when both sink and source are active */
        if (IS_ACTIVE(u) && u->adjust_time)
            pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);

        pa_atomic_store(&u->request_resync, 1);
        pa_source_output_cork(u->source_output, FALSE);
    } else if (state == PA_SOURCE_SUSPENDED) {
        pa_source_output_cork(u->source_output, TRUE);
    }

    return 0;
}
/* Called from main context */
/* State change on our virtual sink: mirror of source_set_state_cb for the
 * sink input on the master sink. */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(state) ||
        !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        return 0;

    if (state == PA_SINK_RUNNING) {
        /* restart timer when both sink and source are active */
        if (IS_ACTIVE(u) && u->adjust_time)
            pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);

        pa_atomic_store(&u->request_resync, 1);
        pa_sink_input_cork(u->sink_input, FALSE);
    } else if (state == PA_SINK_SUSPENDED) {
        pa_sink_input_cork(u->sink_input, TRUE);
    }

    return 0;
}
503 /* Called from I/O thread context */
504 static void source_update_requested_latency_cb(pa_source *s) {
505 struct userdata *u;
507 pa_source_assert_ref(s);
508 pa_assert_se(u = s->userdata);
510 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
511 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
512 return;
514 pa_log_debug("Source update requested latency");
516 /* Just hand this one over to the master source */
517 pa_source_output_set_requested_latency_within_thread(
518 u->source_output,
519 pa_source_get_requested_latency_within_thread(s));
522 /* Called from I/O thread context */
523 static void sink_update_requested_latency_cb(pa_sink *s) {
524 struct userdata *u;
526 pa_sink_assert_ref(s);
527 pa_assert_se(u = s->userdata);
529 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
530 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
531 return;
533 pa_log_debug("Sink update requested latency");
535 /* Just hand this one over to the master sink */
536 pa_sink_input_set_requested_latency_within_thread(
537 u->sink_input,
538 pa_sink_get_requested_latency_within_thread(s));
541 /* Called from I/O thread context */
542 static void sink_request_rewind_cb(pa_sink *s) {
543 struct userdata *u;
545 pa_sink_assert_ref(s);
546 pa_assert_se(u = s->userdata);
548 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
549 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
550 return;
552 pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
554 /* Just hand this one over to the master sink */
555 pa_sink_input_request_rewind(u->sink_input,
556 s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
559 /* Called from main context */
560 static void source_set_volume_cb(pa_source *s) {
561 struct userdata *u;
563 pa_source_assert_ref(s);
564 pa_assert_se(u = s->userdata);
566 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
567 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
568 return;
570 pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, TRUE);
573 /* Called from main context */
574 static void sink_set_volume_cb(pa_sink *s) {
575 struct userdata *u;
577 pa_sink_assert_ref(s);
578 pa_assert_se(u = s->userdata);
580 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
581 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
582 return;
584 pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
/* Pull the current volume from the source output on the master source and
 * apply it to our virtual source if it changed. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u;
    pa_cvolume v;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
        !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        return;

    pa_source_output_get_volume(u->source_output, &v, TRUE);

    if (pa_cvolume_equal(&s->real_volume, &v))
        /* no change */
        return;

    /* take over the master-side volume; clearing soft volume since the
     * real volume now reflects it */
    s->real_volume = v;
    pa_source_set_soft_volume(s, NULL);
}
608 /* Called from main context */
609 static void source_set_mute_cb(pa_source *s) {
610 struct userdata *u;
612 pa_source_assert_ref(s);
613 pa_assert_se(u = s->userdata);
615 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
616 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
617 return;
619 pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
622 /* Called from main context */
623 static void sink_set_mute_cb(pa_sink *s) {
624 struct userdata *u;
626 pa_sink_assert_ref(s);
627 pa_assert_se(u = s->userdata);
629 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
630 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
631 return;
633 pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
636 /* Called from main context */
637 static void source_get_mute_cb(pa_source *s) {
638 struct userdata *u;
640 pa_source_assert_ref(s);
641 pa_assert_se(u = s->userdata);
643 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
644 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
645 return;
647 pa_source_output_get_mute(u->source_output);
/* must be called from the input thread context */
/* Translate a time misalignment into byte skip counts: a negative
 * diff_time means capture is ahead, so drop playback (sink) bytes; a
 * positive one means playback is too far ahead, so drop capture (source)
 * bytes. Only one of the two skip counters is armed at a time. */
static void apply_diff_time(struct userdata *u, int64_t diff_time) {
    int64_t diff;

    if (diff_time < 0) {
        diff = pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec);

        if (diff > 0) {
            /* add some extra safety samples to compensate for jitter in the
             * timings */
            diff += 10 * pa_frame_size (&u->source_output->sample_spec);

            pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);

            u->sink_skip = diff;
            u->source_skip = 0;
        }
    } else if (diff_time > 0) {
        diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);

        if (diff > 0) {
            pa_log("Playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);

            u->source_skip = diff;
            u->sink_skip = 0;
        }
    }
}
/* must be called from the input thread */
/* Take fresh latency snapshots (source side locally, sink side via a
 * synchronous message to the sink I/O thread), compute the drift and
 * convert it into skip counters via apply_diff_time(). */
static void do_resync(struct userdata *u) {
    int64_t diff_time;
    struct snapshot latency_snapshot;

    pa_log("Doing resync");

    /* update our snapshot */
    source_output_snapshot_within_thread(u, &latency_snapshot);
    pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);

    /* calculate drift between capture and playback */
    diff_time = calc_diff(u, &latency_snapshot);

    /* and adjust for the drift */
    apply_diff_time(u, diff_time);
}
/* 1. Calculate drift at this point, pass to canceller
 * 2. Push out playback samples in blocksize chunks
 * 3. Push out capture samples in blocksize chunks
 * 4. ???
 * 5. Profit
 */
static void do_push_drift_comp(struct userdata *u) {
    size_t rlen, plen;
    pa_memchunk rchunk, pchunk, cchunk;
    uint8_t *rdata, *pdata, *cdata;
    float drift;
    int unused PA_GCC_UNUSED;

    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    /* Estimate snapshot drift as follows:
     *   pd: amount of data consumed since last time
     *   rd: amount of data consumed since last time
     *
     *   drift = (pd - rd) / rd;
     *
     * We calculate pd and rd as the memblockq length less the number of
     * samples left from the last iteration (to avoid double counting
     * those remainder samples.
     */
    /* NOTE(review): if rlen == u->source_rem this divides by zero --
     * presumably callers guarantee fresh capture data here; verify. */
    drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
    u->sink_rem = plen % u->blocksize;
    u->source_rem = rlen % u->blocksize;

    /* Now let the canceller work its drift compensation magic */
    u->ec->set_drift(u->ec, drift);

    if (u->save_aec) {
        if (u->drift_file)
            fprintf(u->drift_file, "d %a\n", drift);
    }

    /* Send in the playback samples first */
    while (plen >= u->blocksize) {
        pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
        pdata = pa_memblock_acquire(pchunk.memblock);
        pdata += pchunk.index;

        u->ec->play(u->ec, pdata);

        if (u->save_aec) {
            if (u->drift_file)
                fprintf(u->drift_file, "p %d\n", u->blocksize);
            if (u->played_file)
                unused = fwrite(pdata, 1, u->blocksize, u->played_file);
        }

        pa_memblock_release(pchunk.memblock);
        pa_memblockq_drop(u->sink_memblockq, u->blocksize);
        pa_memblock_unref(pchunk.memblock);

        plen -= u->blocksize;
    }

    /* And now the capture samples */
    while (rlen >= u->blocksize) {
        pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);

        rdata = pa_memblock_acquire(rchunk.memblock);
        rdata += rchunk.index;

        /* fresh block to receive the echo-cancelled samples */
        cchunk.index = 0;
        cchunk.length = u->blocksize;
        cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
        cdata = pa_memblock_acquire(cchunk.memblock);

        u->ec->record(u->ec, rdata, cdata);

        if (u->save_aec) {
            if (u->drift_file)
                fprintf(u->drift_file, "c %d\n", u->blocksize);
            if (u->captured_file)
                unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
            if (u->canceled_file)
                unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
        }

        pa_memblock_release(cchunk.memblock);
        pa_memblock_release(rchunk.memblock);

        pa_memblock_unref(rchunk.memblock);

        /* the cancelled data becomes our virtual source's output */
        pa_source_post(u->source, &cchunk);
        pa_memblock_unref(cchunk.memblock);

        pa_memblockq_drop(u->source_memblockq, u->blocksize);
        rlen -= u->blocksize;
    }
}
/* This one's simpler than the drift compensation case -- we just iterate over
 * the capture buffer, and pass the canceller blocksize bytes of playback and
 * capture data. */
static void do_push(struct userdata *u) {
    size_t rlen, plen;
    pa_memchunk rchunk, pchunk, cchunk;
    uint8_t *rdata, *pdata, *cdata;
    int unused PA_GCC_UNUSED;

    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    while (rlen >= u->blocksize) {
        /* take fixed block from recorded samples */
        pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);

        /* if there is no playback data available, the capture chunk is
         * posted unprocessed below */
        if (plen >= u->blocksize) {
            /* take fixed block from played samples */
            pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);

            rdata = pa_memblock_acquire(rchunk.memblock);
            rdata += rchunk.index;
            pdata = pa_memblock_acquire(pchunk.memblock);
            pdata += pchunk.index;

            /* fresh block to receive the echo-cancelled samples */
            cchunk.index = 0;
            cchunk.length = u->blocksize;
            cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
            cdata = pa_memblock_acquire(cchunk.memblock);

            if (u->save_aec) {
                if (u->captured_file)
                    unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
                if (u->played_file)
                    unused = fwrite(pdata, 1, u->blocksize, u->played_file);
            }

            /* perform echo cancellation */
            u->ec->run(u->ec, rdata, pdata, cdata);

            if (u->save_aec) {
                if (u->canceled_file)
                    unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
            }

            pa_memblock_release(cchunk.memblock);
            pa_memblock_release(pchunk.memblock);
            pa_memblock_release(rchunk.memblock);

            /* drop consumed sink samples */
            pa_memblockq_drop(u->sink_memblockq, u->blocksize);
            pa_memblock_unref(pchunk.memblock);

            pa_memblock_unref(rchunk.memblock);
            /* the filtered samples now become the samples from our
             * source */
            rchunk = cchunk;

            plen -= u->blocksize;
        }

        /* forward the (echo-canceled) data to the virtual source */
        pa_source_post(u->source, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        pa_memblockq_drop(u->source_memblockq, u->blocksize);
        rlen -= u->blocksize;
    }
}
/* Called from input thread context */
/* Main capture path: queue incoming capture data, handle pending resync /
 * skip bookkeeping, then run the canceller over whole blocksize chunks. */
static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
    struct userdata *u;
    size_t rlen, plen, to_skip;
    pa_memchunk rchunk;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
        pa_log("Push when no link?");
        return;
    }

    /* if either side is not running, bypass cancellation entirely */
    if (PA_UNLIKELY(u->source->thread_info.state != PA_SOURCE_RUNNING ||
                    u->sink->thread_info.state != PA_SINK_RUNNING)) {
        pa_source_post(u->source, chunk);
        return;
    }

    /* handle queued messages, do any message sending of our own */
    while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
        ;

    pa_memblockq_push_align(u->source_memblockq, chunk);

    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    /* Let's not do anything else till we have enough data to process */
    if (rlen < u->blocksize)
        return;

    /* See if we need to drop samples in order to sync */
    if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
        do_resync(u);
    }

    /* Okay, skip cancellation for skipped source samples if needed. */
    if (PA_UNLIKELY(u->source_skip)) {
        /* The slightly tricky bit here is that we drop all but modulo
         * blocksize bytes and then adjust for that last bit on the sink side.
         * We do this because the source data is coming at a fixed rate, which
         * means the only way to try to catch up is drop sink samples and let
         * the canceller cope up with this. */
        to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
        to_skip -= to_skip % u->blocksize;

        if (to_skip) {
            /* skipped capture data is posted uncancelled */
            pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
            pa_source_post(u->source, &rchunk);

            pa_memblock_unref(rchunk.memblock);
            pa_memblockq_drop(u->source_memblockq, to_skip);

            rlen -= to_skip;
            u->source_skip -= to_skip;
        }

        /* move the sub-blocksize remainder of the skip to the sink side */
        if (rlen && u->source_skip % u->blocksize) {
            u->sink_skip += u->blocksize - (u->source_skip % u->blocksize);
            u->source_skip -= (u->source_skip % u->blocksize);
        }
    }

    /* And for the sink, these samples have been played back already, so we can
     * just drop them and get on with it. */
    if (PA_UNLIKELY(u->sink_skip)) {
        to_skip = plen >= u->sink_skip ? u->sink_skip : plen;

        pa_memblockq_drop(u->sink_memblockq, to_skip);

        plen -= to_skip;
        u->sink_skip -= to_skip;
    }

    /* process and push out samples */
    if (u->ec->params.drift_compensation)
        do_push_drift_comp(u);
    else
        do_push(u);
}
/* Called from I/O thread context */
/* Playback path: render data from our virtual sink, forward it to the
 * master sink via *chunk, and post a copy to the source I/O thread so the
 * canceller can use it as the echo reference. */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_requested)
        pa_sink_process_rewind(u->sink, 0);

    pa_sink_render_full(u->sink, nbytes, chunk);

    /* after an underrun the alignment is unknown; force a resync */
    if (i->thread_info.underrun_for > 0) {
        pa_log_debug("Handling end of underrun.");
        pa_atomic_store(&u->request_resync, 1);
    }

    /* let source thread handle the chunk. pass the sample count as well so that
     * the source IO thread can update the right variables. */
    pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
        NULL, 0, chunk, NULL);
    u->send_counter += chunk->length;

    return 0;
}
/* Called from input thread context */
/* Rewind on the capture side: rewind our virtual source, step the sink
 * (reference) queue's read side back, and pull the capture queue's write
 * index back by the same amount. */
static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_source_process_rewind(u->source, nbytes);

    /* go back on read side, we need to use older sink data for this */
    pa_memblockq_rewind(u->sink_memblockq, nbytes);

    /* manipulate write index */
    pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, TRUE);

    pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
        (long long) pa_memblockq_get_length (u->source_memblockq));
}
/* Called from I/O thread context */
/* Rewind on the playback side: rewind our virtual sink and tell the
 * source I/O thread to roll the reference queue back by nbytes. */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink process rewind %lld", (long long) nbytes);

    pa_sink_process_rewind(u->sink, nbytes);

    pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
    u->send_counter -= nbytes;
}
/* Fill in the source-side half of a latency snapshot. Must run in the
 * source I/O thread. Note that the pending skip counters are folded into
 * rlen/plen crosswise so calc_diff() sees the post-skip queue lengths. */
static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
    size_t delay, rlen, plen;
    pa_usec_t now, latency;

    now = pa_rtclock_now();
    latency = pa_source_get_latency_within_thread(u->source_output->source);
    delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);

    /* convert delay to pre-resampler bytes when a resampler is in use */
    delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
    rlen = pa_memblockq_get_length(u->source_memblockq);
    plen = pa_memblockq_get_length(u->sink_memblockq);

    snapshot->source_now = now;
    snapshot->source_latency = latency;
    snapshot->source_delay = delay;
    snapshot->recv_counter = u->recv_counter;
    snapshot->rlen = rlen + u->sink_skip;
    snapshot->plen = plen + u->source_skip;
}
/* Called from output thread context.
 * Message handler for the source output: receives playback data posted by
 * the sink-input thread (POST), rewind notifications (REWIND), latency
 * snapshot requests, and clock-drift adjustments. */
static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;

    switch (code) {

        case SOURCE_OUTPUT_MESSAGE_POST:

            pa_source_output_assert_io_context(u->source_output);

            /* Only queue reference data while we are actually capturing;
             * otherwise discard everything written so far. */
            if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
                pa_memblockq_push_align(u->sink_memblockq, chunk);
            else
                pa_memblockq_flush_write(u->sink_memblockq, TRUE);

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SOURCE_OUTPUT_MESSAGE_REWIND:
            pa_source_output_assert_io_context(u->source_output);

            /* manipulate write index, never go past what we have */
            if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
                pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
            else
                pa_memblockq_flush_write(u->sink_memblockq, TRUE);

            pa_log_debug("Sink rewind (%lld)", (long long) offset);

            u->recv_counter -= offset;

            return 0;

        case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
            struct snapshot *snapshot = (struct snapshot *) data;

            source_output_snapshot_within_thread(u, snapshot);
            return 0;
        }

        case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
            apply_diff_time(u, offset);
            return 0;
    }

    return pa_source_output_process_msg(obj, code, data, offset, chunk);
}
/* Message handler for the sink input (runs in its I/O thread).
 * Only handles latency snapshot requests; everything else is forwarded to
 * the generic sink-input handler. */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t delay;
            pa_usec_t now, latency;
            struct snapshot *snapshot = (struct snapshot *) data;

            pa_sink_input_assert_io_context(u->sink_input);

            now = pa_rtclock_now();
            latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
            delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            /* Convert queued bytes back to pre-resampler units if needed. */
            delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);

            snapshot->sink_now = now;
            snapshot->sink_latency = latency;
            snapshot->sink_delay = delay;
            snapshot->send_counter = u->send_counter;
            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
/* Called from I/O thread context.
 * Propagate the master sink's max-rewind to our queue and virtual sink. */
static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);

    /* The sink-side queue must be able to rewind at least that far. */
    pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
    pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
}
/* Called from I/O thread context.
 * Forward the master source's max-rewind to our virtual source. */
static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output update max rewind %lld", (long long) nbytes);

    pa_source_set_max_rewind_within_thread(u->source, nbytes);
}
/* Called from I/O thread context.
 * Forward the master sink's max-request to our virtual sink. */
static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update max request %lld", (long long) nbytes);

    pa_sink_set_max_request_within_thread(u->sink, nbytes);
}
/* Called from I/O thread context.
 * Only logs the new requested latency; nothing is forwarded here. */
static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
    struct userdata *u;
    pa_usec_t latency;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    latency = pa_sink_get_requested_latency_within_thread(i->sink);

    pa_log_debug("Sink input update requested latency %lld", (long long) latency);
}
/* Called from I/O thread context.
 * Only logs the new requested latency; nothing is forwarded here. */
static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
    struct userdata *u;
    pa_usec_t latency;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    latency = pa_source_get_requested_latency_within_thread(o->source);

    pa_log_debug("Source output update requested latency %lld", (long long) latency);
}
/* Called from I/O thread context.
 * Mirror the master sink's latency range onto our virtual sink. */
static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update latency range %lld %lld",
        (long long) i->sink->thread_info.min_latency,
        (long long) i->sink->thread_info.max_latency);

    pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
}
/* Called from I/O thread context.
 * Mirror the master source's latency range onto our virtual source. */
static void source_output_update_source_latency_range_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output update latency range %lld %lld",
        (long long) o->source->thread_info.min_latency,
        (long long) o->source->thread_info.max_latency);

    pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
}
/* Called from I/O thread context.
 * Mirror the master sink's fixed latency onto our virtual sink. */
static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input update fixed latency %lld",
        (long long) i->sink->thread_info.fixed_latency);

    pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
}
/* Called from I/O thread context.
 * Mirror the master source's fixed latency onto our virtual source. */
static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output update fixed latency %lld",
        (long long) o->source->thread_info.fixed_latency);

    pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
}
/* Called from output thread context.
 * Attach our virtual source to the master source's I/O thread: adopt its
 * rtpoll and latency parameters, then install the read end of the
 * inter-thread message queue. */
static void source_output_attach_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
    pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
    pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
    pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));

    pa_log_debug("Source output %d attach", o->index);

    pa_source_attach_within_thread(u->source);

    /* Poll the asyncmsgq from this thread so POST/REWIND messages from the
     * sink-input thread get processed here. */
    u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
            o->source->thread_info.rtpoll,
            PA_RTPOLL_LATE,
            u->asyncmsgq);
}
/* Called from I/O thread context.
 * Attach our virtual sink to the master sink's I/O thread: adopt its rtpoll
 * and latency parameters, install the write end of the inter-thread message
 * queue, then attach the sink itself. */
static void sink_input_attach_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
    pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);

    /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
     * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
    pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);

    /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
     * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
     * HERE. SEE (6) */
    pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
    pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));

    pa_log_debug("Sink input %d attach", i->index);

    /* This thread writes POST/REWIND messages for the source-output thread. */
    u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
            i->sink->thread_info.rtpoll,
            PA_RTPOLL_LATE,
            u->asyncmsgq);

    pa_sink_attach_within_thread(u->sink);
}
/* Called from output thread context.
 * Detach our virtual source from the master's I/O thread and tear down the
 * message-queue read poll item installed in attach. */
static void source_output_detach_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_source_detach_within_thread(u->source);
    pa_source_set_rtpoll(u->source, NULL);

    pa_log_debug("Source output %d detach", o->index);

    if (u->rtpoll_item_read) {
        pa_rtpoll_item_free(u->rtpoll_item_read);
        u->rtpoll_item_read = NULL;
    }
}
/* Called from I/O thread context.
 * Detach our virtual sink from the master's I/O thread and tear down the
 * message-queue write poll item installed in attach. */
static void sink_input_detach_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_detach_within_thread(u->sink);

    pa_sink_set_rtpoll(u->sink, NULL);

    pa_log_debug("Sink input %d detach", i->index);

    if (u->rtpoll_item_write) {
        pa_rtpoll_item_free(u->rtpoll_item_write);
        u->rtpoll_item_write = NULL;
    }
}
/* Called from output thread context.
 * State transitions on the capture stream are only logged here. */
static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    pa_log_debug("Source output %d state %d", o->index, state);
}
/* Called from IO thread context.
 * Log state changes; on the INIT -> linked transition request a rewind so
 * our stream becomes audible immediately. */
static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_log_debug("Sink input %d state %d", i->index, state);

    /* If we are added for the first time, ask for a rewinding so that
     * we are heard right-away. */
    if (PA_SINK_INPUT_IS_LINKED(state) &&
        i->thread_info.state == PA_SINK_INPUT_INIT) {
        pa_log_debug("Requesting rewind due to state change.");
        pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
    }
}
/* Called from main thread.
 * The master source went away: tear down our capture half and ask for the
 * module to be unloaded. */
static void source_output_kill_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_ctl_context();
    pa_assert_se(u = o->userdata);

    /* Mark dead first so may_move_to etc. refuse further operations. */
    u->dead = TRUE;

    /* The order here matters! We first kill the source output, followed
     * by the source. That means the source callbacks must be protected
     * against an unconnected source output! */
    pa_source_output_unlink(u->source_output);
    pa_source_unlink(u->source);

    pa_source_output_unref(u->source_output);
    u->source_output = NULL;

    pa_source_unref(u->source);
    u->source = NULL;

    pa_log_debug("Source output kill %d", o->index);

    pa_module_unload_request(u->module, TRUE);
}
/* Called from main context.
 * The master sink went away: tear down our playback half and ask for the
 * module to be unloaded. */
static void sink_input_kill_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    /* Mark dead first so may_move_to etc. refuse further operations. */
    u->dead = TRUE;

    /* The order here matters! We first kill the sink input, followed
     * by the sink. That means the sink callbacks must be protected
     * against an unconnected sink input! */
    pa_sink_input_unlink(u->sink_input);
    pa_sink_unlink(u->sink);

    pa_sink_input_unref(u->sink_input);
    u->sink_input = NULL;

    pa_sink_unref(u->sink);
    u->sink = NULL;

    pa_log_debug("Sink input kill %d", i->index);

    pa_module_unload_request(u->module, TRUE);
}
1400 /* Called from main thread */
1401 static pa_bool_t source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1402 struct userdata *u;
1404 pa_source_output_assert_ref(o);
1405 pa_assert_ctl_context();
1406 pa_assert_se(u = o->userdata);
1408 if (u->dead || u->autoloaded)
1409 return FALSE;
1411 return (u->source != dest) && (u->sink != dest->monitor_of);
1414 /* Called from main context */
1415 static pa_bool_t sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1416 struct userdata *u;
1418 pa_sink_input_assert_ref(i);
1419 pa_assert_se(u = i->userdata);
1421 if (u->dead || u->autoloaded)
1422 return FALSE;
1424 return u->sink != dest;
/* Called from main thread.
 * The capture stream is moving to a new master source (or is detached when
 * dest is NULL): re-point the asyncmsgq, mirror the new master's latency
 * flags, and regenerate our auto-description if we own it. */
static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_ctl_context();
    pa_assert_se(u = o->userdata);

    if (dest) {
        pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
        pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
    } else
        pa_source_set_asyncmsgq(u->source, NULL);

    /* Only rewrite the description if it was auto-generated by us. */
    if (u->source_auto_desc && dest) {
        const char *y, *z;
        pa_proplist *pl;

        pl = pa_proplist_new();
        y = pa_proplist_gets(u->sink_input->sink->proplist, PA_PROP_DEVICE_DESCRIPTION);
        z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
        pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
                y ? y : u->sink_input->sink->name);

        pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
        pa_proplist_free(pl);
    }
}
/* Called from main context.
 * The playback stream is moving to a new master sink (or is detached when
 * dest is NULL): re-point the asyncmsgq, mirror the new master's latency
 * flags, and regenerate our auto-description if we own it. */
static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (dest) {
        pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
        pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
    } else
        pa_sink_set_asyncmsgq(u->sink, NULL);

    /* Only rewrite the description if it was auto-generated by us. */
    if (u->sink_auto_desc && dest) {
        const char *y, *z;
        pa_proplist *pl;

        pl = pa_proplist_new();
        y = pa_proplist_gets(u->source_output->source->proplist, PA_PROP_DEVICE_DESCRIPTION);
        z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
        pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
                y ? y : u->source_output->source->name);

        pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
        pa_proplist_free(pl);
    }
}
/* Called from main context.
 * Propagate the stream's volume to our virtual sink (only installed when
 * volume sharing is disabled, see pa__init). */
static void sink_input_volume_changed_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_volume_changed(u->sink, &i->volume);
}
/* Called from main context.
 * Propagate the stream's mute state to our virtual sink. */
static void sink_input_mute_changed_cb(pa_sink_input *i) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    pa_sink_mute_changed(u->sink, i->muted);
}
/* Called from main context.
 * Message handler for the canceller's msgobject: applies a volume change
 * requested by the canceller engine from its I/O thread (see
 * pa_echo_canceller_set_capture_volume). */
static int canceller_process_msg_cb(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    struct pa_echo_canceller_msg *msg;
    struct userdata *u;

    pa_assert(o);

    msg = PA_ECHO_CANCELLER_MSG(o);
    u = msg->userdata;

    switch (code) {
        case ECHO_CANCELLER_MESSAGE_SET_VOLUME: {
            pa_cvolume *v = (pa_cvolume *) userdata;

            /* With volume sharing the virtual source owns the volume,
             * otherwise we set it on the source output directly. */
            if (u->use_volume_sharing)
                pa_source_set_volume(u->source, v, TRUE, FALSE);
            else
                pa_source_output_set_volume(u->source_output, v, FALSE, TRUE);

            break;
        }

        default:
            pa_assert_not_reached();
            break;
    }

    return 0;
}
/* Called by the canceller, so thread context.
 * Return the capture volume as last seen by the I/O thread. */
void pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
    *v = ec->msg->userdata->thread_info.current_volume;
}
/* Called by the canceller, so thread context.
 * Request a capture volume change: if it differs from the current value,
 * post a copy of the volume to the main thread (canceller_process_msg_cb),
 * which frees it with pa_xfree after applying. */
void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
    if (!pa_cvolume_equal(&ec->msg->userdata->thread_info.current_volume, v)) {
        pa_cvolume *vol = pa_xnewdup(pa_cvolume, v, 1);

        pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(ec->msg), ECHO_CANCELLER_MESSAGE_SET_VOLUME, vol, 0, NULL,
                pa_xfree);
    }
}
/* Map an aec_method= module-argument string to the canceller enum.
 * Only methods compiled in are recognised; anything else yields
 * PA_ECHO_CANCELLER_INVALID. */
static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
#ifdef HAVE_SPEEX
    if (pa_streq(method, "speex"))
        return PA_ECHO_CANCELLER_SPEEX;
#endif
#ifdef HAVE_ADRIAN_EC
    if (pa_streq(method, "adrian"))
        return PA_ECHO_CANCELLER_ADRIAN;
#endif
#ifdef HAVE_WEBRTC
    if (pa_streq(method, "webrtc"))
        return PA_ECHO_CANCELLER_WEBRTC;
#endif
    return PA_ECHO_CANCELLER_INVALID;
}
1565 /* Common initialisation bits between module-echo-cancel and the standalone test program */
1566 static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
1567 pa_echo_canceller_method_t ec_method;
1569 if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1570 pa_log("Invalid sample format specification or channel map");
1571 goto fail;
1574 u->ec = pa_xnew0(pa_echo_canceller, 1);
1575 if (!u->ec) {
1576 pa_log("Failed to alloc echo canceller");
1577 goto fail;
1580 if ((ec_method = get_ec_method_from_string(pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER))) < 0) {
1581 pa_log("Invalid echo canceller implementation");
1582 goto fail;
1585 u->ec->init = ec_table[ec_method].init;
1586 u->ec->play = ec_table[ec_method].play;
1587 u->ec->record = ec_table[ec_method].record;
1588 u->ec->set_drift = ec_table[ec_method].set_drift;
1589 u->ec->run = ec_table[ec_method].run;
1590 u->ec->done = ec_table[ec_method].done;
1592 return 0;
1594 fail:
1595 return -1;
/* Module entry point.
 * Builds the whole echo-cancellation plumbing: resolves the master devices,
 * parses the module arguments, creates the virtual source and sink, the
 * source output / sink input that connect them to the masters, the two
 * memblockqs used for alignment, the drift/latency adjustment timer and the
 * optional AEC debug dump files. Returns 0 on success, -1 on failure
 * (pa__done() cleans up partial state). */
int pa__init(pa_module*m) {
    struct userdata *u;
    pa_sample_spec source_ss, sink_ss;
    pa_channel_map source_map, sink_map;
    pa_modargs *ma;
    pa_source *source_master=NULL;
    pa_sink *sink_master=NULL;
    pa_source_output_new_data source_output_data;
    pa_sink_input_new_data sink_input_data;
    pa_source_new_data source_data;
    pa_sink_new_data sink_data;
    pa_memchunk silence;
    uint32_t temp;

    pa_assert(m);

    if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
        pa_log("Failed to parse module arguments.");
        goto fail;
    }

    /* Resolve the master devices we filter between. */
    if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
        pa_log("Master source not found");
        goto fail;
    }
    pa_assert(source_master);

    if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
        pa_log("Master sink not found");
        goto fail;
    }
    pa_assert(sink_master);

    /* Cancelling a sink against its own monitor would be a no-op loop. */
    if (source_master->monitor_of == sink_master) {
        pa_log("Can't cancel echo between a sink and its monitor");
        goto fail;
    }

    /* The capture side runs at the canceller's default rate/channel count;
     * the playback side keeps the master sink's format. */
    source_ss = source_master->sample_spec;
    source_ss.rate = DEFAULT_RATE;
    source_ss.channels = DEFAULT_CHANNELS;
    pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);

    sink_ss = sink_master->sample_spec;
    sink_map = sink_master->channel_map;

    u = pa_xnew0(struct userdata, 1);
    if (!u) {
        pa_log("Failed to alloc userdata");
        goto fail;
    }
    u->core = m->core;
    u->module = m;
    m->userdata = u;
    u->dead = FALSE;

    u->use_volume_sharing = TRUE;
    if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &u->use_volume_sharing) < 0) {
        pa_log("use_volume_sharing= expects a boolean argument");
        goto fail;
    }

    /* adjust_time= is given in seconds, stored internally in microseconds. */
    temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
    if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
        pa_log("Failed to parse adjust_time value");
        goto fail;
    }

    if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
        u->adjust_time = temp * PA_USEC_PER_SEC;
    else
        u->adjust_time = DEFAULT_ADJUST_TIME_USEC;

    /* adjust_threshold= is given in milliseconds, stored in microseconds. */
    temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
    if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
        pa_log("Failed to parse adjust_threshold value");
        goto fail;
    }

    if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
        u->adjust_threshold = temp * PA_USEC_PER_MSEC;
    else
        u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;

    u->save_aec = DEFAULT_SAVE_AEC;
    if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
        pa_log("Failed to parse save_aec value");
        goto fail;
    }

    u->autoloaded = DEFAULT_AUTOLOADED;
    if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
        pa_log("Failed to parse autoloaded value");
        goto fail;
    }

    /* Shared with the standalone test program: sample spec parsing and
     * canceller engine selection/allocation. */
    if (init_common(ma, u, &source_ss, &source_map) < 0)
        goto fail;

    u->asyncmsgq = pa_asyncmsgq_new(0);
    u->need_realign = TRUE;

    if (u->ec->init) {
        if (!u->ec->init(u->core, u->ec, &source_ss, &source_map, &sink_ss, &sink_map, &u->blocksize, pa_modargs_get_value(ma, "aec_args", NULL))) {
            pa_log("Failed to init AEC engine");
            goto fail;
        }
    }

    /* A drift-compensating canceller must provide a set_drift hook. */
    if (u->ec->params.drift_compensation)
        pa_assert(u->ec->set_drift);

    /* Create source */
    pa_source_new_data_init(&source_data);
    source_data.driver = __FILE__;
    source_data.module = m;
    if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
        source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
    pa_source_new_data_set_sample_spec(&source_data, &source_ss);
    pa_source_new_data_set_channel_map(&source_data, &source_map);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
    if (!u->autoloaded)
        pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");

    if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&source_data);
        goto fail;
    }

    /* Generate a description only if the user didn't supply one; remember
     * that so moving_cb can keep it up to date. */
    if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
        const char *y, *z;

        y = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
        z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
        pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
                z ? z : source_master->name, y ? y : sink_master->name);
    }

    u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
                                                     | (u->use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
    pa_source_new_data_done(&source_data);

    if (!u->source) {
        pa_log("Failed to create source.");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg_cb;
    u->source->set_state = source_set_state_cb;
    u->source->update_requested_latency = source_update_requested_latency_cb;
    pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
    pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
    if (!u->use_volume_sharing) {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
        pa_source_enable_decibel_volume(u->source, TRUE);
    }
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);

    /* Create sink */
    pa_sink_new_data_init(&sink_data);
    sink_data.driver = __FILE__;
    sink_data.module = m;
    if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
        sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
    pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
    pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
    pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
    pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
    if (!u->autoloaded)
        pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");

    if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&sink_data);
        goto fail;
    }

    if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
        const char *y, *z;

        y = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
        z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
        pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
                z ? z : sink_master->name, y ? y : source_master->name);
    }

    u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
                                               | (u->use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
    pa_sink_new_data_done(&sink_data);

    if (!u->sink) {
        pa_log("Failed to create sink.");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->request_rewind = sink_request_rewind_cb;
    pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
    if (!u->use_volume_sharing) {
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
        pa_sink_enable_decibel_volume(u->sink, TRUE);
    }
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);

    /* Create source output */
    pa_source_output_new_data_init(&source_output_data);
    source_output_data.driver = __FILE__;
    source_output_data.module = m;
    pa_source_output_new_data_set_source(&source_output_data, source_master, FALSE);
    source_output_data.destination_source = u->source;
    /* FIXME
       source_output_data.flags = PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND; */

    pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
    pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
    pa_source_output_new_data_set_sample_spec(&source_output_data, &source_ss);
    pa_source_output_new_data_set_channel_map(&source_output_data, &source_map);

    pa_source_output_new(&u->source_output, m->core, &source_output_data);
    pa_source_output_new_data_done(&source_output_data);

    if (!u->source_output)
        goto fail;

    u->source_output->parent.process_msg = source_output_process_msg_cb;
    u->source_output->push = source_output_push_cb;
    u->source_output->process_rewind = source_output_process_rewind_cb;
    u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
    u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
    u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
    u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
    u->source_output->kill = source_output_kill_cb;
    u->source_output->attach = source_output_attach_cb;
    u->source_output->detach = source_output_detach_cb;
    u->source_output->state_change = source_output_state_change_cb;
    u->source_output->may_move_to = source_output_may_move_to_cb;
    u->source_output->moving = source_output_moving_cb;
    u->source_output->userdata = u;

    u->source->output_from_master = u->source_output;

    /* Create sink input */
    pa_sink_input_new_data_init(&sink_input_data);
    sink_input_data.driver = __FILE__;
    sink_input_data.module = m;
    pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, FALSE);
    sink_input_data.origin_sink = u->sink;
    pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
    pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
    pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
    pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
    sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;

    pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
    pa_sink_input_new_data_done(&sink_input_data);

    if (!u->sink_input)
        goto fail;

    u->sink_input->parent.process_msg = sink_input_process_msg_cb;
    u->sink_input->pop = sink_input_pop_cb;
    u->sink_input->process_rewind = sink_input_process_rewind_cb;
    u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
    u->sink_input->update_max_request = sink_input_update_max_request_cb;
    u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
    u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
    u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
    u->sink_input->kill = sink_input_kill_cb;
    u->sink_input->attach = sink_input_attach_cb;
    u->sink_input->detach = sink_input_detach_cb;
    u->sink_input->state_change = sink_input_state_change_cb;
    u->sink_input->may_move_to = sink_input_may_move_to_cb;
    u->sink_input->moving = sink_input_moving_cb;
    if (!u->use_volume_sharing)
        u->sink_input->volume_changed = sink_input_volume_changed_cb;
    u->sink_input->mute_changed = sink_input_mute_changed_cb;
    u->sink_input->userdata = u;

    u->sink->input_to_master = u->sink_input;

    /* Both queues are pre-seeded with silence (see their use in alignment). */
    pa_sink_input_get_silence(u->sink_input, &silence);

    u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
        &source_ss, 1, 1, 0, &silence);
    u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
        &sink_ss, 1, 1, 0, &silence);

    pa_memblock_unref(silence.memblock);

    if (!u->source_memblockq || !u->sink_memblockq) {
        pa_log("Failed to create memblockq.");
        goto fail;
    }

    /* Our own drift-adjustment timer is only used when the canceller does
     * not compensate for drift itself. */
    if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
        u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
    else if (u->ec->params.drift_compensation) {
        pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
        u->adjust_time = 0;
        /* Perform resync just once to give the canceller a leg up */
        pa_atomic_store(&u->request_resync, 1);
    }

    /* Optional raw dumps of the three streams for offline AEC debugging. */
    if (u->save_aec) {
        pa_log("Creating AEC files in /tmp");
        u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
        if (u->captured_file == NULL)
            perror ("fopen failed");
        u->played_file = fopen("/tmp/aec_play.sw", "wb");
        if (u->played_file == NULL)
            perror ("fopen failed");
        u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
        if (u->canceled_file == NULL)
            perror ("fopen failed");
        if (u->ec->params.drift_compensation) {
            u->drift_file = fopen("/tmp/aec_drift.txt", "w");
            if (u->drift_file == NULL)
                perror ("fopen failed");
        }
    }

    /* Message object through which the canceller requests volume changes. */
    u->ec->msg = pa_msgobject_new(pa_echo_canceller_msg);
    u->ec->msg->parent.process_msg = canceller_process_msg_cb;
    u->ec->msg->userdata = u;

    u->thread_info.current_volume = u->source->reference_volume;

    pa_sink_put(u->sink);
    pa_source_put(u->source);

    pa_sink_input_put(u->sink_input);
    pa_source_output_put(u->source_output);
    pa_modargs_free(ma);

    return 0;

fail:
    if (ma)
        pa_modargs_free(ma);

    pa__done(m);

    return -1;
}
1953 int pa__get_n_used(pa_module *m) {
1954 struct userdata *u;
1956 pa_assert(m);
1957 pa_assert_se(u = m->userdata);
1959 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
/* Module teardown: release everything pa__init() created.
 * Safe to call on partially-initialised state (every member is checked),
 * which is how pa__init()'s fail path uses it. */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    if (!(u = m->userdata))
        return;

    u->dead = TRUE;

    /* See comments in source_output_kill_cb() above regarding
     * destruction order! */

    if (u->time_event)
        u->core->mainloop->time_free(u->time_event);

    /* Unlink the streams first, then the devices, then drop references. */
    if (u->source_output)
        pa_source_output_unlink(u->source_output);
    if (u->sink_input)
        pa_sink_input_unlink(u->sink_input);

    if (u->source)
        pa_source_unlink(u->source);
    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->source_output)
        pa_source_output_unref(u->source_output);
    if (u->sink_input)
        pa_sink_input_unref(u->sink_input);

    if (u->source)
        pa_source_unref(u->source);
    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->source_memblockq)
        pa_memblockq_free(u->source_memblockq);
    if (u->sink_memblockq)
        pa_memblockq_free(u->sink_memblockq);

    if (u->ec) {
        /* Let the engine release its own state before we free the struct. */
        if (u->ec->done)
            u->ec->done(u->ec);

        pa_xfree(u->ec);
    }

    if (u->asyncmsgq)
        pa_asyncmsgq_unref(u->asyncmsgq);

    /* Debug dump files are only open when save_aec was requested. */
    if (u->save_aec) {
        if (u->played_file)
            fclose(u->played_file);
        if (u->captured_file)
            fclose(u->captured_file);
        if (u->canceled_file)
            fclose(u->canceled_file);
        if (u->drift_file)
            fclose(u->drift_file);
    }

    pa_xfree(u);
}
#ifdef ECHO_CANCEL_TEST
/*
 * Stand-alone test program for running the canceller on pre-recorded files.
 *
 *   argv[1] played file (raw, "rb")
 *   argv[2] captured file (raw, "rb")
 *   argv[3] output file for the canceled signal ("wb")
 *   argv[4] optional module arguments
 *   argv[5] optional AEC-engine arguments
 *   argv[6] drift file (text), required iff the engine enables drift
 *           compensation; records are 'd <float>', 'c <len>', 'p <len>'
 *
 * Returns 0 on success, -1 on any failure.
 */
int main(int argc, char* argv[]) {
    struct userdata u;
    pa_sample_spec source_ss, sink_ss;
    pa_channel_map source_map, sink_map;
    pa_modargs *ma = NULL;
    uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
    int unused PA_GCC_UNUSED;
    int ret = 0, i;
    char c;
    float drift;

    pa_memzero(&u, sizeof(u));

    if (argc < 4 || argc > 7)
        goto usage;

    u.captured_file = fopen(argv[2], "rb");
    if (u.captured_file == NULL) {
        perror("fopen failed");
        goto fail;
    }

    u.played_file = fopen(argv[1], "rb");
    if (u.played_file == NULL) {
        perror("fopen failed");
        goto fail;
    }

    u.canceled_file = fopen(argv[3], "wb");
    if (u.canceled_file == NULL) {
        perror("fopen failed");
        goto fail;
    }

    /* Minimal fake core so the engines can query CPU features. */
    u.core = pa_xnew0(pa_core, 1);
    u.core->cpu_info.cpu_type = PA_CPU_X86;
    u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;

    if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
        pa_log("Failed to parse module arguments.");
        goto fail;
    }

    source_ss.format = PA_SAMPLE_S16LE;
    source_ss.rate = DEFAULT_RATE;
    source_ss.channels = DEFAULT_CHANNELS;
    pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);

    if (init_common(ma, &u, &source_ss, &source_map) < 0)
        goto fail;

    /* FIX: guard argv[5] with argc > 5 (was argc > 4, which only worked
     * because argv[argc] is the NULL terminator when argc == 5). */
    if (!u.ec->init(u.core, u.ec, &source_ss, &source_map, &sink_ss, &sink_map, &u.blocksize,
                    (argc > 5) ? argv[5] : NULL)) {
        pa_log("Failed to init AEC engine");
        goto fail;
    }

    if (u.ec->params.drift_compensation) {
        if (argc < 7) {
            pa_log("Drift compensation enabled but drift file not specified");
            goto fail;
        }

        u.drift_file = fopen(argv[6], "rt");

        if (u.drift_file == NULL) {
            perror("fopen failed");
            goto fail;
        }
    }

    rdata = pa_xmalloc(u.blocksize);
    pdata = pa_xmalloc(u.blocksize);
    cdata = pa_xmalloc(u.blocksize);

    if (!u.ec->params.drift_compensation) {
        /* Lock-step mode: every captured block must be matched by a played
         * block of the same size. */
        while (fread(rdata, u.blocksize, 1, u.captured_file) > 0) {
            if (fread(pdata, u.blocksize, 1, u.played_file) == 0) {
                perror("Played file ended before captured file");
                goto fail;
            }

            u.ec->run(u.ec, rdata, pdata, cdata);

            unused = fwrite(cdata, u.blocksize, 1, u.canceled_file);
        }
    } else {
        /* Drift mode: replay the recorded sequence of drift/capture/play
         * events. The leading space in " %c" skips the record separators. */
        while (fscanf(u.drift_file, " %c", &c) == 1) {
            switch (c) {
                case 'd':
                    /* FIX: was "if (!fscanf(u.drift_file, "%a", &drift))".
                     * fscanf() returns EOF (-1) at end of file, which "!"
                     * treats as success, so a truncated file fed an
                     * uninitialized float to set_drift(); and "%a" is a GNU
                     * string-allocation modifier outside C99 scanf. "%f"
                     * parses the same (including hex) floats portably. */
                    if (fscanf(u.drift_file, "%f", &drift) != 1) {
                        perror("Drift file incomplete");
                        goto fail;
                    }

                    u.ec->set_drift(u.ec, drift);

                    break;

                case 'c':
                    /* FIX: require exactly one conversion (EOF == -1 passed
                     * the old "!fscanf" test) and bound the length so a
                     * malformed record can't overflow the blocksize-sized
                     * buffers or pass a negative size to fread(). */
                    if (fscanf(u.drift_file, "%d", &i) != 1 || i <= 0 || (size_t) i > u.blocksize) {
                        perror("Drift file incomplete");
                        goto fail;
                    }

                    if (fread(rdata, i, 1, u.captured_file) <= 0) {
                        perror("Captured file ended prematurely");
                        goto fail;
                    }

                    u.ec->record(u.ec, rdata, cdata);

                    unused = fwrite(cdata, i, 1, u.canceled_file);

                    break;

                case 'p':
                    if (fscanf(u.drift_file, "%d", &i) != 1 || i <= 0 || (size_t) i > u.blocksize) {
                        perror("Drift file incomplete");
                        goto fail;
                    }

                    if (fread(pdata, i, 1, u.played_file) <= 0) {
                        perror("Played file ended prematurely");
                        goto fail;
                    }

                    u.ec->play(u.ec, pdata);

                    break;

                default:
                    /* Unknown record type: ignore, as the original loop did
                     * for stray characters. */
                    break;
            }
        }

        /* FIX: these probes used "i" as the read size, which is
         * uninitialized when the drift file contained no 'c'/'p' records;
         * probing a single byte is sufficient to detect leftover data. */
        if (fread(rdata, 1, 1, u.captured_file) > 0)
            pa_log("All capture data was not consumed");
        if (fread(pdata, 1, 1, u.played_file) > 0)
            pa_log("All playback data was not consumed");
    }

    u.ec->done(u.ec);

out:
    if (u.captured_file)
        fclose(u.captured_file);
    if (u.played_file)
        fclose(u.played_file);
    if (u.canceled_file)
        fclose(u.canceled_file);
    if (u.drift_file)
        fclose(u.drift_file);

    pa_xfree(rdata);
    pa_xfree(pdata);
    pa_xfree(cdata);

    pa_xfree(u.ec);
    pa_xfree(u.core);

    if (ma)
        pa_modargs_free(ma);

    return ret;

usage:
    pa_log("Usage: %s play_file rec_file out_file [module args] [aec_args] [drift_file]", argv[0]);

fail:
    ret = -1;
    goto out;
}
#endif /* ECHO_CANCEL_TEST */