/*
   Unix SMB/CIFS implementation.

   testing of the events subsystem

   Copyright (C) Stefan Metzmacher 2006-2009
   Copyright (C) Jeremy Allison    2013

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "lib/tevent/tevent.h"
#include "system/filesys.h"
#include "system/select.h"
#include "system/network.h"
#include "torture/torture.h"
#include "torture/local/proto.h"
static int fde_count;

static void fde_handler_read(struct tevent_context *ev_ctx, struct tevent_fd *f,
			     uint16_t flags, void *private_data)
{
	int *fd = (int *)private_data;
	char c;

	kill(getpid(), SIGUSR1);
	kill(getpid(), SIGALRM);

	read(fd[0], &c, 1);
	fde_count++;
}
static void fde_handler_write(struct tevent_context *ev_ctx, struct tevent_fd *f,
			      uint16_t flags, void *private_data)
{
	int *fd = (int *)private_data;
	char c = 0;

	write(fd[1], &c, 1);
}
/* This will only fire if the fd's returned from pipe() are bi-directional. */
static void fde_handler_read_1(struct tevent_context *ev_ctx, struct tevent_fd *f,
			       uint16_t flags, void *private_data)
{
	int *fd = (int *)private_data;
	char c;

	kill(getpid(), SIGUSR1);
	kill(getpid(), SIGALRM);

	read(fd[1], &c, 1);
	fde_count++;
}
/* This will only fire if the fd's returned from pipe() are bi-directional. */
static void fde_handler_write_1(struct tevent_context *ev_ctx, struct tevent_fd *f,
				uint16_t flags, void *private_data)
{
	int *fd = (int *)private_data;
	char c = 0;

	write(fd[0], &c, 1);
}
static void finished_handler(struct tevent_context *ev_ctx, struct tevent_timer *te,
			     struct timeval tval, void *private_data)
{
	int *finished = (int *)private_data;

	(*finished) = 1;
}
static void count_handler(struct tevent_context *ev_ctx, struct tevent_signal *te,
			  int signum, int count, void *info, void *private_data)
{
	int *countp = (int *)private_data;

	(*countp) += count;
}
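/*
 * How the context test below keeps score: every read event on the pipe
 * raises SIGUSR1 and SIGALRM against our own pid, so once the loop has
 * drained, the signal counters can be checked against fde_count. Roughly:
 *
 *	alarm_count == fde_count + 1	se1 (SA_RESTART) counts every SIGALRM,
 *					se2 (SA_RESETHAND) only its first one
 *	info_count  == fde_count	se3 (SA_SIGINFO) counts every SIGUSR1
 */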
static bool test_event_context(struct torture_context *test,
			       const void *test_data)
{
	struct tevent_context *ev_ctx;
	int fd[2] = { -1, -1 };
	const char *backend = (const char *)test_data;
	int alarm_count=0, info_count=0;
	struct tevent_fd *fde_read;
	struct tevent_fd *fde_read_1;
	struct tevent_fd *fde_write;
	struct tevent_fd *fde_write_1;
	struct tevent_signal *se1 = NULL;
	struct tevent_signal *se2 = NULL;
	struct tevent_signal *se3 = NULL;
	int finished = 0;
	struct timeval t;
	int ret;
	ev_ctx = tevent_context_init_byname(test, backend);
	if (ev_ctx == NULL) {
		torture_comment(test, "event backend '%s' not supported\n", backend);
		return true;
	}

	torture_comment(test, "backend '%s' - %s\n",
			backend, __FUNCTION__);

	/* reset the global event counter and create a pipe */
	fde_count = 0;
	ret = pipe(fd);
	torture_assert_int_equal(test, ret, 0, "pipe failed");
	fde_read = tevent_add_fd(ev_ctx, ev_ctx, fd[0], TEVENT_FD_READ,
				 fde_handler_read, fd);
	fde_write_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[0], TEVENT_FD_WRITE,
				    fde_handler_write_1, fd);

	fde_write = tevent_add_fd(ev_ctx, ev_ctx, fd[1], TEVENT_FD_WRITE,
				  fde_handler_write, fd);
	fde_read_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[1], TEVENT_FD_READ,
				   fde_handler_read_1, fd);

	tevent_fd_set_auto_close(fde_read);
	tevent_fd_set_auto_close(fde_write);

	tevent_add_timer(ev_ctx, ev_ctx, timeval_current_ofs(2,0),
			 finished_handler, &finished);
	se1 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESTART, count_handler, &alarm_count);
	torture_assert(test, se1 != NULL, "failed to setup se1");

	se2 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESETHAND, count_handler, &alarm_count);
	torture_assert(test, se2 != NULL, "failed to setup se2");

	se3 = tevent_add_signal(ev_ctx, ev_ctx, SIGUSR1, SA_SIGINFO, count_handler, &info_count);
	torture_assert(test, se3 != NULL, "failed to setup se3");
	t = timeval_current();

	while (!finished) {
		if (tevent_loop_once(ev_ctx) == -1) {
			torture_fail(test, talloc_asprintf(test, "Failed event loop %s\n", strerror(errno)));
		}
	}

	talloc_free(fde_read_1);
	talloc_free(fde_write_1);
	talloc_free(fde_read);
	talloc_free(fde_write);
	while (alarm_count < fde_count+1) {
		if (tevent_loop_once(ev_ctx) == -1) {
			break;
		}
	}

	torture_comment(test, "Got %.2f pipe events/sec\n", fde_count/timeval_elapsed(&t));

	torture_assert_int_equal(test, alarm_count, 1+fde_count, "alarm count mismatch");

	/*
	 * we do not call talloc_free(se2)
	 * because it is already gone,
	 * after triggering the event handler.
	 */
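	/*
	 * Background: SA_RESETHAND restores the default disposition after
	 * the first delivery (see sigaction(2)), and tevent mirrors that by
	 * tearing the tevent_signal down once its handler has fired. A
	 * one-shot user of the flag would follow roughly this pattern:
	 *
	 *	se = tevent_add_signal(ev, ev, SIGALRM, SA_RESETHAND,
	 *			       handler, &count);
	 *	kill(getpid(), SIGALRM);
	 *	tevent_loop_once(ev);	- handler runs, se goes away
	 *	se = NULL;		- do not talloc_free(se) afterwards
	 */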
	torture_assert_int_equal(test, info_count, fde_count, "info count mismatch");

	talloc_free(ev_ctx);

	return true;
}
struct test_event_fd1_state {
	struct torture_context *tctx;
	const char *backend;
	struct tevent_context *ev;
	int sock[2];
	struct tevent_timer *te;
	struct tevent_fd *fde0;
	struct tevent_fd *fde1;
	int loop_count;
	bool got_write;
	bool got_read;
	bool drain;
	bool drain_done;
	bool finished;
	const char *error;
};
static void test_event_fd1_fde_handler(struct tevent_context *ev_ctx,
				       struct tevent_fd *fde,
				       uint16_t flags,
				       void *private_data)
{
	struct test_event_fd1_state *state =
		(struct test_event_fd1_state *)private_data;

	if (state->drain_done) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	if (state->drain) {
		ssize_t ret;
		uint8_t c = 0;

		if (!(flags & TEVENT_FD_READ)) {
			state->finished = true;
			state->error = __location__;
			return;
		}

		ret = read(state->sock[0], &c, 1);
		if (ret == 1) {
			return;
		}

		/*
		 * end of test...
		 */
		tevent_fd_set_flags(fde, 0);
		state->drain_done = true;
		return;
	}

	if (!state->got_write) {
		uint8_t c = 0;

		if (flags != TEVENT_FD_WRITE) {
			state->finished = true;
			state->error = __location__;
			return;
		}
		state->got_write = true;

		/*
		 * we write to the other socket...
		 */
		write(state->sock[1], &c, 1);
		TEVENT_FD_NOT_WRITEABLE(fde);
		TEVENT_FD_READABLE(fde);
		return;
	}

	if (!state->got_read) {
		if (flags != TEVENT_FD_READ) {
			state->finished = true;
			state->error = __location__;
			return;
		}
		state->got_read = true;

		TEVENT_FD_NOT_READABLE(fde);
		return;
	}

	state->finished = true;
	state->error = __location__;
	return;
}
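/*
 * The TEVENT_FD_READABLE/TEVENT_FD_NOT_WRITEABLE style macros used above are
 * (roughly, per tevent.h) thin wrappers around tevent_fd_get_flags() and
 * tevent_fd_set_flags(), e.g.:
 *
 *	TEVENT_FD_READABLE(fde)      -> tevent_fd_set_flags(fde,
 *					tevent_fd_get_flags(fde) | TEVENT_FD_READ)
 *	TEVENT_FD_NOT_WRITEABLE(fde) -> tevent_fd_set_flags(fde,
 *					tevent_fd_get_flags(fde) & ~TEVENT_FD_WRITE)
 *
 * They only change which conditions are monitored, never the fd itself.
 */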
static void test_event_fd1_finished(struct tevent_context *ev_ctx,
				    struct tevent_timer *te,
				    struct timeval tval,
				    void *private_data)
{
	struct test_event_fd1_state *state =
		(struct test_event_fd1_state *)private_data;

	if (state->drain_done) {
		state->finished = true;
		return;
	}

	if (!state->got_write) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	if (!state->got_read) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	state->loop_count++;
	if (state->loop_count > 3) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	state->got_write = false;
	state->got_read = false;

	tevent_fd_set_flags(state->fde0, TEVENT_FD_WRITE);

	if (state->loop_count > 2) {
		state->drain = true;
		TALLOC_FREE(state->fde1);
		TEVENT_FD_READABLE(state->fde0);
	}

	state->te = tevent_add_timer(state->ev, state->ev,
				     timeval_current_ofs(0,2000),
				     test_event_fd1_finished, state);
}
static bool test_event_fd1(struct torture_context *tctx,
			   const void *test_data)
{
	struct test_event_fd1_state state;

	ZERO_STRUCT(state);
	state.tctx = tctx;
	state.backend = (const char *)test_data;

	state.ev = tevent_context_init_byname(tctx, state.backend);
	if (state.ev == NULL) {
		torture_skip(tctx, talloc_asprintf(tctx,
			     "event backend '%s' not supported\n",
			     state.backend));
		return true;
	}

	tevent_set_debug_stderr(state.ev);
	torture_comment(tctx, "backend '%s' - %s\n",
			state.backend, __FUNCTION__);
	/*
	 * This tests the following:
	 *
	 * It monitors the state of state.sock[0]
	 * with tevent_fd, but we never read/write on state.sock[0],
	 * while state.sock[1] is only used to write a few bytes.
	 *
	 * We have a loop:
	 *   - we wait only for TEVENT_FD_WRITE on state.sock[0]
	 *   - we write 1 byte to state.sock[1]
	 *   - we wait only for TEVENT_FD_READ on state.sock[0]
	 *   - we disable events on state.sock[0]
	 *   - the timer event restarts the loop
	 *
	 * Then we close state.sock[1]:
	 *   - we wait for TEVENT_FD_READ/WRITE on state.sock[0]
	 *   - we try to read 1 byte
	 *   - if the read gets an error or returns 0
	 *     we disable the event handler
	 *   - the timer finishes the test
	 */
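	/*
	 * Why socketpair() and not pipe(): an AF_UNIX stream socketpair
	 * yields two connected, bi-directional fds, so sock[0] can
	 * legitimately become readable and writable at the same time.
	 * The pipe() based test above has to treat that as optional (see
	 * the "only fire if the fd's returned from pipe() are
	 * bi-directional" handlers).
	 */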
	socketpair(AF_UNIX, SOCK_STREAM, 0, state.sock);

	state.te = tevent_add_timer(state.ev, state.ev,
				    timeval_current_ofs(0,1000),
				    test_event_fd1_finished, &state);
	state.fde0 = tevent_add_fd(state.ev, state.ev,
				   state.sock[0], TEVENT_FD_WRITE,
				   test_event_fd1_fde_handler, &state);
	/* state.fde1 is only used to auto close */
	state.fde1 = tevent_add_fd(state.ev, state.ev,
				   state.sock[1], 0,
				   test_event_fd1_fde_handler, &state);

	tevent_fd_set_auto_close(state.fde0);
	tevent_fd_set_auto_close(state.fde1);
	while (!state.finished) {
		errno = 0;
		if (tevent_loop_once(state.ev) == -1) {
			talloc_free(state.ev);
			torture_fail(tctx, talloc_asprintf(tctx,
				     "Failed event loop %s\n",
				     strerror(errno)));
		}
	}

	talloc_free(state.ev);

	torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx,
		       "%s", state.error));

	return true;
}
struct test_event_fd2_state {
	struct torture_context *tctx;
	const char *backend;
	struct tevent_context *ev;
	struct tevent_timer *te;
	struct test_event_fd2_sock {
		struct test_event_fd2_state *state;
		int fd;
		struct tevent_fd *fde;
		size_t num_written;
		size_t num_read;
		bool got_full;
	} sock0, sock1;
	bool finished;
	const char *error;
};
static void test_event_fd2_sock_handler(struct tevent_context *ev_ctx,
					struct tevent_fd *fde,
					uint16_t flags,
					void *private_data)
{
	struct test_event_fd2_sock *cur_sock =
		(struct test_event_fd2_sock *)private_data;
	struct test_event_fd2_state *state = cur_sock->state;
	struct test_event_fd2_sock *oth_sock = NULL;
	uint8_t v = 0, c = 0;
	ssize_t ret;

	if (cur_sock == &state->sock0) {
		oth_sock = &state->sock1;
	} else {
		oth_sock = &state->sock0;
	}
	if (oth_sock->num_written == 1) {
		if (flags != (TEVENT_FD_READ | TEVENT_FD_WRITE)) {
			state->finished = true;
			state->error = __location__;
			return;
		}
	}

	if (cur_sock->num_read == oth_sock->num_written) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	if (!(flags & TEVENT_FD_READ)) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	if (oth_sock->num_read >= PIPE_BUF) {
		/*
		 * On Linux we become writable once we've read
		 * one byte. On Solaris we only become writable
		 * again once we've read 4096 bytes. PIPE_BUF
		 * is probably a safe bet to test against.
		 *
		 * There should be room to write a byte again
		 */
		if (!(flags & TEVENT_FD_WRITE)) {
			state->finished = true;
			state->error = __location__;
			return;
		}
	}
	if ((flags & TEVENT_FD_WRITE) && !cur_sock->got_full) {
		v = (uint8_t)cur_sock->num_written;
		ret = write(cur_sock->fd, &v, 1);
		if (ret != 1) {
			state->finished = true;
			state->error = __location__;
			return;
		}
		cur_sock->num_written++;
		if (cur_sock->num_written > 0x80000000) {
			state->finished = true;
			state->error = __location__;
			return;
		}
		return;
	}

	if (!cur_sock->got_full) {
		cur_sock->got_full = true;

		if (!oth_sock->got_full) {
			/*
			 * cur_sock is full,
			 * lets wait for oth_sock
			 * to be filled
			 */
			tevent_fd_set_flags(cur_sock->fde, 0);
			return;
		}

		/*
		 * oth_sock waited for cur_sock,
		 * lets restart it
		 */
		tevent_fd_set_flags(oth_sock->fde,
				    TEVENT_FD_READ|TEVENT_FD_WRITE);
	}
	ret = read(cur_sock->fd, &v, 1);
	if (ret != 1) {
		state->finished = true;
		state->error = __location__;
		return;
	}
	c = (uint8_t)cur_sock->num_read;
	if (c != v) {
		state->finished = true;
		state->error = __location__;
		return;
	}
	cur_sock->num_read++;

	if (cur_sock->num_read < oth_sock->num_written) {
		/* there is more to read */
		return;
	}
	/*
	 * we read everything, we need to remove TEVENT_FD_WRITE
	 * to avoid spinning
	 */
	TEVENT_FD_NOT_WRITEABLE(cur_sock->fde);

	if (oth_sock->num_read == cur_sock->num_written) {
		/*
		 * both directions are finished
		 */
		state->finished = true;
	}
}
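/*
 * A note on the flag juggling above: tevent reports fd readiness in a
 * level-triggered fashion, so a socket that stays writable would re-fire
 * the handler on every loop iteration. That is why the handler masks
 * TEVENT_FD_WRITE (got_full / TEVENT_FD_NOT_WRITEABLE) once a direction has
 * nothing useful left to do, and only re-enables the peer's events once both
 * sides have filled their buffers.
 */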
static void test_event_fd2_finished(struct tevent_context *ev_ctx,
				    struct tevent_timer *te,
				    struct timeval tval,
				    void *private_data)
{
	struct test_event_fd2_state *state =
		(struct test_event_fd2_state *)private_data;

	/*
	 * this should never be triggered
	 */
	state->finished = true;
	state->error = __location__;
}
static bool test_event_fd2(struct torture_context *tctx,
			   const void *test_data)
{
	struct test_event_fd2_state state;
	int sock[2];
	uint8_t c = 0;

	ZERO_STRUCT(state);
	state.tctx = tctx;
	state.backend = (const char *)test_data;

	state.ev = tevent_context_init_byname(tctx, state.backend);
	if (state.ev == NULL) {
		torture_skip(tctx, talloc_asprintf(tctx,
			     "event backend '%s' not supported\n",
			     state.backend));
		return true;
	}

	tevent_set_debug_stderr(state.ev);
	torture_comment(tctx, "backend '%s' - %s\n",
			state.backend, __FUNCTION__);
	/*
	 * This tests the following
	 *
	 * - We write 1 byte to each socket
	 * - We wait for TEVENT_FD_READ/WRITE on both sockets
	 * - When we get TEVENT_FD_WRITE we write 1 byte
	 *   until both socket buffers are full, which
	 *   means both sockets only get TEVENT_FD_READ.
	 * - Then we read 1 byte until we have consumed
	 *   all bytes the other end has written.
	 */
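	/*
	 * Note: the senders never rely on write() failing with EAGAIN;
	 * a full socket buffer simply stops producing TEVENT_FD_WRITE,
	 * which is what flips got_full in the handler. The 0x80000000
	 * bound on num_written additionally turns a runaway write loop
	 * into a test failure instead of an endless spin.
	 */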
	socketpair(AF_UNIX, SOCK_STREAM, 0, sock);

	/*
	 * the timer should never expire
	 */
	state.te = tevent_add_timer(state.ev, state.ev,
				    timeval_current_ofs(600, 0),
				    test_event_fd2_finished, &state);
	state.sock0.state = &state;
	state.sock0.fd = sock[0];
	state.sock0.fde = tevent_add_fd(state.ev, state.ev,
					state.sock0.fd,
					TEVENT_FD_READ | TEVENT_FD_WRITE,
					test_event_fd2_sock_handler,
					&state.sock0);
	state.sock1.state = &state;
	state.sock1.fd = sock[1];
	state.sock1.fde = tevent_add_fd(state.ev, state.ev,
					state.sock1.fd,
					TEVENT_FD_READ | TEVENT_FD_WRITE,
					test_event_fd2_sock_handler,
					&state.sock1);
	tevent_fd_set_auto_close(state.sock0.fde);
	tevent_fd_set_auto_close(state.sock1.fde);

	write(state.sock0.fd, &c, 1);
	state.sock0.num_written++;
	write(state.sock1.fd, &c, 1);
	state.sock1.num_written++;
	while (!state.finished) {
		errno = 0;
		if (tevent_loop_once(state.ev) == -1) {
			talloc_free(state.ev);
			torture_fail(tctx, talloc_asprintf(tctx,
				     "Failed event loop %s\n",
				     strerror(errno)));
		}
	}

	talloc_free(state.ev);

	torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx,
		       "%s", state.error));

	return true;
}
static pthread_mutex_t threaded_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool do_shutdown = false;
static void test_event_threaded_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&threaded_mutex);
	assert(ret == 0);
}

static void test_event_threaded_unlock(void)
{
	int ret;
	ret = pthread_mutex_unlock(&threaded_mutex);
	assert(ret == 0);
}
static void test_event_threaded_trace(enum tevent_trace_point point,
				      void *private_data)
{
	switch (point) {
	case TEVENT_TRACE_BEFORE_WAIT:
		test_event_threaded_unlock();
		break;
	case TEVENT_TRACE_AFTER_WAIT:
		test_event_threaded_lock();
		break;
	case TEVENT_TRACE_BEFORE_LOOP_ONCE:
	case TEVENT_TRACE_AFTER_LOOP_ONCE:
		break;
	}
}
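/*
 * The idea behind the lock/unlock pair above: whichever thread is currently
 * touching the tevent context holds threaded_mutex. The poll thread drops
 * the mutex just before it blocks (TEVENT_TRACE_BEFORE_WAIT) and takes it
 * back when it wakes up (TEVENT_TRACE_AFTER_WAIT), so the main thread can
 * grab the mutex and safely add events to the same context in between; see
 * how test_event_context_threaded() below brackets its tevent_add_fd() call
 * with test_event_threaded_lock()/unlock().
 */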
static void test_event_threaded_timer(struct tevent_context *ev,
				      struct tevent_timer *te,
				      struct timeval current_time,
				      void *private_data)
{
	return;
}
static void *test_event_poll_thread(void *private_data)
{
	struct tevent_context *ev = (struct tevent_context *)private_data;

	test_event_threaded_lock();

	while (true) {
		int ret;
		ret = tevent_loop_once(ev);
		assert(ret == 0);
		if (do_shutdown) {
			test_event_threaded_unlock();
			return NULL;
		}
	}
}
static void test_event_threaded_read_handler(struct tevent_context *ev,
					     struct tevent_fd *fde,
					     uint16_t flags,
					     void *private_data)
{
	int *pfd = (int *)private_data;
	char c;
	ssize_t nread;

	if ((flags & TEVENT_FD_READ) == 0) {
		return;
	}

	do {
		nread = read(*pfd, &c, 1);
	} while ((nread == -1) && (errno == EINTR));
}
static bool test_event_context_threaded(struct torture_context *test,
					const void *test_data)
{
	struct tevent_context *ev;
	struct tevent_timer *te;
	struct tevent_fd *fde;
	pthread_t poll_thread;
	int fds[2];
	int ret;
	char c = 0;

	ev = tevent_context_init_byname(test, "poll_mt");
	torture_assert(test, ev != NULL, "poll_mt not supported");
	tevent_set_trace_callback(ev, test_event_threaded_trace, NULL);

	te = tevent_add_timer(ev, ev, timeval_current_ofs(5, 0),
			      test_event_threaded_timer, NULL);
	torture_assert(test, te != NULL, "Could not add timer");

	ret = pthread_create(&poll_thread, NULL, test_event_poll_thread, ev);
	torture_assert(test, ret == 0, "Could not create poll thread");

	ret = pipe(fds);
	torture_assert(test, ret == 0, "Could not create pipe");

	test_event_threaded_lock();

	fde = tevent_add_fd(ev, ev, fds[0], TEVENT_FD_READ,
			    test_event_threaded_read_handler, &fds[0]);
	torture_assert(test, fde != NULL, "Could not add fd event");

	test_event_threaded_unlock();

	write(fds[1], &c, 1);

	test_event_threaded_lock();
	do_shutdown = true;
	test_event_threaded_unlock();

	write(fds[1], &c, 1);

	ret = pthread_join(poll_thread, NULL);
	torture_assert(test, ret == 0, "pthread_join failed");

	return true;
}
#define NUM_TEVENT_THREADS 100

/* Ugly, but needed for torture_comment... */
static struct torture_context *thread_test_ctx;
static pthread_t thread_map[NUM_TEVENT_THREADS];
static unsigned thread_counter;
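/*
 * thread_counter is only ever modified from the master thread: the proxy
 * callbacks and timeout_fn() below all run inside master_ev's event loop,
 * and the test functions read it between tevent_loop_once() calls on that
 * same thread, so no atomic operations are needed for it.
 */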
/* Called in master thread context */
static void callback_nowait(struct tevent_context *ev,
				struct tevent_immediate *im,
				void *private_ptr)
{
	pthread_t *thread_id_ptr =
		talloc_get_type_abort(private_ptr, pthread_t);
	unsigned i;

	for (i = 0; i < NUM_TEVENT_THREADS; i++) {
		if (pthread_equal(*thread_id_ptr,
				thread_map[i])) {
			break;
		}
	}
	torture_comment(thread_test_ctx,
			"Callback %u from thread %u\n",
			thread_counter,
			i);
	thread_counter++;
}
/* Blast the master tevent_context with a callback, no waiting. */
static void *thread_fn_nowait(void *private_ptr)
{
	struct tevent_thread_proxy *master_tp =
		talloc_get_type_abort(private_ptr, struct tevent_thread_proxy);
	struct tevent_immediate *im;
	pthread_t *thread_id_ptr;

	im = tevent_create_immediate(NULL);
	if (im == NULL) {
		return NULL;
	}
	thread_id_ptr = talloc(NULL, pthread_t);
	if (thread_id_ptr == NULL) {
		return NULL;
	}
	*thread_id_ptr = pthread_self();

	tevent_thread_proxy_schedule(master_tp,
				     &im,
				     callback_nowait,
				     &thread_id_ptr);
	return NULL;
}
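/*
 * Note on tevent_thread_proxy_schedule(): it hands the immediate and the
 * private data over to the thread owning the target proxy and clears the
 * caller's pointers, which is why im and thread_id_ptr above are created
 * with a NULL talloc parent and are not freed here. See the description of
 * tevent_thread_proxy_schedule() in tevent.h for the exact ownership rules.
 */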
static void timeout_fn(struct tevent_context *ev,
			struct tevent_timer *te,
			struct timeval tv, void *p)
{
	thread_counter = NUM_TEVENT_THREADS * 10;
}
static bool test_multi_tevent_threaded(struct torture_context *test,
				       const void *test_data)
{
	unsigned i;
	struct tevent_context *master_ev;
	struct tevent_thread_proxy *tp;

	talloc_disable_null_tracking();

	/* Ugly global stuff. */
	thread_test_ctx = test;
	thread_counter = 0;

	master_ev = tevent_context_init(NULL);
	if (master_ev == NULL) {
		return false;
	}
	tevent_set_debug_stderr(master_ev);

	tp = tevent_thread_proxy_create(master_ev);
	if (tp == NULL) {
		torture_fail(test,
			talloc_asprintf(test,
				"tevent_thread_proxy_create failed\n"));
		talloc_free(master_ev);
		return false;
	}

	for (i = 0; i < NUM_TEVENT_THREADS; i++) {
		int ret = pthread_create(&thread_map[i],
				NULL,
				thread_fn_nowait,
				tp);
		if (ret != 0) {
			torture_fail(test,
				talloc_asprintf(test,
					"Failed to create thread %i, %d\n",
					i, ret));
			return false;
		}
	}

	/* Ensure we don't wait more than 10 seconds. */
	tevent_add_timer(master_ev,
			master_ev,
			timeval_current_ofs(10,0),
			timeout_fn,
			NULL);

	while (thread_counter < NUM_TEVENT_THREADS) {
		int ret = tevent_loop_once(master_ev);
		torture_assert(test, ret == 0, "tevent_loop_once failed");
	}

	torture_assert(test, thread_counter == NUM_TEVENT_THREADS,
		       "thread_counter fail\n");

	talloc_free(master_ev);
	return true;
}
struct reply_state {
	struct tevent_thread_proxy *reply_tp;
	pthread_t thread_id;
	int *p_finished;
};
static void thread_timeout_fn(struct tevent_context *ev,
			struct tevent_timer *te,
			struct timeval tv, void *p)
{
	int *p_finished = (int *)p;

	*p_finished = 2;	/* any non-zero value ends the loop in thread_fn_1() */
}
/* Called in child-thread context */
static void thread_callback(struct tevent_context *ev,
				struct tevent_immediate *im,
				void *private_ptr)
{
	struct reply_state *rsp =
		talloc_get_type_abort(private_ptr, struct reply_state);

	talloc_steal(ev, rsp);
	*rsp->p_finished = 1;
}
/* Called in master thread context */
static void master_callback(struct tevent_context *ev,
				struct tevent_immediate *im,
				void *private_ptr)
{
	struct reply_state *rsp =
		talloc_get_type_abort(private_ptr, struct reply_state);
	unsigned i;

	talloc_steal(ev, rsp);

	for (i = 0; i < NUM_TEVENT_THREADS; i++) {
		if (pthread_equal(rsp->thread_id,
				thread_map[i])) {
			break;
		}
	}
	torture_comment(thread_test_ctx,
			"Callback %u from thread %u\n",
			thread_counter,
			i);
	thread_counter++;

	/* Now reply to the thread ! */
	tevent_thread_proxy_schedule(rsp->reply_tp,
				     &im,
				     thread_callback,
				     &rsp);
}
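/*
 * The _1 test below performs a full round trip: each worker thread creates
 * its own tevent_context plus a reply proxy, schedules master_callback() on
 * the master proxy, and then runs its own loop. The master counts the
 * callback and immediately schedules thread_callback() back on the worker's
 * reply proxy, which sets *p_finished inside the worker's loop, so every
 * request is answered on the context that issued it.
 */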
static void *thread_fn_1(void *private_ptr)
{
	struct tevent_thread_proxy *master_tp =
		talloc_get_type_abort(private_ptr, struct tevent_thread_proxy);
	struct tevent_thread_proxy *tp;
	struct tevent_immediate *im;
	struct tevent_context *ev;
	struct reply_state *rsp;
	int finished = 0;
	int ret;

	ev = tevent_context_init(NULL);
	if (ev == NULL) {
		return NULL;
	}

	tp = tevent_thread_proxy_create(ev);
	if (tp == NULL) {
		talloc_free(ev);
		return NULL;
	}

	im = tevent_create_immediate(ev);
	if (im == NULL) {
		talloc_free(ev);
		return NULL;
	}

	rsp = talloc(ev, struct reply_state);
	if (rsp == NULL) {
		talloc_free(ev);
		return NULL;
	}

	rsp->thread_id = pthread_self();
	rsp->reply_tp = tp;
	rsp->p_finished = &finished;

	/* Introduce a little randomness into the mix.. */
	usleep(random() % 7000);

	tevent_thread_proxy_schedule(master_tp,
				     &im,
				     master_callback,
				     &rsp);

	/* Ensure we don't wait more than 10 seconds. */
	tevent_add_timer(ev,
			ev,
			timeval_current_ofs(10,0),
			thread_timeout_fn,
			&finished);

	while (finished == 0) {
		ret = tevent_loop_once(ev);
		assert(ret == 0);
	}

	/*
	 * NB. We should talloc_free(ev) here, but if we do
	 * we currently get hit by helgrind Fix #323432
	 * "When calling pthread_cond_destroy or pthread_mutex_destroy
	 * with initializers as argument Helgrind (incorrectly) reports errors."
	 *
	 * http://valgrind.10908.n7.nabble.com/Helgrind-3-9-0-false-positive-
	 * with-pthread-mutex-destroy-td47757.html
	 *
	 * Helgrind doesn't understand that the request/reply
	 * messages provide synchronization between the lock/unlock
	 * in tevent_thread_proxy_schedule(), and the pthread_destroy()
	 * when the struct tevent_thread_proxy object is talloc_free'd.
	 *
	 * As a work-around for now return ev for the parent thread to free.
	 */
	return ev;
}
static bool test_multi_tevent_threaded_1(struct torture_context *test,
					 const void *test_data)
{
	unsigned i;
	struct tevent_context *master_ev;
	struct tevent_thread_proxy *master_tp;
	int ret;

	talloc_disable_null_tracking();

	/* Ugly global stuff. */
	thread_test_ctx = test;
	thread_counter = 0;

	master_ev = tevent_context_init(NULL);
	if (master_ev == NULL) {
		return false;
	}
	tevent_set_debug_stderr(master_ev);

	master_tp = tevent_thread_proxy_create(master_ev);
	if (master_tp == NULL) {
		torture_fail(test,
			talloc_asprintf(test,
				"tevent_thread_proxy_create failed\n"));
		talloc_free(master_ev);
		return false;
	}

	for (i = 0; i < NUM_TEVENT_THREADS; i++) {
		ret = pthread_create(&thread_map[i],
				NULL,
				thread_fn_1,
				master_tp);
		if (ret != 0) {
			torture_fail(test,
				talloc_asprintf(test,
					"Failed to create thread %i, %d\n",
					i, ret));
			return false;
		}
	}

	while (thread_counter < NUM_TEVENT_THREADS) {
		ret = tevent_loop_once(master_ev);
		torture_assert(test, ret == 0, "tevent_loop_once failed");
	}

	/* Wait for all the threads to finish - join 'em. */
	for (i = 0; i < NUM_TEVENT_THREADS; i++) {
		void *retval;
		ret = pthread_join(thread_map[i], &retval);
		torture_assert(test, ret == 0, "pthread_join failed");
		/* Free the child thread event context. */
		talloc_free(retval);
	}

	talloc_free(master_ev);
	return true;
}
struct torture_suite *torture_local_event(TALLOC_CTX *mem_ctx)
{
	struct torture_suite *suite = torture_suite_create(mem_ctx, "event");
	const char **list = tevent_backend_list(suite);
	int i;

	for (i=0;list && list[i];i++) {
		struct torture_suite *backend_suite;

		backend_suite = torture_suite_create(mem_ctx, list[i]);

		torture_suite_add_simple_tcase_const(backend_suite,
					       "context",
					       test_event_context,
					       (const void *)list[i]);
		torture_suite_add_simple_tcase_const(backend_suite,
					       "fd1",
					       test_event_fd1,
					       (const void *)list[i]);
		torture_suite_add_simple_tcase_const(backend_suite,
					       "fd2",
					       test_event_fd2,
					       (const void *)list[i]);

		torture_suite_add_suite(suite, backend_suite);
	}

	torture_suite_add_simple_tcase_const(suite, "threaded_poll_mt",
					     test_event_context_threaded,
					     NULL);

	torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded",
					     test_multi_tevent_threaded,
					     NULL);

	torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded_1",
					     test_multi_tevent_threaded_1,
					     NULL);

	return suite;
}