s3-prefork: Fix cast warning.
[Samba/gebeck_regimport.git] / source3 / lib / server_prefork.c
bloba584df7a711d41eed1a95563676f43de3441e125
1 /*
2 Unix SMB/CIFS implementation.
3 Common server globals
5 Copyright (C) Simo Sorce <idra@samba.org> 2011
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "includes.h"
22 #include "system/time.h"
23 #include "system/shmem.h"
24 #include "system/filesys.h"
25 #include "server_prefork.h"
26 #include "../lib/util/util.h"
27 #include "../lib/util/tevent_unix.h"
29 struct prefork_pool {
31 int listen_fd_size;
32 int *listen_fds;
34 int lock_fd;
36 prefork_main_fn_t *main_fn;
37 void *private_data;
39 int pool_size;
40 struct pf_worker_data *pool;
42 int allowed_clients;
44 prefork_sigchld_fn_t *sigchld_fn;
45 void *sigchld_data;
48 static bool prefork_setup_sigchld_handler(struct tevent_context *ev_ctx,
49 struct prefork_pool *pfp);
51 static int prefork_pool_destructor(struct prefork_pool *pfp)
53 anonymous_shared_free(pfp->pool);
54 return 0;
57 bool prefork_create_pool(TALLOC_CTX *mem_ctx,
58 struct tevent_context *ev_ctx,
59 struct messaging_context *msg_ctx,
60 int listen_fd_size, int *listen_fds,
61 int min_children, int max_children,
62 prefork_main_fn_t *main_fn, void *private_data,
63 struct prefork_pool **pf_pool)
65 struct prefork_pool *pfp;
66 pid_t pid;
67 time_t now = time(NULL);
68 size_t data_size;
69 int ret;
70 int i;
71 bool ok;
73 pfp = talloc_zero(mem_ctx, struct prefork_pool);
74 if (!pfp) {
75 DEBUG(1, ("Out of memory!\n"));
76 return false;
78 pfp->listen_fd_size = listen_fd_size;
79 pfp->listen_fds = talloc_array(pfp, int, listen_fd_size);
80 if (!pfp->listen_fds) {
81 DEBUG(1, ("Out of memory!\n"));
82 return false;
84 for (i = 0; i < listen_fd_size; i++) {
85 pfp->listen_fds[i] = listen_fds[i];
87 pfp->main_fn = main_fn;
88 pfp->private_data = private_data;
90 pfp->lock_fd = create_unlink_tmp(NULL);
91 if (pfp->lock_fd == -1) {
92 DEBUG(1, ("Failed to create prefork lock fd!\n"));
93 talloc_free(pfp);
94 return false;
97 pfp->pool_size = max_children;
98 data_size = sizeof(struct pf_worker_data) * max_children;
100 pfp->pool = anonymous_shared_allocate(data_size);
101 if (pfp->pool == NULL) {
102 DEBUG(1, ("Failed to mmap memory for prefork pool!\n"));
103 talloc_free(pfp);
104 return false;
106 talloc_set_destructor(pfp, prefork_pool_destructor);
108 for (i = 0; i < min_children; i++) {
110 pfp->pool[i].allowed_clients = 1;
111 pfp->pool[i].started = now;
113 pid = sys_fork();
114 switch (pid) {
115 case -1:
116 DEBUG(1, ("Failed to prefork child n. %d !\n", i));
117 break;
119 case 0: /* THE CHILD */
121 pfp->pool[i].status = PF_WORKER_IDLE;
122 ret = pfp->main_fn(ev_ctx, msg_ctx,
123 &pfp->pool[i], i + 1,
124 pfp->listen_fd_size,
125 pfp->listen_fds,
126 pfp->lock_fd,
127 pfp->private_data);
128 exit(ret);
130 default: /* THE PARENT */
131 pfp->pool[i].pid = pid;
132 break;
136 ok = prefork_setup_sigchld_handler(ev_ctx, pfp);
137 if (!ok) {
138 DEBUG(1, ("Failed to setup SIGCHLD Handler!\n"));
139 talloc_free(pfp);
140 return false;
143 *pf_pool = pfp;
144 return true;
147 /* Provide the new max children number in new_max
148 * (must be larger than current max).
149 * Returns: 0 if all fine
150 * ENOSPC if mremap fails to expand
151 * EINVAL if new_max is invalid
153 int prefork_expand_pool(struct prefork_pool *pfp, int new_max)
155 struct prefork_pool *pool;
156 size_t old_size;
157 size_t new_size;
158 int ret;
160 if (new_max <= pfp->pool_size) {
161 return EINVAL;
164 old_size = sizeof(struct pf_worker_data) * pfp->pool_size;
165 new_size = sizeof(struct pf_worker_data) * new_max;
167 pool = anonymous_shared_resize(&pfp->pool, new_size, false);
168 if (pool == NULL) {
169 ret = errno;
170 DEBUG(3, ("Failed to mremap memory (%d: %s)!\n",
171 ret, strerror(ret)));
172 return ret;
175 memset(&pool[pfp->pool_size], 0, new_size - old_size);
177 pfp->pool_size = new_max;
179 return 0;
182 int prefork_add_children(struct tevent_context *ev_ctx,
183 struct messaging_context *msg_ctx,
184 struct prefork_pool *pfp,
185 int num_children)
187 pid_t pid;
188 time_t now = time(NULL);
189 int ret;
190 int i, j;
192 for (i = 0, j = 0; i < pfp->pool_size && j < num_children; i++) {
194 if (pfp->pool[i].status != PF_WORKER_NONE) {
195 continue;
198 pfp->pool[i].allowed_clients = 1;
199 pfp->pool[i].started = now;
201 pid = sys_fork();
202 switch (pid) {
203 case -1:
204 DEBUG(1, ("Failed to prefork child n. %d !\n", j));
205 break;
207 case 0: /* THE CHILD */
209 pfp->pool[i].status = PF_WORKER_IDLE;
210 ret = pfp->main_fn(ev_ctx, msg_ctx,
211 &pfp->pool[i], i + 1,
212 pfp->listen_fd_size,
213 pfp->listen_fds,
214 pfp->lock_fd,
215 pfp->private_data);
217 pfp->pool[i].status = PF_WORKER_EXITING;
218 exit(ret);
220 default: /* THE PARENT */
221 pfp->pool[i].pid = pid;
222 j++;
223 break;
227 DEBUG(5, ("Added %d children!\n", j));
229 return j;
/* Helper record used to order workers by start time. */
struct prefork_oldest {
	int num;	/* index of the worker in pfp->pool */
	time_t started;	/* worker start time; "now" for busy workers */
};

/* sort in inverse order */
static int prefork_sort_oldest(const void *ap, const void *bp)
{
	const struct prefork_oldest *a = (const struct prefork_oldest *)ap;
	const struct prefork_oldest *b = (const struct prefork_oldest *)bp;

	if (a->started == b->started) {
		return 0;
	}
	/* an earlier start time sorts later (inverse order) */
	return (a->started < b->started) ? 1 : -1;
}
252 int prefork_retire_children(struct prefork_pool *pfp,
253 int num_children, time_t age_limit)
255 time_t now = time(NULL);
256 struct prefork_oldest *oldest;
257 int i, j;
259 oldest = talloc_array(pfp, struct prefork_oldest, pfp->pool_size);
260 if (!oldest) {
261 return -1;
264 for (i = 0; i < pfp->pool_size; i++) {
265 oldest[i].num = i;
266 if (pfp->pool[i].status == PF_WORKER_IDLE) {
267 oldest[i].started = pfp->pool[i].started;
268 } else {
269 oldest[i].started = now;
273 qsort(oldest, pfp->pool_size,
274 sizeof(struct prefork_oldest),
275 prefork_sort_oldest);
277 for (i = 0, j = 0; i < pfp->pool_size && j < num_children; i++) {
278 if (pfp->pool[i].status == PF_WORKER_IDLE &&
279 pfp->pool[i].started <= age_limit) {
280 /* tell the child it's time to give up */
281 DEBUG(5, ("Retiring pid %d!\n", pfp->pool[i].pid));
282 pfp->pool[i].cmds = PF_SRV_MSG_EXIT;
283 kill(pfp->pool[i].pid, SIGHUP);
284 j++;
288 return j;
291 int prefork_count_active_children(struct prefork_pool *pfp, int *total)
293 int i, a, t;
295 a = 0;
296 t = 0;
297 for (i = 0; i < pfp->pool_size; i++) {
298 if (pfp->pool[i].status == PF_WORKER_NONE) {
299 continue;
302 t++;
304 if (pfp->pool[i].num_clients == 0) {
305 continue;
308 a++;
311 *total = t;
312 return a;
315 static void prefork_cleanup_loop(struct prefork_pool *pfp)
317 int status;
318 pid_t pid;
319 int i;
321 /* TODO: should we use a process group id wait instead of looping ? */
322 for (i = 0; i < pfp->pool_size; i++) {
323 if (pfp->pool[i].status == PF_WORKER_NONE ||
324 pfp->pool[i].pid == 0) {
325 continue;
328 pid = sys_waitpid(pfp->pool[i].pid, &status, WNOHANG);
329 if (pid > 0) {
331 if (pfp->pool[i].status != PF_WORKER_EXITING) {
332 DEBUG(3, ("Child (%d) terminated abnormally:"
333 " %d\n", (int)pid, status));
334 } else {
335 DEBUG(10, ("Child (%d) terminated with status:"
336 " %d\n", (int)pid, status));
339 /* reset all fields,
340 * this makes status = PF_WORK_NONE */
341 memset(&pfp->pool[i], 0,
342 sizeof(struct pf_worker_data));
348 void prefork_increase_allowed_clients(struct prefork_pool *pfp, int max)
350 int i;
352 for (i = 0; i < pfp->pool_size; i++) {
353 if (pfp->pool[i].status == PF_WORKER_NONE) {
354 continue;
357 if (pfp->pool[i].allowed_clients < max) {
358 pfp->pool[i].allowed_clients++;
363 void prefork_reset_allowed_clients(struct prefork_pool *pfp)
365 int i;
367 for (i = 0; i < pfp->pool_size; i++) {
368 pfp->pool[i].allowed_clients = 1;
372 void prefork_send_signal_to_all(struct prefork_pool *pfp, int signal_num)
374 int i;
376 for (i = 0; i < pfp->pool_size; i++) {
377 if (pfp->pool[i].status == PF_WORKER_NONE) {
378 continue;
381 kill(pfp->pool[i].pid, signal_num);
385 static void prefork_sigchld_handler(struct tevent_context *ev_ctx,
386 struct tevent_signal *se,
387 int signum, int count,
388 void *siginfo, void *pvt)
390 struct prefork_pool *pfp;
392 pfp = talloc_get_type_abort(pvt, struct prefork_pool);
394 /* run the cleanup function to make sure all dead children are
395 * properly and timely retired. */
396 prefork_cleanup_loop(pfp);
398 if (pfp->sigchld_fn) {
399 pfp->sigchld_fn(ev_ctx, pfp, pfp->sigchld_data);
403 static bool prefork_setup_sigchld_handler(struct tevent_context *ev_ctx,
404 struct prefork_pool *pfp)
406 struct tevent_signal *se;
408 se = tevent_add_signal(ev_ctx, pfp, SIGCHLD, 0,
409 prefork_sigchld_handler, pfp);
410 if (!se) {
411 DEBUG(0, ("Failed to setup SIGCHLD handler!\n"));
412 return false;
415 return true;
418 void prefork_set_sigchld_callback(struct prefork_pool *pfp,
419 prefork_sigchld_fn_t *sigchld_fn,
420 void *private_data)
422 pfp->sigchld_fn = sigchld_fn;
423 pfp->sigchld_data = private_data;
426 /* ==== Functions used by children ==== */
428 static SIG_ATOMIC_T pf_alarm;
430 static void pf_alarm_cb(int signum)
432 pf_alarm = 1;
437 * Parameters:
438 * pf - the worker shared data structure
439 * lock_fd - the file descriptor used for locking
440 * timeout - expressed in seconds:
441 * -1 never timeouts,
442 * 0 timeouts immediately
443 * N seconds before timing out
445 * Returns values:
446 * negative errno on fatal error
447 * 0 on success to acquire lock
448 * -1 on timeout/lock held by other
449 * -2 on server msg to terminate
450 * ERRNO on other errors
453 static int prefork_grab_lock(struct pf_worker_data *pf,
454 int lock_fd, int timeout)
456 struct flock lock;
457 int op;
458 int ret;
460 if (pf->cmds == PF_SRV_MSG_EXIT) {
461 return -2;
464 pf_alarm = 0;
466 if (timeout > 0) {
467 CatchSignal(SIGALRM, pf_alarm_cb);
468 alarm(timeout);
471 if (timeout == 0) {
472 op = F_SETLK;
473 } else {
474 op = F_SETLKW;
477 ret = 0;
478 do {
479 ZERO_STRUCT(lock);
480 lock.l_type = F_WRLCK;
481 lock.l_whence = SEEK_SET;
483 ret = fcntl(lock_fd, op, &lock);
484 if (ret == 0) break;
486 ret = errno;
488 if (pf->cmds == PF_SRV_MSG_EXIT) {
489 ret = -2;
490 goto done;
493 switch (ret) {
494 case EINTR:
495 break;
497 case EACCES:
498 case EAGAIN:
499 /* lock held by other proc */
500 ret = -1;
501 goto done;
502 default:
503 goto done;
506 if (pf_alarm == 1) {
507 /* timed out */
508 ret = -1;
509 goto done;
511 } while (timeout != 0);
513 if (ret != 0) {
514 /* We have the Lock */
515 pf->status = PF_WORKER_ACCEPTING;
518 done:
519 if (timeout > 0) {
520 alarm(0);
521 CatchSignal(SIGALRM, SIG_IGN);
524 if (ret > 0) {
525 DEBUG(1, ("Failed to get lock (%d, %s)!\n",
526 ret, strerror(ret)));
528 return ret;
532 * Parameters:
533 * pf - the worker shared data structure
534 * lock_fd - the file descriptor used for locking
535 * timeout - expressed in seconds:
536 * -1 never timeouts,
537 * 0 timeouts immediately
538 * N seconds before timing out
540 * Returns values:
541 * negative errno on fatal error
542 * 0 on success to release lock
543 * -1 on timeout
544 * ERRNO on error
547 static int prefork_release_lock(struct pf_worker_data *pf,
548 int lock_fd, int timeout)
550 struct flock lock;
551 int op;
552 int ret;
554 pf_alarm = 0;
556 if (timeout > 0) {
557 CatchSignal(SIGALRM, pf_alarm_cb);
558 alarm(timeout);
561 if (timeout == 0) {
562 op = F_SETLK;
563 } else {
564 op = F_SETLKW;
567 do {
568 ZERO_STRUCT(lock);
569 lock.l_type = F_UNLCK;
570 lock.l_whence = SEEK_SET;
572 ret = fcntl(lock_fd, op, &lock);
573 if (ret == 0) break;
575 ret = errno;
577 if (ret != EINTR) {
578 goto done;
581 if (pf_alarm == 1) {
582 /* timed out */
583 ret = -1;
584 goto done;
586 } while (timeout != 0);
588 done:
589 if (timeout > 0) {
590 alarm(0);
591 CatchSignal(SIGALRM, SIG_IGN);
594 if (ret > 0) {
595 DEBUG(1, ("Failed to release lock (%d, %s)!\n",
596 ret, strerror(ret)));
598 return ret;
601 /* ==== async code ==== */
/* Async lock request flags: the requested action (lower bits) plus a
 * DONE bit set once the request has completed. */
#define PF_ASYNC_LOCK_GRAB	0x01
#define PF_ASYNC_LOCK_RELEASE	0x02
#define PF_ASYNC_ACTION_MASK	0x03
#define PF_ASYNC_LOCK_DONE	0x04

struct pf_lock_state {
	struct pf_worker_data *pf;
	int lock_fd;
	int flags;	/* action bits | PF_ASYNC_LOCK_DONE when finished */
};
614 static void prefork_lock_handler(struct tevent_context *ev,
615 struct tevent_timer *te,
616 struct timeval curtime, void *pvt);
618 static struct tevent_req *prefork_lock_send(TALLOC_CTX *mem_ctx,
619 struct tevent_context *ev,
620 struct pf_worker_data *pf,
621 int lock_fd, int action)
623 struct tevent_req *req;
624 struct pf_lock_state *state;
626 req = tevent_req_create(mem_ctx, &state, struct pf_lock_state);
627 if (!req) {
628 return NULL;
631 state->pf = pf;
632 state->lock_fd = lock_fd;
633 state->flags = action;
635 /* try once immediately */
636 prefork_lock_handler(ev, NULL, tevent_timeval_zero(), req);
637 if (state->flags & PF_ASYNC_LOCK_DONE) {
638 tevent_req_post(req, ev);
641 return req;
644 static void prefork_lock_handler(struct tevent_context *ev,
645 struct tevent_timer *te,
646 struct timeval curtime, void *pvt)
648 struct tevent_req *req;
649 struct pf_lock_state *state;
650 struct timeval tv;
651 int timeout = 0;
652 int ret;
654 req = talloc_get_type_abort(pvt, struct tevent_req);
655 state = tevent_req_data(req, struct pf_lock_state);
657 if (state->pf->num_clients > 0) {
658 timeout = 1;
661 switch (state->flags & PF_ASYNC_ACTION_MASK) {
662 case PF_ASYNC_LOCK_GRAB:
663 ret = prefork_grab_lock(state->pf, state->lock_fd, timeout);
664 break;
665 case PF_ASYNC_LOCK_RELEASE:
666 ret = prefork_release_lock(state->pf, state->lock_fd, timeout);
667 break;
668 default:
669 ret = EINVAL;
670 break;
673 switch (ret) {
674 case 0:
675 state->flags |= PF_ASYNC_LOCK_DONE;
676 tevent_req_done(req);
677 return;
678 case -1:
679 if (timeout) {
680 tv = tevent_timeval_zero();
681 } else {
682 tv = tevent_timeval_current_ofs(0, 100000);
684 te = tevent_add_timer(ev, state, tv,
685 prefork_lock_handler, req);
686 tevent_req_nomem(te, req);
687 return;
688 case -2:
689 /* server tells us to stop */
690 state->flags |= PF_ASYNC_LOCK_DONE;
691 tevent_req_error(req, -2);
692 return;
693 default:
694 state->flags |= PF_ASYNC_LOCK_DONE;
695 tevent_req_error(req, ret);
696 return;
700 static int prefork_lock_recv(struct tevent_req *req)
702 int ret;
704 if (!tevent_req_is_unix_error(req, &ret)) {
705 ret = 0;
708 tevent_req_received(req);
709 return ret;
/* State for the async listen/accept request. */
struct pf_listen_state {
	struct tevent_context *ev;
	struct pf_worker_data *pf;

	/* sockets being watched for incoming connections */
	int listen_fd_size;
	int *listen_fds;

	int lock_fd;

	int accept_fd;	/* accepted client socket, -1 until accept() */

	struct tsocket_address *srv_addr;
	struct tsocket_address *cli_addr;

	int error;	/* first errno hit during accept phase, else 0 */
};
729 static void prefork_listen_lock_done(struct tevent_req *subreq);
730 static void prefork_listen_accept_handler(struct tevent_context *ev,
731 struct tevent_fd *fde,
732 uint16_t flags, void *pvt);
733 static void prefork_listen_release_done(struct tevent_req *subreq);
735 struct tevent_req *prefork_listen_send(TALLOC_CTX *mem_ctx,
736 struct tevent_context *ev,
737 struct pf_worker_data *pf,
738 int listen_fd_size,
739 int *listen_fds,
740 int lock_fd)
742 struct tevent_req *req, *subreq;
743 struct pf_listen_state *state;
745 req = tevent_req_create(mem_ctx, &state, struct pf_listen_state);
746 if (!req) {
747 return NULL;
750 state->ev = ev;
751 state->pf = pf;
752 state->lock_fd = lock_fd;
753 state->listen_fd_size = listen_fd_size;
754 state->listen_fds = listen_fds;
755 state->accept_fd = -1;
756 state->error = 0;
758 subreq = prefork_lock_send(state, state->ev, state->pf,
759 state->lock_fd, PF_ASYNC_LOCK_GRAB);
760 if (tevent_req_nomem(subreq, req)) {
761 return tevent_req_post(req, ev);
764 tevent_req_set_callback(subreq, prefork_listen_lock_done, req);
765 return req;
768 struct pf_listen_ctx {
769 TALLOC_CTX *fde_ctx;
770 struct tevent_req *req;
771 int listen_fd;
774 static void prefork_listen_lock_done(struct tevent_req *subreq)
776 struct tevent_req *req;
777 struct pf_listen_state *state;
778 struct pf_listen_ctx *ctx;
779 struct tevent_fd *fde;
780 TALLOC_CTX *fde_ctx;
781 int ret;
782 int i;
784 req = tevent_req_callback_data(subreq, struct tevent_req);
785 state = tevent_req_data(req, struct pf_listen_state);
787 ret = prefork_lock_recv(subreq);
788 if (ret != 0) {
789 tevent_req_error(req, ret);
790 return;
793 fde_ctx = talloc_new(state);
794 if (tevent_req_nomem(fde_ctx, req)) {
795 return;
798 /* next step, accept */
799 for (i = 0; i < state->listen_fd_size; i++) {
800 ctx = talloc(fde_ctx, struct pf_listen_ctx);
801 if (tevent_req_nomem(ctx, req)) {
802 return;
804 ctx->fde_ctx = fde_ctx;
805 ctx->req = req;
806 ctx->listen_fd = state->listen_fds[i];
808 fde = tevent_add_fd(state->ev, fde_ctx,
809 ctx->listen_fd, TEVENT_FD_READ,
810 prefork_listen_accept_handler, ctx);
811 if (tevent_req_nomem(fde, req)) {
812 return;
817 static void prefork_listen_accept_handler(struct tevent_context *ev,
818 struct tevent_fd *fde,
819 uint16_t flags, void *pvt)
821 struct pf_listen_state *state;
822 struct tevent_req *req, *subreq;
823 struct pf_listen_ctx *ctx;
824 struct sockaddr_storage addr;
825 socklen_t addrlen;
826 int err = 0;
827 int sd = -1;
828 int ret;
830 ctx = talloc_get_type_abort(pvt, struct pf_listen_ctx);
831 state = tevent_req_data(ctx->req, struct pf_listen_state);
833 ZERO_STRUCT(addr);
834 addrlen = sizeof(addr);
835 sd = accept(ctx->listen_fd, (struct sockaddr *)&addr, &addrlen);
836 if (sd == -1) {
837 if (errno == EINTR) {
838 /* keep trying */
839 return;
841 err = errno;
842 DEBUG(6, ("Accept failed! (%d, %s)\n", err, strerror(err)));
845 /* do not track the listen fds anymore */
846 req = ctx->req;
847 talloc_free(ctx->fde_ctx);
848 ctx = NULL;
849 if (err) {
850 state->error = err;
851 goto done;
854 state->accept_fd = sd;
856 ret = tsocket_address_bsd_from_sockaddr(state,
857 (struct sockaddr *)(void *)&addr,
858 addrlen, &state->cli_addr);
859 if (ret < 0) {
860 state->error = errno;
861 goto done;
864 ZERO_STRUCT(addr);
865 addrlen = sizeof(addr);
866 ret = getsockname(sd, (struct sockaddr *)(void *)&addr, &addrlen);
867 if (ret < 0) {
868 state->error = errno;
869 goto done;
872 ret = tsocket_address_bsd_from_sockaddr(state,
873 (struct sockaddr *)(void *)&addr,
874 addrlen, &state->srv_addr);
875 if (ret < 0) {
876 state->error = errno;
877 goto done;
880 done:
881 /* release lock now */
882 subreq = prefork_lock_send(state, state->ev, state->pf,
883 state->lock_fd, PF_ASYNC_LOCK_RELEASE);
884 if (tevent_req_nomem(subreq, req)) {
885 return;
887 tevent_req_set_callback(subreq, prefork_listen_release_done, req);
890 static void prefork_listen_release_done(struct tevent_req *subreq)
892 struct tevent_req *req;
893 int ret;
895 req = tevent_req_callback_data(subreq, struct tevent_req);
897 ret = prefork_lock_recv(subreq);
898 if (ret != 0) {
899 tevent_req_error(req, ret);
900 return;
903 tevent_req_done(req);
906 int prefork_listen_recv(struct tevent_req *req,
907 TALLOC_CTX *mem_ctx, int *fd,
908 struct tsocket_address **srv_addr,
909 struct tsocket_address **cli_addr)
911 struct pf_listen_state *state;
912 int ret = 0;
914 state = tevent_req_data(req, struct pf_listen_state);
916 if (state->error) {
917 ret = state->error;
918 } else {
919 tevent_req_is_unix_error(req, &ret);
922 if (ret) {
923 if (state->accept_fd != -1) {
924 close(state->accept_fd);
926 } else {
927 *fd = state->accept_fd;
928 *srv_addr = talloc_move(mem_ctx, &state->srv_addr);
929 *cli_addr = talloc_move(mem_ctx, &state->cli_addr);
930 state->pf->status = PF_WORKER_BUSY;
931 state->pf->num_clients++;
934 tevent_req_received(req);
935 return ret;