s3-prefork: Allow better management of allowed_clients
source3/lib/server_prefork.c
/*
   Unix SMB/CIFS implementation.
   Common server globals

   Copyright (C) Simo Sorce <idra@samba.org> 2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/time.h"
#include "system/shmem.h"
#include "system/filesys.h"
#include "server_prefork.h"
#include "../lib/util/util.h"
#include "../lib/util/tevent_unix.h"
struct prefork_pool {

	int listen_fd_size;
	int *listen_fds;

	int lock_fd;

	prefork_main_fn_t *main_fn;
	void *private_data;

	int pool_size;
	struct pf_worker_data *pool;

	int allowed_clients;

	prefork_sigchld_fn_t *sigchld_fn;
	void *sigchld_data;
};

static bool prefork_setup_sigchld_handler(struct tevent_context *ev_ctx,
					   struct prefork_pool *pfp);

static int prefork_pool_destructor(struct prefork_pool *pfp)
{
	anonymous_shared_free(pfp->pool);
	return 0;
}
bool prefork_create_pool(TALLOC_CTX *mem_ctx,
			 struct tevent_context *ev_ctx,
			 struct messaging_context *msg_ctx,
			 int listen_fd_size, int *listen_fds,
			 int min_children, int max_children,
			 prefork_main_fn_t *main_fn, void *private_data,
			 struct prefork_pool **pf_pool)
{
	struct prefork_pool *pfp;
	pid_t pid;
	time_t now = time(NULL);
	size_t data_size;
	int ret;
	int i;
	bool ok;

	pfp = talloc_zero(mem_ctx, struct prefork_pool);
	if (!pfp) {
		DEBUG(1, ("Out of memory!\n"));
		return false;
	}
	pfp->listen_fd_size = listen_fd_size;
	pfp->listen_fds = talloc_array(pfp, int, listen_fd_size);
	if (!pfp->listen_fds) {
		DEBUG(1, ("Out of memory!\n"));
		return false;
	}
	for (i = 0; i < listen_fd_size; i++) {
		pfp->listen_fds[i] = listen_fds[i];
	}
	pfp->main_fn = main_fn;
	pfp->private_data = private_data;

	pfp->lock_fd = create_unlink_tmp(NULL);
	if (pfp->lock_fd == -1) {
		DEBUG(1, ("Failed to create prefork lock fd!\n"));
		talloc_free(pfp);
		return false;
	}

	pfp->pool_size = max_children;
	data_size = sizeof(struct pf_worker_data) * max_children;

	pfp->pool = anonymous_shared_allocate(data_size);
	if (pfp->pool == NULL) {
		DEBUG(1, ("Failed to mmap memory for prefork pool!\n"));
		talloc_free(pfp);
		return false;
	}
	talloc_set_destructor(pfp, prefork_pool_destructor);

	for (i = 0; i < min_children; i++) {

		pfp->pool[i].allowed_clients = 1;
		pfp->pool[i].started = now;

		pid = sys_fork();
		switch (pid) {
		case -1:
			DEBUG(1, ("Failed to prefork child n. %d !\n", i));
			break;

		case 0: /* THE CHILD */

			pfp->pool[i].status = PF_WORKER_IDLE;
			ret = pfp->main_fn(ev_ctx, msg_ctx,
					   &pfp->pool[i], i + 1,
					   pfp->listen_fd_size,
					   pfp->listen_fds,
					   pfp->lock_fd,
					   pfp->private_data);
			exit(ret);

		default: /* THE PARENT */
			pfp->pool[i].pid = pid;
			break;
		}
	}

	ok = prefork_setup_sigchld_handler(ev_ctx, pfp);
	if (!ok) {
		DEBUG(1, ("Failed to setup SIGCHLD Handler!\n"));
		talloc_free(pfp);
		return false;
	}

	*pf_pool = pfp;
	return true;
}
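
/*
 * Example only, not part of the original file: a minimal sketch of how a
 * parent process might create a pool.  example_worker_main(), the single
 * listening fd and the pool sizes are hypothetical; the callback signature
 * is inferred from the way main_fn is invoked in prefork_create_pool().
 */
#if 0
static int example_worker_main(struct tevent_context *ev_ctx,
			       struct messaging_context *msg_ctx,
			       struct pf_worker_data *pf, int id,
			       int listen_fd_size, int *listen_fds,
			       int lock_fd, void *private_data)
{
	/* serve clients here, then return this child's exit code */
	return 0;
}

static bool example_start_pool(TALLOC_CTX *mem_ctx,
			       struct tevent_context *ev_ctx,
			       struct messaging_context *msg_ctx,
			       int listen_fd)
{
	struct prefork_pool *pool = NULL;

	/* one listening socket, 5 children at startup, room for up to 25 */
	return prefork_create_pool(mem_ctx, ev_ctx, msg_ctx,
				   1, &listen_fd, 5, 25,
				   example_worker_main, NULL,
				   &pool);
}
#endif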
/* Provide the new max children number in new_max
 * (must be larger than current max).
 * Returns: 0 if all fine
 *	    ENOSPC if mremap fails to expand
 *	    EINVAL if new_max is invalid
 */
int prefork_expand_pool(struct prefork_pool *pfp, int new_max)
{
	struct pf_worker_data *pool;
	size_t old_size;
	size_t new_size;
	int ret;

	if (new_max <= pfp->pool_size) {
		return EINVAL;
	}

	old_size = sizeof(struct pf_worker_data) * pfp->pool_size;
	new_size = sizeof(struct pf_worker_data) * new_max;

	pool = anonymous_shared_resize(&pfp->pool, new_size, false);
	if (pool == NULL) {
		ret = errno;
		DEBUG(3, ("Failed to mremap memory (%d: %s)!\n",
			  ret, strerror(ret)));
		return ret;
	}

	/* zero only the newly added worker slots */
	memset(&pool[pfp->pool_size], 0, new_size - old_size);

	pfp->pool_size = new_max;

	return 0;
}
int prefork_add_children(struct tevent_context *ev_ctx,
			 struct messaging_context *msg_ctx,
			 struct prefork_pool *pfp,
			 int num_children)
{
	pid_t pid;
	time_t now = time(NULL);
	int ret;
	int i, j;

	for (i = 0, j = 0; i < pfp->pool_size && j < num_children; i++) {

		if (pfp->pool[i].status != PF_WORKER_NONE) {
			continue;
		}

		pfp->pool[i].allowed_clients = 1;
		pfp->pool[i].started = now;

		pid = sys_fork();
		switch (pid) {
		case -1:
			DEBUG(1, ("Failed to prefork child n. %d !\n", j));
			break;

		case 0: /* THE CHILD */

			pfp->pool[i].status = PF_WORKER_IDLE;
			ret = pfp->main_fn(ev_ctx, msg_ctx,
					   &pfp->pool[i], i + 1,
					   pfp->listen_fd_size,
					   pfp->listen_fds,
					   pfp->lock_fd,
					   pfp->private_data);

			pfp->pool[i].status = PF_WORKER_EXITING;
			exit(ret);

		default: /* THE PARENT */
			pfp->pool[i].pid = pid;
			j++;
			break;
		}
	}

	DEBUG(5, ("Added %d children!\n", j));

	return j;
}
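
/*
 * Example only, not part of the original file: a rough sketch of how the
 * parent might grow a busy pool.  The helper name and parameters are
 * hypothetical; EINVAL from prefork_expand_pool() only means the pool is
 * already at least new_max slots large, so adding children is still valid.
 */
#if 0
static void example_grow_pool(struct tevent_context *ev_ctx,
			      struct messaging_context *msg_ctx,
			      struct prefork_pool *pfp,
			      int new_max, int batch)
{
	int ret;

	/* make room in the shared pool first, then fork the new workers */
	ret = prefork_expand_pool(pfp, new_max);
	if (ret != 0 && ret != EINVAL) {
		return;
	}

	prefork_add_children(ev_ctx, msg_ctx, pfp, batch);
}
#endif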
struct prefork_oldest {
	int num;
	time_t started;
};

/* sort in inverse order */
static int prefork_sort_oldest(const void *ap, const void *bp)
{
	const struct prefork_oldest *a = (const struct prefork_oldest *)ap;
	const struct prefork_oldest *b = (const struct prefork_oldest *)bp;

	if (a->started == b->started) {
		return 0;
	}
	if (a->started < b->started) {
		return 1;
	}
	return -1;
}

int prefork_retire_children(struct prefork_pool *pfp,
			    int num_children, time_t age_limit)
{
	time_t now = time(NULL);
	struct prefork_oldest *oldest;
	int i, j;

	oldest = talloc_array(pfp, struct prefork_oldest, pfp->pool_size);
	if (!oldest) {
		return -1;
	}

	for (i = 0; i < pfp->pool_size; i++) {
		oldest[i].num = i;
		if (pfp->pool[i].status == PF_WORKER_IDLE) {
			oldest[i].started = pfp->pool[i].started;
		} else {
			oldest[i].started = now;
		}
	}

	qsort(oldest, pfp->pool_size,
	      sizeof(struct prefork_oldest),
	      prefork_sort_oldest);

	for (i = 0, j = 0; i < pfp->pool_size && j < num_children; i++) {
		if (pfp->pool[i].status == PF_WORKER_IDLE &&
		    pfp->pool[i].started <= age_limit) {
			/* tell the child it's time to give up */
			DEBUG(5, ("Retiring pid %d!\n", pfp->pool[i].pid));
			pfp->pool[i].cmds = PF_SRV_MSG_EXIT;
			kill(pfp->pool[i].pid, SIGHUP);
			j++;
		}
	}

	return j;
}
int prefork_count_active_children(struct prefork_pool *pfp, int *total)
{
	int i, a, t;

	a = 0;
	t = 0;
	for (i = 0; i < pfp->pool_size; i++) {
		if (pfp->pool[i].status == PF_WORKER_NONE) {
			continue;
		}

		t++;

		if (pfp->pool[i].num_clients == 0) {
			continue;
		}

		a++;
	}

	*total = t;
	return a;
}
static void prefork_cleanup_loop(struct prefork_pool *pfp)
{
	int status;
	pid_t pid;
	int i;

	/* TODO: should we use a process group id wait instead of looping ? */
	for (i = 0; i < pfp->pool_size; i++) {
		if (pfp->pool[i].status == PF_WORKER_NONE ||
		    pfp->pool[i].pid == 0) {
			continue;
		}

		pid = sys_waitpid(pfp->pool[i].pid, &status, WNOHANG);
		if (pid > 0) {

			if (pfp->pool[i].status != PF_WORKER_EXITING) {
				DEBUG(3, ("Child (%d) terminated abnormally:"
					  " %d\n", (int)pid, status));
			} else {
				DEBUG(10, ("Child (%d) terminated with status:"
					   " %d\n", (int)pid, status));
			}

			/* reset all fields,
			 * this makes status = PF_WORKER_NONE */
			memset(&pfp->pool[i], 0,
			       sizeof(struct pf_worker_data));
		}
	}
}
int prefork_count_allowed_connections(struct prefork_pool *pfp)
{
	int c;
	int i;

	c = 0;
	for (i = 0; i < pfp->pool_size; i++) {
		if (pfp->pool[i].status == PF_WORKER_NONE) {
			continue;
		}
		c += pfp->pool[i].allowed_clients - pfp->pool[i].num_clients;
	}

	return c;
}

void prefork_increase_allowed_clients(struct prefork_pool *pfp, int max)
{
	int i;

	for (i = 0; i < pfp->pool_size; i++) {
		if (pfp->pool[i].status == PF_WORKER_NONE) {
			continue;
		}
		if (pfp->pool[i].allowed_clients < max) {
			pfp->pool[i].allowed_clients++;
		}
	}
}

void prefork_decrease_allowed_clients(struct prefork_pool *pfp)
{
	int i;

	for (i = 0; i < pfp->pool_size; i++) {
		if (pfp->pool[i].status == PF_WORKER_NONE) {
			continue;
		}
		if (pfp->pool[i].allowed_clients > 1) {
			pfp->pool[i].allowed_clients--;
		}
	}
}

void prefork_reset_allowed_clients(struct prefork_pool *pfp)
{
	int i;

	for (i = 0; i < pfp->pool_size; i++) {
		pfp->pool[i].allowed_clients = 1;
	}
}
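
/*
 * Example only, not part of the original file: a hypothetical scheduling
 * pass the parent could run periodically, trading extra allowed clients per
 * child against forking additional children.  The thresholds are purely
 * illustrative.
 */
#if 0
static void example_balance_load(struct tevent_context *ev_ctx,
				 struct messaging_context *msg_ctx,
				 struct prefork_pool *pfp)
{
	int allowed = prefork_count_allowed_connections(pfp);

	if (allowed > 0) {
		/* enough spare accept slots, nothing to do */
		return;
	}

	/* prefer forking a new child; overload existing ones as a fallback */
	if (prefork_add_children(ev_ctx, msg_ctx, pfp, 1) == 0) {
		prefork_increase_allowed_clients(pfp, 10);
	}
}
#endif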
void prefork_send_signal_to_all(struct prefork_pool *pfp, int signal_num)
{
	int i;

	for (i = 0; i < pfp->pool_size; i++) {
		if (pfp->pool[i].status == PF_WORKER_NONE) {
			continue;
		}
		kill(pfp->pool[i].pid, signal_num);
	}
}

static void prefork_sigchld_handler(struct tevent_context *ev_ctx,
				    struct tevent_signal *se,
				    int signum, int count,
				    void *siginfo, void *pvt)
{
	struct prefork_pool *pfp;

	pfp = talloc_get_type_abort(pvt, struct prefork_pool);

	/* run the cleanup function to make sure all dead children are
	 * properly and timely retired. */
	prefork_cleanup_loop(pfp);

	if (pfp->sigchld_fn) {
		pfp->sigchld_fn(ev_ctx, pfp, pfp->sigchld_data);
	}
}

static bool prefork_setup_sigchld_handler(struct tevent_context *ev_ctx,
					  struct prefork_pool *pfp)
{
	struct tevent_signal *se;

	se = tevent_add_signal(ev_ctx, pfp, SIGCHLD, 0,
			       prefork_sigchld_handler, pfp);
	if (!se) {
		DEBUG(0, ("Failed to setup SIGCHLD handler!\n"));
		return false;
	}

	return true;
}

void prefork_set_sigchld_callback(struct prefork_pool *pfp,
				  prefork_sigchld_fn_t *sigchld_fn,
				  void *private_data)
{
	pfp->sigchld_fn = sigchld_fn;
	pfp->sigchld_data = private_data;
}
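
/*
 * Example only, not part of the original file: a hypothetical SIGCHLD
 * callback that keeps the pool populated after children exit.  The callback
 * signature is inferred from how sigchld_fn is invoked above; passing the
 * messaging context as private data and the minimum of 5 children are
 * illustrative choices.
 */
#if 0
static void example_sigchld_cb(struct tevent_context *ev_ctx,
			       struct prefork_pool *pfp, void *pvt)
{
	struct messaging_context *msg_ctx = talloc_get_type_abort(
		pvt, struct messaging_context);
	int total = 0;

	prefork_count_active_children(pfp, &total);
	if (total < 5) {
		prefork_add_children(ev_ctx, msg_ctx, pfp, 5 - total);
	}
}
#endif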
/* ==== Functions used by children ==== */

static SIG_ATOMIC_T pf_alarm;

static void pf_alarm_cb(int signum)
{
	pf_alarm = 1;
}

/*
 * Parameters:
 *	pf - the worker shared data structure
 *	lock_fd - the file descriptor used for locking
 *	timeout - expressed in seconds:
 *		-1 never times out,
 *		0 times out immediately,
 *		N times out after N seconds
 *
 * Return values:
 *	0 on success (lock acquired)
 *	-1 on timeout or lock held by another process
 *	-2 on server message to terminate
 *	errno value on any other error
 */

static int prefork_grab_lock(struct pf_worker_data *pf,
			     int lock_fd, int timeout)
{
	struct flock lock;
	int op;
	int ret;

	if (pf->cmds == PF_SRV_MSG_EXIT) {
		return -2;
	}

	pf_alarm = 0;

	if (timeout > 0) {
		CatchSignal(SIGALRM, pf_alarm_cb);
		alarm(timeout);
	}

	if (timeout == 0) {
		op = F_SETLK;
	} else {
		op = F_SETLKW;
	}

	ret = 0;
	do {
		ZERO_STRUCT(lock);
		lock.l_type = F_WRLCK;
		lock.l_whence = SEEK_SET;

		ret = fcntl(lock_fd, op, &lock);
		if (ret == 0) break;

		ret = errno;

		if (pf->cmds == PF_SRV_MSG_EXIT) {
			ret = -2;
			goto done;
		}

		switch (ret) {
		case EINTR:
			break;

		case EACCES:
		case EAGAIN:
			/* lock held by other proc */
			ret = -1;
			goto done;
		default:
			goto done;
		}

		if (pf_alarm == 1) {
			/* timed out */
			ret = -1;
			goto done;
		}
	} while (timeout != 0);

	if (ret == 0) {
		/* We have the Lock */
		pf->status = PF_WORKER_ACCEPTING;
	}

done:
	if (timeout > 0) {
		alarm(0);
		CatchSignal(SIGALRM, SIG_IGN);
	}

	if (ret > 0) {
		DEBUG(1, ("Failed to get lock (%d, %s)!\n",
			  ret, strerror(ret)));
	}
	return ret;
}
/*
 * Parameters:
 *	pf - the worker shared data structure
 *	lock_fd - the file descriptor used for locking
 *	timeout - expressed in seconds:
 *		-1 never times out,
 *		0 times out immediately,
 *		N times out after N seconds
 *
 * Return values:
 *	0 on success (lock released)
 *	-1 on timeout
 *	errno value on any other error
 */

static int prefork_release_lock(struct pf_worker_data *pf,
				int lock_fd, int timeout)
{
	struct flock lock;
	int op;
	int ret;

	pf_alarm = 0;

	if (timeout > 0) {
		CatchSignal(SIGALRM, pf_alarm_cb);
		alarm(timeout);
	}

	if (timeout == 0) {
		op = F_SETLK;
	} else {
		op = F_SETLKW;
	}

	do {
		ZERO_STRUCT(lock);
		lock.l_type = F_UNLCK;
		lock.l_whence = SEEK_SET;

		ret = fcntl(lock_fd, op, &lock);
		if (ret == 0) break;

		ret = errno;

		if (ret != EINTR) {
			goto done;
		}

		if (pf_alarm == 1) {
			/* timed out */
			ret = -1;
			goto done;
		}
	} while (timeout != 0);

done:
	if (timeout > 0) {
		alarm(0);
		CatchSignal(SIGALRM, SIG_IGN);
	}

	if (ret > 0) {
		DEBUG(1, ("Failed to release lock (%d, %s)!\n",
			  ret, strerror(ret)));
	}
	return ret;
}
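
/*
 * Example only, not part of the original file: the accept serialization
 * protocol shown as a blocking sketch.  A worker grabs the lock, accepts a
 * single connection, then releases the lock so a sibling can accept the
 * next one.  Error handling is reduced to the bare minimum.
 */
#if 0
static int example_blocking_accept(struct pf_worker_data *pf,
				   int listen_fd, int lock_fd)
{
	int sd;
	int ret;

	ret = prefork_grab_lock(pf, lock_fd, -1);	/* wait forever */
	if (ret != 0) {
		/* lock failed or the server asked us to exit */
		return -1;
	}

	sd = accept(listen_fd, NULL, NULL);

	ret = prefork_release_lock(pf, lock_fd, -1);
	if (ret != 0) {
		DEBUG(1, ("Failed to release the accept lock!\n"));
	}

	return sd;
}
#endif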
/* ==== async code ==== */

#define PF_ASYNC_LOCK_GRAB	0x01
#define PF_ASYNC_LOCK_RELEASE	0x02
#define PF_ASYNC_ACTION_MASK	0x03
#define PF_ASYNC_LOCK_DONE	0x04

struct pf_lock_state {
	struct pf_worker_data *pf;
	int lock_fd;
	int flags;
};

static void prefork_lock_handler(struct tevent_context *ev,
				 struct tevent_timer *te,
				 struct timeval curtime, void *pvt);

static struct tevent_req *prefork_lock_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct pf_worker_data *pf,
					    int lock_fd, int action)
{
	struct tevent_req *req;
	struct pf_lock_state *state;

	req = tevent_req_create(mem_ctx, &state, struct pf_lock_state);
	if (!req) {
		return NULL;
	}

	state->pf = pf;
	state->lock_fd = lock_fd;
	state->flags = action;

	/* try once immediately */
	prefork_lock_handler(ev, NULL, tevent_timeval_zero(), req);
	if (state->flags & PF_ASYNC_LOCK_DONE) {
		tevent_req_post(req, ev);
	}

	return req;
}
static void prefork_lock_handler(struct tevent_context *ev,
				 struct tevent_timer *te,
				 struct timeval curtime, void *pvt)
{
	struct tevent_req *req;
	struct pf_lock_state *state;
	struct timeval tv;
	int timeout = 0;
	int ret;

	req = talloc_get_type_abort(pvt, struct tevent_req);
	state = tevent_req_data(req, struct pf_lock_state);

	if (state->pf->num_clients > 0) {
		timeout = 1;
	}

	switch (state->flags & PF_ASYNC_ACTION_MASK) {
	case PF_ASYNC_LOCK_GRAB:
		ret = prefork_grab_lock(state->pf, state->lock_fd, timeout);
		break;
	case PF_ASYNC_LOCK_RELEASE:
		ret = prefork_release_lock(state->pf, state->lock_fd, timeout);
		break;
	default:
		ret = EINVAL;
		break;
	}

	switch (ret) {
	case 0:
		state->flags |= PF_ASYNC_LOCK_DONE;
		tevent_req_done(req);
		return;
	case -1:
		if (timeout) {
			tv = tevent_timeval_zero();
		} else {
			tv = tevent_timeval_current_ofs(0, 100000);
		}
		te = tevent_add_timer(ev, state, tv,
				      prefork_lock_handler, req);
		tevent_req_nomem(te, req);
		return;
	case -2:
		/* server tells us to stop */
		state->flags |= PF_ASYNC_LOCK_DONE;
		tevent_req_error(req, -2);
		return;
	default:
		state->flags |= PF_ASYNC_LOCK_DONE;
		tevent_req_error(req, ret);
		return;
	}
}

static int prefork_lock_recv(struct tevent_req *req)
{
	int ret;

	if (!tevent_req_is_unix_error(req, &ret)) {
		ret = 0;
	}

	tevent_req_received(req);
	return ret;
}
struct pf_listen_state {
	struct tevent_context *ev;
	struct pf_worker_data *pf;

	int listen_fd_size;
	int *listen_fds;

	int lock_fd;

	int accept_fd;

	struct tsocket_address *srv_addr;
	struct tsocket_address *cli_addr;

	int error;
};

static void prefork_listen_lock_done(struct tevent_req *subreq);
static void prefork_listen_accept_handler(struct tevent_context *ev,
					  struct tevent_fd *fde,
					  uint16_t flags, void *pvt);
static void prefork_listen_release_done(struct tevent_req *subreq);

struct tevent_req *prefork_listen_send(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct pf_worker_data *pf,
				       int listen_fd_size,
				       int *listen_fds,
				       int lock_fd)
{
	struct tevent_req *req, *subreq;
	struct pf_listen_state *state;

	req = tevent_req_create(mem_ctx, &state, struct pf_listen_state);
	if (!req) {
		return NULL;
	}

	state->ev = ev;
	state->pf = pf;
	state->lock_fd = lock_fd;
	state->listen_fd_size = listen_fd_size;
	state->listen_fds = listen_fds;
	state->accept_fd = -1;
	state->error = 0;

	subreq = prefork_lock_send(state, state->ev, state->pf,
				   state->lock_fd, PF_ASYNC_LOCK_GRAB);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_set_callback(subreq, prefork_listen_lock_done, req);
	return req;
}

struct pf_listen_ctx {
	TALLOC_CTX *fde_ctx;
	struct tevent_req *req;
	int listen_fd;
};
static void prefork_listen_lock_done(struct tevent_req *subreq)
{
	struct tevent_req *req;
	struct pf_listen_state *state;
	struct pf_listen_ctx *ctx;
	struct tevent_fd *fde;
	TALLOC_CTX *fde_ctx;
	int ret;
	int i;

	req = tevent_req_callback_data(subreq, struct tevent_req);
	state = tevent_req_data(req, struct pf_listen_state);

	ret = prefork_lock_recv(subreq);
	if (ret != 0) {
		tevent_req_error(req, ret);
		return;
	}

	fde_ctx = talloc_new(state);
	if (tevent_req_nomem(fde_ctx, req)) {
		return;
	}

	/* next step, accept */
	for (i = 0; i < state->listen_fd_size; i++) {
		ctx = talloc(fde_ctx, struct pf_listen_ctx);
		if (tevent_req_nomem(ctx, req)) {
			return;
		}
		ctx->fde_ctx = fde_ctx;
		ctx->req = req;
		ctx->listen_fd = state->listen_fds[i];

		fde = tevent_add_fd(state->ev, fde_ctx,
				    ctx->listen_fd, TEVENT_FD_READ,
				    prefork_listen_accept_handler, ctx);
		if (tevent_req_nomem(fde, req)) {
			return;
		}
	}
}
static void prefork_listen_accept_handler(struct tevent_context *ev,
					  struct tevent_fd *fde,
					  uint16_t flags, void *pvt)
{
	struct pf_listen_state *state;
	struct tevent_req *req, *subreq;
	struct pf_listen_ctx *ctx;
	struct sockaddr_storage addr;
	socklen_t addrlen;
	int err = 0;
	int sd = -1;
	int ret;

	ctx = talloc_get_type_abort(pvt, struct pf_listen_ctx);
	state = tevent_req_data(ctx->req, struct pf_listen_state);

	ZERO_STRUCT(addr);
	addrlen = sizeof(addr);
	sd = accept(ctx->listen_fd, (struct sockaddr *)&addr, &addrlen);
	if (sd == -1) {
		if (errno == EINTR) {
			/* keep trying */
			return;
		}
		err = errno;
		DEBUG(6, ("Accept failed! (%d, %s)\n", err, strerror(err)));
	}

	/* do not track the listen fds anymore */
	req = ctx->req;
	talloc_free(ctx->fde_ctx);
	ctx = NULL;
	if (err) {
		state->error = err;
		goto done;
	}

	state->accept_fd = sd;

	ret = tsocket_address_bsd_from_sockaddr(state,
					(struct sockaddr *)(void *)&addr,
					addrlen, &state->cli_addr);
	if (ret < 0) {
		state->error = errno;
		goto done;
	}

	ZERO_STRUCT(addr);
	addrlen = sizeof(addr);
	ret = getsockname(sd, (struct sockaddr *)(void *)&addr, &addrlen);
	if (ret < 0) {
		state->error = errno;
		goto done;
	}

	ret = tsocket_address_bsd_from_sockaddr(state,
					(struct sockaddr *)(void *)&addr,
					addrlen, &state->srv_addr);
	if (ret < 0) {
		state->error = errno;
		goto done;
	}

done:
	/* release lock now */
	subreq = prefork_lock_send(state, state->ev, state->pf,
				   state->lock_fd, PF_ASYNC_LOCK_RELEASE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, prefork_listen_release_done, req);
}
static void prefork_listen_release_done(struct tevent_req *subreq)
{
	struct tevent_req *req;
	int ret;

	req = tevent_req_callback_data(subreq, struct tevent_req);

	ret = prefork_lock_recv(subreq);
	if (ret != 0) {
		tevent_req_error(req, ret);
		return;
	}

	tevent_req_done(req);
}

int prefork_listen_recv(struct tevent_req *req,
			TALLOC_CTX *mem_ctx, int *fd,
			struct tsocket_address **srv_addr,
			struct tsocket_address **cli_addr)
{
	struct pf_listen_state *state;
	int ret = 0;

	state = tevent_req_data(req, struct pf_listen_state);

	if (state->error) {
		ret = state->error;
	} else {
		tevent_req_is_unix_error(req, &ret);
	}

	if (ret) {
		if (state->accept_fd != -1) {
			close(state->accept_fd);
		}
	} else {
		*fd = state->accept_fd;
		*srv_addr = talloc_move(mem_ctx, &state->srv_addr);
		*cli_addr = talloc_move(mem_ctx, &state->cli_addr);
		state->pf->status = PF_WORKER_BUSY;
		state->pf->num_clients++;
	}

	tevent_req_received(req);
	return ret;
}
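
/*
 * Example only, not part of the original file: a hypothetical worker loop
 * built on the async listen helpers above.  example_handle_client() is a
 * placeholder, and a real worker would drive a full tevent loop instead of
 * tevent_req_poll(); the bookkeeping mirrors what prefork_listen_recv()
 * does on the way in.
 */
#if 0
static int example_worker_loop(struct tevent_context *ev,
			       struct pf_worker_data *pf,
			       int listen_fd_size, int *listen_fds,
			       int lock_fd)
{
	while (pf->cmds != PF_SRV_MSG_EXIT) {
		TALLOC_CTX *tmp_ctx;
		struct tevent_req *req;
		struct tsocket_address *srv = NULL;
		struct tsocket_address *cli = NULL;
		int client_fd = -1;
		int ret;

		tmp_ctx = talloc_new(ev);
		if (tmp_ctx == NULL) {
			return ENOMEM;
		}

		req = prefork_listen_send(tmp_ctx, ev, pf,
					  listen_fd_size, listen_fds,
					  lock_fd);
		if (req == NULL) {
			talloc_free(tmp_ctx);
			return ENOMEM;
		}
		if (!tevent_req_poll(req, ev)) {
			talloc_free(tmp_ctx);
			return errno;
		}

		ret = prefork_listen_recv(req, tmp_ctx, &client_fd,
					  &srv, &cli);
		if (ret == 0) {
			/* serve the client, then update the shared state */
			example_handle_client(client_fd, srv, cli);
			close(client_fd);
			pf->num_clients--;
			if (pf->num_clients == 0) {
				pf->status = PF_WORKER_IDLE;
			}
		}

		talloc_free(tmp_ctx);
	}

	return 0;
}
#endif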