s3:rpc_server: call reopen_logs before we print the copyright notice
[Samba.git] / source3 / rpc_server / rpc_host.c
blobb59916b5dda88c1dc7e44b1bd38655bb0484c5ba
1 /*
2 * RPC host
4 * Implements samba-dcerpcd service.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 * This binary has two usage modes:
23 * In the normal case when invoked from smbd or winbind it is given a
24 * directory to scan via --libexec-rpcds and will invoke on demand any
25 * binaries it finds there starting with rpcd_ when a named pipe
26 * connection is requested.
28 * In the second mode it can be started explicitly from system startup
29 * scripts.
31 * When Samba is set up as an Active Directory Domain Controller the
32 * normal samba binary overrides and provides DCERPC services, whilst
33 * allowing samba-dcerpcd to provide the services that smbd used to
34 * provide in that set-up, such as SRVSVC.
36 * The second mode can also be useful for use outside of the Samba framework,
37 * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38 * it behaves like inetd and listens on sockets on behalf of RPC server
39 * implementations.
42 #include "replace.h"
43 #include <fnmatch.h>
44 #include "lib/cmdline/cmdline.h"
45 #include "lib/cmdline/closefrom_except.h"
46 #include "source3/include/includes.h"
47 #include "source3/include/auth.h"
48 #include "rpc_sock_helper.h"
49 #include "messages.h"
50 #include "lib/util_file.h"
51 #include "lib/util/tevent_unix.h"
52 #include "lib/util/tevent_ntstatus.h"
53 #include "lib/util/smb_strtox.h"
54 #include "lib/util/debug.h"
55 #include "lib/util/server_id.h"
56 #include "lib/util/util_tdb.h"
57 #include "lib/tdb_wrap/tdb_wrap.h"
58 #include "lib/async_req/async_sock.h"
59 #include "librpc/rpc/dcerpc_util.h"
60 #include "lib/tsocket/tsocket.h"
61 #include "libcli/named_pipe_auth/npa_tstream.h"
62 #include "librpc/gen_ndr/ndr_rpc_host.h"
63 #include "source3/param/loadparm.h"
64 #include "source3/lib/global_contexts.h"
65 #include "lib/util/strv.h"
66 #include "lib/util/pidfile.h"
67 #include "source3/rpc_client/cli_pipe.h"
68 #include "librpc/gen_ndr/ndr_epmapper.h"
69 #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 #include "nsswitch/winbind_client.h"
71 #include "libcli/security/dom_sid.h"
72 #include "libcli/security/security_token.h"
74 extern bool override_logfile;
76 struct rpc_server;
77 struct rpc_work_process;
/*
 * samba-dcerpcd state to keep track of rpcd_* servers.
 */
struct rpc_host {
	struct messaging_context *msg_ctx;
	struct rpc_server **servers;
	struct tdb_wrap *epmdb;

	int worker_stdin[2];

	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
101 * Map a RPC interface to a name. Used when filling the endpoint
102 * mapper database
104 struct rpc_host_iface_name {
105 struct ndr_syntax_id iface;
106 char *name;
/*
 * rpc_host representation for listening sockets. ncacn_ip_tcp might
 * listen on multiple explicit IPs, all with the same port.
 */
struct rpc_host_endpoint {
	struct rpc_server *server;
	struct dcerpc_binding *binding;
	struct ndr_syntax_id *interfaces;
	int *fds;
	size_t num_fds;
};
/*
 * Staging area until we sent the socket plus bind to the helper
 */
struct rpc_host_pending_client {
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};
/*
 * Representation of one worker process. For each rpcd_* executable
 * there can be more than one of these.
 */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_clients;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
/*
 * State for a set of running instances of an rpcd_* server executable
 */
struct rpc_server {
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	const char *rpc_server_exe;

	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info7->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	struct rpc_host_pending_client *pending_clients;
};
213 struct rpc_server_get_endpoints_state {
214 char **argl;
215 char *ncalrpc_endpoint;
216 enum dcerpc_transport_t only_transport;
218 struct rpc_host_iface_name *iface_names;
219 struct rpc_host_endpoint **endpoints;
221 unsigned long num_workers;
222 unsigned long idle_seconds;
225 static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
228 * @brief Query interfaces from an rpcd helper
230 * Spawn a rpcd helper, ask it for the interfaces it serves via
231 * --list-interfaces, parse the output
233 * @param[in] mem_ctx Memory context for the tevent_req
234 * @param[in] ev Event context to run this on
235 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
236 * @param[in] only_transport Filter out anything but this
237 * @return The tevent_req representing this process
240 static struct tevent_req *rpc_server_get_endpoints_send(
241 TALLOC_CTX *mem_ctx,
242 struct tevent_context *ev,
243 const char *rpc_server_exe,
244 enum dcerpc_transport_t only_transport)
246 struct tevent_req *req = NULL, *subreq = NULL;
247 struct rpc_server_get_endpoints_state *state = NULL;
248 const char *progname = NULL;
250 req = tevent_req_create(
251 mem_ctx, &state, struct rpc_server_get_endpoints_state);
252 if (req == NULL) {
253 return NULL;
255 state->only_transport = only_transport;
257 progname = strrchr(rpc_server_exe, '/');
258 if (progname != NULL) {
259 progname += 1;
260 } else {
261 progname = rpc_server_exe;
264 state->ncalrpc_endpoint = talloc_strdup(state, progname);
265 if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
266 return tevent_req_post(req, ev);
269 state->argl = talloc_array(state, char *, 4);
270 if (tevent_req_nomem(state->argl, req)) {
271 return tevent_req_post(req, ev);
274 state->argl = str_list_make_empty(state);
275 str_list_add_printf(&state->argl, "%s", rpc_server_exe);
276 str_list_add_printf(&state->argl, "--list-interfaces");
277 str_list_add_printf(
278 &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
280 if (tevent_req_nomem(state->argl, req)) {
281 return tevent_req_post(req, ev);
284 subreq = file_ploadv_send(state, ev, state->argl, 65536);
285 if (tevent_req_nomem(subreq, req)) {
286 return tevent_req_post(req, ev);
288 tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
289 return req;
293 * Parse a line of format
295 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
297 * and add it to the "piface_names" array.
300 static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
301 TALLOC_CTX *mem_ctx,
302 struct rpc_host_iface_name **piface_names,
303 const char *line)
305 struct rpc_host_iface_name *iface_names = *piface_names;
306 struct rpc_host_iface_name *tmp = NULL, *result = NULL;
307 size_t i, num_ifaces = talloc_array_length(iface_names);
308 struct ndr_syntax_id iface;
309 char *name = NULL;
310 bool ok;
312 ok = ndr_syntax_id_from_string(line, &iface);
313 if (!ok) {
314 DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
315 line);
316 return NULL;
319 name = strchr(line, ' ');
320 if (name == NULL) {
321 return NULL;
323 name += 1;
325 for (i=0; i<num_ifaces; i++) {
326 result = &iface_names[i];
328 if (ndr_syntax_id_equal(&result->iface, &iface)) {
329 return result;
333 if (num_ifaces + 1 < num_ifaces) {
334 return NULL;
337 name = talloc_strdup(mem_ctx, name);
338 if (name == NULL) {
339 return NULL;
342 tmp = talloc_realloc(
343 mem_ctx,
344 iface_names,
345 struct rpc_host_iface_name,
346 num_ifaces + 1);
347 if (tmp == NULL) {
348 TALLOC_FREE(name);
349 return NULL;
351 iface_names = tmp;
353 result = &iface_names[num_ifaces];
355 *result = (struct rpc_host_iface_name) {
356 .iface = iface,
357 .name = talloc_move(iface_names, &name),
360 *piface_names = iface_names;
362 return result;
365 static struct rpc_host_iface_name *rpc_host_iface_names_find(
366 struct rpc_host_iface_name *iface_names,
367 const struct ndr_syntax_id *iface)
369 size_t i, num_iface_names = talloc_array_length(iface_names);
371 for (i=0; i<num_iface_names; i++) {
372 struct rpc_host_iface_name *iface_name = &iface_names[i];
374 if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
375 return iface_name;
379 return NULL;
382 static bool dcerpc_binding_same_endpoint(
383 const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
385 enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
386 enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
387 const char *e1 = NULL, *e2 = NULL;
388 int cmp;
390 if (t1 != t2) {
391 return false;
394 e1 = dcerpc_binding_get_string_option(b1, "endpoint");
395 e2 = dcerpc_binding_get_string_option(b2, "endpoint");
397 if ((e1 == NULL) && (e2 == NULL)) {
398 return true;
400 if ((e1 == NULL) || (e2 == NULL)) {
401 return false;
403 cmp = strcmp(e1, e2);
404 return (cmp == 0);
408 * @brief Filter whether we want to serve an endpoint
410 * samba-dcerpcd might want to serve all endpoints a rpcd reported to
411 * us via --list-interfaces.
413 * In member mode, we only serve named pipes. Indicated by NCACN_NP
414 * passed in via "only_transport".
416 * @param[in] binding Which binding is in question?
417 * @param[in] only_transport Exclusive transport to serve
418 * @return Do we want to serve "binding" from samba-dcerpcd?
421 static bool rpc_host_serve_endpoint(
422 struct dcerpc_binding *binding,
423 enum dcerpc_transport_t only_transport)
425 enum dcerpc_transport_t transport =
426 dcerpc_binding_get_transport(binding);
428 if (only_transport == NCA_UNKNOWN) {
429 /* no filter around */
430 return true;
433 if (transport != only_transport) {
434 /* filter out */
435 return false;
438 return true;
441 static struct rpc_host_endpoint *rpc_host_endpoint_find(
442 struct rpc_server_get_endpoints_state *state,
443 const char *binding_string)
445 size_t i, num_endpoints = talloc_array_length(state->endpoints);
446 struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
447 enum dcerpc_transport_t transport;
448 NTSTATUS status;
449 bool serve_this;
451 ep = talloc_zero(state, struct rpc_host_endpoint);
452 if (ep == NULL) {
453 goto fail;
456 status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
457 if (!NT_STATUS_IS_OK(status)) {
458 DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
459 binding_string,
460 nt_errstr(status));
461 goto fail;
464 serve_this = rpc_host_serve_endpoint(
465 ep->binding, state->only_transport);
466 if (!serve_this) {
467 goto fail;
470 transport = dcerpc_binding_get_transport(ep->binding);
472 if (transport == NCALRPC) {
473 const char *ncalrpc_sock = dcerpc_binding_get_string_option(
474 ep->binding, "endpoint");
476 if (ncalrpc_sock == NULL) {
478 * generic ncalrpc:, set program-specific
479 * socket name. epmapper will redirect clients
480 * properly.
482 status = dcerpc_binding_set_string_option(
483 ep->binding,
484 "endpoint",
485 state->ncalrpc_endpoint);
486 if (!NT_STATUS_IS_OK(status)) {
487 DBG_DEBUG("dcerpc_binding_set_string_option "
488 "failed: %s\n",
489 nt_errstr(status));
490 goto fail;
495 for (i=0; i<num_endpoints; i++) {
497 bool ok = dcerpc_binding_same_endpoint(
498 ep->binding, state->endpoints[i]->binding);
500 if (ok) {
501 TALLOC_FREE(ep);
502 return state->endpoints[i];
506 if (num_endpoints + 1 < num_endpoints) {
507 goto fail;
510 tmp = talloc_realloc(
511 state,
512 state->endpoints,
513 struct rpc_host_endpoint *,
514 num_endpoints + 1);
515 if (tmp == NULL) {
516 goto fail;
518 state->endpoints = tmp;
519 state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);
521 return state->endpoints[num_endpoints];
522 fail:
523 TALLOC_FREE(ep);
524 return NULL;
527 static bool ndr_interfaces_add_unique(
528 TALLOC_CTX *mem_ctx,
529 struct ndr_syntax_id **pifaces,
530 const struct ndr_syntax_id *iface)
532 struct ndr_syntax_id *ifaces = *pifaces;
533 size_t i, num_ifaces = talloc_array_length(ifaces);
535 for (i=0; i<num_ifaces; i++) {
536 if (ndr_syntax_id_equal(iface, &ifaces[i])) {
537 return true;
541 if (num_ifaces + 1 < num_ifaces) {
542 return false;
544 ifaces = talloc_realloc(
545 mem_ctx,
546 ifaces,
547 struct ndr_syntax_id,
548 num_ifaces + 1);
549 if (ifaces == NULL) {
550 return false;
552 ifaces[num_ifaces] = *iface;
554 *pifaces = ifaces;
555 return true;
559 * Read the text reply from the rpcd_* process telling us what
560 * endpoints it will serve when asked with --list-interfaces.
562 static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
564 struct tevent_req *req = tevent_req_callback_data(
565 subreq, struct tevent_req);
566 struct rpc_server_get_endpoints_state *state = tevent_req_data(
567 req, struct rpc_server_get_endpoints_state);
568 struct rpc_host_iface_name *iface = NULL;
569 uint8_t *buf = NULL;
570 size_t buflen;
571 char **lines = NULL;
572 int ret, i, num_lines;
574 ret = file_ploadv_recv(subreq, state, &buf);
575 TALLOC_FREE(subreq);
576 if (tevent_req_error(req, ret)) {
577 return;
580 buflen = talloc_get_size(buf);
581 if (buflen == 0) {
582 tevent_req_done(req);
583 return;
586 lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
587 if (tevent_req_nomem(lines, req)) {
588 return;
591 if (num_lines < 2) {
592 DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
593 tevent_req_error(req, EINVAL);
594 return;
597 state->num_workers = smb_strtoul(
598 lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
599 if (ret != 0) {
600 DBG_DEBUG("Could not parse num_workers(%s): %s\n",
601 lines[0],
602 strerror(ret));
603 tevent_req_error(req, ret);
604 return;
607 state->idle_seconds = smb_strtoul(
608 lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
609 if (ret != 0) {
610 DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
611 lines[1],
612 strerror(ret));
613 tevent_req_error(req, ret);
614 return;
617 DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
618 state->num_workers,
619 state->idle_seconds,
620 state->argl[0]);
622 for (i=2; i<num_lines; i++) {
623 char *line = lines[i];
624 struct rpc_host_endpoint *endpoint = NULL;
625 bool ok;
627 if (line[0] != ' ') {
628 iface = rpc_exe_parse_iface_line(
629 state, &state->iface_names, line);
630 if (iface == NULL) {
631 DBG_WARNING(
632 "rpc_exe_parse_iface_line failed "
633 "for: [%s] from %s\n",
634 line,
635 state->argl[0]);
636 tevent_req_oom(req);
637 return;
639 continue;
642 if (iface == NULL) {
643 DBG_DEBUG("Interface GUID line missing\n");
644 tevent_req_error(req, EINVAL);
645 return;
648 endpoint = rpc_host_endpoint_find(state, line+1);
649 if (endpoint == NULL) {
650 DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
651 line+1);
652 continue;
655 ok = ndr_interfaces_add_unique(
656 endpoint,
657 &endpoint->interfaces,
658 &iface->iface);
659 if (!ok) {
660 DBG_DEBUG("ndr_interfaces_add_unique failed\n");
661 tevent_req_oom(req);
662 return;
666 tevent_req_done(req);
670 * @brief Receive output from --list-interfaces
672 * @param[in] req The async req that just finished
673 * @param[in] mem_ctx Where to put the output on
674 * @param[out] endpoints The endpoints to be listened on
675 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
676 * @return 0/errno
678 static int rpc_server_get_endpoints_recv(
679 struct tevent_req *req,
680 TALLOC_CTX *mem_ctx,
681 struct rpc_host_endpoint ***endpoints,
682 struct rpc_host_iface_name **iface_names,
683 size_t *num_workers,
684 size_t *idle_seconds)
686 struct rpc_server_get_endpoints_state *state = tevent_req_data(
687 req, struct rpc_server_get_endpoints_state);
688 int err;
690 if (tevent_req_is_unix_error(req, &err)) {
691 tevent_req_received(req);
692 return err;
695 *endpoints = talloc_move(mem_ctx, &state->endpoints);
696 *iface_names = talloc_move(mem_ctx, &state->iface_names);
697 *num_workers = state->num_workers;
698 *idle_seconds = state->idle_seconds;
699 tevent_req_received(req);
700 return 0;
704 * For NCACN_NP we get the named pipe auth info from smbd, if a client
705 * comes in via TCP or NCALPRC we need to invent it ourselves with
706 * anonymous session info.
709 static NTSTATUS rpc_host_generate_npa_info7_from_sock(
710 TALLOC_CTX *mem_ctx,
711 enum dcerpc_transport_t transport,
712 int sock,
713 const struct samba_sockaddr *peer_addr,
714 struct named_pipe_auth_req_info7 **pinfo7)
716 struct named_pipe_auth_req_info7 *info7 = NULL;
717 struct samba_sockaddr local_addr = {
718 .sa_socklen = sizeof(struct sockaddr_storage),
720 struct tsocket_address *taddr = NULL;
721 char *remote_client_name = NULL;
722 char *remote_client_addr = NULL;
723 char *local_server_name = NULL;
724 char *local_server_addr = NULL;
725 char *(*tsocket_address_to_name_fn)(
726 const struct tsocket_address *addr,
727 TALLOC_CTX *mem_ctx) = NULL;
728 NTSTATUS status = NT_STATUS_NO_MEMORY;
729 int ret;
732 * For NCACN_NP we get the npa info from smbd
734 SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));
736 tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
737 tsocket_address_inet_addr_string : tsocket_address_unix_path;
739 info7 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info7);
740 if (info7 == NULL) {
741 goto fail;
743 info7->session_info =
744 talloc_zero(info7, struct auth_session_info_transport);
745 if (info7->session_info == NULL) {
746 goto fail;
749 status = make_session_info_anonymous(
750 info7->session_info,
751 &info7->session_info->session_info);
752 if (!NT_STATUS_IS_OK(status)) {
753 DBG_DEBUG("make_session_info_anonymous failed: %s\n",
754 nt_errstr(status));
755 goto fail;
758 ret = tsocket_address_bsd_from_samba_sockaddr(info7,
759 peer_addr,
760 &taddr);
761 if (ret == -1) {
762 status = map_nt_error_from_unix(errno);
763 DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
764 "%s\n",
765 strerror(errno));
766 goto fail;
768 remote_client_addr = tsocket_address_to_name_fn(taddr, info7);
769 if (remote_client_addr == NULL) {
770 DBG_DEBUG("tsocket_address_to_name_fn failed\n");
771 goto nomem;
773 TALLOC_FREE(taddr);
775 remote_client_name = talloc_strdup(info7, remote_client_addr);
776 if (remote_client_name == NULL) {
777 DBG_DEBUG("talloc_strdup failed\n");
778 goto nomem;
781 if (transport == NCACN_IP_TCP) {
782 bool ok = samba_sockaddr_get_port(peer_addr,
783 &info7->remote_client_port);
784 if (!ok) {
785 DBG_DEBUG("samba_sockaddr_get_port failed\n");
786 status = NT_STATUS_INVALID_PARAMETER;
787 goto fail;
791 ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
792 if (ret == -1) {
793 status = map_nt_error_from_unix(errno);
794 DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
795 goto fail;
798 ret = tsocket_address_bsd_from_samba_sockaddr(info7,
799 &local_addr,
800 &taddr);
801 if (ret == -1) {
802 status = map_nt_error_from_unix(errno);
803 DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
804 "%s\n",
805 strerror(errno));
806 goto fail;
808 local_server_addr = tsocket_address_to_name_fn(taddr, info7);
809 if (local_server_addr == NULL) {
810 DBG_DEBUG("tsocket_address_to_name_fn failed\n");
811 goto nomem;
813 TALLOC_FREE(taddr);
815 local_server_name = talloc_strdup(info7, local_server_addr);
816 if (local_server_name == NULL) {
817 DBG_DEBUG("talloc_strdup failed\n");
818 goto nomem;
821 if (transport == NCACN_IP_TCP) {
822 bool ok = samba_sockaddr_get_port(&local_addr,
823 &info7->local_server_port);
824 if (!ok) {
825 DBG_DEBUG("samba_sockaddr_get_port failed\n");
826 status = NT_STATUS_INVALID_PARAMETER;
827 goto fail;
831 if (transport == NCALRPC) {
832 uid_t uid;
833 gid_t gid;
835 ret = getpeereid(sock, &uid, &gid);
836 if (ret < 0) {
837 status = map_nt_error_from_unix(errno);
838 DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
839 goto fail;
842 if (uid == sec_initial_uid()) {
845 * Indicate "root" to gensec
848 TALLOC_FREE(remote_client_addr);
849 TALLOC_FREE(remote_client_name);
851 ret = tsocket_address_unix_from_path(
852 info7,
853 AS_SYSTEM_MAGIC_PATH_TOKEN,
854 &taddr);
855 if (ret == -1) {
856 DBG_DEBUG("tsocket_address_unix_from_path "
857 "failed\n");
858 goto nomem;
861 remote_client_addr =
862 tsocket_address_unix_path(taddr, info7);
863 if (remote_client_addr == NULL) {
864 DBG_DEBUG("tsocket_address_unix_path "
865 "failed\n");
866 goto nomem;
868 remote_client_name =
869 talloc_strdup(info7, remote_client_addr);
870 if (remote_client_name == NULL) {
871 DBG_DEBUG("talloc_strdup failed\n");
872 goto nomem;
877 info7->remote_client_addr = remote_client_addr;
878 info7->remote_client_name = remote_client_name;
879 info7->local_server_addr = local_server_addr;
880 info7->local_server_name = local_server_name;
882 *pinfo7 = info7;
883 return NT_STATUS_OK;
885 nomem:
886 status = NT_STATUS_NO_MEMORY;
887 fail:
888 TALLOC_FREE(info7);
889 return status;
/* Async state for reading the first bind packet from a client */
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	int sock;
	struct tstream_context *plain;
	struct tstream_context *npa_stream;

	struct ncacn_packet *pkt;
	struct rpc_host_client *client;
};
903 static void rpc_host_bind_read_cleanup(
904 struct tevent_req *req, enum tevent_req_state req_state);
905 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
906 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
909 * Wait for a bind packet from a client.
911 static struct tevent_req *rpc_host_bind_read_send(
912 TALLOC_CTX *mem_ctx,
913 struct tevent_context *ev,
914 enum dcerpc_transport_t transport,
915 int *psock,
916 const struct samba_sockaddr *peer_addr)
918 struct tevent_req *req = NULL, *subreq = NULL;
919 struct rpc_host_bind_read_state *state = NULL;
920 int rc, sock_dup;
921 NTSTATUS status;
923 req = tevent_req_create(
924 mem_ctx, &state, struct rpc_host_bind_read_state);
925 if (req == NULL) {
926 return NULL;
928 state->ev = ev;
930 state->sock = *psock;
931 *psock = -1;
933 tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);
935 state->client = talloc_zero(state, struct rpc_host_client);
936 if (tevent_req_nomem(state->client, req)) {
937 return tevent_req_post(req, ev);
941 * Dup the socket to read the first RPC packet:
942 * tstream_bsd_existing_socket() takes ownership with
943 * autoclose, but we need to send "sock" down to our worker
944 * process later.
946 sock_dup = dup(state->sock);
947 if (sock_dup == -1) {
948 tevent_req_error(req, errno);
949 return tevent_req_post(req, ev);
952 rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
953 if (rc == -1) {
954 DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
955 strerror(errno));
956 tevent_req_error(req, errno);
957 close(sock_dup);
958 return tevent_req_post(req, ev);
961 if (transport == NCACN_NP) {
962 subreq = tstream_npa_accept_existing_send(
963 state,
965 state->plain,
966 FILE_TYPE_MESSAGE_MODE_PIPE,
967 0xff | 0x0400 | 0x0100,
968 4096);
969 if (tevent_req_nomem(subreq, req)) {
970 return tevent_req_post(req, ev);
972 tevent_req_set_callback(
973 subreq, rpc_host_bind_read_got_npa, req);
974 return req;
977 status = rpc_host_generate_npa_info7_from_sock(
978 state->client,
979 transport,
980 state->sock,
981 peer_addr,
982 &state->client->npa_info7);
983 if (!NT_STATUS_IS_OK(status)) {
984 tevent_req_oom(req);
985 return tevent_req_post(req, ev);
988 subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
989 if (tevent_req_nomem(subreq, req)) {
990 return tevent_req_post(req, ev);
992 tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
993 return req;
996 static void rpc_host_bind_read_cleanup(
997 struct tevent_req *req, enum tevent_req_state req_state)
999 struct rpc_host_bind_read_state *state = tevent_req_data(
1000 req, struct rpc_host_bind_read_state);
1002 if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
1003 close(state->sock);
1004 state->sock = -1;
1008 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
1010 struct tevent_req *req = tevent_req_callback_data(
1011 subreq, struct tevent_req);
1012 struct rpc_host_bind_read_state *state = tevent_req_data(
1013 req, struct rpc_host_bind_read_state);
1014 struct named_pipe_auth_req_info7 *info7 = NULL;
1015 int ret, err;
1017 ret = tstream_npa_accept_existing_recv(subreq,
1018 &err,
1019 state,
1020 &state->npa_stream,
1021 &info7,
1022 NULL, /* transport */
1023 NULL, /* remote_client_addr */
1024 NULL, /* remote_client_name */
1025 NULL, /* local_server_addr */
1026 NULL, /* local_server_name */
1027 NULL); /* session_info */
1028 if (ret == -1) {
1029 tevent_req_error(req, err);
1030 return;
1033 state->client->npa_info7 = talloc_move(state->client, &info7);
1035 subreq = dcerpc_read_ncacn_packet_send(
1036 state, state->ev, state->npa_stream);
1037 if (tevent_req_nomem(subreq, req)) {
1038 return;
1040 tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
1043 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
1045 struct tevent_req *req = tevent_req_callback_data(
1046 subreq, struct tevent_req);
1047 struct rpc_host_bind_read_state *state = tevent_req_data(
1048 req, struct rpc_host_bind_read_state);
1049 struct ncacn_packet *pkt = NULL;
1050 NTSTATUS status;
1052 status = dcerpc_read_ncacn_packet_recv(
1053 subreq,
1054 state->client,
1055 &pkt,
1056 &state->client->bind_packet);
1057 TALLOC_FREE(subreq);
1058 if (!NT_STATUS_IS_OK(status)) {
1059 DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
1060 nt_errstr(status));
1061 tevent_req_error(req, EINVAL); /* TODO */
1062 return;
1064 state->pkt = talloc_move(state, &pkt);
1066 tevent_req_done(req);
1069 static int rpc_host_bind_read_recv(
1070 struct tevent_req *req,
1071 TALLOC_CTX *mem_ctx,
1072 int *sock,
1073 struct rpc_host_client **client,
1074 struct ncacn_packet **bind_pkt)
1076 struct rpc_host_bind_read_state *state = tevent_req_data(
1077 req, struct rpc_host_bind_read_state);
1078 int err;
1080 if (tevent_req_is_unix_error(req, &err)) {
1081 tevent_req_received(req);
1082 return err;
1085 *sock = state->sock;
1086 state->sock = -1;
1088 *client = talloc_move(mem_ctx, &state->client);
1089 *bind_pkt = talloc_move(mem_ctx, &state->pkt);
1090 tevent_req_received(req);
1091 return 0;
1095 * Start the given rpcd_* binary.
1097 static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1099 struct rpc_work_process *worker = &server->workers[idx];
1100 char **argv = NULL;
1101 int ret = ENOMEM;
1103 argv = str_list_make_empty(server);
1104 str_list_add_printf(
1105 &argv, "%s", server->rpc_server_exe);
1106 str_list_add_printf(
1107 &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1108 str_list_add_printf(
1109 &argv, "--worker-group=%"PRIu32, server->server_index);
1110 str_list_add_printf(
1111 &argv, "--worker-index=%zu", idx);
1112 str_list_add_printf(
1113 &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1114 if (!is_default_dyn_LOGFILEBASE()) {
1115 str_list_add_printf(
1116 &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1118 if (argv == NULL) {
1119 ret = ENOMEM;
1120 goto fail;
1123 worker->pid = fork();
1124 if (worker->pid == -1) {
1125 ret = errno;
1126 goto fail;
1128 if (worker->pid == 0) {
1129 /* Child. */
1130 close(server->host->worker_stdin[1]);
1131 ret = dup2(server->host->worker_stdin[0], 0);
1132 if (ret != 0) {
1133 exit(1);
1135 execv(argv[0], argv);
1136 _exit(1);
1139 DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1140 server->rpc_server_exe,
1141 idx,
1142 (int)worker->pid);
1144 ret = 0;
1145 fail:
1146 TALLOC_FREE(argv);
1147 return ret;
1151 * Find an rpcd_* worker for an external client, respect server->max_workers
1153 static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
1155 struct rpc_work_process *worker = NULL;
1156 size_t i;
1157 size_t empty_slot = SIZE_MAX;
1159 uint32_t min_clients = UINT32_MAX;
1160 size_t min_worker = server->max_workers;
1162 for (i=0; i<server->max_workers; i++) {
1163 worker = &server->workers[i];
1165 if (worker->pid == -1) {
1166 empty_slot = MIN(empty_slot, i);
1167 continue;
1169 if (!worker->available) {
1170 continue;
1172 if (worker->num_clients < min_clients) {
1173 min_clients = worker->num_clients;
1174 min_worker = i;
1178 if (min_clients == 0) {
1179 return &server->workers[min_worker];
1182 if (empty_slot < SIZE_MAX) {
1183 int ret = rpc_host_exec_worker(server, empty_slot);
1184 if (ret != 0) {
1185 DBG_WARNING("Could not fork worker: %s\n",
1186 strerror(ret));
1188 return NULL;
1191 if (min_worker < server->max_workers) {
1192 return &server->workers[min_worker];
1195 return NULL;
1199 * Find an rpcd_* worker for an internal connection, possibly go beyond
1200 * server->max_workers
1202 static struct rpc_work_process *rpc_host_find_idle_worker(
1203 struct rpc_server *server)
1205 struct rpc_work_process *worker = NULL, *tmp = NULL;
1206 size_t i, num_workers = talloc_array_length(server->workers);
1207 size_t empty_slot = SIZE_MAX;
1208 int ret;
1210 for (i=server->max_workers; i<num_workers; i++) {
1211 worker = &server->workers[i];
1213 if (worker->pid == -1) {
1214 empty_slot = MIN(empty_slot, i);
1215 continue;
1217 if (!worker->available) {
1218 continue;
1220 if (worker->num_clients == 0) {
1221 return &server->workers[i];
1225 if (empty_slot < SIZE_MAX) {
1226 ret = rpc_host_exec_worker(server, empty_slot);
1227 if (ret != 0) {
1228 DBG_WARNING("Could not fork worker: %s\n",
1229 strerror(ret));
1231 return NULL;
1235 * All workers are busy. We need to expand the number of
1236 * workers because we were asked for an idle worker.
1238 if (num_workers+1 < num_workers) {
1239 return NULL;
1241 tmp = talloc_realloc(
1242 server,
1243 server->workers,
1244 struct rpc_work_process,
1245 num_workers+1);
1246 if (tmp == NULL) {
1247 return NULL;
1249 server->workers = tmp;
1251 server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };
1253 ret = rpc_host_exec_worker(server, num_workers);
1254 if (ret != 0) {
1255 DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
1258 return NULL;
1262 * Find an rpcd_* process to talk to. Start a new one if necessary.
1264 static void rpc_host_distribute_clients(struct rpc_server *server)
1266 struct rpc_work_process *worker = NULL;
1267 struct rpc_host_pending_client *pending_client = NULL;
1268 uint32_t assoc_group_id;
1269 DATA_BLOB blob;
1270 struct iovec iov;
1271 enum ndr_err_code ndr_err;
1272 NTSTATUS status;
1274 again:
1275 pending_client = server->pending_clients;
1276 if (pending_client == NULL) {
1277 DBG_DEBUG("No pending clients\n");
1278 return;
1281 assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
1283 if (assoc_group_id != 0) {
1284 size_t num_workers = talloc_array_length(server->workers);
1285 uint8_t worker_index = assoc_group_id >> 24;
1287 if (worker_index >= num_workers) {
1288 DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1289 assoc_group_id);
1290 goto done;
1292 worker = &server->workers[worker_index];
1294 if ((worker->pid == -1) || !worker->available) {
1295 DBG_DEBUG("Requested worker index %"PRIu8": "
1296 "pid=%d, available=%d",
1297 worker_index,
1298 (int)worker->pid,
1299 (int)worker->available);
1301 * Pick a random one for a proper bind nack
1303 worker = rpc_host_find_worker(server);
1305 } else {
1306 struct auth_session_info_transport *session_info =
1307 pending_client->client->npa_info7->session_info;
1308 uint32_t flags = 0;
1309 bool found;
1311 found = security_token_find_npa_flags(
1312 session_info->session_info->security_token,
1313 &flags);
1315 /* fresh assoc group requested */
1316 if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1317 worker = rpc_host_find_idle_worker(server);
1318 } else {
1319 worker = rpc_host_find_worker(server);
1323 if (worker == NULL) {
1324 DBG_DEBUG("No worker found\n");
1325 return;
1328 DLIST_REMOVE(server->pending_clients, pending_client);
1330 ndr_err = ndr_push_struct_blob(
1331 &blob,
1332 pending_client,
1333 pending_client->client,
1334 (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1335 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1336 DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1337 ndr_errstr(ndr_err));
1338 goto done;
1341 DBG_INFO("Sending new client %s to %d with %"PRIu32" clients\n",
1342 server->rpc_server_exe,
1343 worker->pid,
1344 worker->num_clients);
1346 iov = (struct iovec) {
1347 .iov_base = blob.data, .iov_len = blob.length,
1350 status = messaging_send_iov(
1351 server->host->msg_ctx,
1352 pid_to_procid(worker->pid),
1353 MSG_RPC_HOST_NEW_CLIENT,
1354 &iov,
1356 &pending_client->sock,
1358 if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1359 DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1360 worker->pid);
1361 DLIST_ADD(server->pending_clients, pending_client);
1362 worker->available = false;
1363 goto again;
1365 if (!NT_STATUS_IS_OK(status)) {
1366 DBG_DEBUG("messaging_send_iov failed: %s\n",
1367 nt_errstr(status));
1368 goto done;
1370 worker->num_clients += 1;
1371 TALLOC_FREE(worker->exit_timer);
1373 TALLOC_FREE(server->host->np_helper_shutdown);
1375 done:
1376 TALLOC_FREE(pending_client);
/*
 * talloc destructor for a pending client: cancel the hangup waiter,
 * close the client socket if we still own it, and unlink the record
 * from the server's pending list.
 */
static int rpc_host_pending_client_destructor(
	struct rpc_host_pending_client *p)
{
	TALLOC_FREE(p->hangup_wait);
	if (p->sock != -1) {
		close(p->sock);
		p->sock = -1;
	}
	DLIST_REMOVE(p->server->pending_clients, p);
	return 0;
}
/*
 * Exception condition handler before rpcd_* worker
 * is handling the socket. Either the client exited or
 * sent unexpected data after the initial bind.
 *
 * Either way the pending client is dropped; its destructor closes
 * the socket and unlinks it from the pending list.
 */
static void rpc_host_client_exited(struct tevent_req *subreq)
{
	struct rpc_host_pending_client *pending = tevent_req_callback_data(
		subreq, struct rpc_host_pending_client);
	bool ok;
	int err;

	ok = wait_for_read_recv(subreq, &err);

	TALLOC_FREE(subreq);
	pending->hangup_wait = NULL;

	if (ok) {
		/* data after bind is a protocol violation at this stage */
		DBG_DEBUG("client on sock %d sent data\n", pending->sock);
	} else {
		DBG_DEBUG("client exited with %s\n", strerror(err));
	}
	TALLOC_FREE(pending);
}
/* One interface -> set of endpoint binding strings, for epmdb.tdb */
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	char *bindings;		/* strv list: iface name + binding strings */
};
/*
 * Add one endpoint's binding string to the per-interface binding maps,
 * growing *pmaps as new interfaces are seen. *pmaps is updated even on
 * failure so already-built entries stay owned by the caller.
 */
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	/* ntsvcs mgmt interface syntax id, skipped below */
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		/* linear scan for an existing map entry */
		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			/* interface not seen yet, append a new entry */
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				/* steal the name as first strv element */
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		/* avoid duplicate binding strings per interface */
		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}
1511 static bool rpc_iface_binding_map_add_endpoints(
1512 TALLOC_CTX *mem_ctx,
1513 struct rpc_host_endpoint **endpoints,
1514 struct rpc_host_iface_name *iface_names,
1515 struct rpc_iface_binding_map **pbinding_maps)
1517 size_t i, num_endpoints = talloc_array_length(endpoints);
1519 for (i=0; i<num_endpoints; i++) {
1520 bool ok = rpc_iface_binding_map_add_endpoint(
1521 mem_ctx, endpoints[i], iface_names, pbinding_maps);
1522 if (!ok) {
1523 return false;
1526 return true;
/*
 * Write the interface -> binding-string maps for all endpoints into
 * epmdb.tdb. Key: printable syntax id; value: the raw strv buffer.
 */
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool ret = false;
	bool ok;

	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		/* store the strv blob verbatim, including NUL separators */
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int rc;

		rc = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	ret = true;
fail:
	TALLOC_FREE(maps);
	return ret;
}
/* Async state for rpc_server_setup_send/recv */
struct rpc_server_setup_state {
	struct rpc_server *server;
};

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	*server = (struct rpc_server) {
		.host = host,
		/* real index assigned in rpc_host_server_setup_done() */
		.server_index = UINT32_MAX,
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	/* ask the rpcd_* binary which endpoints it serves */
	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}
/*
 * The rpcd_* binary reported its endpoints: create and start listening
 * on the binding sockets, initialize the worker slots, and publish the
 * endpoints to epmdb.tdb.
 */
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			/* transport not available here, skip endpoint */
			continue;
		}
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	/* failure to publish is logged but deliberately not fatal */
	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
/*
 * Receive the result of rpc_server_setup_send(); on success ownership
 * of the rpc_server moves to mem_ctx.
 */
static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	*server = talloc_move(mem_ctx, &state->server);
	tevent_req_received(req);
	return NT_STATUS_OK;
}
/*
 * rpcd_* died. Called from SIGCHLD handler.
 *
 * Marks the matching worker slot free. While scanning it also tracks
 * whether any worker is still alive; an np_helper with no remaining
 * workers exits, it will be restarted on demand.
 */
static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
{
	size_t i, num_servers = talloc_array_length(host->servers);
	struct rpc_work_process *worker = NULL;
	bool found_pid = false;
	bool have_active_worker = false;

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = host->servers[i];
		size_t j, num_workers;

		if (server == NULL) {
			/* SIGCHLD for --list-interfaces run */
			continue;
		}

		num_workers = talloc_array_length(server->workers);

		for (j=0; j<num_workers; j++) {
			worker = &server->workers[j];
			if (worker->pid == pid) {
				found_pid = true;
				worker->pid = -1;
				worker->available = false;
			}

			if (worker->pid != -1) {
				have_active_worker = true;
			}
		}
	}

	if (!found_pid) {
		DBG_WARNING("No worker with PID %d\n", (int)pid);
		return;
	}

	if (!have_active_worker && host->np_helper) {
		/*
		 * We have nothing left to do as an np_helper.
		 * Terminate ourselves (samba-dcerpcd). We will
		 * be restarted on demand anyway.
		 */
		DBG_DEBUG("Exiting idle np helper\n");
		exit(0);
	}
}
/*
 * rpcd_* died.
 *
 * SIGCHLD handler: reap every exited child (WNOHANG loop, several
 * children may be pending per signal) and update worker bookkeeping.
 */
static void rpc_host_sigchld(
	struct tevent_context *ev,
	struct tevent_signal *se,
	int signum,
	int count,
	void *siginfo,
	void *private_data)
{
	struct rpc_host *state = talloc_get_type_abort(
		private_data, struct rpc_host);
	pid_t pid;
	int wstatus;

	while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
		DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
		rpc_worker_exited(state, pid);
	}
}
/*
 * Idle timer fired for a rcpd_* worker. Ask it to terminate.
 */
static void rpc_host_exit_worker(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct rpc_server *server = talloc_get_type_abort(
		private_data, struct rpc_server);
	size_t i, num_workers = talloc_array_length(server->workers);

	/*
	 * Scan for the right worker. We don't have too many of those,
	 * and maintaining an index would be more data structure effort.
	 */

	for (i=0; i<num_workers; i++) {
		struct rpc_work_process *w = &server->workers[i];
		NTSTATUS status;

		if (w->exit_timer != te) {
			continue;
		}
		w->exit_timer = NULL;

		/* the timer is only armed when the worker has no clients */
		SMB_ASSERT(w->num_clients == 0);

		status = messaging_send(
			server->host->msg_ctx,
			pid_to_procid(w->pid),
			MSG_SHUTDOWN,
			NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
				  nt_errstr(status));
		}

		/* no new clients for a worker we asked to exit */
		w->available = false;
		break;
	}
}
/*
 * rcpd_* worker replied with its status.
 *
 * Validates the message (server/worker indexes, sending pid), records
 * availability and client count, (re)arms or cancels the idle-exit
 * timer, and tries to hand out pending clients.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	/* guard against a stale message from a recycled slot */
	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_clients = status_message.num_clients;

	if (worker->num_clients != 0) {
		TALLOC_FREE(worker->exit_timer);
	} else {
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	rpc_host_distribute_clients(server);
}
/*
 * samba-dcerpcd has been asked to shutdown.
 * Mark the initial tevent_req as done so we
 * exit the event loop.
 */
static void rpc_host_msg_shutdown(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	tevent_req_done(req);
}
/*
 * scandir() filter: accept only directory entries whose name starts
 * with "rpcd_", i.e. the RPC service binaries we are willing to spawn.
 */
static int rpcd_filter(const struct dirent *d)
{
	if (fnmatch("rpcd_*", d->d_name, 0) == 0) {
		return 1;
	}
	return 0;
}
1940 * Scan the given libexecdir for rpcd_* services
1941 * and return them as a strv list.
1943 static int rpc_host_list_servers(
1944 const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
1946 char *servers = NULL;
1947 struct dirent **namelist = NULL;
1948 int i, num_servers;
1949 int ret = ENOMEM;
1951 num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
1952 if (num_servers == -1) {
1953 DBG_DEBUG("scandir failed: %s\n", strerror(errno));
1954 return errno;
1957 for (i=0; i<num_servers; i++) {
1958 char *exe = talloc_asprintf(
1959 mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
1960 if (exe == NULL) {
1961 goto fail;
1964 ret = strv_add(mem_ctx, &servers, exe);
1965 TALLOC_FREE(exe);
1966 if (ret != 0) {
1967 goto fail;
1970 fail:
1971 for (i=0; i<num_servers; i++) {
1972 SAFE_FREE(namelist[i]);
1974 SAFE_FREE(namelist);
1976 if (ret != 0) {
1977 TALLOC_FREE(servers);
1978 return ret;
1980 *pservers = servers;
1981 return 0;
/* Async state for accepting clients on one endpoint's listen fds */
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;
	struct rpc_host_endpoint *endpoint;
};

static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
/*
 * Asynchronously wait for a DCERPC connection from a client.
 *
 * Kicks off one accept_send() per listen fd of the endpoint; the
 * request only ever finishes with an error, accepts are re-armed
 * in the callback.
 */
static struct tevent_req *rpc_host_endpoint_accept_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host_endpoint *endpoint)
{
	struct tevent_req *req = NULL;
	struct rpc_host_endpoint_accept_state *state = NULL;
	size_t i;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_endpoint_accept_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->endpoint = endpoint;

	for (i=0; i<endpoint->num_fds; i++) {
		struct tevent_req *subreq = NULL;

		subreq = accept_send(state, ev, endpoint->fds[i]);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_endpoint_accept_accepted, req);
	}

	return req;
}
/*
 * Accept a DCERPC connection from a client.
 *
 * Re-arms the accept on the listen socket, then starts reading the
 * client's initial bind packet on the freshly accepted socket.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	/* keep listening for the next client */
	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		close(sock);
		sock = -1;
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	/* bind_read takes over ownership of "sock" via &sock */
	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}
/*
 * Client sent us a DCERPC bind packet.
 *
 * Wraps socket, bind packet and client info into a pending-client
 * record, installs a hangup watcher on the socket and asks for the
 * client to be handed to a worker.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock=-1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	/* destructor closes pending->sock and unlinks from the list */
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	sock = -1;

	/* drop the client if it hangs up before a worker takes it */
	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
2135 static int rpc_host_endpoint_accept_recv(
2136 struct tevent_req *req, struct rpc_host_endpoint **ep)
2138 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2139 req, struct rpc_host_endpoint_accept_state);
2141 *ep = state->endpoint;
2143 return tevent_req_simple_recv_unix(req);
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	bool is_ready;			/* all servers set up, accepting */
	const char *daemon_ready_progname;
	struct tevent_immediate *ready_signal_immediate;
	int *ready_signal_fds;		/* fds to write one byte to when ready */

	size_t num_servers;
	size_t num_prepared;		/* servers that finished setup */
};
/*
 * Tell whoever invoked samba-dcerpcd we're ready to
 * serve.
 *
 * Writes one byte to every registered ready fd and closes it; no-op
 * until setup has completed.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		do {
			nwritten = write(
				state->ready_signal_fds[i],
				(void *)&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	TALLOC_FREE(state->ready_signal_fds);
}
/*
 * Respond to a "are you ready" message.
 *
 * Takes ownership of the fd passed in the message, queues it on
 * ready_signal_fds and schedules the readiness report. Always returns
 * false: the message is consumed here, not delivered to a reader.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	if (num_fds + 1 < num_fds) {
		/* size_t overflow guard */
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	/* steal the fd so the messaging layer won't close it */
	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->fds[0] = -1;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}
/*
 * Respond to a "what is your status" message.
 *
 * Writes a human-readable worker overview to the fd passed in the
 * message. Always returns false, the message is fully handled here.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;
	int fd;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/* dup: the messaging layer still owns and closes rec->fds[0] */
	fd = dup(rec->fds[0]);
	if (fd == -1) {
		DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
			  rec->fds[0],
			  strerror(errno));
		return false;
	}

	f = fdopen(fd, "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		close(fd);
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_clients=%"PRIu32"\n",
				j,
				(int)w->pid,
				w->num_clients);
		}
	}

	fclose(f);

	return false;
}
static void rpc_host_server_setup_done(struct tevent_req *subreq);
static void rpc_host_endpoint_failed(struct tevent_req *subreq);

/*
 * Async startup for samba-dcerpcd.
 *
 * Sets up the rpc_host: ready-fd plumbing, SIGCHLD handling,
 * messaging handlers, epmdb.tdb, and one rpc_server_setup_send() per
 * rpcd_* binary in the strv "servers". The request completes when we
 * are asked to shut down (or the np_helper idle timer fires).
 */
static struct tevent_req *rpc_host_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct messaging_context *msg_ctx,
	char *servers,
	int ready_signal_fd,
	const char *daemon_ready_progname,
	bool is_np_helper)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_state *state = NULL;
	struct rpc_host *host = NULL;
	struct tevent_signal *se = NULL;
	char *epmdb_path = NULL;
	char *exe = NULL;
	size_t i, num_servers = strv_count(servers);
	NTSTATUS status;
	int ret;

	/*
	 * NOTE(review): this passes "req" (NULL at this point), not
	 * "mem_ctx", so the request becomes a talloc root and the
	 * mem_ctx parameter is effectively unused -- confirm whether
	 * this is intentional.
	 */
	req = tevent_req_create(req, &state, struct rpc_host_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->daemon_ready_progname = daemon_ready_progname;

	state->ready_signal_immediate = tevent_create_immediate(state);
	if (tevent_req_nomem(state->ready_signal_immediate, req)) {
		return tevent_req_post(req, ev);
	}

	if (ready_signal_fd != -1) {
		state->ready_signal_fds = talloc_array(state, int, 1);
		if (tevent_req_nomem(state->ready_signal_fds, req)) {
			return tevent_req_post(req, ev);
		}
		state->ready_signal_fds[0] = ready_signal_fd;
	}

	state->host = talloc_zero(state, struct rpc_host);
	if (tevent_req_nomem(state->host, req)) {
		return tevent_req_post(req, ev);
	}
	host = state->host;

	host->msg_ctx = msg_ctx;
	host->np_helper = is_np_helper;

	ret = pipe(host->worker_stdin);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}

	host->servers = talloc_zero_array(
		host, struct rpc_server *, num_servers);
	if (tevent_req_nomem(host->servers, req)) {
		return tevent_req_post(req, ev);
	}

	se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
	if (tevent_req_nomem(se, req)) {
		return tevent_req_post(req, ev);
	}
	BlockSignals(false, SIGCHLD);

	status = messaging_register(
		msg_ctx,
		host,
		MSG_RPC_WORKER_STATUS,
		rpc_host_child_status_recv);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	status = messaging_register(
		msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_dump_status_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	epmdb_path = lock_path(state, "epmdb.tdb");
	if (tevent_req_nomem(epmdb_path, req)) {
		return tevent_req_post(req, ev);
	}

	host->epmdb = tdb_wrap_open(
		host,
		epmdb_path,
		0,
		TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
		O_RDWR|O_CREAT,
		0644);
	if (host->epmdb == NULL) {
		DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
			  epmdb_path,
			  strerror(errno));
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}
	TALLOC_FREE(epmdb_path);

	for (exe = strv_next(servers, exe), i = 0;
	     exe != NULL;
	     exe = strv_next(servers, exe), i++) {

		DBG_DEBUG("server_setup for %s index %zu\n", exe, i);

		subreq = rpc_server_setup_send(
			state,
			ev,
			host,
			exe);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_server_setup_done, req);
	}

	return req;
}
/*
 * Timer function called after we were initialized but no one
 * connected. Shutdown.
 */
static void rpc_host_shutdown(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	DBG_DEBUG("Nobody connected -- shutting down\n");
	tevent_req_done(req);
}
/*
 * One rpc_server finished its async setup. Once all servers are
 * prepared, start accepting on every endpoint, report readiness and,
 * in np_helper mode, arm the idle auto-shutdown timer.
 */
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		/* shrink the expected server count by one and go on */
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		/* wait for the remaining servers */
		return;
	}

	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
2548 * Log an accept failure on an endpoint.
/*
 * Completion callback for rpc_host_endpoint_accept_send(): accepting
 * connections on an endpoint failed. Logs the endpoint's binding
 * string together with the errno-style result.
 */
2550 static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2552 struct tevent_req *req = tevent_req_callback_data(
2553 subreq, struct tevent_req);
2554 struct rpc_host_state *state = tevent_req_data(
2555 req, struct rpc_host_state);
2556 struct rpc_host_endpoint *endpoint = NULL;
2557 char *binding_string = NULL;
2558 int ret;
2560 ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2561 TALLOC_FREE(subreq);
/* Render the binding for the log message, then free it again */
2563 binding_string = dcerpc_binding_string(state, endpoint->binding);
2564 DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2565 binding_string,
2566 strerror(ret));
2567 TALLOC_FREE(binding_string);
2570 static NTSTATUS rpc_host_recv(struct tevent_req *req)
2572 return tevent_req_simple_recv_ntstatus(req);
/*
 * Create the samba-dcerpcd pidfile in lp_pid_directory().
 *
 * Returns 0 on success; the pidfile fd is intentionally leaked so the
 * pidfile lock is held for the lifetime of the process. If another
 * instance already holds the pidfile, forward ready_signal_fd (if any)
 * to it via MSG_DAEMON_READY_FD and return EAGAIN. Other failures are
 * returned as errno values from pidfile_path_create().
 */
2575 static int rpc_host_pidfile_create(
2576 struct messaging_context *msg_ctx,
2577 const char *progname,
2578 int ready_signal_fd)
2580 const char *piddir = lp_pid_directory();
/* "/", ".pid" and the trailing NUL account for the +6 */
2581 size_t len = strlen(piddir) + strlen(progname) + 6;
2582 char pidFile[len];
2583 pid_t existing_pid;
2584 int fd, ret;
2586 snprintf(pidFile,
2587 sizeof(pidFile),
2588 "%s/%s.pid",
2589 piddir, progname);
2591 ret = pidfile_path_create(pidFile, &fd, &existing_pid);
2592 if (ret == 0) {
2593 /* leak fd */
2594 return 0;
2597 if (ret != EAGAIN) {
2598 DBG_DEBUG("pidfile_path_create() failed: %s\n",
2599 strerror(ret));
2600 return ret;
/* EAGAIN: another samba-dcerpcd already holds the pidfile */
2603 DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);
2605 if (ready_signal_fd != -1) {
/* Hand our readiness-signal fd over to the running daemon */
2606 NTSTATUS status = messaging_send_iov(
2607 msg_ctx,
2608 pid_to_procid(existing_pid),
2609 MSG_DAEMON_READY_FD,
2610 NULL,
2612 &ready_signal_fd,
2614 if (!NT_STATUS_IS_OK(status)) {
2615 DBG_DEBUG("Could not send ready_signal_fd: %s\n",
2616 nt_errstr(status));
2620 return EAGAIN;
2623 static void samba_dcerpcd_stdin_handler(
2624 struct tevent_context *ev,
2625 struct tevent_fd *fde,
2626 uint16_t flags,
2627 void *private_data)
2629 struct tevent_req *req = talloc_get_type_abort(
2630 private_data, struct tevent_req);
2631 char c;
2633 if (read(0, &c, 1) != 1) {
2634 /* we have reached EOF on stdin, which means the
2635 parent has exited. Shutdown the server */
2636 tevent_req_done(req);
2641 * samba-dcerpcd service startup!
/*
 * samba-dcerpcd entry point.
 *
 * Parses the command line, sets up logging and (optionally)
 * daemonizes, creates the pidfile, then drives the rpc_host tevent
 * request in the event loop until shutdown. Exits non-zero on any
 * setup failure.
 */
2643 int main(int argc, const char *argv[])
2645 const struct loadparm_substitution *lp_sub =
2646 loadparm_s3_global_substitution();
2647 const char *progname = getprogname();
2648 TALLOC_CTX *frame = NULL;
2649 struct tevent_context *ev_ctx = NULL;
2650 struct messaging_context *msg_ctx = NULL;
2651 struct tevent_req *req = NULL;
2652 char *servers = NULL;
2653 const char *arg = NULL;
2654 size_t num_servers;
2655 poptContext pc;
2656 int ret, err;
2657 NTSTATUS status;
2658 bool log_stdout;
2659 bool ok;
2661 int libexec_rpcds = 0;
2662 int np_helper = 0;
2663 int ready_signal_fd = -1;
2665 struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2666 struct poptOption long_options[] = {
2667 POPT_AUTOHELP
2669 .longName = "libexec-rpcds",
2670 .argInfo = POPT_ARG_NONE,
2671 .arg = &libexec_rpcds,
2672 .descrip = "Use all rpcds in libexec",
2675 .longName = "ready-signal-fd",
2676 .argInfo = POPT_ARG_INT,
2677 .arg = &ready_signal_fd,
2678 .descrip = "fd to close when initialized",
2681 .longName = "np-helper",
2682 .argInfo = POPT_ARG_NONE,
2683 .arg = &np_helper,
2684 .descrip = "Internal named pipe server",
2686 POPT_COMMON_SAMBA
2687 POPT_COMMON_DAEMON
2688 POPT_COMMON_VERSION
2689 POPT_TABLEEND
/* Close all inherited fds except stdio and those named in fd_params */
2693 const char *fd_params[] = { "ready-signal-fd", };
2695 closefrom_except_fd_params(
2696 3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2699 talloc_enable_null_tracking();
2700 frame = talloc_stackframe();
2701 umask(0);
2702 sec_init();
2703 smb_init_locale();
2705 ok = samba_cmdline_init(frame,
2706 SAMBA_CMDLINE_CONFIG_SERVER,
2707 true /* require_smbconf */);
2708 if (!ok) {
2709 DBG_ERR("Failed to init cmdline parser!\n");
2710 TALLOC_FREE(frame);
2711 exit(ENOMEM);
2714 pc = samba_popt_get_context(getprogname(),
2715 argc,
2716 argv,
2717 long_options,
2719 if (pc == NULL) {
2720 DBG_ERR("Failed to setup popt context!\n");
2721 TALLOC_FREE(frame);
2722 exit(1);
2725 poptSetOtherOptionHelp(
2726 pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2728 ret = poptGetNextOpt(pc);
/* poptGetNextOpt() returns -1 when all options were consumed */
2730 if (ret != -1) {
2731 if (ret >= 0) {
2732 fprintf(stderr,
2733 "\nGot unexpected option %d\n",
2734 ret);
2735 } else if (ret == POPT_ERROR_BADOPT) {
2736 fprintf(stderr,
2737 "\nInvalid option %s: %s\n\n",
2738 poptBadOption(pc, 0),
2739 poptStrerror(ret));
2740 } else {
2741 fprintf(stderr,
2742 "\npoptGetNextOpt returned %s\n",
2743 poptStrerror(ret));
2746 poptFreeContext(pc);
2747 TALLOC_FREE(frame);
2748 exit(1);
/* Remaining positional arguments are the rpcd services to run */
2751 while ((arg = poptGetArg(pc)) != NULL) {
2752 ret = strv_add(frame, &servers, arg);
2753 if (ret != 0) {
2754 DBG_ERR("strv_add() failed\n");
2755 poptFreeContext(pc);
2756 TALLOC_FREE(frame);
2757 exit(1);
2761 log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2762 if (log_stdout) {
2763 setup_logging(progname, DEBUG_STDOUT);
2764 } else {
2765 setup_logging(progname, DEBUG_FILE);
2769 * If "rpc start on demand helpers = true" in smb.conf we must
2770 * not start as standalone, only on demand from
2771 * local_np_connect() functions. Log an error message telling
2772 * the admin how to fix and then exit.
2774 if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2775 DBG_ERR("Cannot start in standalone mode if smb.conf "
2776 "[global] setting "
2777 "\"rpc start on demand helpers = true\" - "
2778 "exiting\n");
2779 TALLOC_FREE(frame);
2780 exit(1);
/* --libexec-rpcds: scan libexec for rpcd binaries to serve */
2783 if (libexec_rpcds != 0) {
2784 ret = rpc_host_list_servers(
2785 dyn_SAMBA_LIBEXECDIR, frame, &servers);
2786 if (ret != 0) {
2787 DBG_ERR("Could not list libexec: %s\n",
2788 strerror(ret));
2789 poptFreeContext(pc);
2790 TALLOC_FREE(frame);
2791 exit(1);
/* No services at all means the invocation is unusable */
2795 num_servers = strv_count(servers);
2796 if (num_servers == 0) {
2797 poptPrintUsage(pc, stderr, 0);
2798 poptFreeContext(pc);
2799 TALLOC_FREE(frame);
2800 exit(1);
2803 poptFreeContext(pc);
2805 cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2807 if (log_stdout && cmdline_daemon_cfg->fork) {
2808 DBG_ERR("Can't log to stdout unless in foreground\n");
2809 TALLOC_FREE(frame);
2810 exit(1);
2813 msg_ctx = global_messaging_context();
2814 if (msg_ctx == NULL) {
2815 DBG_ERR("messaging_init() failed\n");
2816 TALLOC_FREE(frame);
2817 exit(1);
2819 ev_ctx = messaging_tevent_context(msg_ctx);
2821 if (cmdline_daemon_cfg->fork) {
2822 become_daemon(
2823 true,
2824 cmdline_daemon_cfg->no_process_group,
2825 log_stdout);
/* become_daemon() forked, re-establish messaging/event state */
2827 status = reinit_after_fork(msg_ctx, ev_ctx, false);
2828 if (!NT_STATUS_IS_OK(status)) {
2829 exit_daemon("reinit_after_fork() failed",
2830 map_errno_from_nt_status(status));
2832 } else {
2833 DBG_DEBUG("Calling daemon_status\n");
2834 daemon_status(progname, "Starting process ... ");
2837 BlockSignals(true, SIGPIPE);
2839 dump_core_setup(progname, lp_logfile(frame, lp_sub));
/* Open the configured log file before the startup banner is printed */
2841 reopen_logs();
2843 DEBUG(0, ("%s version %s started.\n",
2844 progname,
2845 samba_version_string()));
2846 DEBUGADD(0,("%s\n", COPYRIGHT_STARTUP_MESSAGE));
/*
 * NOTE(review): guest session info is initialized with winbind
 * temporarily switched off -- presumably to avoid calling back into
 * our own RPC services; confirm against init_guest_session_info().
 */
2848 (void)winbind_off();
2849 ok = init_guest_session_info(frame);
2850 (void)winbind_on();
2851 if (!ok) {
2852 DBG_ERR("init_guest_session_info failed\n");
2853 global_messaging_context_free();
2854 TALLOC_FREE(frame);
2855 exit(1);
2858 ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
2859 if (ret != 0) {
2860 DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
2861 strerror(ret));
2862 global_messaging_context_free();
2863 TALLOC_FREE(frame);
2864 exit(1);
/* Kick off the long-running rpc_host request doing all the work */
2867 req = rpc_host_send(
2868 ev_ctx,
2869 ev_ctx,
2870 msg_ctx,
2871 servers,
2872 ready_signal_fd,
2873 cmdline_daemon_cfg->fork ? NULL : progname,
2874 np_helper != 0);
2875 if (req == NULL) {
2876 DBG_ERR("rpc_host_send failed\n");
2877 global_messaging_context_free();
2878 TALLOC_FREE(frame);
2879 exit(1);
/*
 * In the foreground with a pipe/socket as stdin, watch fd 0 so we
 * shut down when the parent process exits (see
 * samba_dcerpcd_stdin_handler).
 */
2882 if (!cmdline_daemon_cfg->fork) {
2883 struct stat st;
2884 if (fstat(0, &st) != 0) {
2885 DBG_DEBUG("fstat(0) failed: %s\n",
2886 strerror(errno));
2887 global_messaging_context_free();
2888 TALLOC_FREE(frame);
2889 exit(1);
2891 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
2892 tevent_add_fd(
2893 ev_ctx,
2894 ev_ctx,
2896 TEVENT_FD_READ,
2897 samba_dcerpcd_stdin_handler,
2898 req);
/* Run the event loop until the rpc_host request completes */
2902 ok = tevent_req_poll_unix(req, ev_ctx, &err);
2903 if (!ok) {
2904 DBG_ERR("tevent_req_poll_unix failed: %s\n",
2905 strerror(err));
2906 global_messaging_context_free();
2907 TALLOC_FREE(frame);
2908 exit(1);
2911 status = rpc_host_recv(req);
2912 if (!NT_STATUS_IS_OK(status)) {
2913 DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
2914 global_messaging_context_free();
2915 TALLOC_FREE(frame);
2916 exit(1);
2919 TALLOC_FREE(frame);
2921 return 0;