/*
 * RPC host
 *
 * Implements samba-dcerpcd service.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This binary has two usage modes:
 *
 * In the normal case when invoked from smbd or winbindd it is given a
 * directory to scan via --libexec-rpcds and will invoke on demand any
 * binaries it finds there starting with rpcd_ when a named pipe
 * connection is requested.
 *
 * In the second mode it can be started explicitly from system startup
 * scripts.
 *
 * When Samba is set up as an Active Directory Domain Controller the
 * normal samba binary overrides and provides DCERPC services, whilst
 * allowing samba-dcerpcd to provide the services that smbd used to
 * provide in that set-up, such as SRVSVC.
 *
 * The second mode can also be useful outside of the Samba framework,
 * for example with the Linux kernel SMB2 server ksmbd. In this mode
 * it behaves like inetd and listens on sockets on behalf of RPC
 * server implementations.
 */
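
/*
 * Illustrative invocations for the two modes (hypothetical paths and
 * abridged flags; see the samba-dcerpcd manpage and the option table in
 * main() for the authoritative spelling):
 *
 *   # on-demand named-pipe helper, as spawned by smbd/winbindd:
 *   samba-dcerpcd --libexec-rpcds --np-helper
 *
 *   # standalone, inetd-like mode, e.g. next to ksmbd, explicit rpcds:
 *   samba-dcerpcd /usr/lib/samba/rpcd_lsad /usr/lib/samba/rpcd_winreg
 */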

#include "replace.h"
#include <fnmatch.h>
#include "lib/cmdline/cmdline.h"
#include "lib/cmdline/closefrom_except.h"
#include "source3/include/includes.h"
#include "source3/include/auth.h"
#include "rpc_sock_helper.h"
#include "messages.h"
#include "lib/util_file.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/util/smb_strtox.h"
#include "lib/util/debug.h"
#include "lib/util/server_id.h"
#include "lib/util/util_tdb.h"
#include "lib/util/util_file.h"
#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/async_req/async_sock.h"
#include "librpc/rpc/dcerpc_util.h"
#include "lib/tsocket/tsocket.h"
#include "libcli/named_pipe_auth/npa_tstream.h"
#include "librpc/gen_ndr/ndr_rpc_host.h"
#include "source3/param/loadparm.h"
#include "source3/lib/global_contexts.h"
#include "lib/util/strv.h"
#include "lib/util/pidfile.h"
#include "source3/rpc_client/cli_pipe.h"
#include "librpc/gen_ndr/ndr_epmapper.h"
#include "librpc/gen_ndr/ndr_epmapper_c.h"
#include "nsswitch/winbind_client.h"
#include "libcli/security/dom_sid.h"
#include "libcli/security/security_token.h"

extern bool override_logfile;

struct rpc_server;
struct rpc_work_process;

/*
 * samba-dcerpcd state to keep track of rpcd_* servers.
 */
struct rpc_host {
	struct messaging_context *msg_ctx;
	struct rpc_server **servers;
	struct tdb_wrap *epmdb;

	int worker_stdin[2];

	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};

/*
 * Map a RPC interface to a name. Used when filling the endpoint
 * mapper database
 */
struct rpc_host_iface_name {
	struct ndr_syntax_id iface;
	char *name;
};

/*
 * rpc_host representation for listening sockets. ncacn_ip_tcp might
 * listen on multiple explicit IPs, all with the same port.
 */
struct rpc_host_endpoint {
	struct rpc_server *server;
	struct dcerpc_binding *binding;
	struct ndr_syntax_id *interfaces;
	int *fds;
	size_t num_fds;
};

/*
 * Staging area until we sent the socket plus bind to the helper
 */
struct rpc_host_pending_client {
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};

/*
 * Representation of one worker process. For each rpcd_* executable
 * there will be more than one of these.
 */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_associations;
	uint32_t num_connections;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};

/*
 * State for a set of running instances of an rpcd_* server executable
 */
struct rpc_server {
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	const char *rpc_server_exe;

	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info8->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	struct rpc_host_pending_client *pending_clients;
};

struct rpc_server_get_endpoints_state {
	char **argl;
	char *ncalrpc_endpoint;
	enum dcerpc_transport_t only_transport;

	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	unsigned long num_workers;
	unsigned long idle_seconds;
};

static void rpc_server_get_endpoints_done(struct tevent_req *subreq);

/**
 * @brief Query interfaces from an rpcd helper
 *
 * Spawn a rpcd helper, ask it for the interfaces it serves via
 * --list-interfaces, parse the output
 *
 * @param[in] mem_ctx Memory context for the tevent_req
 * @param[in] ev Event context to run this on
 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
 * @param[in] only_transport Filter out anything but this
 * @return The tevent_req representing this process
 */
static struct tevent_req *rpc_server_get_endpoints_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	const char *rpc_server_exe,
	enum dcerpc_transport_t only_transport)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_get_endpoints_state *state = NULL;
	const char *progname = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_get_endpoints_state);
	if (req == NULL) {
		return NULL;
	}
	state->only_transport = only_transport;

	progname = strrchr(rpc_server_exe, '/');
	if (progname != NULL) {
		progname += 1;
	} else {
		progname = rpc_server_exe;
	}

	state->ncalrpc_endpoint = talloc_strdup(state, progname);
	if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
		return tevent_req_post(req, ev);
	}

	state->argl = talloc_array(state, char *, 4);
	if (tevent_req_nomem(state->argl, req)) {
		return tevent_req_post(req, ev);
	}

	state->argl = str_list_make_empty(state);
	str_list_add_printf(&state->argl, "%s", rpc_server_exe);
	str_list_add_printf(&state->argl, "--list-interfaces");
	str_list_add_printf(
		&state->argl, "--configfile=%s", get_dyn_CONFIGFILE());

	if (tevent_req_nomem(state->argl, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = file_ploadv_send(state, ev, state->argl, 65536);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
	return req;
}

/*
 * Parse a line of format
 *
 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *
 * and add it to the "piface_names" array.
 */
static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_iface_name **piface_names,
	const char *line)
{
	struct rpc_host_iface_name *iface_names = *piface_names;
	struct rpc_host_iface_name *tmp = NULL, *result = NULL;
	size_t i, num_ifaces = talloc_array_length(iface_names);
	struct ndr_syntax_id iface;
	char *name = NULL;
	bool ok;

	ok = ndr_syntax_id_from_string(line, &iface);
	if (!ok) {
		DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
			    line);
		return NULL;
	}

	name = strchr(line, ' ');
	if (name == NULL) {
		return NULL;
	}
	name += 1;

	for (i=0; i<num_ifaces; i++) {
		result = &iface_names[i];

		if (ndr_syntax_id_equal(&result->iface, &iface)) {
			return result;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return NULL;
	}

	name = talloc_strdup(mem_ctx, name);
	if (name == NULL) {
		return NULL;
	}

	tmp = talloc_realloc(
		mem_ctx,
		iface_names,
		struct rpc_host_iface_name,
		num_ifaces + 1);
	if (tmp == NULL) {
		TALLOC_FREE(name);
		return NULL;
	}
	iface_names = tmp;

	result = &iface_names[num_ifaces];

	*result = (struct rpc_host_iface_name) {
		.iface = iface,
		.name = talloc_move(iface_names, &name),
	};

	*piface_names = iface_names;

	return result;
}

static struct rpc_host_iface_name *rpc_host_iface_names_find(
	struct rpc_host_iface_name *iface_names,
	const struct ndr_syntax_id *iface)
{
	size_t i, num_iface_names = talloc_array_length(iface_names);

	for (i=0; i<num_iface_names; i++) {
		struct rpc_host_iface_name *iface_name = &iface_names[i];

		if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
			return iface_name;
		}
	}

	return NULL;
}

static bool dcerpc_binding_same_endpoint(
	const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
{
	enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
	enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
	const char *e1 = NULL, *e2 = NULL;
	int cmp;

	if (t1 != t2) {
		return false;
	}

	e1 = dcerpc_binding_get_string_option(b1, "endpoint");
	e2 = dcerpc_binding_get_string_option(b2, "endpoint");

	if ((e1 == NULL) && (e2 == NULL)) {
		return true;
	}
	if ((e1 == NULL) || (e2 == NULL)) {
		return false;
	}
	cmp = strcmp(e1, e2);
	return (cmp == 0);
}

/**
 * @brief Filter whether we want to serve an endpoint
 *
 * samba-dcerpcd might want to serve all endpoints a rpcd reported to
 * us via --list-interfaces.
 *
 * In member mode, we only serve named pipes. Indicated by NCACN_NP
 * passed in via "only_transport".
 *
 * @param[in] binding Which binding is in question?
 * @param[in] only_transport Exclusive transport to serve
 * @return Do we want to serve "binding" from samba-dcerpcd?
 */
static bool rpc_host_serve_endpoint(
	struct dcerpc_binding *binding,
	enum dcerpc_transport_t only_transport)
{
	enum dcerpc_transport_t transport =
		dcerpc_binding_get_transport(binding);

	if (only_transport == NCA_UNKNOWN) {
		/* no filter around */
		return true;
	}

	if (transport != only_transport) {
		/* filter out */
		return false;
	}

	return true;
}

static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	for (i=0; i<num_endpoints; i++) {

		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}

static bool ndr_interfaces_add_unique(
	TALLOC_CTX *mem_ctx,
	struct ndr_syntax_id **pifaces,
	const struct ndr_syntax_id *iface)
{
	struct ndr_syntax_id *ifaces = *pifaces;
	size_t i, num_ifaces = talloc_array_length(ifaces);

	for (i=0; i<num_ifaces; i++) {
		if (ndr_syntax_id_equal(iface, &ifaces[i])) {
			return true;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return false;
	}
	ifaces = talloc_realloc(
		mem_ctx,
		ifaces,
		struct ndr_syntax_id,
		num_ifaces + 1);
	if (ifaces == NULL) {
		return false;
	}
	ifaces[num_ifaces] = *iface;

	*pifaces = ifaces;
	return true;
}
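
/*
 * Illustrative --list-interfaces reply (hypothetical values), as consumed
 * by rpc_server_get_endpoints_done() below: the first line is the number
 * of workers, the second the idle timeout in seconds, followed by
 * unindented interface lines and space-indented endpoint binding strings:
 *
 *   4
 *   300
 *   338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *    ncacn_np:[\pipe\winreg]
 *    ncalrpc:
 */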

/*
 * Read the text reply from the rpcd_* process telling us what
 * endpoints it will serve when asked with --list-interfaces.
 */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}
	/*
	 * We need to limit the number of workers in order
	 * to put the worker index into a 16-bit space,
	 * in order to use a 16-bit association group space
	 * per worker.
	 */
	if (state->num_workers > 65536) {
		state->num_workers = 65536;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}

/**
 * @brief Receive output from --list-interfaces
 *
 * @param[in] req The async req that just finished
 * @param[in] mem_ctx Where to put the output on
 * @param[out] endpoints The endpoints to be listened on
 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
 * @return 0/errno
 */
static int rpc_server_get_endpoints_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint ***endpoints,
	struct rpc_host_iface_name **iface_names,
	size_t *num_workers,
	size_t *idle_seconds)
{
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*endpoints = talloc_move(mem_ctx, &state->endpoints);
	*iface_names = talloc_move(mem_ctx, &state->iface_names);
	*num_workers = state->num_workers;
	*idle_seconds = state->idle_seconds;
	tevent_req_received(req);
	return 0;
}

/*
 * For NCACN_NP we get the named pipe auth info from smbd, if a client
 * comes in via TCP or NCALRPC we need to invent it ourselves with
 * anonymous session info.
 */
static NTSTATUS rpc_host_generate_npa_info8_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info8 **pinfo8)
{
	struct named_pipe_auth_req_info8 *info8 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
	if (info8 == NULL) {
		goto fail;
	}
	info8->session_info =
		talloc_zero(info8, struct auth_session_info_transport);
	if (info8->session_info == NULL) {
		goto fail;
	}

	status = make_session_info_anonymous(
		info8->session_info,
		&info8->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	remote_client_name = talloc_strdup(info8, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info8->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info8);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info8, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info8->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {

			/*
			 * Indicate "root" to gensec
			 */

			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info8,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info8);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info8, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info8->remote_client_addr = remote_client_addr;
	info8->remote_client_name = remote_client_name;
	info8->local_server_addr = local_server_addr;
	info8->local_server_name = local_server_name;

	*pinfo8 = info8;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	TALLOC_FREE(info8);
	return status;
}

struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	int sock;
	struct tstream_context *plain;
	struct tstream_context *npa_stream;

	struct ncacn_packet *pkt;
	struct rpc_host_client *client;
};

static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);

/*
 * Wait for a bind packet from a client.
 */
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	state->sock = *psock;
	*psock = -1;

	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		close(sock_dup);
		return tevent_req_post(req, ev);
	}
	/* as server we want to fail early */
	tstream_bsd_fail_readv_first_error(state->plain, true);

	if (transport == NCACN_NP) {
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	status = rpc_host_generate_npa_info8_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info8);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}

static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);

	if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
		close(state->sock);
		state->sock = -1;
	}
}

static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info8 *info8 = NULL;
	int ret, err;

	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info8,
					       NULL, /* transport */
					       NULL, /* remote_client_addr */
					       NULL, /* remote_client_name */
					       NULL, /* local_server_addr */
					       NULL, /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	state->client->npa_info8 = talloc_move(state->client, &info8);

	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}

static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}

static int rpc_host_bind_read_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	int *sock,
	struct rpc_host_client **client,
	struct ncacn_packet **bind_pkt)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*sock = state->sock;
	state->sock = -1;

	*client = talloc_move(mem_ctx, &state->client);
	*bind_pkt = talloc_move(mem_ctx, &state->pkt);
	tevent_req_received(req);
	return 0;
}

/*
 * Start the given rpcd_* binary.
 */
static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
{
	struct rpc_work_process *worker = &server->workers[idx];
	char **argv = NULL;
	int ret = ENOMEM;

	argv = str_list_make_empty(server);
	str_list_add_printf(
		&argv, "%s", server->rpc_server_exe);
	str_list_add_printf(
		&argv, "--configfile=%s", get_dyn_CONFIGFILE());
	str_list_add_printf(
		&argv, "--worker-group=%"PRIu32, server->server_index);
	str_list_add_printf(
		&argv, "--worker-index=%zu", idx);
	str_list_add_printf(
		&argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
	if (!is_default_dyn_LOGFILEBASE()) {
		str_list_add_printf(
			&argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
	}
	if (argv == NULL) {
		ret = ENOMEM;
		goto fail;
	}

	worker->pid = fork();
	if (worker->pid == -1) {
		ret = errno;
		goto fail;
	}
	if (worker->pid == 0) {
		/* Child. */
		close(server->host->worker_stdin[1]);
		ret = dup2(server->host->worker_stdin[0], 0);
		if (ret != 0) {
			exit(1);
		}
		execv(argv[0], argv);
		_exit(1);
	}

	DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
		  server->rpc_server_exe,
		  idx,
		  (int)worker->pid);

	ret = 0;
fail:
	TALLOC_FREE(argv);
	return ret;
}
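
/*
 * Illustration (hypothetical values): for server_index 2 and idx 0 the
 * child from rpc_host_exec_worker() ends up running as
 *
 *   rpcd_winreg --configfile=/etc/samba/smb.conf --worker-group=2 \
 *       --worker-index=0 --debuglevel=1
 *
 * with stdin connected to the host's worker_stdin pipe, presumably so
 * workers can notice when samba-dcerpcd itself goes away.
 */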

/*
 * Find an rpcd_* worker for an external client, respect server->max_workers
 */
static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_work_process *perfect_worker = NULL;
	struct rpc_work_process *best_worker = NULL;
	size_t empty_slot = SIZE_MAX;
	size_t i;

	for (i=0; i<server->max_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_associations == 0) {
			/*
			 * We have an idle worker...
			 */
			perfect_worker = worker;
			break;
		}
		if (best_worker == NULL) {
			/*
			 * It's busy, but the best so far...
			 */
			best_worker = worker;
			continue;
		}
		if (worker->num_associations < best_worker->num_associations) {
			/*
			 * It's also busy, but has fewer association groups
			 * (logical clients)
			 */
			best_worker = worker;
			continue;
		}
		if (worker->num_associations > best_worker->num_associations) {
			/*
			 * It's not better
			 */
			continue;
		}
		/*
		 * Ok, with the same number of association groups
		 * we pick the one with the lowest number of connections
		 */
		if (worker->num_connections < best_worker->num_connections) {
			best_worker = worker;
			continue;
		}
	}

	if (perfect_worker != NULL) {
		return perfect_worker;
	}

	if (empty_slot < SIZE_MAX) {
		int ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	if (best_worker != NULL) {
		return best_worker;
	}

	return NULL;
}

/*
 * Find an rpcd_* worker for an internal connection, possibly go beyond
 * server->max_workers
 */
static struct rpc_work_process *rpc_host_find_idle_worker(
	struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL, *tmp = NULL;
	size_t i, num_workers = talloc_array_length(server->workers);
	size_t empty_slot = SIZE_MAX;
	int ret;

	for (i=server->max_workers; i<num_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_associations == 0) {
			return &server->workers[i];
		}
	}

	if (empty_slot < SIZE_MAX) {
		ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	/*
	 * All workers are busy. We need to expand the number of
	 * workers because we were asked for an idle worker.
	 */
	if (num_workers >= UINT16_MAX) {
		/*
		 * The worker index would not fit into 16-bits
		 */
		return NULL;
	}
	tmp = talloc_realloc(
		server,
		server->workers,
		struct rpc_work_process,
		num_workers+1);
	if (tmp == NULL) {
		return NULL;
	}
	server->workers = tmp;

	server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };

	ret = rpc_host_exec_worker(server, num_workers);
	if (ret != 0) {
		DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
	}

	return NULL;
}

/*
 * Find an rpcd_* process to talk to. Start a new one if necessary.
 */
static void rpc_host_distribute_clients(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_host_pending_client *pending_client = NULL;
	uint32_t assoc_group_id;
	DATA_BLOB blob;
	struct iovec iov;
	enum ndr_err_code ndr_err;
	NTSTATUS status;
	const char *client_type = NULL;

again:
	pending_client = server->pending_clients;
	if (pending_client == NULL) {
		DBG_DEBUG("No pending clients\n");
		return;
	}

	assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;

	if (assoc_group_id != 0) {
		size_t num_workers = talloc_array_length(server->workers);
		uint16_t worker_index = assoc_group_id >> 16;
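
		/*
		 * Illustration: a hypothetical assoc_group_id of
		 * 0x0005001c was handed out by worker index 5; the
		 * lower 16 bits are the worker-local group number.
		 */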

		client_type = "associated";

		if (worker_index >= num_workers) {
			DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
				  assoc_group_id);
			goto done;
		}
		worker = &server->workers[worker_index];

		if ((worker->pid == -1) || !worker->available) {
			DBG_DEBUG("Requested worker index %"PRIu16": "
				  "pid=%d, available=%d\n",
				  worker_index,
				  (int)worker->pid,
				  (int)worker->available);

			/*
			 * Pick a random one for a proper bind nack
			 */
			client_type = "associated+lost";
			worker = rpc_host_find_worker(server);
		}
	} else {
		struct auth_session_info_transport *session_info =
			pending_client->client->npa_info8->session_info;
		uint32_t flags = 0;
		bool found;

		client_type = "new";

		found = security_token_find_npa_flags(
			session_info->session_info->security_token,
			&flags);

		/* fresh assoc group requested */
		if (found && (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
			client_type = "new+exclusive";
			worker = rpc_host_find_idle_worker(server);
		} else {
			client_type = "new";
			worker = rpc_host_find_worker(server);
		}
	}

	if (worker == NULL) {
		DBG_DEBUG("No worker found for %s client\n", client_type);
		return;
	}

	DLIST_REMOVE(server->pending_clients, pending_client);

	ndr_err = ndr_push_struct_blob(
		&blob,
		pending_client,
		pending_client->client,
		(ndr_push_flags_fn_t)ndr_push_rpc_host_client);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
			    ndr_errstr(ndr_err));
		goto done;
	}

	DBG_INFO("Sending %s client %s to %d with "
		 "%"PRIu32" associations and %"PRIu32" connections\n",
		 client_type,
		 server->rpc_server_exe,
		 worker->pid,
		 worker->num_associations,
		 worker->num_connections);

	iov = (struct iovec) {
		.iov_base = blob.data, .iov_len = blob.length,
	};

	status = messaging_send_iov(
		server->host->msg_ctx,
		pid_to_procid(worker->pid),
		MSG_RPC_HOST_NEW_CLIENT,
		&iov,
		1,
		&pending_client->sock,
		1);
	if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
		DBG_DEBUG("worker %d died, sigchld not yet received?\n",
			  worker->pid);
		DLIST_ADD(server->pending_clients, pending_client);
		worker->available = false;
		goto again;
	}
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("messaging_send_iov failed: %s\n",
			  nt_errstr(status));
		goto done;
	}
	if (assoc_group_id == 0) {
		worker->num_associations += 1;
	}
	worker->num_connections += 1;
	TALLOC_FREE(worker->exit_timer);

	TALLOC_FREE(server->host->np_helper_shutdown);

done:
	TALLOC_FREE(pending_client);
}

static int rpc_host_pending_client_destructor(
	struct rpc_host_pending_client *p)
{
	TALLOC_FREE(p->hangup_wait);
	if (p->sock != -1) {
		close(p->sock);
		p->sock = -1;
	}
	DLIST_REMOVE(p->server->pending_clients, p);
	return 0;
}

/*
 * Exception condition handler before rpcd_* worker
 * is handling the socket. Either the client exited or
 * sent unexpected data after the initial bind.
 */
static void rpc_host_client_exited(struct tevent_req *subreq)
{
	struct rpc_host_pending_client *pending = tevent_req_callback_data(
		subreq, struct rpc_host_pending_client);
	bool ok;
	int err;

	ok = wait_for_read_recv(subreq, &err);

	TALLOC_FREE(subreq);
	pending->hangup_wait = NULL;

	if (ok) {
		DBG_DEBUG("client on sock %d sent data\n", pending->sock);
	} else {
		DBG_DEBUG("client exited with %s\n", strerror(err));
	}
	TALLOC_FREE(pending);
}

struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	char *bindings;
};

static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}

static bool rpc_iface_binding_map_add_endpoints(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pbinding_maps)
{
	size_t i, num_endpoints = talloc_array_length(endpoints);

	for (i=0; i<num_endpoints; i++) {
		bool ok = rpc_iface_binding_map_add_endpoint(
			mem_ctx, endpoints[i], iface_names, pbinding_maps);
		if (!ok) {
			return false;
		}
	}
	return true;
}

static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool ret = false;
	bool ok;

	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int rc;

		rc = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	ret = true;
fail:
	TALLOC_FREE(maps);
	return ret;
}
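
/*
 * Illustrative epmdb.tdb record written above (hypothetical values):
 *
 *   key:   "338cd001-2244-31f1-aaaa-900038001003/0x00000001"
 *   value: "winreg\0ncacn_np:[\pipe\winreg]\0ncalrpc:[rpcd_winreg]\0"
 *
 * i.e. a strv starting with the interface's annotation name followed
 * by the binding strings serving it.
 */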

struct rpc_server_setup_state {
	struct rpc_server *server;
};

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);

/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	*server = (struct rpc_server) {
		.host = host,
		.server_index = UINT32_MAX,
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			continue;
		}
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}

static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	*server = talloc_move(mem_ctx, &state->server);
	tevent_req_received(req);
	return NT_STATUS_OK;
}

/*
 * rpcd_* died. Called from SIGCHLD handler.
 */
static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
{
	size_t i, num_servers = talloc_array_length(host->servers);
	struct rpc_work_process *worker = NULL;
	bool found_pid = false;
	bool have_active_worker = false;

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = host->servers[i];
		size_t j, num_workers;

		if (server == NULL) {
			/* SIGCHLD for --list-interfaces run */
			continue;
		}

		num_workers = talloc_array_length(server->workers);

		for (j=0; j<num_workers; j++) {
			worker = &server->workers[j];
			if (worker->pid == pid) {
				found_pid = true;
				worker->pid = -1;
				worker->available = false;
			}

			if (worker->pid != -1) {
				have_active_worker = true;
			}
		}
	}

	if (!found_pid) {
		DBG_WARNING("No worker with PID %d\n", (int)pid);
		return;
	}

	if (!have_active_worker && host->np_helper) {
		/*
		 * We have nothing left to do as an np_helper.
		 * Terminate ourselves (samba-dcerpcd). We will
		 * be restarted on demand anyway.
		 */
		DBG_DEBUG("Exiting idle np helper\n");
		exit(0);
	}
}

/*
 * rpcd_* died.
 */
static void rpc_host_sigchld(
	struct tevent_context *ev,
	struct tevent_signal *se,
	int signum,
	int count,
	void *siginfo,
	void *private_data)
{
	struct rpc_host *state = talloc_get_type_abort(
		private_data, struct rpc_host);
	pid_t pid;
	int wstatus;

	while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
		DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
		rpc_worker_exited(state, pid);
	}
}

/*
 * Idle timer fired for a rpcd_* worker. Ask it to terminate.
 */
static void rpc_host_exit_worker(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct rpc_server *server = talloc_get_type_abort(
		private_data, struct rpc_server);
	size_t i, num_workers = talloc_array_length(server->workers);

	/*
	 * Scan for the right worker. We don't have too many of those,
	 * and maintaining an index would be more data structure effort.
	 */

	for (i=0; i<num_workers; i++) {
		struct rpc_work_process *w = &server->workers[i];
		NTSTATUS status;

		if (w->exit_timer != te) {
			continue;
		}
		w->exit_timer = NULL;

		SMB_ASSERT(w->num_associations == 0);

		status = messaging_send(
			server->host->msg_ctx,
			pid_to_procid(w->pid),
			MSG_SHUTDOWN,
			NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
				  nt_errstr(status));
		}

		w->available = false;
		break;
	}
}

/*
 * rpcd_* worker replied with its status.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_associations = status_message.num_association_groups;
	worker->num_connections = status_message.num_connections;

	if (worker->num_associations != 0) {
		TALLOC_FREE(worker->exit_timer);
	} else {
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	rpc_host_distribute_clients(server);
}

/*
 * samba-dcerpcd has been asked to shutdown.
 * Mark the initial tevent_req as done so we
 * exit the event loop.
 */
static void rpc_host_msg_shutdown(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	tevent_req_done(req);
}

/*
 * Only match directory entries starting in rpcd_
 */
static int rpcd_filter(const struct dirent *d)
{
	int match = fnmatch("rpcd_*", d->d_name, 0);
	return (match == 0) ? 1 : 0;
}

/*
 * Scan the given libexecdir for rpcd_* services
 * and return them as a strv list.
 */
static int rpc_host_list_servers(
	const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
{
	char *servers = NULL;
	struct dirent **namelist = NULL;
	int i, num_servers;
	int ret = ENOMEM;

	num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
	if (num_servers == -1) {
		DBG_DEBUG("scandir failed: %s\n", strerror(errno));
		return errno;
	}

	for (i=0; i<num_servers; i++) {
		char *exe = talloc_asprintf(
			mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
		if (exe == NULL) {
			goto fail;
		}

		ret = strv_add(mem_ctx, &servers, exe);
		TALLOC_FREE(exe);
		if (ret != 0) {
			goto fail;
		}
	}
fail:
	for (i=0; i<num_servers; i++) {
		SAFE_FREE(namelist[i]);
	}
	SAFE_FREE(namelist);

	if (ret != 0) {
		TALLOC_FREE(servers);
		return ret;
	}
	*pservers = servers;
	return 0;
}
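
/*
 * Illustration (hypothetical directory contents): with rpcd_epmapper
 * and rpcd_winreg installed in libexecdir, *pservers becomes the strv
 *
 *   "<libexecdir>/rpcd_epmapper\0<libexecdir>/rpcd_winreg\0"
 *
 * in the order produced by scandir()'s alphasort.
 */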

struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;
	struct rpc_host_endpoint *endpoint;
};

static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);

/*
 * Asynchronously wait for a DCERPC connection from a client.
 */
static struct tevent_req *rpc_host_endpoint_accept_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host_endpoint *endpoint)
{
	struct tevent_req *req = NULL;
	struct rpc_host_endpoint_accept_state *state = NULL;
	size_t i;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_endpoint_accept_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->endpoint = endpoint;

	for (i=0; i<endpoint->num_fds; i++) {
		struct tevent_req *subreq = NULL;

		subreq = accept_send(state, ev, endpoint->fds[i]);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_endpoint_accept_accepted, req);
	}

	return req;
}

/*
 * Accept a DCERPC connection from a client.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		close(sock);
		sock = -1;
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}

/*
 * Client sent us a DCERPC bind packet.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock=-1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	sock = -1;

	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}

static int rpc_host_endpoint_accept_recv(
	struct tevent_req *req, struct rpc_host_endpoint **ep)
{
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);

	*ep = state->endpoint;

	return tevent_req_simple_recv_unix(req);
}

/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	bool is_ready;
	const char *daemon_ready_progname;
	struct tevent_immediate *ready_signal_immediate;
	int *ready_signal_fds;

	size_t num_servers;
	size_t num_prepared;
};
/*
 * Tell whoever invoked samba-dcerpcd we're ready to
 * serve.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		do {
			nwritten = write(
				state->ready_signal_fds[i],
				(void *)&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	TALLOC_FREE(state->ready_signal_fds);
}

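/*
 * The readiness protocol implemented above: one byte is written to
 * every registered fd, then the fd is closed. A hypothetical parent
 * that wants to block until samba-dcerpcd is ready could do roughly
 * the following (sketch only; the write end is what we receive via
 * --ready-signal-fd):
 *
 *	int fds[2];
 *	char c;
 *
 *	pipe(fds);
 *	// fork/exec: samba-dcerpcd ... --ready-signal-fd=<fds[1]> ...
 *	close(fds[1]);
 *	// returns 1 once the daemon writes its ready byte,
 *	// 0 (EOF) if the daemon exits first
 *	read(fds[0], &c, 1);
 *	close(fds[0]);
 */
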
/*
 * Respond to an "are you ready" message.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	/* Guard against size_t overflow of num_fds+1 */
	if (num_fds + 1 < num_fds) {
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->fds[0] = -1;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}

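/*
 * The sender side of this handshake lives in
 * rpc_host_pidfile_create() below: a second samba-dcerpcd that finds
 * an existing instance forwards its --ready-signal-fd to that
 * instance via messaging_send_iov(..., MSG_DAEMON_READY_FD, NULL, 0,
 * &ready_signal_fd, 1). That fd arrives here in rec->fds[0] and is
 * answered by rpc_host_report_readiness() above.
 */
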
/*
 * Respond to a "what is your status" message.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;
	int fd;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	fd = dup(rec->fds[0]);
	if (fd == -1) {
		DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
			  rec->fds[0],
			  strerror(errno));
		return false;
	}

	f = fdopen(fd, "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		close(fd);
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_associations=%"PRIu32", num_connections=%"PRIu32"\n",
				j,
				(int)w->pid,
				w->num_associations,
				w->num_connections);
		}
	}

	fclose(f);

	return false;
}

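/*
 * Whatever tool sends MSG_RPC_DUMP_STATUS (it must attach exactly
 * one fd for us to write to) gets back plain text of the form:
 *
 *	rpcd_winreg: active_workers=2
 *	 worker[0]: pid=4711, num_associations=1, num_connections=3
 *	 worker[1]: pid=4712, num_associations=0, num_connections=0
 *
 * The rpcd name and the numbers above are made up for illustration;
 * the real values come from the fprintf() calls above.
 */
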
static void rpc_host_server_setup_done(struct tevent_req *subreq);
static void rpc_host_endpoint_failed(struct tevent_req *subreq);

/*
 * Async startup for samba-dcerpcd.
 */
static struct tevent_req *rpc_host_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct messaging_context *msg_ctx,
	char *servers,
	int ready_signal_fd,
	const char *daemon_ready_progname,
	bool is_np_helper)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_state *state = NULL;
	struct rpc_host *host = NULL;
	struct tevent_signal *se = NULL;
	char *epmdb_path = NULL;
	char *exe = NULL;
	size_t i, num_servers = strv_count(servers);
	NTSTATUS status;
	int ret;

	req = tevent_req_create(mem_ctx, &state, struct rpc_host_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->daemon_ready_progname = daemon_ready_progname;

	state->ready_signal_immediate = tevent_create_immediate(state);
	if (tevent_req_nomem(state->ready_signal_immediate, req)) {
		return tevent_req_post(req, ev);
	}

	if (ready_signal_fd != -1) {
		state->ready_signal_fds = talloc_array(state, int, 1);
		if (tevent_req_nomem(state->ready_signal_fds, req)) {
			return tevent_req_post(req, ev);
		}
		state->ready_signal_fds[0] = ready_signal_fd;
	}

	state->host = talloc_zero(state, struct rpc_host);
	if (tevent_req_nomem(state->host, req)) {
		return tevent_req_post(req, ev);
	}
	host = state->host;

	host->msg_ctx = msg_ctx;
	host->np_helper = is_np_helper;

	ret = pipe(host->worker_stdin);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}

	host->servers = talloc_zero_array(
		host, struct rpc_server *, num_servers);
	if (tevent_req_nomem(host->servers, req)) {
		return tevent_req_post(req, ev);
	}

	se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
	if (tevent_req_nomem(se, req)) {
		return tevent_req_post(req, ev);
	}
	BlockSignals(false, SIGCHLD);

	status = messaging_register(
		msg_ctx,
		host,
		MSG_RPC_WORKER_STATUS,
		rpc_host_child_status_recv);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	status = messaging_register(
		msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_dump_status_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	epmdb_path = lock_path(state, "epmdb.tdb");
	if (tevent_req_nomem(epmdb_path, req)) {
		return tevent_req_post(req, ev);
	}

	host->epmdb = tdb_wrap_open(
		host,
		epmdb_path,
		0,
		TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
		O_RDWR|O_CREAT,
		0644);
	if (host->epmdb == NULL) {
		DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
			  epmdb_path,
			  strerror(errno));
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}
	TALLOC_FREE(epmdb_path);

	for (exe = strv_next(servers, exe), i = 0;
	     exe != NULL;
	     exe = strv_next(servers, exe), i++) {

		DBG_DEBUG("server_setup for %s index %zu\n", exe, i);

		subreq = rpc_server_setup_send(
			state,
			ev,
			host,
			exe);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_server_setup_done, req);
	}

	return req;
}

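/*
 * Note on the "servers" argument above: it is a talloc'ed strv, i.e.
 * a packed sequence of NUL-terminated strings built up with
 * strv_add() in main(), which is why it is walked with
 * strv_count()/strv_next() rather than indexed as a char *[] array.
 */
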
/*
 * Timer function called after we were initialized but nobody
 * connected. Shut down.
 */
static void rpc_host_shutdown(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	DBG_DEBUG("Nobody connected -- shutting down\n");
	tevent_req_done(req);
}

static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		return;
	}

	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}

/*
 * Log an accept failure on an endpoint.
 */
static void rpc_host_endpoint_failed(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_host_endpoint *endpoint = NULL;
	char *binding_string = NULL;
	int ret;

	ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
	TALLOC_FREE(subreq);

	binding_string = dcerpc_binding_string(state, endpoint->binding);
	DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
		  binding_string,
		  strerror(ret));
	TALLOC_FREE(binding_string);
}

static NTSTATUS rpc_host_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

static int rpc_host_pidfile_create(
	struct messaging_context *msg_ctx,
	const char *progname,
	int ready_signal_fd)
{
	const char *piddir = lp_pid_directory();
	size_t len = strlen(piddir) + strlen(progname) + 6;
	char pidFile[len];
	pid_t existing_pid;
	int fd, ret;

	snprintf(pidFile,
		 sizeof(pidFile),
		 "%s/%s.pid",
		 piddir, progname);

	ret = pidfile_path_create(pidFile, &fd, &existing_pid);
	if (ret == 0) {
		/* leak fd: the pidfile stays open for our lifetime */
		return 0;
	}

	if (ret != EAGAIN) {
		DBG_DEBUG("pidfile_path_create() failed: %s\n",
			  strerror(ret));
		return ret;
	}

	DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);

	if (ready_signal_fd != -1) {
		NTSTATUS status = messaging_send_iov(
			msg_ctx,
			pid_to_procid(existing_pid),
			MSG_DAEMON_READY_FD,
			NULL,
			0,
			&ready_signal_fd,
			1);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send ready_signal_fd: %s\n",
				  nt_errstr(status));
		}
	}

	return EAGAIN;
}

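/*
 * Contract of the function above: 0 means we now own the pidfile and
 * should keep running. EAGAIN means another samba-dcerpcd instance
 * already runs; in that case we have forwarded our ready_signal_fd
 * to it (handled by rpc_host_ready_signal_filter()), so the existing
 * instance reports readiness on our behalf and main() below exits.
 */
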
static void samba_dcerpcd_stdin_handler(
	struct tevent_context *ev,
	struct tevent_fd *fde,
	uint16_t flags,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	char c;

	if (read(0, &c, 1) != 1) {
		/* we have reached EOF on stdin, which means the
		   parent has exited. Shut down the server */
		tevent_req_done(req);
	}
}

/*
 * samba-dcerpcd microservice startup!
 */
int main(int argc, const char *argv[])
{
	const struct loadparm_substitution *lp_sub =
		loadparm_s3_global_substitution();
	const char *progname = getprogname();
	TALLOC_CTX *frame = NULL;
	struct tevent_context *ev_ctx = NULL;
	struct messaging_context *msg_ctx = NULL;
	struct tevent_req *req = NULL;
	char *servers = NULL;
	const char *arg = NULL;
	size_t num_servers;
	poptContext pc;
	int ret, err;
	NTSTATUS status;
	bool log_stdout;
	bool ok;

	int libexec_rpcds = 0;
	int np_helper = 0;
	int ready_signal_fd = -1;

	struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
	struct poptOption long_options[] = {
		POPT_AUTOHELP
		{
			.longName = "libexec-rpcds",
			.argInfo = POPT_ARG_NONE,
			.arg = &libexec_rpcds,
			.descrip = "Use all rpcds in libexec",
		},
		{
			.longName = "ready-signal-fd",
			.argInfo = POPT_ARG_INT,
			.arg = &ready_signal_fd,
			.descrip = "fd to close when initialized",
		},
		{
			.longName = "np-helper",
			.argInfo = POPT_ARG_NONE,
			.arg = &np_helper,
			.descrip = "Internal named pipe server",
		},
		POPT_COMMON_SAMBA
		POPT_COMMON_DAEMON
		POPT_COMMON_VERSION
		POPT_TABLEEND
	};

	{
		const char *fd_params[] = { "ready-signal-fd", };

		closefrom_except_fd_params(
			3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
	}

	talloc_enable_null_tracking();
	frame = talloc_stackframe();
	umask(0);
	sec_init();
	smb_init_locale();

	ok = samba_cmdline_init(frame,
				SAMBA_CMDLINE_CONFIG_SERVER,
				true /* require_smbconf */);
	if (!ok) {
		DBG_ERR("Failed to init cmdline parser!\n");
		TALLOC_FREE(frame);
		exit(ENOMEM);
	}

	pc = samba_popt_get_context(getprogname(),
				    argc,
				    argv,
				    long_options,
				    0);
	if (pc == NULL) {
		DBG_ERR("Failed to setup popt context!\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	poptSetOtherOptionHelp(
		pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");

	ret = poptGetNextOpt(pc);

	if (ret != -1) {
		if (ret >= 0) {
			fprintf(stderr,
				"\nGot unexpected option %d\n",
				ret);
		} else if (ret == POPT_ERROR_BADOPT) {
			fprintf(stderr,
				"\nInvalid option %s: %s\n\n",
				poptBadOption(pc, 0),
				poptStrerror(ret));
		} else {
			fprintf(stderr,
				"\npoptGetNextOpt returned %s\n",
				poptStrerror(ret));
		}

		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	while ((arg = poptGetArg(pc)) != NULL) {
		ret = strv_add(frame, &servers, arg);
		if (ret != 0) {
			DBG_ERR("strv_add() failed\n");
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
	if (log_stdout) {
		setup_logging(progname, DEBUG_STDOUT);
	} else {
		setup_logging(progname, DEBUG_FILE);
	}

	/*
	 * If "rpc start on demand helpers = true" in smb.conf we must
	 * not start as standalone, only on demand from
	 * local_np_connect() functions. Log an error message telling
	 * the admin how to fix this and then exit.
	 */
	if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
		DBG_ERR("Cannot start in standalone mode if smb.conf "
			"[global] setting "
			"\"rpc start on demand helpers = true\" - "
			"exiting\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	if (libexec_rpcds != 0) {
		ret = rpc_host_list_servers(
			dyn_SAMBA_LIBEXECDIR, frame, &servers);
		if (ret != 0) {
			DBG_ERR("Could not list libexec: %s\n",
				strerror(ret));
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	num_servers = strv_count(servers);
	if (num_servers == 0) {
		poptPrintUsage(pc, stderr, 0);
		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	poptFreeContext(pc);

	cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();

	if (log_stdout && cmdline_daemon_cfg->fork) {
		DBG_ERR("Can't log to stdout unless in foreground\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	msg_ctx = global_messaging_context();
	if (msg_ctx == NULL) {
		DBG_ERR("messaging_init() failed\n");
		TALLOC_FREE(frame);
		exit(1);
	}
	ev_ctx = messaging_tevent_context(msg_ctx);

	if (cmdline_daemon_cfg->fork) {
		become_daemon(
			true,
			cmdline_daemon_cfg->no_process_group,
			log_stdout);

		status = reinit_after_fork(msg_ctx, ev_ctx, false);
		if (!NT_STATUS_IS_OK(status)) {
			exit_daemon("reinit_after_fork() failed",
				    map_errno_from_nt_status(status));
		}
	} else {
		DBG_DEBUG("Calling daemon_status\n");
		daemon_status(progname, "Starting process ... ");
	}

	BlockSignals(true, SIGPIPE);

	dump_core_setup(progname, lp_logfile(frame, lp_sub));

	reopen_logs();

	DBG_STARTUP_NOTICE("%s version %s started.\n%s\n",
			   progname,
			   samba_version_string(),
			   samba_copyright_string());

	(void)winbind_off();
	ok = init_guest_session_info(frame);
	(void)winbind_on();
	if (!ok) {
		DBG_ERR("init_guest_session_info failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
			  strerror(ret));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	req = rpc_host_send(
		ev_ctx,
		ev_ctx,
		msg_ctx,
		servers,
		ready_signal_fd,
		cmdline_daemon_cfg->fork ? NULL : progname,
		np_helper != 0);
	if (req == NULL) {
		DBG_ERR("rpc_host_send failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	if (!cmdline_daemon_cfg->fork) {
		struct stat st;
		if (fstat(0, &st) != 0) {
			DBG_DEBUG("fstat(0) failed: %s\n",
				  strerror(errno));
			global_messaging_context_free();
			TALLOC_FREE(frame);
			exit(1);
		}
		if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
			tevent_add_fd(
				ev_ctx,
				ev_ctx,
				0,
				TEVENT_FD_READ,
				samba_dcerpcd_stdin_handler,
				req);
		}
	}

	ok = tevent_req_poll_unix(req, ev_ctx, &err);
	if (!ok) {
		DBG_ERR("tevent_req_poll_unix failed: %s\n",
			strerror(err));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	status = rpc_host_recv(req);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	TALLOC_FREE(frame);

	return 0;
}
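
/*
 * Example invocations of the standalone mode (service names are
 * illustrative, not a canonical list):
 *
 *	# serve every rpcd_* binary found in libexec
 *	samba-dcerpcd --libexec-rpcds
 *
 *	# serve an explicit list of services
 *	samba-dcerpcd rpcd_epmapper rpcd_winreg
 */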