/*
 * Implements samba-dcerpcd service.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This binary has two usage modes:
 *
 * In the normal case when invoked from smbd or winbind it is given a
 * directory to scan via --libexec-rpcds and will invoke on demand any
 * binaries it finds there starting with rpcd_ when a named pipe
 * connection is requested.
 *
 * In the second mode it can be started explicitly from system startup
 * scripts.
 *
 * When Samba is set up as an Active Directory Domain Controller the
 * normal samba binary overrides and provides DCERPC services, whilst
 * allowing samba-dcerpcd to provide the services that smbd used to
 * provide in that set-up, such as SRVSVC.
 *
 * The second mode can also be useful outside of the Samba framework,
 * for example with the Linux kernel SMB2 server ksmbd. In this mode
 * it behaves like inetd and listens on sockets on behalf of RPC server
 * implementations.
 */
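/*
 * Illustrative invocations for the two modes. This is a sketch: the
 * flag spellings come from the popt table in main() below, but the
 * fd number and the rpcd service names are made-up examples:
 *
 *   # on-demand helper mode, as spawned from smbd/winbind:
 *   samba-dcerpcd --libexec-rpcds --np-helper --ready-signal-fd=5
 *
 *   # standalone mode, e.g. next to ksmbd, with explicit services:
 *   samba-dcerpcd rpcd_epmapper rpcd_winreg
 */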
#include "lib/cmdline/cmdline.h"
#include "lib/cmdline/closefrom_except.h"
#include "source3/include/includes.h"
#include "source3/include/auth.h"
#include "rpc_sock_helper.h"
#include "lib/util_file.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/util/smb_strtox.h"
#include "lib/util/debug.h"
#include "lib/util/server_id.h"
#include "lib/util/util_tdb.h"
#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/async_req/async_sock.h"
#include "librpc/rpc/dcerpc_util.h"
#include "lib/tsocket/tsocket.h"
#include "libcli/named_pipe_auth/npa_tstream.h"
#include "librpc/gen_ndr/ndr_rpc_host.h"
#include "source3/param/loadparm.h"
#include "source3/lib/global_contexts.h"
#include "lib/util/strv.h"
#include "lib/util/pidfile.h"
#include "source3/rpc_client/cli_pipe.h"
#include "librpc/gen_ndr/ndr_epmapper.h"
#include "librpc/gen_ndr/ndr_epmapper_c.h"
#include "nsswitch/winbind_client.h"
#include "libcli/security/dom_sid.h"
#include "libcli/security/security_token.h"
extern bool override_logfile;

struct rpc_work_process;

/*
 * samba-dcerpcd state to keep track of rpcd_* servers.
 */
struct rpc_host {
	struct messaging_context *msg_ctx;
	struct rpc_server **servers;
	struct tdb_wrap *epmdb;

	int worker_stdin[2];

	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
/*
 * Map an RPC interface to a name. Used when filling the endpoint
 * mapper database.
 */
struct rpc_host_iface_name {
	struct ndr_syntax_id iface;
	char *name;
};
/*
 * rpc_host representation for listening sockets. ncacn_ip_tcp might
 * listen on multiple explicit IPs, all with the same port.
 */
struct rpc_host_endpoint {
	struct rpc_server *server;
	struct dcerpc_binding *binding;
	struct ndr_syntax_id *interfaces;
	int *fds;
	size_t num_fds;
};
/*
 * Staging area until we send the socket plus bind to the helper
 */
struct rpc_host_pending_client {
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};
/*
 * Representation of one worker process. For each rpcd_* executable
 * there will be more than one of these.
 */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_clients;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
/*
 * State for a set of running instances of an rpcd_* server executable
 */
struct rpc_server {
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	const char *rpc_server_exe;

	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info7->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	struct rpc_host_pending_client *pending_clients;
};
struct rpc_server_get_endpoints_state {
	char **argl;
	char *ncalrpc_endpoint;
	enum dcerpc_transport_t only_transport;

	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	unsigned long num_workers;
	unsigned long idle_seconds;
};

static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
/**
 * @brief Query interfaces from an rpcd helper
 *
 * Spawn an rpcd helper, ask it for the interfaces it serves via
 * --list-interfaces, and parse the output.
 *
 * @param[in] mem_ctx Memory context for the tevent_req
 * @param[in] ev Event context to run this on
 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
 * @param[in] only_transport Filter out anything but this
 * @return The tevent_req representing this process
 */
static struct tevent_req *rpc_server_get_endpoints_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	const char *rpc_server_exe,
	enum dcerpc_transport_t only_transport)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_get_endpoints_state *state = NULL;
	const char *progname = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_get_endpoints_state);
	if (req == NULL) {
		return NULL;
	}
	state->only_transport = only_transport;

	progname = strrchr(rpc_server_exe, '/');
	if (progname != NULL) {
		progname += 1;
	} else {
		progname = rpc_server_exe;
	}

	state->ncalrpc_endpoint = talloc_strdup(state, progname);
	if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
		return tevent_req_post(req, ev);
	}

	state->argl = str_list_make_empty(state);
	str_list_add_printf(&state->argl, "%s", rpc_server_exe);
	str_list_add_printf(&state->argl, "--list-interfaces");
	str_list_add_printf(
		&state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
	if (tevent_req_nomem(state->argl, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = file_ploadv_send(state, ev, state->argl, 65536);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
	return req;
}
/*
 * Parse a line of the format
 *
 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *
 * and add it to the "piface_names" array.
 */
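/*
 * For orientation, a complete --list-interfaces reply as consumed by
 * rpc_server_get_endpoints_done() below might look as follows. This
 * is an illustrative sketch, not output captured from a real rpcd:
 *
 *   4                       <- first line: number of workers
 *   300                     <- second line: idle seconds before exit
 *   338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *    ncacn_np:[\pipe\winreg]
 *    ncalrpc:
 *
 * Interface lines start in column 0; the endpoint lines belonging to
 * the preceding interface are indented by one space (see the
 * line[0] != ' ' check in rpc_server_get_endpoints_done()).
 */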
static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_iface_name **piface_names,
	const char *line)
{
	struct rpc_host_iface_name *iface_names = *piface_names;
	struct rpc_host_iface_name *tmp = NULL, *result = NULL;
	size_t i, num_ifaces = talloc_array_length(iface_names);
	struct ndr_syntax_id iface;
	char *name = NULL;
	bool ok;

	ok = ndr_syntax_id_from_string(line, &iface);
	if (!ok) {
		DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
			    line);
		return NULL;
	}

	name = strchr(line, ' ');
	if (name == NULL) {
		return NULL;
	}
	name += 1;

	for (i=0; i<num_ifaces; i++) {
		result = &iface_names[i];

		if (ndr_syntax_id_equal(&result->iface, &iface)) {
			return result;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return NULL;
	}

	name = talloc_strdup(mem_ctx, name);
	if (name == NULL) {
		return NULL;
	}

	tmp = talloc_realloc(
		mem_ctx,
		iface_names,
		struct rpc_host_iface_name,
		num_ifaces + 1);
	if (tmp == NULL) {
		TALLOC_FREE(name);
		return NULL;
	}
	iface_names = tmp;

	result = &iface_names[num_ifaces];

	*result = (struct rpc_host_iface_name) {
		.iface = iface,
		.name = talloc_move(iface_names, &name),
	};

	*piface_names = iface_names;

	return result;
}
static struct rpc_host_iface_name *rpc_host_iface_names_find(
	struct rpc_host_iface_name *iface_names,
	const struct ndr_syntax_id *iface)
{
	size_t i, num_iface_names = talloc_array_length(iface_names);

	for (i=0; i<num_iface_names; i++) {
		struct rpc_host_iface_name *iface_name = &iface_names[i];

		if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
			return iface_name;
		}
	}

	return NULL;
}
static bool dcerpc_binding_same_endpoint(
	const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
{
	enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
	enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
	const char *e1 = NULL, *e2 = NULL;
	int cmp;

	if (t1 != t2) {
		return false;
	}

	e1 = dcerpc_binding_get_string_option(b1, "endpoint");
	e2 = dcerpc_binding_get_string_option(b2, "endpoint");

	if ((e1 == NULL) && (e2 == NULL)) {
		return true;
	}
	if ((e1 == NULL) || (e2 == NULL)) {
		return false;
	}
	cmp = strcmp(e1, e2);
	return (cmp == 0);
}
/**
 * @brief Filter whether we want to serve an endpoint
 *
 * samba-dcerpcd might want to serve all endpoints an rpcd reported to
 * us via --list-interfaces.
 *
 * In member mode, we only serve named pipes. Indicated by NCACN_NP
 * passed in via "only_transport".
 *
 * @param[in] binding Which binding is in question?
 * @param[in] only_transport Exclusive transport to serve
 * @return Do we want to serve "binding" from samba-dcerpcd?
 */
static bool rpc_host_serve_endpoint(
	struct dcerpc_binding *binding,
	enum dcerpc_transport_t only_transport)
{
	enum dcerpc_transport_t transport =
		dcerpc_binding_get_transport(binding);

	if (only_transport == NCA_UNKNOWN) {
		/* no filter around */
		return true;
	}

	if (transport != only_transport) {
		/* filter out */
		return false;
	}

	return true;
}
static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	for (i=0; i<num_endpoints; i++) {
		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;

	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}
static bool ndr_interfaces_add_unique(
	TALLOC_CTX *mem_ctx,
	struct ndr_syntax_id **pifaces,
	const struct ndr_syntax_id *iface)
{
	struct ndr_syntax_id *ifaces = *pifaces;
	size_t i, num_ifaces = talloc_array_length(ifaces);

	for (i=0; i<num_ifaces; i++) {
		if (ndr_syntax_id_equal(iface, &ifaces[i])) {
			return true;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return false;
	}

	ifaces = talloc_realloc(
		mem_ctx,
		ifaces,
		struct ndr_syntax_id,
		num_ifaces + 1);
	if (ifaces == NULL) {
		return false;
	}
	ifaces[num_ifaces] = *iface;

	*pifaces = ifaces;
	return true;
}
/*
 * Read the text reply from the rpcd_* process telling us what
 * endpoints it will serve when asked with --list-interfaces.
 */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}
/**
 * @brief Receive output from --list-interfaces
 *
 * @param[in] req The async req that just finished
 * @param[in] mem_ctx Where to put the output
 * @param[out] endpoints The endpoints to be listened on
 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
 * @param[out] num_workers Number of workers to serve the endpoints
 * @param[out] idle_seconds Seconds after which idle workers exit
 * @return 0 on success, errno on failure
 */
static int rpc_server_get_endpoints_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint ***endpoints,
	struct rpc_host_iface_name **iface_names,
	size_t *num_workers,
	size_t *idle_seconds)
{
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*endpoints = talloc_move(mem_ctx, &state->endpoints);
	*iface_names = talloc_move(mem_ctx, &state->iface_names);
	*num_workers = state->num_workers;
	*idle_seconds = state->idle_seconds;
	tevent_req_received(req);
	return 0;
}
/*
 * For NCACN_NP we get the named pipe auth info from smbd, if a client
 * comes in via TCP or NCALRPC we need to invent it ourselves with
 * anonymous session info.
 */
static NTSTATUS rpc_host_generate_npa_info7_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info7 **pinfo7)
{
	struct named_pipe_auth_req_info7 *info7 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info7 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info7);
	if (info7 == NULL) {
		goto fail;
	}

	info7->session_info =
		talloc_zero(info7, struct auth_session_info_transport);
	if (info7->session_info == NULL) {
		goto fail;
	}

	status = make_session_info_anonymous(
		info7->session_info,
		&info7->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info7,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info7);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto fail;
	}
	TALLOC_FREE(taddr);

	remote_client_name = talloc_strdup(info7, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto fail;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info7->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info7,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info7);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto fail;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info7, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto fail;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info7->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {
			/*
			 * Indicate "root" to gensec
			 */
			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info7,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto fail;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info7);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto fail;
			}
			remote_client_name =
				talloc_strdup(info7, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto fail;
			}
		}
	}

	info7->remote_client_addr = remote_client_addr;
	info7->remote_client_name = remote_client_name;
	info7->local_server_addr = local_server_addr;
	info7->local_server_name = local_server_name;

	*pinfo7 = info7;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	TALLOC_FREE(info7);
	return status;
}
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	int sock;
	struct tstream_context *plain;
	struct tstream_context *npa_stream;

	struct ncacn_packet *pkt;
	struct rpc_host_client *client;
};

static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
/*
 * Wait for a bind packet from a client.
 */
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	state->sock = *psock;
	*psock = -1;

	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		close(sock_dup);
		return tevent_req_post(req, ev);
	}

	if (transport == NCACN_NP) {
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	status = rpc_host_generate_npa_info7_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info7);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}
static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);

	if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
		close(state->sock);
		state->sock = -1;
	}
}
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info7 *info7 = NULL;
	int ret, err;

	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info7,
					       NULL, /* transport */
					       NULL, /* remote_client_addr */
					       NULL, /* remote_client_name */
					       NULL, /* local_server_addr */
					       NULL, /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	state->client->npa_info7 = talloc_move(state->client, &info7);

	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}
static int rpc_host_bind_read_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	int *sock,
	struct rpc_host_client **client,
	struct ncacn_packet **bind_pkt)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*sock = state->sock;
	state->sock = -1;

	*client = talloc_move(mem_ctx, &state->client);
	*bind_pkt = talloc_move(mem_ctx, &state->pkt);
	tevent_req_received(req);
	return 0;
}
/*
 * Start the given rpcd_* binary.
 */
static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
{
	struct rpc_work_process *worker = &server->workers[idx];
	char **argv = NULL;
	int ret = ENOMEM;

	argv = str_list_make_empty(server);
	str_list_add_printf(
		&argv, "%s", server->rpc_server_exe);
	str_list_add_printf(
		&argv, "--configfile=%s", get_dyn_CONFIGFILE());
	str_list_add_printf(
		&argv, "--worker-group=%"PRIu32, server->server_index);
	str_list_add_printf(
		&argv, "--worker-index=%zu", idx);
	str_list_add_printf(
		&argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
	if (!is_default_dyn_LOGFILEBASE()) {
		str_list_add_printf(
			&argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
	}
	if (argv == NULL) {
		goto fail;
	}

	worker->pid = fork();
	if (worker->pid == -1) {
		ret = errno;
		goto fail;
	}
	if (worker->pid == 0) {
		/* Child. */
		close(server->host->worker_stdin[1]);
		ret = dup2(server->host->worker_stdin[0], 0);
		if (ret != 0) {
			exit(1);
		}
		execv(argv[0], argv);
		_exit(1);
	}

	DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
		  server->rpc_server_exe,
		  idx,
		  (int)worker->pid);

	ret = 0;
fail:
	TALLOC_FREE(argv);
	return ret;
}
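/*
 * For illustration, with the argv built above a worker ends up being
 * started roughly as (paths and numbers are made up for this sketch):
 *
 *   rpcd_winreg --configfile=/etc/samba/smb.conf \
 *       --worker-group=3 --worker-index=0 --debuglevel=1
 *
 * with fd 0 connected to the read end of host->worker_stdin, so
 * workers can notice on EOF when samba-dcerpcd goes away.
 */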
/*
 * Find an rpcd_* worker for an external client, respect server->max_workers
 */
static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	size_t i;
	size_t empty_slot = SIZE_MAX;

	uint32_t min_clients = UINT32_MAX;
	size_t min_worker = server->max_workers;

	for (i=0; i<server->max_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_clients < min_clients) {
			min_clients = worker->num_clients;
			min_worker = i;
		}
	}

	if (min_clients == 0) {
		return &server->workers[min_worker];
	}

	if (empty_slot < SIZE_MAX) {
		int ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	if (min_worker < server->max_workers) {
		return &server->workers[min_worker];
	}

	return NULL;
}
/*
 * Find an rpcd_* worker for an internal connection, possibly go beyond
 * server->max_workers
 */
static struct rpc_work_process *rpc_host_find_idle_worker(
	struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL, *tmp = NULL;
	size_t i, num_workers = talloc_array_length(server->workers);
	size_t empty_slot = SIZE_MAX;
	int ret;

	for (i=server->max_workers; i<num_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_clients == 0) {
			return &server->workers[i];
		}
	}

	if (empty_slot < SIZE_MAX) {
		ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	/*
	 * All workers are busy. We need to expand the number of
	 * workers because we were asked for an idle worker.
	 */
	if (num_workers+1 < num_workers) {
		return NULL;
	}
	tmp = talloc_realloc(
		server,
		server->workers,
		struct rpc_work_process,
		num_workers+1);
	if (tmp == NULL) {
		return NULL;
	}
	server->workers = tmp;

	server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };

	ret = rpc_host_exec_worker(server, num_workers);
	if (ret != 0) {
		DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
	}

	return NULL;
}
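/*
 * Note the difference between the two lookups above: external clients
 * share the at most server->max_workers processes, picking the one
 * with the fewest active clients. Internal connections that must not
 * block (see the "need_idle_server" comment at struct rpc_server) get
 * a worker with num_clients==0 beyond max_workers, so that e.g. a
 * netlogon call that internally opens samr can never deadlock behind
 * its own busy process.
 */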
/*
 * Find an rpcd_* process to talk to. Start a new one if necessary.
 */
static void rpc_host_distribute_clients(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_host_pending_client *pending_client = NULL;
	uint32_t assoc_group_id;
	DATA_BLOB blob;
	struct iovec iov;
	enum ndr_err_code ndr_err;
	NTSTATUS status;

again:
	pending_client = server->pending_clients;
	if (pending_client == NULL) {
		DBG_DEBUG("No pending clients\n");
		return;
	}

	assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;

	if (assoc_group_id != 0) {
		size_t num_workers = talloc_array_length(server->workers);
		uint8_t worker_index = assoc_group_id >> 24;
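		/*
		 * The top 8 bits of the DCERPC association group id
		 * carry the worker index, as the shift above shows:
		 * e.g. assoc_group_id 0x03000017 routes this bind to
		 * worker 3 (the values here are purely illustrative).
		 */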
		if (worker_index >= num_workers) {
			DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
				  assoc_group_id);
			goto done;
		}
		worker = &server->workers[worker_index];

		if ((worker->pid == -1) || !worker->available) {
			DBG_DEBUG("Requested worker index %"PRIu8": "
				  "pid=%d, available=%d",
				  worker_index,
				  (int)worker->pid,
				  (int)worker->available);
			/*
			 * Pick a random one for a proper bind nack
			 */
			worker = rpc_host_find_worker(server);
		}
	} else {
		struct auth_session_info_transport *session_info =
			pending_client->client->npa_info7->session_info;
		uint32_t flags = 0;
		bool found;

		found = security_token_find_npa_flags(
			session_info->session_info->security_token,
			&flags);

		/* fresh assoc group requested */
		if (found && (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
			worker = rpc_host_find_idle_worker(server);
		} else {
			worker = rpc_host_find_worker(server);
		}
	}

	if (worker == NULL) {
		DBG_DEBUG("No worker found\n");
		return;
	}

	DLIST_REMOVE(server->pending_clients, pending_client);

	ndr_err = ndr_push_struct_blob(
		&blob,
		pending_client,
		pending_client->client,
		(ndr_push_flags_fn_t)ndr_push_rpc_host_client);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
			    ndr_errstr(ndr_err));
		goto done;
	}

	DBG_INFO("Sending new client %s to %d with %"PRIu32" clients\n",
		 server->rpc_server_exe,
		 (int)worker->pid,
		 worker->num_clients);

	iov = (struct iovec) {
		.iov_base = blob.data, .iov_len = blob.length,
	};

	status = messaging_send_iov(
		server->host->msg_ctx,
		pid_to_procid(worker->pid),
		MSG_RPC_HOST_NEW_CLIENT,
		&iov,
		1,
		&pending_client->sock,
		1);
	if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
		DBG_DEBUG("worker %d died, sigchld not yet received?\n",
			  (int)worker->pid);
		DLIST_ADD(server->pending_clients, pending_client);
		worker->available = false;
		goto again;
	}
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("messaging_send_iov failed: %s\n",
			  nt_errstr(status));
		goto done;
	}
	worker->num_clients += 1;
	TALLOC_FREE(worker->exit_timer);

	TALLOC_FREE(server->host->np_helper_shutdown);

done:
	TALLOC_FREE(pending_client);
	goto again;
}
static int rpc_host_pending_client_destructor(
	struct rpc_host_pending_client *p)
{
	TALLOC_FREE(p->hangup_wait);
	if (p->sock != -1) {
		close(p->sock);
		p->sock = -1;
	}
	DLIST_REMOVE(p->server->pending_clients, p);
	return 0;
}
/*
 * Exception condition handler before the rpcd_* worker
 * is handling the socket. Either the client exited or
 * sent unexpected data after the initial bind.
 */
static void rpc_host_client_exited(struct tevent_req *subreq)
{
	struct rpc_host_pending_client *pending = tevent_req_callback_data(
		subreq, struct rpc_host_pending_client);
	bool ok;
	int err;

	ok = wait_for_read_recv(subreq, &err);

	TALLOC_FREE(subreq);
	pending->hangup_wait = NULL;

	if (ok) {
		DBG_DEBUG("client on sock %d sent data\n", pending->sock);
	} else {
		DBG_DEBUG("client exited with %s\n", strerror(err));
	}
	TALLOC_FREE(pending);
}
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	char *bindings;
};
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1,
	};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into the epm database.
			 */
			continue;
		}

		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}
static bool rpc_iface_binding_map_add_endpoints(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pbinding_maps)
{
	size_t i, num_endpoints = talloc_array_length(endpoints);

	for (i=0; i<num_endpoints; i++) {
		bool ok = rpc_iface_binding_map_add_endpoint(
			mem_ctx, endpoints[i], iface_names, pbinding_maps);
		if (!ok) {
			return false;
		}
	}
	return true;
}
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool ret = false;
	bool ok;

	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int rc;

		rc = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	ret = true;
fail:
	TALLOC_FREE(maps);
	return ret;
}
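/*
 * Resulting epmdb.tdb record layout, sketched for one interface (the
 * record contents shown are illustrative, not dumped from a real
 * database): the key is the interface syntax id as a 0-terminated
 * string, the value a strv (concatenated 0-terminated strings)
 * starting with the interface name followed by its binding strings:
 *
 *   key:   "338cd001-2244-31f1-aaaa-900038001003/0x00000001"
 *   value: "winreg\0ncacn_np:[\pipe\winreg]\0ncalrpc:[WINREG]\0"
 */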
struct rpc_server_setup_state {
	struct rpc_server *server;
};

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	*server = (struct rpc_server) {
		.host = host,
		.server_index = UINT32_MAX,
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			continue;
		}
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	*server = talloc_move(mem_ctx, &state->server);
	tevent_req_received(req);
	return NT_STATUS_OK;
}
/*
 * rpcd_* died. Called from the SIGCHLD handler.
 */
static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
{
	size_t i, num_servers = talloc_array_length(host->servers);
	struct rpc_work_process *worker = NULL;
	bool found_pid = false;
	bool have_active_worker = false;

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = host->servers[i];
		size_t j, num_workers;

		if (server == NULL) {
			/* SIGCHLD for --list-interfaces run */
			continue;
		}

		num_workers = talloc_array_length(server->workers);

		for (j=0; j<num_workers; j++) {
			worker = &server->workers[j];
			if (worker->pid == pid) {
				found_pid = true;
				worker->pid = -1;
				worker->available = false;
			}

			if (worker->pid != -1) {
				have_active_worker = true;
			}
		}
	}

	if (!found_pid) {
		DBG_WARNING("No worker with PID %d\n", (int)pid);
		return;
	}

	if (!have_active_worker && host->np_helper) {
		/*
		 * We have nothing left to do as an np_helper.
		 * Terminate ourselves (samba-dcerpcd). We will
		 * be restarted on demand anyway.
		 */
		DBG_DEBUG("Exiting idle np helper\n");
		exit(0);
	}
}
static void rpc_host_sigchld(
	struct tevent_context *ev,
	struct tevent_signal *se,
	int signum,
	int count,
	void *siginfo,
	void *private_data)
{
	struct rpc_host *state = talloc_get_type_abort(
		private_data, struct rpc_host);
	pid_t pid;
	int wstatus;

	while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
		DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
		rpc_worker_exited(state, pid);
	}
}
/*
 * Idle timer fired for an rpcd_* worker. Ask it to terminate.
 */
static void rpc_host_exit_worker(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct rpc_server *server = talloc_get_type_abort(
		private_data, struct rpc_server);
	size_t i, num_workers = talloc_array_length(server->workers);

	/*
	 * Scan for the right worker. We don't have too many of those,
	 * and maintaining an index would be more data structure effort.
	 */

	for (i=0; i<num_workers; i++) {
		struct rpc_work_process *w = &server->workers[i];
		NTSTATUS status;

		if (w->exit_timer != te) {
			continue;
		}
		w->exit_timer = NULL;

		SMB_ASSERT(w->num_clients == 0);

		status = messaging_send(
			server->host->msg_ctx,
			pid_to_procid(w->pid),
			MSG_SHUTDOWN,
			NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
				  nt_errstr(status));
		}

		w->available = false;
		break;
	}
}
/*
 * rpcd_* worker replied with its status.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_clients = status_message.num_clients;

	if (worker->num_clients != 0) {
		TALLOC_FREE(worker->exit_timer);
	} else {
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	rpc_host_distribute_clients(server);
}
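/*
 * To summarize the host <-> worker messaging visible in this file:
 * the host hands a client over with MSG_RPC_HOST_NEW_CLIENT (an
 * ndr-pushed struct rpc_host_client plus the client socket as a
 * passed fd), workers report back with MSG_RPC_WORKER_STATUS carrying
 * their server/worker index and num_clients, and idle workers are
 * asked to exit via MSG_SHUTDOWN from the timer above.
 */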
/*
 * samba-dcerpcd has been asked to shut down.
 * Mark the initial tevent_req as done so we
 * exit the event loop.
 */
static void rpc_host_msg_shutdown(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	tevent_req_done(req);
}
/*
 * Only match directory entries starting in rpcd_
 */
static int rpcd_filter(const struct dirent *d)
{
	int match = fnmatch("rpcd_*", d->d_name, 0);
	return (match == 0) ? 1 : 0;
}
/*
 * Scan the given libexecdir for rpcd_* services
 * and return them as a strv list.
 */
static int rpc_host_list_servers(
	const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
{
	char *servers = NULL;
	struct dirent **namelist = NULL;
	int i, num_servers;
	int ret = ENOMEM;

	num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
	if (num_servers == -1) {
		DBG_DEBUG("scandir failed: %s\n", strerror(errno));
		return errno;
	}

	for (i=0; i<num_servers; i++) {
		char *exe = talloc_asprintf(
			mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
		if (exe == NULL) {
			goto fail;
		}

		ret = strv_add(mem_ctx, &servers, exe);
		TALLOC_FREE(exe);
		if (ret != 0) {
			goto fail;
		}
	}

	ret = 0;
fail:
	for (i=0; i<num_servers; i++) {
		SAFE_FREE(namelist[i]);
	}
	SAFE_FREE(namelist);

	if (ret != 0) {
		TALLOC_FREE(servers);
		return ret;
	}

	*pservers = servers;
	return 0;
}
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;
	struct rpc_host_endpoint *endpoint;
};

static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
/*
 * Asynchronously wait for a DCERPC connection from a client.
 */
static struct tevent_req *rpc_host_endpoint_accept_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host_endpoint *endpoint)
{
	struct tevent_req *req = NULL;
	struct rpc_host_endpoint_accept_state *state = NULL;
	size_t i;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_endpoint_accept_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->endpoint = endpoint;

	for (i=0; i<endpoint->num_fds; i++) {
		struct tevent_req *subreq = NULL;

		subreq = accept_send(state, ev, endpoint->fds[i]);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_endpoint_accept_accepted, req);
	}

	return req;
}
/*
 * Accept a DCERPC connection from a client.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	/* Re-arm the accept for the next client. */
	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}
/*
 * Client sent us a DCERPC bind packet.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int sock = -1;
	int ret;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	sock = -1;

	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
static int rpc_host_endpoint_accept_recv(
	struct tevent_req *req, struct rpc_host_endpoint **ep)
{
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);

	*ep = state->endpoint;

	return tevent_req_simple_recv_unix(req);
}
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	bool is_ready;
	const char *daemon_ready_progname;
	struct tevent_immediate *ready_signal_immediate;
	int *ready_signal_fds;

	size_t num_prepared;
};
/*
 * Tell whoever invoked samba-dcerpcd we're ready to
 * serve.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		do {
			nwritten = write(
				state->ready_signal_fds[i],
				&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	TALLOC_FREE(state->ready_signal_fds);
}
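/*
 * A sketch of the readiness contract as implied by the
 * "--ready-signal-fd" option description in main(): each fd in
 * ready_signal_fds is the write end of a pipe the invoking process
 * (smbd, winbind or a second samba-dcerpcd that lost the pidfile
 * race) waits on. Writing the single zero byte above and closing the
 * fd is what unblocks that waiter. Fds arrive either on our command
 * line or via MSG_DAEMON_READY_FD, handled just below.
 */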
/*
 * Respond to an "are you ready" message.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	if (num_fds + 1 < num_fds) {
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->num_fds = 0;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}
/*
 * Respond to a "what is your status" message.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;
	int fd;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	fd = dup(rec->fds[0]);
	if (fd == -1) {
		DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
			  rec->fds[0],
			  strerror(errno));
		return false;
	}

	f = fdopen(fd, "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		close(fd);
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_clients=%"PRIu32
				"\n",
				j,
				(int)w->pid,
				w->num_clients);
		}
	}

	fclose(f);

	return false;
}
static void rpc_host_server_setup_done(struct tevent_req *subreq);
static void rpc_host_endpoint_failed(struct tevent_req *subreq);
/*
 * Async startup for samba-dcerpcd.
 */
static struct tevent_req *rpc_host_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct messaging_context *msg_ctx,
	char *servers,
	int ready_signal_fd,
	const char *daemon_ready_progname,
	bool is_np_helper)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_state *state = NULL;
	struct rpc_host *host = NULL;
	struct tevent_signal *se = NULL;
	char *epmdb_path = NULL;
	const char *exe = NULL;
	size_t i, num_servers = strv_count(servers);
	NTSTATUS status;
	int ret;

	req = tevent_req_create(mem_ctx, &state, struct rpc_host_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->daemon_ready_progname = daemon_ready_progname;

	state->ready_signal_immediate = tevent_create_immediate(state);
	if (tevent_req_nomem(state->ready_signal_immediate, req)) {
		return tevent_req_post(req, ev);
	}

	if (ready_signal_fd != -1) {
		state->ready_signal_fds = talloc_array(state, int, 1);
		if (tevent_req_nomem(state->ready_signal_fds, req)) {
			return tevent_req_post(req, ev);
		}
		state->ready_signal_fds[0] = ready_signal_fd;
	}

	state->host = talloc_zero(state, struct rpc_host);
	if (tevent_req_nomem(state->host, req)) {
		return tevent_req_post(req, ev);
	}
	host = state->host;

	host->msg_ctx = msg_ctx;
	host->np_helper = is_np_helper;

	ret = pipe(host->worker_stdin);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}

	host->servers = talloc_zero_array(
		host, struct rpc_server *, num_servers);
	if (tevent_req_nomem(host->servers, req)) {
		return tevent_req_post(req, ev);
	}

	se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
	if (tevent_req_nomem(se, req)) {
		return tevent_req_post(req, ev);
	}
	BlockSignals(false, SIGCHLD);

	status = messaging_register(
		msg_ctx,
		host,
		MSG_RPC_WORKER_STATUS,
		rpc_host_child_status_recv);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	status = messaging_register(
		msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_dump_status_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	epmdb_path = lock_path(state, "epmdb.tdb");
	if (tevent_req_nomem(epmdb_path, req)) {
		return tevent_req_post(req, ev);
	}

	host->epmdb = tdb_wrap_open(
		host,
		epmdb_path,
		0,
		TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
		O_RDWR|O_CREAT,
		0644);
	if (host->epmdb == NULL) {
		DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
			  epmdb_path,
			  strerror(errno));
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}
	TALLOC_FREE(epmdb_path);

	for (exe = strv_next(servers, exe), i = 0;
	     exe != NULL;
	     exe = strv_next(servers, exe), i++) {

		DBG_DEBUG("server_setup for %s index %zu\n", exe, i);

		subreq = rpc_server_setup_send(
			state,
			ev,
			host,
			exe);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_server_setup_done, req);
	}

	return req;
}
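/*
 * Startup ordering worth noting above: the SIGCHLD handler and the
 * worker status/shutdown message handlers are registered before the
 * first rpc_server_setup_send() forks "rpcd_* --list-interfaces"
 * children, so no child exit can be missed, and epmdb.tdb is opened
 * before any endpoint data is written to it.
 */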
/*
 * Timer function called after we were initialized but no one
 * connected. Shutdown.
 */
static void rpc_host_shutdown(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	DBG_DEBUG("Nobody connected -- shutting down\n");
	tevent_req_done(req);
}
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		return;
	}

	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
/*
 * Log an accept failure on an endpoint.
 */
static void rpc_host_endpoint_failed(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_host_endpoint *endpoint = NULL;
	char *binding_string = NULL;
	int ret;

	ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
	TALLOC_FREE(subreq);

	binding_string = dcerpc_binding_string(state, endpoint->binding);
	DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
		  binding_string,
		  strerror(ret));
	TALLOC_FREE(binding_string);
}
static NTSTATUS rpc_host_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
static int rpc_host_pidfile_create(
	struct messaging_context *msg_ctx,
	const char *progname,
	int ready_signal_fd)
{
	const char *piddir = lp_pid_directory();
	size_t len = strlen(piddir) + strlen(progname) + 6;
	char pidFile[len];
	pid_t existing_pid;
	int fd, ret;

	snprintf(pidFile, sizeof(pidFile), "%s/%s.pid", piddir, progname);

	ret = pidfile_path_create(pidFile, &fd, &existing_pid);
	if (ret == 0) {
		/* leave the fd open for the lifetime of this process */
		return 0;
	}

	if (ret != EAGAIN) {
		DBG_DEBUG("pidfile_path_create() failed: %s\n",
			  strerror(ret));
		return ret;
	}

	DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);

	if (ready_signal_fd != -1) {
		NTSTATUS status = messaging_send_iov(
			msg_ctx,
			pid_to_procid(existing_pid),
			MSG_DAEMON_READY_FD,
			NULL,
			0,
			&ready_signal_fd,
			1);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send ready_signal_fd: %s\n",
				  nt_errstr(status));
		}
	}

	return EAGAIN;
}
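/*
 * The EAGAIN path above is what makes concurrent on-demand starts
 * safe: when a second samba-dcerpcd loses the pidfile race, it
 * forwards its --ready-signal-fd to the already-running instance via
 * MSG_DAEMON_READY_FD (see rpc_host_ready_signal_filter()), so the
 * waiting smbd/winbind still gets its readiness byte while the loser
 * simply exits.
 */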
static void samba_dcerpcd_stdin_handler(
	struct tevent_context *ev,
	struct tevent_fd *fde,
	uint16_t flags,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	char c;

	if (read(0, &c, 1) != 1) {
		/*
		 * We have reached EOF on stdin, which means the
		 * parent has exited. Shut down the server.
		 */
		tevent_req_done(req);
	}
}
/*
 * samba-dcerpcd startup.
 */
int main(int argc, const char *argv[])
{
	const struct loadparm_substitution *lp_sub =
		loadparm_s3_global_substitution();
	const char *progname = getprogname();
	TALLOC_CTX *frame = NULL;
	struct tevent_context *ev_ctx = NULL;
	struct messaging_context *msg_ctx = NULL;
	struct tevent_req *req = NULL;
	char *servers = NULL;
	const char *arg = NULL;
	size_t num_servers;
	poptContext pc;
	int ret, err;
	NTSTATUS status;
	bool log_stdout;
	bool ok;

	int libexec_rpcds = 0;
	int np_helper = 0;
	int ready_signal_fd = -1;

	struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
	struct poptOption long_options[] = {
		POPT_AUTOHELP
		{
			.longName = "libexec-rpcds",
			.argInfo = POPT_ARG_NONE,
			.arg = &libexec_rpcds,
			.descrip = "Use all rpcds in libexec",
		},
		{
			.longName = "ready-signal-fd",
			.argInfo = POPT_ARG_INT,
			.arg = &ready_signal_fd,
			.descrip = "fd to close when initialized",
		},
		{
			.longName = "np-helper",
			.argInfo = POPT_ARG_NONE,
			.arg = &np_helper,
			.descrip = "Internal named pipe server",
		},
		POPT_COMMON_SAMBA
		POPT_COMMON_DAEMON
		POPT_TABLEEND
	};

	{
		const char *fd_params[] = { "ready-signal-fd", };

		closefrom_except_fd_params(
			3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
	}

	talloc_enable_null_tracking();
	frame = talloc_stackframe();

	ok = samba_cmdline_init(frame,
				SAMBA_CMDLINE_CONFIG_SERVER,
				true /* require_smbconf */);
	if (!ok) {
		DBG_ERR("Failed to init cmdline parser!\n");
		TALLOC_FREE(frame);
		exit(ENOMEM);
	}

	pc = samba_popt_get_context(getprogname(),
				    argc,
				    argv,
				    long_options,
				    0);
	if (pc == NULL) {
		DBG_ERR("Failed to setup popt context!\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	poptSetOtherOptionHelp(
		pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");

	ret = poptGetNextOpt(pc);

	if (ret != -1) {
		if (ret >= 0) {
			fprintf(stderr,
				"\nGot unexpected option %d\n",
				ret);
		} else if (ret == POPT_ERROR_BADOPT) {
			fprintf(stderr,
				"\nInvalid option %s: %s\n\n",
				poptBadOption(pc, 0),
				poptStrerror(ret));
		} else {
			fprintf(stderr,
				"\npoptGetNextOpt returned %s\n",
				poptStrerror(ret));
		}

		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	while ((arg = poptGetArg(pc)) != NULL) {
		ret = strv_add(frame, &servers, arg);
		if (ret != 0) {
			DBG_ERR("strv_add() failed\n");
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
	if (log_stdout) {
		setup_logging(progname, DEBUG_STDOUT);
	} else {
		setup_logging(progname, DEBUG_FILE);
	}

	/*
	 * If "rpc start on demand helpers = true" in smb.conf we must
	 * not start as standalone, only on demand from
	 * local_np_connect() functions. Log an error message telling
	 * the admin how to fix this, then exit.
	 */
	if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
		DBG_ERR("Cannot start in standalone mode if smb.conf "
			"\"rpc start on demand helpers = true\" - "
			"exiting\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	if (libexec_rpcds != 0) {
		ret = rpc_host_list_servers(
			dyn_SAMBA_LIBEXECDIR, frame, &servers);
		if (ret != 0) {
			DBG_ERR("Could not list libexec: %s\n",
				strerror(ret));
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	num_servers = strv_count(servers);
	if (num_servers == 0) {
		poptPrintUsage(pc, stderr, 0);
		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	poptFreeContext(pc);

	cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();

	if (log_stdout && cmdline_daemon_cfg->fork) {
		DBG_ERR("Can't log to stdout unless in foreground\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	msg_ctx = global_messaging_context();
	if (msg_ctx == NULL) {
		DBG_ERR("messaging_init() failed\n");
		TALLOC_FREE(frame);
		exit(1);
	}
	ev_ctx = messaging_tevent_context(msg_ctx);

	if (cmdline_daemon_cfg->fork) {
		become_daemon(
			true,
			cmdline_daemon_cfg->no_process_group,
			log_stdout);

		status = reinit_after_fork(msg_ctx, ev_ctx, false);
		if (!NT_STATUS_IS_OK(status)) {
			exit_daemon("reinit_after_fork() failed",
				    map_errno_from_nt_status(status));
		}
	} else {
		DBG_DEBUG("Calling daemon_status\n");
		daemon_status(progname, "Starting process ... ");
	}

	BlockSignals(true, SIGPIPE);

	dump_core_setup(progname, lp_logfile(frame, lp_sub));

	DEBUG(0, ("%s version %s started.\n",
		  progname,
		  samba_version_string()));
	DEBUGADD(0, ("%s\n", COPYRIGHT_STARTUP_MESSAGE));

	(void)winbind_off();
	ok = init_guest_session_info(frame);
	(void)winbind_on();
	if (!ok) {
		DBG_ERR("init_guest_session_info failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
			  strerror(ret));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	req = rpc_host_send(
		ev_ctx,
		ev_ctx,
		msg_ctx,
		servers,
		ready_signal_fd,
		cmdline_daemon_cfg->fork ? NULL : progname,
		np_helper != 0);
	if (req == NULL) {
		DBG_ERR("rpc_host_send failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	if (!cmdline_daemon_cfg->fork) {
		struct stat st;

		if (fstat(0, &st) != 0) {
			DBG_DEBUG("fstat(0) failed: %s\n",
				  strerror(errno));
			global_messaging_context_free();
			TALLOC_FREE(frame);
			exit(1);
		}
		if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
			tevent_add_fd(
				ev_ctx,
				ev_ctx,
				0,
				TEVENT_FD_READ,
				samba_dcerpcd_stdin_handler,
				req);
		}
	}

	ok = tevent_req_poll_unix(req, ev_ctx, &err);
	if (!ok) {
		DBG_ERR("tevent_req_poll_unix failed: %s\n",
			strerror(err));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	status = rpc_host_recv(req);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	TALLOC_FREE(frame);
	return 0;
}
2914 global_messaging_context_free();