/*
 * Implements samba-dcerpcd service.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*
 * This binary has two usage modes:
 *
 * In the normal case when invoked from smbd or winbind it is given a
 * directory to scan via --libexec-rpcds and will invoke on demand any
 * binaries it finds there starting with rpcd_ when a named pipe
 * connection is requested.
 *
 * In the second mode it can be started explicitly from system startup
 * scripts.
 *
 * When Samba is set up as an Active Directory Domain Controller the
 * normal samba binary overrides and provides DCERPC services, whilst
 * allowing samba-dcerpcd to provide the services that smbd used to
 * provide in that set-up, such as SRVSVC.
 *
 * The second mode can also be useful for use outside of the Samba
 * framework, for example, use with the Linux kernel SMB2 server ksmbd.
 * In this mode it behaves like inetd and listens on sockets on behalf
 * of RPC server implementations.
 */
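
/*
 * Illustrative invocations for the two modes (a sketch; the popt option
 * table in main() below is authoritative, the paths are made up):
 *
 *   # on-demand helper, as spawned by smbd/winbindd:
 *   samba-dcerpcd --libexec-rpcds --np-helper --ready-signal-fd=5
 *
 *   # standalone, serving explicitly listed rpcd binaries:
 *   samba-dcerpcd /usr/lib/samba/rpcd_winreg /usr/lib/samba/rpcd_srvsvc
 */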
#include "lib/cmdline/cmdline.h"
#include "lib/cmdline/closefrom_except.h"
#include "source3/include/includes.h"
#include "source3/include/auth.h"
#include "rpc_sock_helper.h"
#include "lib/util_file.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/util/smb_strtox.h"
#include "lib/util/debug.h"
#include "lib/util/server_id.h"
#include "lib/util/util_tdb.h"
#include "lib/util/util_file.h"
#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/async_req/async_sock.h"
#include "librpc/rpc/dcerpc_util.h"
#include "lib/tsocket/tsocket.h"
#include "libcli/named_pipe_auth/npa_tstream.h"
#include "librpc/gen_ndr/ndr_rpc_host.h"
#include "source3/param/loadparm.h"
#include "source3/lib/global_contexts.h"
#include "lib/util/strv.h"
#include "lib/util/pidfile.h"
#include "source3/rpc_client/cli_pipe.h"
#include "librpc/gen_ndr/ndr_epmapper.h"
#include "librpc/gen_ndr/ndr_epmapper_c.h"
#include "nsswitch/winbind_client.h"
#include "libcli/security/dom_sid.h"
#include "libcli/security/security_token.h"
extern bool override_logfile;

struct rpc_work_process;
/*
 * samba-dcerpcd state to keep track of rpcd_* servers.
 */
struct rpc_host {
	struct messaging_context *msg_ctx;
	struct rpc_server **servers;
	struct tdb_wrap *epmdb;

	int worker_stdin[2];

	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
/*
 * Map a RPC interface to a name. Used when filling the endpoint
 * mapper database.
 */
struct rpc_host_iface_name {
	struct ndr_syntax_id iface;
	char *name;
};
/*
 * rpc_host representation for listening sockets. ncacn_ip_tcp might
 * listen on multiple explicit IPs, all with the same port.
 */
struct rpc_host_endpoint {
	struct rpc_server *server;
	struct dcerpc_binding *binding;
	struct ndr_syntax_id *interfaces;
	int *fds;
	size_t num_fds;
};
/*
 * Staging area until we sent the socket plus bind to the helper
 */
struct rpc_host_pending_client {
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};
/*
 * Representation of one worker process. For each rpcd_* executable
 * there will be one or more of these.
 */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_associations;
	uint32_t num_connections;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
/*
 * State for a set of running instances of an rpcd_* server executable
 */
struct rpc_server {
	struct rpc_host *host;
	/*
	 * Index into the rpc_host->servers array
	 */
	uint32_t server_index;

	const char *rpc_server_exe;

	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info8->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	struct rpc_host_pending_client *pending_clients;
};
struct rpc_server_get_endpoints_state {
	char **argl;
	char *ncalrpc_endpoint;
	enum dcerpc_transport_t only_transport;

	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	unsigned long num_workers;
	unsigned long idle_seconds;
};

static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
/**
 * @brief Query interfaces from an rpcd helper
 *
 * Spawn a rpcd helper, ask it for the interfaces it serves via
 * --list-interfaces, parse the output
 *
 * @param[in] mem_ctx Memory context for the tevent_req
 * @param[in] ev Event context to run this on
 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
 * @param[in] only_transport Filter out anything but this
 * @return The tevent_req representing this process
 */
static struct tevent_req *rpc_server_get_endpoints_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	const char *rpc_server_exe,
	enum dcerpc_transport_t only_transport)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_get_endpoints_state *state = NULL;
	const char *progname = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_get_endpoints_state);
	if (req == NULL) {
		return NULL;
	}
	state->only_transport = only_transport;

	progname = strrchr(rpc_server_exe, '/');
	if (progname != NULL) {
		progname += 1;
	} else {
		progname = rpc_server_exe;
	}

	state->ncalrpc_endpoint = talloc_strdup(state, progname);
	if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
		return tevent_req_post(req, ev);
	}

	state->argl = str_list_make_empty(state);
	str_list_add_printf(&state->argl, "%s", rpc_server_exe);
	str_list_add_printf(&state->argl, "--list-interfaces");
	str_list_add_printf(
		&state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
	if (tevent_req_nomem(state->argl, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = file_ploadv_send(state, ev, state->argl, 65536);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
	return req;
}
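
/*
 * The helper invocation assembled above looks roughly like this
 * (illustrative paths):
 *
 *   /usr/lib/samba/rpcd_winreg --list-interfaces \
 *           --configfile=/etc/samba/smb.conf
 *
 * file_ploadv_send() runs it and captures up to 64k of its stdout,
 * which rpc_server_get_endpoints_done() parses below.
 */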
/*
 * Parse a line of format
 *
 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *
 * and add it to the "piface_names" array.
 */
static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_iface_name **piface_names,
	char *line)
{
	struct rpc_host_iface_name *iface_names = *piface_names;
	struct rpc_host_iface_name *tmp = NULL, *result = NULL;
	size_t i, num_ifaces = talloc_array_length(iface_names);
	struct ndr_syntax_id iface;
	char *name = NULL;
	bool ok;

	ok = ndr_syntax_id_from_string(line, &iface);
	if (!ok) {
		DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
			    line);
		return NULL;
	}

	name = strchr(line, ' ');
	if (name == NULL) {
		return NULL;
	}
	name += 1;

	for (i=0; i<num_ifaces; i++) {
		result = &iface_names[i];

		if (ndr_syntax_id_equal(&result->iface, &iface)) {
			return result;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return NULL;
	}

	name = talloc_strdup(mem_ctx, name);
	if (name == NULL) {
		return NULL;
	}

	tmp = talloc_realloc(
		mem_ctx,
		iface_names,
		struct rpc_host_iface_name,
		num_ifaces + 1);
	if (tmp == NULL) {
		TALLOC_FREE(name);
		return NULL;
	}
	iface_names = tmp;

	result = &iface_names[num_ifaces];

	*result = (struct rpc_host_iface_name) {
		.iface = iface,
		.name = talloc_move(iface_names, &name),
	};

	*piface_names = iface_names;

	return result;
}
static struct rpc_host_iface_name *rpc_host_iface_names_find(
	struct rpc_host_iface_name *iface_names,
	const struct ndr_syntax_id *iface)
{
	size_t i, num_iface_names = talloc_array_length(iface_names);

	for (i=0; i<num_iface_names; i++) {
		struct rpc_host_iface_name *iface_name = &iface_names[i];

		if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
			return iface_name;
		}
	}

	return NULL;
}
static bool dcerpc_binding_same_endpoint(
	const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
{
	enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
	enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
	const char *e1 = NULL, *e2 = NULL;
	int cmp;

	if (t1 != t2) {
		return false;
	}

	e1 = dcerpc_binding_get_string_option(b1, "endpoint");
	e2 = dcerpc_binding_get_string_option(b2, "endpoint");

	if ((e1 == NULL) && (e2 == NULL)) {
		return true;
	}
	if ((e1 == NULL) || (e2 == NULL)) {
		return false;
	}
	cmp = strcmp(e1, e2);
	return (cmp == 0);
}
/**
 * @brief Filter whether we want to serve an endpoint
 *
 * samba-dcerpcd might want to serve all endpoints a rpcd reported to
 * us via --list-interfaces.
 *
 * In member mode, we only serve named pipes. Indicated by NCACN_NP
 * passed in via "only_transport".
 *
 * @param[in] binding Which binding is in question?
 * @param[in] only_transport Exclusive transport to serve
 * @return Do we want to serve "binding" from samba-dcerpcd?
 */
static bool rpc_host_serve_endpoint(
	struct dcerpc_binding *binding,
	enum dcerpc_transport_t only_transport)
{
	enum dcerpc_transport_t transport =
		dcerpc_binding_get_transport(binding);

	if (only_transport == NCA_UNKNOWN) {
		/* no filter around */
		return true;
	}

	if (transport != only_transport) {
		/* filter out */
		return false;
	}

	return true;
}
static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	for (i=0; i<num_endpoints; i++) {
		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}
static bool ndr_interfaces_add_unique(
	TALLOC_CTX *mem_ctx,
	struct ndr_syntax_id **pifaces,
	const struct ndr_syntax_id *iface)
{
	struct ndr_syntax_id *ifaces = *pifaces;
	size_t i, num_ifaces = talloc_array_length(ifaces);

	for (i=0; i<num_ifaces; i++) {
		if (ndr_syntax_id_equal(iface, &ifaces[i])) {
			return true;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return false;
	}

	ifaces = talloc_realloc(
		mem_ctx,
		ifaces,
		struct ndr_syntax_id,
		num_ifaces + 1);
	if (ifaces == NULL) {
		return false;
	}
	ifaces[num_ifaces] = *iface;

	*pifaces = ifaces;
	return true;
}
/*
 * Read the text reply from the rpcd_* process telling us what
 * endpoints it will serve when asked with --list-interfaces.
 */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}
	/*
	 * We need to limit the number of workers in order
	 * to put the worker index into a 16-bit space,
	 * in order to use a 16-bit association group space
	 * per worker.
	 */
	state->num_workers = MIN(state->num_workers, UINT16_MAX);

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}
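
/*
 * Sketch of the --list-interfaces reply parsed above (values made up):
 *
 *   2
 *   60
 *   338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *    ncacn_np:[\pipe\winreg]
 *    ncalrpc:
 *
 * Line 1 is the worker count, line 2 the idle timeout in seconds.
 * Unindented lines announce an interface, the space-indented lines
 * below each one list the endpoint binding strings serving it.
 */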
/**
 * @brief Receive output from --list-interfaces
 *
 * @param[in] req The async req that just finished
 * @param[in] mem_ctx Where to put the output on
 * @param[out] endpoints The endpoints to be listened on
 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
 * @param[out] num_workers How many workers to spawn
 * @param[out] idle_seconds Timeout for shutting down idle workers
 * @return 0/errno
 */
static int rpc_server_get_endpoints_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint ***endpoints,
	struct rpc_host_iface_name **iface_names,
	size_t *num_workers,
	size_t *idle_seconds)
{
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*endpoints = talloc_move(mem_ctx, &state->endpoints);
	*iface_names = talloc_move(mem_ctx, &state->iface_names);
	*num_workers = state->num_workers;
	*idle_seconds = state->idle_seconds;
	tevent_req_received(req);
	return 0;
}
/*
 * For NCACN_NP we get the named pipe auth info from smbd, if a client
 * comes in via TCP or NCALRPC we need to invent it ourselves with
 * anonymous session info.
 */
static NTSTATUS rpc_host_generate_npa_info8_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info8 **pinfo8)
{
	struct named_pipe_auth_req_info8 *info8 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
	if (info8 == NULL) {
		goto nomem;
	}
	info8->session_info =
		talloc_zero(info8, struct auth_session_info_transport);
	if (info8->session_info == NULL) {
		goto nomem;
	}

	status = make_session_info_anonymous(
		info8->session_info,
		&info8->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	remote_client_name = talloc_strdup(info8, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info8->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info8);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info8, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info8->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {
			/*
			 * Indicate "root" to gensec
			 */
			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info8,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info8);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info8, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info8->remote_client_addr = remote_client_addr;
	info8->remote_client_name = remote_client_name;
	info8->local_server_addr = local_server_addr;
	info8->local_server_name = local_server_name;

	*pinfo8 = info8;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	TALLOC_FREE(info8);
	return status;
}
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	int sock;
	struct tstream_context *plain;
	struct tstream_context *npa_stream;

	struct ncacn_packet *pkt;
	struct rpc_host_client *client;
};

static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
/*
 * Wait for a bind packet from a client.
 */
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	state->sock = *psock;
	*psock = -1;

	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		close(sock_dup);
		return tevent_req_post(req, ev);
	}
	/* as server we want to fail early */
	tstream_bsd_fail_readv_first_error(state->plain, true);

	if (transport == NCACN_NP) {
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	status = rpc_host_generate_npa_info8_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info8);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}
static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);

	if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
		close(state->sock);
		state->sock = -1;
	}
}
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info8 *info8 = NULL;
	int ret, err;

	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info8,
					       NULL, /* transport */
					       NULL, /* remote_client_addr */
					       NULL, /* remote_client_name */
					       NULL, /* local_server_addr */
					       NULL, /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	state->client->npa_info8 = talloc_move(state->client, &info8);

	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}
static int rpc_host_bind_read_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	int *sock,
	struct rpc_host_client **client,
	struct ncacn_packet **bind_pkt)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*sock = state->sock;
	state->sock = -1;

	*client = talloc_move(mem_ctx, &state->client);
	*bind_pkt = talloc_move(mem_ctx, &state->pkt);
	tevent_req_received(req);
	return 0;
}
/*
 * Start the given rpcd_* binary.
 */
static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
{
	struct rpc_work_process *worker = &server->workers[idx];
	char **argv = NULL;
	int ret = ENOMEM;

	argv = str_list_make_empty(server);
	str_list_add_printf(
		&argv, "%s", server->rpc_server_exe);
	str_list_add_printf(
		&argv, "--configfile=%s", get_dyn_CONFIGFILE());
	str_list_add_printf(
		&argv, "--worker-group=%"PRIu32, server->server_index);
	str_list_add_printf(
		&argv, "--worker-index=%zu", idx);
	str_list_add_printf(
		&argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
	if (!is_default_dyn_LOGFILEBASE()) {
		str_list_add_printf(
			&argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
	}
	if (argv == NULL) {
		goto fail;
	}

	worker->pid = fork();
	if (worker->pid == -1) {
		ret = errno;
		goto fail;
	}
	if (worker->pid == 0) {
		/* Child. */
		close(server->host->worker_stdin[1]);
		ret = dup2(server->host->worker_stdin[0], 0);
		if (ret != 0) {
			exit(1);
		}
		execv(argv[0], argv);
		_exit(1);
	}

	DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
		  server->rpc_server_exe,
		  idx,
		  (int)worker->pid);

	ret = 0;
fail:
	TALLOC_FREE(argv);
	return ret;
}
/*
 * Find an rpcd_* worker for an external client, respect server->max_workers
 */
static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_work_process *perfect_worker = NULL;
	struct rpc_work_process *best_worker = NULL;
	size_t empty_slot = SIZE_MAX;
	size_t i;

	for (i=0; i<server->max_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}

		if (worker->num_associations == 0) {
			/*
			 * We have an idle worker...
			 */
			perfect_worker = worker;
			break;
		}

		if (best_worker == NULL) {
			/*
			 * It's busy, but the best so far...
			 */
			best_worker = worker;
			continue;
		}

		if (worker->num_associations < best_worker->num_associations) {
			/*
			 * It's also busy, but has fewer association groups
			 */
			best_worker = worker;
			continue;
		}

		if (worker->num_associations > best_worker->num_associations) {
			/*
			 * It's worse
			 */
			continue;
		}

		/*
		 * Ok, with the same number of association groups
		 * we pick the one with the lowest number of connections
		 */
		if (worker->num_connections < best_worker->num_connections) {
			best_worker = worker;
		}
	}

	if (perfect_worker != NULL) {
		return perfect_worker;
	}

	if (empty_slot < SIZE_MAX) {
		int ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	if (best_worker != NULL) {
		return best_worker;
	}

	return NULL;
}
/*
 * Find an rpcd_* worker for an internal connection, possibly go beyond
 * server->max_workers
 */
static struct rpc_work_process *rpc_host_find_idle_worker(
	struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL, *tmp = NULL;
	size_t i, num_workers = talloc_array_length(server->workers);
	size_t empty_slot = SIZE_MAX;
	int ret;

	for (i=server->max_workers; i<num_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}

		if (worker->num_associations == 0) {
			return &server->workers[i];
		}
	}

	if (empty_slot < SIZE_MAX) {
		ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	/*
	 * All workers are busy. We need to expand the number of
	 * workers because we were asked for an idle worker.
	 */
	if (num_workers >= UINT16_MAX) {
		/*
		 * The worker index would not fit into 16-bits
		 */
		return NULL;
	}
	tmp = talloc_realloc(
		server,
		server->workers,
		struct rpc_work_process,
		num_workers+1);
	if (tmp == NULL) {
		return NULL;
	}
	server->workers = tmp;

	server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };

	ret = rpc_host_exec_worker(server, num_workers);
	if (ret != 0) {
		DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
	}

	return NULL;
}
/*
 * Find an rpcd_* process to talk to. Start a new one if necessary.
 */
static void rpc_host_distribute_clients(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_host_pending_client *pending_client = NULL;
	uint32_t assoc_group_id;
	DATA_BLOB blob;
	struct iovec iov;
	enum ndr_err_code ndr_err;
	NTSTATUS status;
	const char *client_type = NULL;

again:
	pending_client = server->pending_clients;
	if (pending_client == NULL) {
		DBG_DEBUG("No pending clients\n");
		return;
	}

	assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;

	if (assoc_group_id != 0) {
		size_t num_workers = talloc_array_length(server->workers);
		uint16_t worker_index = assoc_group_id >> 16;

		client_type = "associated";

		if (worker_index >= num_workers) {
			DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
				  assoc_group_id);
			goto done;
		}
		worker = &server->workers[worker_index];

		if ((worker->pid == -1) || !worker->available) {
			DBG_DEBUG("Requested worker index %"PRIu16": "
				  "pid=%d, available=%d\n",
				  worker_index,
				  (int)worker->pid,
				  (int)worker->available);
			/*
			 * Pick a random one for a proper bind nack
			 */
			client_type = "associated+lost";
			worker = rpc_host_find_worker(server);
		}
	} else {
		struct auth_session_info_transport *session_info =
			pending_client->client->npa_info8->session_info;
		uint32_t flags = 0;
		bool found;

		client_type = "new";

		found = security_token_find_npa_flags(
			session_info->session_info->security_token,
			&flags);

		/* fresh assoc group requested */
		if (found && (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
			client_type = "new+exclusive";
			worker = rpc_host_find_idle_worker(server);
		} else {
			client_type = "new";
			worker = rpc_host_find_worker(server);
		}
	}

	if (worker == NULL) {
		DBG_DEBUG("No worker found for %s client\n", client_type);
		return;
	}

	DLIST_REMOVE(server->pending_clients, pending_client);

	ndr_err = ndr_push_struct_blob(
		&blob,
		pending_client,
		pending_client->client,
		(ndr_push_flags_fn_t)ndr_push_rpc_host_client);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
			    ndr_errstr(ndr_err));
		goto done;
	}

	DBG_INFO("Sending %s client %s to %d with "
		 "%"PRIu32" associations and %"PRIu32" connections\n",
		 client_type,
		 server->rpc_server_exe,
		 (int)worker->pid,
		 worker->num_associations,
		 worker->num_connections);

	iov = (struct iovec) {
		.iov_base = blob.data, .iov_len = blob.length,
	};

	status = messaging_send_iov(
		server->host->msg_ctx,
		pid_to_procid(worker->pid),
		MSG_RPC_HOST_NEW_CLIENT,
		&iov,
		1,
		&pending_client->sock,
		1);
	if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
		DBG_DEBUG("worker %d died, sigchld not yet received?\n",
			  (int)worker->pid);
		DLIST_ADD(server->pending_clients, pending_client);
		worker->available = false;
		goto again;
	}
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("messaging_send_iov failed: %s\n",
			  nt_errstr(status));
		goto done;
	}
	if (assoc_group_id == 0) {
		worker->num_associations += 1;
	}
	worker->num_connections += 1;
	TALLOC_FREE(worker->exit_timer);

	TALLOC_FREE(server->host->np_helper_shutdown);

done:
	TALLOC_FREE(pending_client);
}
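
/*
 * Sketch of the association group routing assumed above: the rpcd_*
 * worker that creates an association group encodes its own index into
 * the top 16 bits of the id it hands to the client, so a follow-up
 * connection carrying a nonzero assoc_group_id can be routed straight
 * back to the same process:
 *
 *   assoc_group_id = ((uint32_t)worker_index << 16) | local_id;
 *   worker_index   = assoc_group_id >> 16;
 *
 * "local_id" stands for the worker's own 16-bit group counter; the
 * actual id allocation happens inside the rpcd_* worker, not here.
 */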
static int rpc_host_pending_client_destructor(
	struct rpc_host_pending_client *p)
{
	TALLOC_FREE(p->hangup_wait);
	if (p->sock != -1) {
		close(p->sock);
		p->sock = -1;
	}
	DLIST_REMOVE(p->server->pending_clients, p);
	return 0;
}
/*
 * Exception condition handler before rpcd_* worker
 * is handling the socket. Either the client exited or
 * sent unexpected data after the initial bind.
 */
static void rpc_host_client_exited(struct tevent_req *subreq)
{
	struct rpc_host_pending_client *pending = tevent_req_callback_data(
		subreq, struct rpc_host_pending_client);
	bool ok;
	int err;

	ok = wait_for_read_recv(subreq, &err);

	TALLOC_FREE(subreq);
	pending->hangup_wait = NULL;

	if (ok) {
		DBG_DEBUG("client on sock %d sent data\n", pending->sock);
	} else {
		DBG_DEBUG("client exited with %s\n", strerror(err));
	}

	TALLOC_FREE(pending);
}
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	char *bindings;
};
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1,
	};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}
static bool rpc_iface_binding_map_add_endpoints(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pbinding_maps)
{
	size_t i, num_endpoints = talloc_array_length(endpoints);

	for (i=0; i<num_endpoints; i++) {
		bool ok = rpc_iface_binding_map_add_endpoint(
			mem_ctx, endpoints[i], iface_names, pbinding_maps);
		if (!ok) {
			return false;
		}
	}
	return true;
}
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool result = false;
	bool ok;

	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int ret;

		ret = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (ret == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	result = true;
fail:
	TALLOC_FREE(maps);
	return result;
}
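
/*
 * Resulting epmdb.tdb record layout (an illustration; the key format
 * is whatever ndr_syntax_id_buf_string() produces):
 *
 *   key:   "338cd001-2244-31f1-aaaa-900038001003/0x00000001\0"
 *   value: "winreg\0ncacn_np:[\pipe\winreg]\0ncalrpc:[rpcd_winreg]\0"
 *
 * i.e. a strv with the interface name first, followed by one binding
 * string per endpoint serving that interface.
 */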
struct rpc_server_setup_state {
	struct rpc_server *server;
};

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	*server = (struct rpc_server) {
		.host = host,
		.server_index = UINT32_MAX,
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			continue;
		}
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	*server = talloc_move(mem_ctx, &state->server);
	tevent_req_received(req);
	return NT_STATUS_OK;
}
/*
 * rpcd_* died. Called from SIGCHLD handler.
 */
static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
{
	size_t i, num_servers = talloc_array_length(host->servers);
	struct rpc_work_process *worker = NULL;
	bool found_pid = false;
	bool have_active_worker = false;

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = host->servers[i];
		size_t j, num_workers;

		if (server == NULL) {
			/* SIGCHLD for --list-interfaces run */
			continue;
		}

		num_workers = talloc_array_length(server->workers);

		for (j=0; j<num_workers; j++) {
			worker = &server->workers[j];
			if (worker->pid == pid) {
				found_pid = true;
				worker->pid = -1;
				worker->available = false;
			}

			if (worker->pid != -1) {
				have_active_worker = true;
			}
		}
	}

	if (!found_pid) {
		DBG_WARNING("No worker with PID %d\n", (int)pid);
		return;
	}

	if (!have_active_worker && host->np_helper) {
		/*
		 * We have nothing left to do as an np_helper.
		 * Terminate ourselves (samba-dcerpcd). We will
		 * be restarted on demand anyway.
		 */
		DBG_DEBUG("Exiting idle np helper\n");
		exit(0);
	}
}
static void rpc_host_sigchld(
	struct tevent_context *ev,
	struct tevent_signal *se,
	int signum,
	int count,
	void *siginfo,
	void *private_data)
{
	struct rpc_host *state = talloc_get_type_abort(
		private_data, struct rpc_host);
	pid_t pid;
	int wstatus;

	while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
		DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
		rpc_worker_exited(state, pid);
	}
}
/*
 * Idle timer fired for a rpcd_* worker. Ask it to terminate.
 */
static void rpc_host_exit_worker(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct rpc_server *server = talloc_get_type_abort(
		private_data, struct rpc_server);
	size_t i, num_workers = talloc_array_length(server->workers);

	/*
	 * Scan for the right worker. We don't have too many of those,
	 * and maintaining an index would be more data structure effort.
	 */

	for (i=0; i<num_workers; i++) {
		struct rpc_work_process *w = &server->workers[i];
		NTSTATUS status;

		if (w->exit_timer != te) {
			continue;
		}
		w->exit_timer = NULL;

		SMB_ASSERT(w->num_associations == 0);

		status = messaging_send(
			server->host->msg_ctx,
			pid_to_procid(w->pid),
			MSG_SHUTDOWN,
			NULL);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
				  nt_errstr(status));
		}

		w->available = false;
		break;
	}
}
/*
 * rpcd_* worker replied with its status.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_associations = status_message.num_association_groups;
	worker->num_connections = status_message.num_connections;

	if (worker->num_associations != 0) {
		TALLOC_FREE(worker->exit_timer);
	} else if (worker->exit_timer == NULL) {
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	rpc_host_distribute_clients(server);
}
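
/*
 * Shape of the MSG_RPC_WORKER_STATUS payload consumed above (a sketch
 * derived from the fields used here; the authoritative definition is
 * the rpc_worker_status struct in the rpc_host IDL):
 *
 *   struct rpc_worker_status {
 *           uint32_t server_index;
 *           uint32_t worker_index;
 *           uint32_t num_association_groups;
 *           uint32_t num_connections;
 *   };
 */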
/*
 * samba-dcerpcd has been asked to shutdown.
 * Mark the initial tevent_req as done so we
 * exit the event loop.
 */
static void rpc_host_msg_shutdown(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	tevent_req_done(req);
}
/*
 * Only match directory entries starting in rpcd_
 */
static int rpcd_filter(const struct dirent *d)
{
	int match = fnmatch("rpcd_*", d->d_name, 0);
	return (match == 0) ? 1 : 0;
}
/*
 * Scan the given libexecdir for rpcd_* services
 * and return them as a strv list.
 */
static int rpc_host_list_servers(
	const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
{
	char *servers = NULL;
	struct dirent **namelist = NULL;
	int i, num_servers;
	int ret = ENOMEM;

	num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
	if (num_servers == -1) {
		DBG_DEBUG("scandir failed: %s\n", strerror(errno));
		return errno;
	}

	for (i=0; i<num_servers; i++) {
		char *exe = talloc_asprintf(
			mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
		if (exe == NULL) {
			goto fail;
		}

		ret = strv_add(mem_ctx, &servers, exe);
		TALLOC_FREE(exe);
		if (ret != 0) {
			goto fail;
		}
	}

	ret = 0;
fail:
	for (i=0; i<num_servers; i++) {
		SAFE_FREE(namelist[i]);
	}
	SAFE_FREE(namelist);

	if (ret != 0) {
		TALLOC_FREE(servers);
		return ret;
	}

	*pservers = servers;
	return 0;
}
{
2045 struct tevent_context
*ev
;
2046 struct rpc_host_endpoint
*endpoint
;
2049 static void rpc_host_endpoint_accept_accepted(struct tevent_req
*subreq
);
2050 static void rpc_host_endpoint_accept_got_bind(struct tevent_req
*subreq
);
/*
 * Asynchronously wait for a DCERPC connection from a client.
 */
static struct tevent_req *rpc_host_endpoint_accept_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host_endpoint *endpoint)
{
	struct tevent_req *req = NULL;
	struct rpc_host_endpoint_accept_state *state = NULL;
	size_t i;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_endpoint_accept_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->endpoint = endpoint;

	for (i=0; i<endpoint->num_fds; i++) {
		struct tevent_req *subreq = NULL;

		subreq = accept_send(state, ev, endpoint->fds[i]);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_endpoint_accept_accepted, req);
	}

	return req;
}
/*
 * Accept a DCERPC connection from a client.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		close(sock);
		sock = -1;
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}
/*
 * Client sent us a DCERPC bind packet.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock = -1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	sock = -1;

	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
static int rpc_host_endpoint_accept_recv(
	struct tevent_req *req, struct rpc_host_endpoint **ep)
{
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);

	*ep = state->endpoint;

	return tevent_req_simple_recv_unix(req);
}
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	bool is_ready;
	const char *daemon_ready_progname;
	struct tevent_immediate *ready_signal_immediate;
	int *ready_signal_fds;

	size_t num_prepared;
};
/*
 * Tell whoever invoked samba-dcerpcd we're ready to
 * serve.
 */
static void rpc_host_report_readiness(
	struct tevent_context *ev,
	struct tevent_immediate *im,
	void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t i, num_fds = talloc_array_length(state->ready_signal_fds);

	if (!state->is_ready) {
		DBG_DEBUG("Not yet ready\n");
		return;
	}

	for (i=0; i<num_fds; i++) {
		uint8_t byte = 0;
		ssize_t nwritten;

		do {
			nwritten = write(
				state->ready_signal_fds[i],
				(const void *)&byte,
				sizeof(byte));
		} while ((nwritten == -1) && (errno == EINTR));

		close(state->ready_signal_fds[i]);
	}

	TALLOC_FREE(state->ready_signal_fds);
}
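
/*
 * Peer side of this handshake (an illustration): the process that
 * spawns samba-dcerpcd hands over the write end of a pipe via
 * --ready-signal-fd and blocks on the read end; the byte written
 * above (or EOF if we die early) unblocks it:
 *
 *   int fds[2];
 *   pipe(fds);
 *   // exec samba-dcerpcd ... --ready-signal-fd=<fds[1]>
 *   char c;
 *   ssize_t n = read(fds[0], &c, 1); // returns once the child is ready
 */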
/*
 * Respond to a "are you ready" message.
 */
static bool rpc_host_ready_signal_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	size_t num_fds = talloc_array_length(state->ready_signal_fds);
	int *tmp = NULL;

	if (rec->msg_type != MSG_DAEMON_READY_FD) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	if (num_fds + 1 < num_fds) {
		return false;
	}
	tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
	if (tmp == NULL) {
		return false;
	}
	state->ready_signal_fds = tmp;

	state->ready_signal_fds[num_fds] = rec->fds[0];
	rec->fds[0] = -1;

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);

	return false;
}
/*
 * Respond to a "what is your status" message.
 */
static bool rpc_host_dump_status_filter(
	struct messaging_rec *rec, void *private_data)
{
	struct rpc_host_state *state = talloc_get_type_abort(
		private_data, struct rpc_host_state);
	struct rpc_host *host = state->host;
	struct rpc_server **servers = host->servers;
	size_t i, num_servers = talloc_array_length(servers);
	FILE *f = NULL;

	if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
		return false;
	}
	if (rec->num_fds != 1) {
		DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
		return false;
	}

	f = fdopen_keepfd(rec->fds[0], "w");
	if (f == NULL) {
		DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
		return false;
	}

	for (i=0; i<num_servers; i++) {
		struct rpc_server *server = servers[i];
		size_t j, num_workers = talloc_array_length(server->workers);
		size_t active_workers = 0;

		for (j=0; j<num_workers; j++) {
			if (server->workers[j].pid != -1) {
				active_workers += 1;
			}
		}

		fprintf(f,
			"%s: active_workers=%zu\n",
			server->rpc_server_exe,
			active_workers);

		for (j=0; j<num_workers; j++) {
			struct rpc_work_process *w = &server->workers[j];

			if (w->pid == (pid_t)-1) {
				continue;
			}

			fprintf(f,
				" worker[%zu]: pid=%d, num_associations=%"
				PRIu32", num_connections=%"PRIu32"\n",
				j,
				(int)w->pid,
				w->num_associations,
				w->num_connections);
		}
	}

	fclose(f);

	return false;
}
static void rpc_host_server_setup_done(struct tevent_req *subreq);
static void rpc_host_endpoint_failed(struct tevent_req *subreq);
/*
 * Async startup for samba-dcerpcd.
 */
static struct tevent_req *rpc_host_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct messaging_context *msg_ctx,
	char *servers,
	int ready_signal_fd,
	const char *daemon_ready_progname,
	bool is_np_helper)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_state *state = NULL;
	struct rpc_host *host = NULL;
	struct tevent_signal *se = NULL;
	char *epmdb_path = NULL;
	const char *exe = NULL;
	size_t i, num_servers = strv_count(servers);
	NTSTATUS status;
	int ret;

	req = tevent_req_create(mem_ctx, &state, struct rpc_host_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->daemon_ready_progname = daemon_ready_progname;

	state->ready_signal_immediate = tevent_create_immediate(state);
	if (tevent_req_nomem(state->ready_signal_immediate, req)) {
		return tevent_req_post(req, ev);
	}

	if (ready_signal_fd != -1) {
		state->ready_signal_fds = talloc_array(state, int, 1);
		if (tevent_req_nomem(state->ready_signal_fds, req)) {
			return tevent_req_post(req, ev);
		}
		state->ready_signal_fds[0] = ready_signal_fd;
	}

	state->host = talloc_zero(state, struct rpc_host);
	if (tevent_req_nomem(state->host, req)) {
		return tevent_req_post(req, ev);
	}
	host = state->host;

	host->msg_ctx = msg_ctx;
	host->np_helper = is_np_helper;

	ret = pipe(host->worker_stdin);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}

	host->servers = talloc_zero_array(
		host, struct rpc_server *, num_servers);
	if (tevent_req_nomem(host->servers, req)) {
		return tevent_req_post(req, ev);
	}

	se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
	if (tevent_req_nomem(se, req)) {
		return tevent_req_post(req, ev);
	}
	BlockSignals(false, SIGCHLD);

	status = messaging_register(
		msg_ctx,
		host,
		MSG_RPC_WORKER_STATUS,
		rpc_host_child_status_recv);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	status = messaging_register(
		msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = messaging_filtered_read_send(
		state, ev, msg_ctx, rpc_host_dump_status_filter, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}

	epmdb_path = lock_path(state, "epmdb.tdb");
	if (tevent_req_nomem(epmdb_path, req)) {
		return tevent_req_post(req, ev);
	}

	host->epmdb = tdb_wrap_open(
		host,
		epmdb_path,
		0,
		TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
		O_RDWR|O_CREAT,
		0644);
	if (host->epmdb == NULL) {
		DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
			  epmdb_path,
			  strerror(errno));
		tevent_req_nterror(req, map_nt_error_from_unix(errno));
		return tevent_req_post(req, ev);
	}
	TALLOC_FREE(epmdb_path);

	for (exe = strv_next(servers, exe), i = 0;
	     exe != NULL;
	     exe = strv_next(servers, exe), i++) {

		DBG_DEBUG("server_setup for %s index %zu\n", exe, i);

		subreq = rpc_server_setup_send(
			state,
			ev,
			host,
			exe);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_server_setup_done, req);
	}

	return req;
}
/*
 * Timer function called after we were initialized but no one
 * connected. Shutdown.
 */
static void rpc_host_shutdown(
	struct tevent_context *ev,
	struct tevent_timer *te,
	struct timeval current_time,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	DBG_DEBUG("Nobody connected -- shutting down\n");
	tevent_req_done(req);
}
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		return;
	}

	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
/*
 * Log accept fail on an endpoint.
 */
static void rpc_host_endpoint_failed(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_host_endpoint *endpoint = NULL;
	char *binding_string = NULL;
	int ret;

	ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
	TALLOC_FREE(subreq);

	binding_string = dcerpc_binding_string(state, endpoint->binding);
	DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
		  binding_string,
		  strerror(ret));
	TALLOC_FREE(binding_string);
}
static NTSTATUS rpc_host_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
static int rpc_host_pidfile_create(
	struct messaging_context *msg_ctx,
	const char *progname,
	int ready_signal_fd)
{
	const char *piddir = lp_pid_directory();
	size_t len = strlen(piddir) + strlen(progname) + 6;
	char pidFile[len];
	pid_t existing_pid;
	int fd, ret;

	snprintf(pidFile,
		 sizeof(pidFile),
		 "%s/%s.pid",
		 piddir, progname);

	ret = pidfile_path_create(pidFile, &fd, &existing_pid);
	if (ret == 0) {
		/* leave pidfile open & leaked */
		return 0;
	}

	if (ret != EAGAIN) {
		DBG_DEBUG("pidfile_path_create() failed: %s\n",
			  strerror(ret));
		return ret;
	}

	DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);

	if (ready_signal_fd != -1) {
		NTSTATUS status = messaging_send_iov(
			msg_ctx,
			pid_to_procid(existing_pid),
			MSG_DAEMON_READY_FD,
			NULL,
			0,
			&ready_signal_fd,
			1);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send ready_signal_fd: %s\n",
				  nt_errstr(status));
		}
	}

	return EAGAIN;
}
static void samba_dcerpcd_stdin_handler(
	struct tevent_context *ev,
	struct tevent_fd *fde,
	uint16_t flags,
	void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(
		private_data, struct tevent_req);
	char c;

	if (read(0, &c, 1) != 1) {
		/* we have reached EOF on stdin, which means the
		   parent has exited. Shutdown the server */
		tevent_req_done(req);
	}
}
/*
 * samba-dcerpcd microservice startup !
 */
int main(int argc, const char *argv[])
{
	const struct loadparm_substitution *lp_sub =
		loadparm_s3_global_substitution();
	const char *progname = getprogname();
	TALLOC_CTX *frame = NULL;
	struct tevent_context *ev_ctx = NULL;
	struct messaging_context *msg_ctx = NULL;
	struct tevent_req *req = NULL;
	char *servers = NULL;
	const char *arg = NULL;
	size_t num_servers;
	poptContext pc;
	int ret, err;
	NTSTATUS status;
	bool log_stdout;
	bool ok;

	int libexec_rpcds = 0;
	int np_helper = 0;
	int ready_signal_fd = -1;

	struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
	struct poptOption long_options[] = {
		POPT_AUTOHELP
		{
			.longName   = "libexec-rpcds",
			.argInfo    = POPT_ARG_NONE,
			.arg        = &libexec_rpcds,
			.descrip    = "Use all rpcds in libexec",
		},
		{
			.longName   = "ready-signal-fd",
			.argInfo    = POPT_ARG_INT,
			.arg        = &ready_signal_fd,
			.descrip    = "fd to close when initialized",
		},
		{
			.longName   = "np-helper",
			.argInfo    = POPT_ARG_NONE,
			.arg        = &np_helper,
			.descrip    = "Internal named pipe server",
		},
		POPT_COMMON_SAMBA
		POPT_COMMON_DAEMON
		POPT_TABLEEND
	};

	{
		const char *fd_params[] = { "ready-signal-fd", };

		closefrom_except_fd_params(
			3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
	}

	talloc_enable_null_tracking();
	frame = talloc_stackframe();
	umask(0);
	sec_init();
	smb_init_locale();

	ok = samba_cmdline_init(frame,
				SAMBA_CMDLINE_CONFIG_SERVER,
				true /* require_smbconf */);
	if (!ok) {
		DBG_ERR("Failed to init cmdline parser!\n");
		TALLOC_FREE(frame);
		exit(ENOMEM);
	}

	pc = samba_popt_get_context(getprogname(),
				    argc,
				    argv,
				    long_options,
				    0);
	if (pc == NULL) {
		DBG_ERR("Failed to setup popt context!\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	poptSetOtherOptionHelp(
		pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");

	ret = poptGetNextOpt(pc);

	if (ret != -1) {
		if (ret >= 0) {
			fprintf(stderr,
				"\nGot unexpected option %d\n",
				ret);
		} else if (ret == POPT_ERROR_BADOPT) {
			fprintf(stderr,
				"\nInvalid option %s: %s\n\n",
				poptBadOption(pc, 0),
				poptStrerror(ret));
		} else {
			fprintf(stderr,
				"\npoptGetNextOpt returned %s\n",
				poptStrerror(ret));
		}

		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	while ((arg = poptGetArg(pc)) != NULL) {
		ret = strv_add(frame, &servers, arg);
		if (ret != 0) {
			DBG_ERR("strv_add() failed\n");
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
	if (log_stdout) {
		setup_logging(progname, DEBUG_STDOUT);
	} else {
		setup_logging(progname, DEBUG_FILE);
	}

	/*
	 * If "rpc start on demand helpers = true" in smb.conf we must
	 * not start as standalone, only on demand from
	 * local_np_connect() functions. Log an error message telling
	 * the admin how to fix and then exit.
	 */
	if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
		DBG_ERR("Cannot start in standalone mode if smb.conf "
			"\"rpc start on demand helpers = true\" - "
			"exiting\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	if (libexec_rpcds != 0) {
		ret = rpc_host_list_servers(
			dyn_SAMBA_LIBEXECDIR, frame, &servers);
		if (ret != 0) {
			DBG_ERR("Could not list libexec: %s\n",
				strerror(ret));
			poptFreeContext(pc);
			TALLOC_FREE(frame);
			exit(1);
		}
	}

	num_servers = strv_count(servers);
	if (num_servers == 0) {
		poptPrintUsage(pc, stderr, 0);
		poptFreeContext(pc);
		TALLOC_FREE(frame);
		exit(1);
	}

	poptFreeContext(pc);

	cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();

	if (log_stdout && cmdline_daemon_cfg->fork) {
		DBG_ERR("Can't log to stdout unless in foreground\n");
		TALLOC_FREE(frame);
		exit(1);
	}

	msg_ctx = global_messaging_context();
	if (msg_ctx == NULL) {
		DBG_ERR("messaging_init() failed\n");
		TALLOC_FREE(frame);
		exit(1);
	}
	ev_ctx = messaging_tevent_context(msg_ctx);

	if (cmdline_daemon_cfg->fork) {
		become_daemon(
			true,
			cmdline_daemon_cfg->no_process_group,
			log_stdout);

		status = reinit_after_fork(msg_ctx, ev_ctx, false);
		if (!NT_STATUS_IS_OK(status)) {
			exit_daemon("reinit_after_fork() failed",
				    map_errno_from_nt_status(status));
		}
	} else {
		DBG_DEBUG("Calling daemon_status\n");
		daemon_status(progname, "Starting process ... ");
	}

	BlockSignals(true, SIGPIPE);

	dump_core_setup(progname, lp_logfile(frame, lp_sub));

	DBG_STARTUP_NOTICE("%s version %s started.\n%s\n",
			   progname,
			   samba_version_string(),
			   samba_copyright_string());

	(void)winbind_off();
	ok = init_guest_session_info(frame);
	(void)winbind_on();
	if (!ok) {
		DBG_ERR("init_guest_session_info failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
			  strerror(ret));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	req = rpc_host_send(
		ev_ctx,
		ev_ctx,
		msg_ctx,
		servers,
		ready_signal_fd,
		cmdline_daemon_cfg->fork ? NULL : progname,
		np_helper != 0);
	if (req == NULL) {
		DBG_ERR("rpc_host_send failed\n");
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	if (!cmdline_daemon_cfg->fork) {
		struct stat st;

		if (fstat(0, &st) != 0) {
			DBG_DEBUG("fstat(0) failed: %s\n",
				  strerror(errno));
			global_messaging_context_free();
			TALLOC_FREE(frame);
			exit(1);
		}
		if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
			tevent_add_fd(
				ev_ctx,
				ev_ctx,
				0,
				TEVENT_FD_READ,
				samba_dcerpcd_stdin_handler,
				req);
		}
	}

	ok = tevent_req_poll_unix(req, ev_ctx, &err);
	if (!ok) {
		DBG_ERR("tevent_req_poll_unix failed: %s\n",
			strerror(err));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	status = rpc_host_recv(req);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
		global_messaging_context_free();
		TALLOC_FREE(frame);
		exit(1);
	}

	TALLOC_FREE(frame);

	return 0;
}