/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2004
   Copyright (C) Rafal Szczesniak 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "libnet/libnet.h"
#include "libcli/libcli.h"
#include "libcli/composite/composite.h"
#include "librpc/rpc/dcerpc_proto.h"
#include "librpc/gen_ndr/ndr_lsa_c.h"
#include "librpc/gen_ndr/ndr_samr.h"
#include "auth/credentials/credentials.h"

struct rpc_connect_srv_state {
	struct libnet_context *ctx;
	struct libnet_RpcConnect r;
	const char *binding;

	/* information about the progress */
	void (*monitor_fn)(struct monitor_msg*);
};


static void continue_pipe_connect(struct composite_context *ctx);

/**
 * Initiates connection to rpc pipe on remote server
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return composite context of this call
 **/

static struct composite_context* libnet_RpcConnectSrv_send(struct libnet_context *ctx,
							    TALLOC_CTX *mem_ctx,
							    struct libnet_RpcConnect *r,
							    void (*monitor)(struct monitor_msg*))
{
	struct composite_context *c;
	struct rpc_connect_srv_state *s;
	struct dcerpc_binding *b;
	struct composite_context *pipe_connect_req;

	/* composite context allocation and setup */
	c = composite_create(ctx, ctx->event_ctx);
	if (c == NULL) return c;

	s = talloc_zero(c, struct rpc_connect_srv_state);
	if (composite_nomem(s, c)) return c;

	c->private_data = s;
	s->monitor_fn = monitor;

	s->ctx = ctx;
	s->r = *r;
	ZERO_STRUCT(s->r.out);

	/* prepare binding string */
	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
		s->binding = talloc_asprintf(s, "ncacn_np:%s", r->in.name);
		break;
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
		s->binding = talloc_asprintf(s, "ncacn_np:%s[target_hostname=%s]",
					     r->in.address, r->in.name);
		break;

	case LIBNET_RPC_CONNECT_BINDING:
		s->binding = talloc_strdup(s, r->in.binding);
		break;

	case LIBNET_RPC_CONNECT_DC:
	case LIBNET_RPC_CONNECT_PDC:
		/* this should never happen - the DC and PDC levels have a
		   separate composite function */
	case LIBNET_RPC_CONNECT_DC_INFO:
		/* this should never happen - the DC_INFO level has a
		   separate composite function */
		composite_error(c, NT_STATUS_INVALID_LEVEL);
		return c;
	}

	/* parse binding string to the structure */
	c->status = dcerpc_parse_binding(c, s->binding, &b);
	if (!NT_STATUS_IS_OK(c->status)) {
		DEBUG(0, ("Failed to parse dcerpc binding '%s'\n", s->binding));
		composite_error(c, c->status);
		return c;
	}

	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
		c->status = dcerpc_binding_set_flags(b, r->in.dcerpc_flags, 0);
		if (!composite_is_ok(c)) return c;
		break;
	default:
		/* other types have already been checked before */
		break;
	}

	if (DEBUGLEVEL >= 10) {
		c->status = dcerpc_binding_set_flags(b, DCERPC_DEBUG_PRINT_BOTH, 0);
		if (!composite_is_ok(c)) return c;
	}

	/* connect to remote dcerpc pipe */
	pipe_connect_req = dcerpc_pipe_connect_b_send(c, b, r->in.dcerpc_iface,
						      ctx->cred, c->event_ctx,
						      ctx->lp_ctx);
	if (composite_nomem(pipe_connect_req, c)) return c;

	composite_continue(c, pipe_connect_req, continue_pipe_connect, c);
	return c;
}

/*
  Step 2 of RpcConnectSrv - get rpc connection
*/
static void continue_pipe_connect(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_srv_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);

	/* receive result of rpc pipe connection */
	c->status = dcerpc_pipe_connect_b_recv(ctx, c, &s->r.out.dcerpc_pipe);

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding_handle *bh =
			s->r.out.dcerpc_pipe->binding_handle;
		const struct dcerpc_binding *b =
			dcerpc_binding_handle_get_binding(bh);

		/* prepare monitor message and post it */
		data.host = dcerpc_binding_get_string_option(b, "host");
		data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
		data.transport = dcerpc_binding_get_transport(b);
		data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	composite_done(c);
}

/**
 * Receives result of connection to rpc pipe on remote server
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

static NTSTATUS libnet_RpcConnectSrv_recv(struct composite_context *c,
					  struct libnet_context *ctx,
					  TALLOC_CTX *mem_ctx,
					  struct libnet_RpcConnect *r)
{
	NTSTATUS status;

	status = composite_wait(c);
	if (NT_STATUS_IS_OK(status)) {
		struct rpc_connect_srv_state *s;

		/* move the returned rpc pipe between memory contexts */
		s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);
		r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);

		/* reference created pipe structure to long-term libnet_context
		   so that it can be used by other api functions even after short-term
		   mem_ctx is freed */
		if (r->in.dcerpc_iface == &ndr_table_samr) {
			ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;

		} else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
			ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
		}

		r->out.error_string = talloc_strdup(mem_ctx, "Success");

	} else {
		r->out.error_string = talloc_asprintf(mem_ctx, "Error: %s", nt_errstr(status));
	}

	talloc_free(c);
	return status;
}

struct rpc_connect_dc_state {
	struct libnet_context *ctx;
	struct libnet_RpcConnect r;
	struct libnet_RpcConnect r2;
	struct libnet_LookupDCs f;
	const char *connect_name;

	/* information about the progress */
	void (*monitor_fn)(struct monitor_msg *);
};


static void continue_lookup_dc(struct tevent_req *req);
static void continue_rpc_connect(struct composite_context *ctx);

/**
 * Initiates connection to rpc pipe on domain pdc
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return composite context of this call
 **/

static struct composite_context* libnet_RpcConnectDC_send(struct libnet_context *ctx,
							   TALLOC_CTX *mem_ctx,
							   struct libnet_RpcConnect *r,
							   void (*monitor)(struct monitor_msg *msg))
{
	struct composite_context *c;
	struct rpc_connect_dc_state *s;
	struct tevent_req *lookup_dc_req;

	/* composite context allocation and setup */
	c = composite_create(ctx, ctx->event_ctx);
	if (c == NULL) return c;

	s = talloc_zero(c, struct rpc_connect_dc_state);
	if (composite_nomem(s, c)) return c;

	c->private_data = s;
	s->monitor_fn = monitor;

	s->ctx = ctx;
	s->r = *r;
	ZERO_STRUCT(s->r.out);

	switch (r->level) {
	case LIBNET_RPC_CONNECT_PDC:
		s->f.in.name_type = NBT_NAME_PDC;
		break;

	case LIBNET_RPC_CONNECT_DC:
		s->f.in.name_type = NBT_NAME_LOGON;
		break;

	default:
		break;
	}

	s->f.in.domain_name = r->in.name;
	s->f.out.num_dcs = 0;
	s->f.out.dcs = NULL;

	/* find the domain pdc first */
	lookup_dc_req = libnet_LookupDCs_send(ctx, c, &s->f);
	if (composite_nomem(lookup_dc_req, c)) return c;

	tevent_req_set_callback(lookup_dc_req, continue_lookup_dc, c);
	return c;
}

/*
  Step 2 of RpcConnectDC: get domain controller name and
  initiate RpcConnect to it
*/
static void continue_lookup_dc(struct tevent_req *req)
{
	struct composite_context *c;
	struct rpc_connect_dc_state *s;
	struct composite_context *rpc_connect_req;
	struct monitor_msg msg;
	struct msg_net_lookup_dc data;

	c = tevent_req_callback_data(req, struct composite_context);
	s = talloc_get_type_abort(c->private_data, struct rpc_connect_dc_state);

	/* receive result of domain controller lookup */
	c->status = libnet_LookupDCs_recv(req, c, &s->f);
	if (!composite_is_ok(c)) return;

	/* decide on preferred address type depending on DC type */
	s->connect_name = s->f.out.dcs[0].name;

	/* post monitor message */
	if (s->monitor_fn) {
		/* prepare a monitor message and post it */
		data.domain_name = s->f.in.domain_name;
		data.hostname = s->f.out.dcs[0].name;
		data.address = s->f.out.dcs[0].address;

		msg.type = mon_NetLookupDc;
		msg.data = &data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	/* ok, pdc has been found so do attempt to rpc connect */
	s->r2.level = LIBNET_RPC_CONNECT_SERVER_ADDRESS;

	/* this will cause yet another name resolution, but at least
	 * we pass the right name down the stack now */
	s->r2.in.name = talloc_strdup(s, s->connect_name);
	s->r2.in.address = talloc_steal(s, s->f.out.dcs[0].address);
	s->r2.in.dcerpc_iface = s->r.in.dcerpc_iface;
	s->r2.in.dcerpc_flags = s->r.in.dcerpc_flags;

	/* send rpc connect request to the server */
	rpc_connect_req = libnet_RpcConnectSrv_send(s->ctx, c, &s->r2, s->monitor_fn);
	if (composite_nomem(rpc_connect_req, c)) return;

	composite_continue(c, rpc_connect_req, continue_rpc_connect, c);
}

/*
  Step 3 of RpcConnectDC: get rpc connection to the server
*/
static void continue_rpc_connect(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_dc_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dc_state);

	c->status = libnet_RpcConnectSrv_recv(ctx, s->ctx, c, &s->r2);

	/* error string is to be passed anyway */
	s->r.out.error_string = s->r2.out.error_string;
	if (!composite_is_ok(c)) return;

	s->r.out.dcerpc_pipe = s->r2.out.dcerpc_pipe;

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding_handle *bh =
			s->r.out.dcerpc_pipe->binding_handle;
		const struct dcerpc_binding *b =
			dcerpc_binding_handle_get_binding(bh);

		data.host = dcerpc_binding_get_string_option(b, "host");
		data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
		data.transport = dcerpc_binding_get_transport(b);
		data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	composite_done(c);
}

/**
 * Receives result of connection to rpc pipe on domain pdc
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

static NTSTATUS libnet_RpcConnectDC_recv(struct composite_context *c,
					 struct libnet_context *ctx,
					 TALLOC_CTX *mem_ctx,
					 struct libnet_RpcConnect *r)
{
	NTSTATUS status;
	struct rpc_connect_dc_state *s = talloc_get_type(c->private_data,
					 struct rpc_connect_dc_state);

	status = composite_wait(c);
	if (NT_STATUS_IS_OK(status)) {
		/* move connected rpc pipe between memory contexts

		   The use of talloc_reparent(talloc_parent(), ...) is
		   bizarre, but it is needed because of the absolutely
		   atrocious use of talloc in this code. We need to
		   force the original parent to change, but finding
		   the original parent is well nigh impossible at this
		   point in the code (yes, I tried).
		*/
		r->out.dcerpc_pipe = talloc_reparent(talloc_parent(s->r.out.dcerpc_pipe),
						     mem_ctx, s->r.out.dcerpc_pipe);

		/* reference created pipe structure to long-term libnet_context
		   so that it can be used by other api functions even after short-term
		   mem_ctx is freed */
		if (r->in.dcerpc_iface == &ndr_table_samr) {
			ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;
		} else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
			ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
		}

	} else {
		r->out.error_string = talloc_asprintf(mem_ctx,
						      "Failed to rpc connect: %s",
						      nt_errstr(status));
	}

	talloc_free(c);
	return status;
}

struct rpc_connect_dci_state {
	struct libnet_context *ctx;
	struct libnet_RpcConnect r;
	struct libnet_RpcConnect rpc_conn;
	struct policy_handle lsa_handle;
	struct lsa_QosInfo qos;
	struct lsa_ObjectAttribute attr;
	struct lsa_OpenPolicy2 lsa_open_policy;
	struct dcerpc_pipe *lsa_pipe;
	struct lsa_QueryInfoPolicy2 lsa_query_info2;
	struct lsa_QueryInfoPolicy lsa_query_info;
	struct dcerpc_binding *final_binding;
	struct dcerpc_pipe *final_pipe;

	/* information about the progress */
	void (*monitor_fn)(struct monitor_msg*);
};


static void continue_dci_rpc_connect(struct composite_context *ctx);
static void continue_lsa_policy(struct tevent_req *subreq);
static void continue_lsa_query_info(struct tevent_req *subreq);
static void continue_lsa_query_info2(struct tevent_req *subreq);
static void continue_epm_map_binding(struct composite_context *ctx);
static void continue_secondary_conn(struct composite_context *ctx);
static void continue_epm_map_binding_send(struct composite_context *c);

/**
 * Initiates connection to rpc pipe on remote server or pdc. Received result
 * contains info on the domain name, domain sid and realm.
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values. Must be a talloc context
 * @return composite context of this call
 **/

static struct composite_context* libnet_RpcConnectDCInfo_send(struct libnet_context *ctx,
							       TALLOC_CTX *mem_ctx,
							       struct libnet_RpcConnect *r,
							       void (*monitor)(struct monitor_msg*))
{
	struct composite_context *c, *conn_req;
	struct rpc_connect_dci_state *s;

	/* composite context allocation and setup */
	c = composite_create(ctx, ctx->event_ctx);
	if (c == NULL) return c;

	s = talloc_zero(c, struct rpc_connect_dci_state);
	if (composite_nomem(s, c)) return c;

	c->private_data = s;
	s->monitor_fn = monitor;

	s->ctx = ctx;
	s->r = *r;
	ZERO_STRUCT(s->r.out);

	/* proceed to pure rpc connection if the binding string is provided,
	   otherwise try to connect domain controller */
	if (r->in.binding == NULL) {
		/* Pass on any binding flags (such as anonymous fallback) that have been set */
		s->rpc_conn.in.dcerpc_flags = r->in.dcerpc_flags;

		s->rpc_conn.in.name = r->in.name;
		s->rpc_conn.level = LIBNET_RPC_CONNECT_DC;
	} else {
		s->rpc_conn.in.binding = r->in.binding;
		s->rpc_conn.level = LIBNET_RPC_CONNECT_BINDING;
	}

	/* we need to query information on lsarpc interface first */
	s->rpc_conn.in.dcerpc_iface = &ndr_table_lsarpc;

	/* request connection to the lsa pipe on the pdc */
	conn_req = libnet_RpcConnect_send(ctx, c, &s->rpc_conn, s->monitor_fn);
	if (composite_nomem(conn_req, c)) return c;

	composite_continue(c, conn_req, continue_dci_rpc_connect, c);
	return c;
}

/*
  Step 2 of RpcConnectDCInfo: receive opened rpc pipe and open
  lsa policy handle
*/
static void continue_dci_rpc_connect(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;
	struct tevent_req *subreq;
	enum dcerpc_transport_t transport;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = libnet_RpcConnect_recv(ctx, s->ctx, c, &s->rpc_conn);
	if (!NT_STATUS_IS_OK(c->status)) {
		composite_error(c, c->status);
		return;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding_handle *bh =
			s->r.out.dcerpc_pipe->binding_handle;
		const struct dcerpc_binding *b =
			dcerpc_binding_handle_get_binding(bh);

		data.host = dcerpc_binding_get_string_option(b, "host");
		data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
		data.transport = dcerpc_binding_get_transport(b);
		data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	/* prepare to open a policy handle on lsa pipe */
	s->lsa_pipe = s->ctx->lsa.pipe;

	s->qos.len = 0;
	s->qos.impersonation_level = 2;
	s->qos.context_mode = 1;
	s->qos.effective_only = 0;

	s->attr.sec_qos = &s->qos;

	transport = dcerpc_binding_handle_get_transport(
			s->lsa_pipe->binding_handle);
	if (transport == NCACN_IP_TCP) {
		/*
		 * Skip to creating the actual connection. We can't open a
		 * policy handle over tcpip.
		 */
		continue_epm_map_binding_send(c);
		return;
	}

	s->lsa_open_policy.in.attr = &s->attr;
	s->lsa_open_policy.in.system_name = talloc_asprintf(c, "\\");
	if (composite_nomem(s->lsa_open_policy.in.system_name, c)) return;

	s->lsa_open_policy.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED;
	s->lsa_open_policy.out.handle = &s->lsa_handle;

	subreq = dcerpc_lsa_OpenPolicy2_r_send(s, c->event_ctx,
					       s->lsa_pipe->binding_handle,
					       &s->lsa_open_policy);
	if (composite_nomem(subreq, c)) return;

	tevent_req_set_callback(subreq, continue_lsa_policy, c);
}

/*
  Step 3 of RpcConnectDCInfo: Get policy handle and query lsa info
  for kerberos realm (dns name) and guid. The query may fail.
*/
static void continue_lsa_policy(struct tevent_req *subreq)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = tevent_req_callback_data(subreq, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_lsa_OpenPolicy2_r_recv(subreq, s);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(c->status)) {
		composite_error(c, c->status);
		return;
	}

	if (NT_STATUS_EQUAL(s->lsa_open_policy.out.result, NT_STATUS_RPC_PROTSEQ_NOT_SUPPORTED)) {
		s->r.out.realm = NULL;
		s->r.out.guid = NULL;
		s->r.out.domain_name = NULL;
		s->r.out.domain_sid = NULL;

		/* Skip to creating the actual connection, no info available on this transport */
		continue_epm_map_binding_send(c);
		return;

	} else if (!NT_STATUS_IS_OK(s->lsa_open_policy.out.result)) {
		composite_error(c, s->lsa_open_policy.out.result);
		return;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;

		msg.type = mon_LsaOpenPolicy;
		msg.data = NULL;
		msg.data_size = 0;
		s->monitor_fn(&msg);
	}

	/* query lsa info for dns domain name and guid */
	s->lsa_query_info2.in.handle = &s->lsa_handle;
	s->lsa_query_info2.in.level = LSA_POLICY_INFO_DNS;
	s->lsa_query_info2.out.info = talloc_zero(c, union lsa_PolicyInformation *);
	if (composite_nomem(s->lsa_query_info2.out.info, c)) return;

	subreq = dcerpc_lsa_QueryInfoPolicy2_r_send(s, c->event_ctx,
						    s->lsa_pipe->binding_handle,
						    &s->lsa_query_info2);
	if (composite_nomem(subreq, c)) return;

	tevent_req_set_callback(subreq, continue_lsa_query_info2, c);
}

/*
  Step 4 of RpcConnectDCInfo: Get realm and guid if provided (rpc call
  may result in failure) and query lsa info for domain name and sid.
*/
static void continue_lsa_query_info2(struct tevent_req *subreq)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = tevent_req_callback_data(subreq, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_lsa_QueryInfoPolicy2_r_recv(subreq, s);
	TALLOC_FREE(subreq);

	/* In case of error just null the realm and guid and proceed
	   to the next step. After all, it doesn't have to be an AD domain
	   controller we are talking to - an NT-style PDC also counts */

	if (NT_STATUS_EQUAL(c->status, NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE)) {
		s->r.out.realm = NULL;
		s->r.out.guid = NULL;

	} else {
		if (!NT_STATUS_IS_OK(c->status)) {
			s->r.out.error_string = talloc_asprintf(c,
								"lsa_QueryInfoPolicy2 failed: %s",
								nt_errstr(c->status));
			composite_error(c, c->status);
			return;
		}

		if (!NT_STATUS_IS_OK(s->lsa_query_info2.out.result)) {
			s->r.out.error_string = talloc_asprintf(c,
								"lsa_QueryInfoPolicy2 failed: %s",
								nt_errstr(s->lsa_query_info2.out.result));
			composite_error(c, s->lsa_query_info2.out.result);
			return;
		}

		/* Copy the dns domain name and guid from the query result */

		/* this should actually be a conversion from lsa_StringLarge */
		s->r.out.realm = (*s->lsa_query_info2.out.info)->dns.dns_domain.string;
		s->r.out.guid = talloc(c, struct GUID);
		if (composite_nomem(s->r.out.guid, c)) {
			s->r.out.error_string = NULL;
			return;
		}
		*s->r.out.guid = (*s->lsa_query_info2.out.info)->dns.domain_guid;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;

		msg.type = mon_LsaQueryPolicy;
		msg.data = NULL;
		msg.data_size = 0;
		s->monitor_fn(&msg);
	}

	/* query lsa info for domain name and sid */
	s->lsa_query_info.in.handle = &s->lsa_handle;
	s->lsa_query_info.in.level = LSA_POLICY_INFO_DOMAIN;
	s->lsa_query_info.out.info = talloc_zero(c, union lsa_PolicyInformation *);
	if (composite_nomem(s->lsa_query_info.out.info, c)) return;

	subreq = dcerpc_lsa_QueryInfoPolicy_r_send(s, c->event_ctx,
						   s->lsa_pipe->binding_handle,
						   &s->lsa_query_info);
	if (composite_nomem(subreq, c)) return;

	tevent_req_set_callback(subreq, continue_lsa_query_info, c);
}

/*
  Step 5 of RpcConnectDCInfo: Get domain name and sid
*/
static void continue_lsa_query_info(struct tevent_req *subreq)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = tevent_req_callback_data(subreq, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_lsa_QueryInfoPolicy_r_recv(subreq, s);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(c->status)) {
		s->r.out.error_string = talloc_asprintf(c,
							"lsa_QueryInfoPolicy failed: %s",
							nt_errstr(c->status));
		composite_error(c, c->status);
		return;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;

		msg.type = mon_LsaQueryPolicy;
		msg.data = NULL;
		msg.data_size = 0;
		s->monitor_fn(&msg);
	}

	/* Copy the domain name and sid from the query result */
	s->r.out.domain_sid = (*s->lsa_query_info.out.info)->domain.sid;
	s->r.out.domain_name = (*s->lsa_query_info.out.info)->domain.name.string;

	continue_epm_map_binding_send(c);
}

/*
  Step 5 (continued) of RpcConnectDCInfo: request endpoint
  map binding.

  We may short-cut to this step if we don't support LSA OpenPolicy on this transport
*/
static void continue_epm_map_binding_send(struct composite_context *c)
{
	struct rpc_connect_dci_state *s;
	struct composite_context *epm_map_req;
	struct cli_credentials *epm_creds = NULL;
	const struct dcerpc_binding *b = NULL;

	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	/* prepare to get endpoint mapping for the requested interface */
	b = dcerpc_binding_handle_get_binding(s->lsa_pipe->binding_handle);
	s->final_binding = dcerpc_binding_dup(s, b);
	if (composite_nomem(s->final_binding, c)) return;

	/*
	 * We don't want to inherit the assoc_group_id from the
	 * lsa_pipe here!
	 */
	dcerpc_binding_set_assoc_group_id(s->final_binding, 0);

	epm_creds = cli_credentials_init_anon(s);
	if (composite_nomem(epm_creds, c)) return;

	epm_map_req = dcerpc_epm_map_binding_send(c, s->final_binding, s->r.in.dcerpc_iface,
						  epm_creds,
						  s->ctx->event_ctx, s->ctx->lp_ctx);
	if (composite_nomem(epm_map_req, c)) return;

	composite_continue(c, epm_map_req, continue_epm_map_binding, c);
}

/*
  Step 6 of RpcConnectDCInfo: Receive endpoint mapping and create secondary
  rpc connection derived from already used pipe but connected to the requested
  one (as specified in libnet_RpcConnect structure)
*/
static void continue_epm_map_binding(struct composite_context *ctx)
{
	struct composite_context *c, *sec_conn_req;
	struct rpc_connect_dci_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_epm_map_binding_recv(ctx);
	if (!NT_STATUS_IS_OK(c->status)) {
		s->r.out.error_string = talloc_asprintf(c,
							"failed to map pipe with endpoint mapper - %s",
							nt_errstr(c->status));
		composite_error(c, c->status);
		return;
	}

	/* create secondary connection derived from lsa pipe */
	sec_conn_req = dcerpc_secondary_auth_connection_send(s->lsa_pipe,
							     s->final_binding,
							     s->r.in.dcerpc_iface,
							     s->ctx->cred,
							     s->ctx->lp_ctx);
	if (composite_nomem(sec_conn_req, c)) return;

	composite_continue(c, sec_conn_req, continue_secondary_conn, c);
}

/*
  Step 7 of RpcConnectDCInfo: Get actual pipe to be returned
  and complete this composite call
*/
static void continue_secondary_conn(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_secondary_auth_connection_recv(ctx, s->lsa_pipe,
							   &s->final_pipe);
	if (!NT_STATUS_IS_OK(c->status)) {
		s->r.out.error_string = talloc_asprintf(c,
							"secondary connection failed: %s",
							nt_errstr(c->status));
		composite_error(c, c->status);
		return;
	}

	s->r.out.dcerpc_pipe = s->final_pipe;

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding_handle *bh =
			s->r.out.dcerpc_pipe->binding_handle;
		const struct dcerpc_binding *b =
			dcerpc_binding_handle_get_binding(bh);

		/* prepare monitor message and post it */
		data.host = dcerpc_binding_get_string_option(b, "host");
		data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
		data.transport = dcerpc_binding_get_transport(b);
		data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	composite_done(c);
}

/**
 * Receives result of connection to rpc pipe and gets basic
 * domain info (name, sid, realm, guid)
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing return values
 * @return nt status of rpc connection
 **/

static NTSTATUS libnet_RpcConnectDCInfo_recv(struct composite_context *c, struct libnet_context *ctx,
					     TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
{
	NTSTATUS status;
	struct rpc_connect_dci_state *s = talloc_get_type(c->private_data,
					  struct rpc_connect_dci_state);

	status = composite_wait(c);
	if (NT_STATUS_IS_OK(status)) {
		r->out.realm = talloc_steal(mem_ctx, s->r.out.realm);
		r->out.guid = talloc_steal(mem_ctx, s->r.out.guid);
		r->out.domain_name = talloc_steal(mem_ctx, s->r.out.domain_name);
		r->out.domain_sid = talloc_steal(mem_ctx, s->r.out.domain_sid);

		r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);

		/* reference created pipe structure to long-term libnet_context
		   so that it can be used by other api functions even after short-term
		   mem_ctx is freed */
		if (r->in.dcerpc_iface == &ndr_table_samr) {
			ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;

		} else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
			ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
		}

	} else {
		if (s->r.out.error_string) {
			r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
		} else if (r->in.binding == NULL) {
			r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC failed: %s", nt_errstr(status));
		} else {
			r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC %s failed: %s",
							      r->in.binding, nt_errstr(status));
		}
	}

	talloc_free(c);
	return status;
}

/**
 * Initiates connection to rpc pipe on remote server or pdc, optionally
 * providing domain info
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return composite context of this call
 **/

struct composite_context* libnet_RpcConnect_send(struct libnet_context *ctx,
						 TALLOC_CTX *mem_ctx,
						 struct libnet_RpcConnect *r,
						 void (*monitor)(struct monitor_msg*))
{
	struct composite_context *c;

	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
	case LIBNET_RPC_CONNECT_BINDING:
		c = libnet_RpcConnectSrv_send(ctx, mem_ctx, r, monitor);
		break;

	case LIBNET_RPC_CONNECT_PDC:
	case LIBNET_RPC_CONNECT_DC:
		c = libnet_RpcConnectDC_send(ctx, mem_ctx, r, monitor);
		break;

	case LIBNET_RPC_CONNECT_DC_INFO:
		c = libnet_RpcConnectDCInfo_send(ctx, mem_ctx, r, monitor);
		break;

	default:
		c = talloc_zero(mem_ctx, struct composite_context);
		composite_error(c, NT_STATUS_INVALID_LEVEL);
	}

	return c;
}
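
/*
 * Illustrative sketch (not part of the upstream code): asynchronous callers
 * pair libnet_RpcConnect_send() with libnet_RpcConnect_recv() and may pass a
 * monitor callback to observe progress messages such as mon_NetRpcConnect.
 * The callback and variable names below are hypothetical.
 *
 *	static void my_monitor(struct monitor_msg *msg)
 *	{
 *		if (msg->type == mon_NetRpcConnect) {
 *			struct msg_net_rpc_connect *m = msg->data;
 *			DEBUG(3, ("rpc pipe connected to %s\n", m->host));
 *		}
 *	}
 *
 *	...
 *	c = libnet_RpcConnect_send(ctx, mem_ctx, &r, my_monitor);
 *	status = libnet_RpcConnect_recv(c, ctx, mem_ctx, &r);
 */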

/**
 * Receives result of connection to rpc pipe on remote server or pdc
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

NTSTATUS libnet_RpcConnect_recv(struct composite_context *c, struct libnet_context *ctx,
				TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
{
	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
	case LIBNET_RPC_CONNECT_BINDING:
		return libnet_RpcConnectSrv_recv(c, ctx, mem_ctx, r);

	case LIBNET_RPC_CONNECT_PDC:
	case LIBNET_RPC_CONNECT_DC:
		return libnet_RpcConnectDC_recv(c, ctx, mem_ctx, r);

	case LIBNET_RPC_CONNECT_DC_INFO:
		return libnet_RpcConnectDCInfo_recv(c, ctx, mem_ctx, r);

	default:
		ZERO_STRUCT(r->out);
		return NT_STATUS_INVALID_LEVEL;
	}
}

/**
 * Connect to a rpc pipe on a remote server - sync version
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

NTSTATUS libnet_RpcConnect(struct libnet_context *ctx, TALLOC_CTX *mem_ctx,
			   struct libnet_RpcConnect *r)
{
	struct composite_context *c;

	c = libnet_RpcConnect_send(ctx, mem_ctx, r, NULL);
	return libnet_RpcConnect_recv(c, ctx, mem_ctx, r);
}
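
/*
 * Illustrative sketch (not part of the upstream code): a synchronous caller
 * holding an initialised libnet_context could request an LSA pipe on a DC of
 * a domain roughly as follows; the domain name and variable names are
 * hypothetical.
 *
 *	struct libnet_RpcConnect r;
 *	NTSTATUS status;
 *
 *	ZERO_STRUCT(r);
 *	r.level           = LIBNET_RPC_CONNECT_DC;
 *	r.in.name         = "EXAMPLEDOM";
 *	r.in.dcerpc_iface = &ndr_table_lsarpc;
 *
 *	status = libnet_RpcConnect(ctx, mem_ctx, &r);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		DEBUG(0, ("RpcConnect failed: %s\n", r.out.error_string));
 *	} else {
 *		... use r.out.dcerpc_pipe ...
 *	}
 */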