ctdb-mutex: Test the lock by locking a 2nd byte range
[Samba.git] / source4 / libnet / libnet_rpc.c
blob91c538fa58c4525541e1dc98de45087a7be1ec51
/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2004
   Copyright (C) Rafal Szczesniak 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libnet/libnet.h"
#include "libcli/libcli.h"
#include "libcli/composite/composite.h"
#include "librpc/rpc/dcerpc_proto.h"
#include "librpc/gen_ndr/ndr_lsa_c.h"
#include "librpc/gen_ndr/ndr_samr.h"
#include "auth/credentials/credentials.h"
30 struct rpc_connect_srv_state {
31 struct libnet_context *ctx;
32 struct libnet_RpcConnect r;
33 const char *binding;
35 /* information about the progress */
36 void (*monitor_fn)(struct monitor_msg*);
40 static void continue_pipe_connect(struct composite_context *ctx);
43 /**
44 * Initiates connection to rpc pipe on remote server
46 * @param ctx initialised libnet context
47 * @param mem_ctx memory context of this call
48 * @param r data structure containing necessary parameters and return values
49 * @return composite context of this call
50 **/
52 static struct composite_context* libnet_RpcConnectSrv_send(struct libnet_context *ctx,
53 TALLOC_CTX *mem_ctx,
54 struct libnet_RpcConnect *r,
55 void (*monitor)(struct monitor_msg*))
57 struct composite_context *c;
58 struct rpc_connect_srv_state *s;
59 struct dcerpc_binding *b;
60 struct composite_context *pipe_connect_req;
62 /* composite context allocation and setup */
63 c = composite_create(ctx, ctx->event_ctx);
64 if (c == NULL) return c;
66 s = talloc_zero(c, struct rpc_connect_srv_state);
67 if (composite_nomem(s, c)) return c;
69 c->private_data = s;
70 s->monitor_fn = monitor;
72 s->ctx = ctx;
73 s->r = *r;
74 ZERO_STRUCT(s->r.out);
76 /* prepare binding string */
77 switch (r->level) {
78 case LIBNET_RPC_CONNECT_SERVER:
79 s->binding = talloc_asprintf(s, "ncacn_np:%s", r->in.name);
80 break;
81 case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
82 s->binding = talloc_asprintf(s, "ncacn_np:%s[target_hostname=%s]",
83 r->in.address, r->in.name);
84 break;
86 case LIBNET_RPC_CONNECT_BINDING:
87 s->binding = talloc_strdup(s, r->in.binding);
88 break;
90 case LIBNET_RPC_CONNECT_DC:
91 case LIBNET_RPC_CONNECT_PDC:
92 /* this should never happen - DC and PDC level has a separate
93 composite function */
94 case LIBNET_RPC_CONNECT_DC_INFO:
95 /* this should never happen - DC_INFO level has a separate
96 composite function */
97 composite_error(c, NT_STATUS_INVALID_LEVEL);
98 return c;
101 /* parse binding string to the structure */
102 c->status = dcerpc_parse_binding(c, s->binding, &b);
103 if (!NT_STATUS_IS_OK(c->status)) {
104 DEBUG(0, ("Failed to parse dcerpc binding '%s'\n", s->binding));
105 composite_error(c, c->status);
106 return c;
109 switch (r->level) {
110 case LIBNET_RPC_CONNECT_SERVER:
111 case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
112 c->status = dcerpc_binding_set_flags(b, r->in.dcerpc_flags, 0);
113 if (!composite_is_ok(c)) return c;
114 break;
115 default:
116 /* other types have already been checked before */
117 break;
120 if (DEBUGLEVEL >= 10) {
121 c->status = dcerpc_binding_set_flags(b, DCERPC_DEBUG_PRINT_BOTH, 0);
122 if (!composite_is_ok(c)) return c;
125 /* connect to remote dcerpc pipe */
126 pipe_connect_req = dcerpc_pipe_connect_b_send(c, b, r->in.dcerpc_iface,
127 ctx->cred, c->event_ctx,
128 ctx->lp_ctx);
129 if (composite_nomem(pipe_connect_req, c)) return c;
131 composite_continue(c, pipe_connect_req, continue_pipe_connect, c);
132 return c;
137 Step 2 of RpcConnectSrv - get rpc connection
139 static void continue_pipe_connect(struct composite_context *ctx)
141 struct composite_context *c;
142 struct rpc_connect_srv_state *s;
144 c = talloc_get_type(ctx->async.private_data, struct composite_context);
145 s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);
147 /* receive result of rpc pipe connection */
148 c->status = dcerpc_pipe_connect_b_recv(ctx, c, &s->r.out.dcerpc_pipe);
150 /* post monitor message */
151 if (s->monitor_fn) {
152 struct monitor_msg msg;
153 struct msg_net_rpc_connect data;
154 const struct dcerpc_binding *b = s->r.out.dcerpc_pipe->binding;
156 /* prepare monitor message and post it */
157 data.host = dcerpc_binding_get_string_option(b, "host");
158 data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
159 data.transport = dcerpc_binding_get_transport(b);
160 data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");
162 msg.type = mon_NetRpcConnect;
163 msg.data = (void*)&data;
164 msg.data_size = sizeof(data);
165 s->monitor_fn(&msg);
168 composite_done(c);
173 * Receives result of connection to rpc pipe on remote server
175 * @param c composite context
176 * @param ctx initialised libnet context
177 * @param mem_ctx memory context of this call
178 * @param r data structure containing necessary parameters and return values
179 * @return nt status of rpc connection
182 static NTSTATUS libnet_RpcConnectSrv_recv(struct composite_context *c,
183 struct libnet_context *ctx,
184 TALLOC_CTX *mem_ctx,
185 struct libnet_RpcConnect *r)
187 NTSTATUS status;
189 status = composite_wait(c);
190 if (NT_STATUS_IS_OK(status)) {
191 struct rpc_connect_srv_state *s;
193 /* move the returned rpc pipe between memory contexts */
194 s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);
195 r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);
197 /* reference created pipe structure to long-term libnet_context
198 so that it can be used by other api functions even after short-term
199 mem_ctx is freed */
200 if (r->in.dcerpc_iface == &ndr_table_samr) {
201 ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
202 ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;
204 } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
205 ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
206 ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
209 r->out.error_string = talloc_strdup(mem_ctx, "Success");
211 } else {
212 r->out.error_string = talloc_asprintf(mem_ctx, "Error: %s", nt_errstr(status));
215 talloc_free(c);
216 return status;
220 struct rpc_connect_dc_state {
221 struct libnet_context *ctx;
222 struct libnet_RpcConnect r;
223 struct libnet_RpcConnect r2;
224 struct libnet_LookupDCs f;
225 const char *connect_name;
227 /* information about the progress */
228 void (*monitor_fn)(struct monitor_msg *);
232 static void continue_lookup_dc(struct tevent_req *req);
233 static void continue_rpc_connect(struct composite_context *ctx);
237 * Initiates connection to rpc pipe on domain pdc
239 * @param ctx initialised libnet context
240 * @param mem_ctx memory context of this call
241 * @param r data structure containing necessary parameters and return values
242 * @return composite context of this call
245 static struct composite_context* libnet_RpcConnectDC_send(struct libnet_context *ctx,
246 TALLOC_CTX *mem_ctx,
247 struct libnet_RpcConnect *r,
248 void (*monitor)(struct monitor_msg *msg))
250 struct composite_context *c;
251 struct rpc_connect_dc_state *s;
252 struct tevent_req *lookup_dc_req;
254 /* composite context allocation and setup */
255 c = composite_create(ctx, ctx->event_ctx);
256 if (c == NULL) return c;
258 s = talloc_zero(c, struct rpc_connect_dc_state);
259 if (composite_nomem(s, c)) return c;
261 c->private_data = s;
262 s->monitor_fn = monitor;
264 s->ctx = ctx;
265 s->r = *r;
266 ZERO_STRUCT(s->r.out);
268 switch (r->level) {
269 case LIBNET_RPC_CONNECT_PDC:
270 s->f.in.name_type = NBT_NAME_PDC;
271 break;
273 case LIBNET_RPC_CONNECT_DC:
274 s->f.in.name_type = NBT_NAME_LOGON;
275 break;
277 default:
278 break;
281 s->f.in.domain_name = r->in.name;
282 s->f.out.num_dcs = 0;
283 s->f.out.dcs = NULL;
285 /* find the domain pdc first */
286 lookup_dc_req = libnet_LookupDCs_send(ctx, c, &s->f);
287 if (composite_nomem(lookup_dc_req, c)) return c;
289 tevent_req_set_callback(lookup_dc_req, continue_lookup_dc, c);
290 return c;
295 Step 2 of RpcConnectDC: get domain controller name and
296 initiate RpcConnect to it
298 static void continue_lookup_dc(struct tevent_req *req)
300 struct composite_context *c;
301 struct rpc_connect_dc_state *s;
302 struct composite_context *rpc_connect_req;
303 struct monitor_msg msg;
304 struct msg_net_lookup_dc data;
306 c = tevent_req_callback_data(req, struct composite_context);
307 s = talloc_get_type_abort(c->private_data, struct rpc_connect_dc_state);
309 /* receive result of domain controller lookup */
310 c->status = libnet_LookupDCs_recv(req, c, &s->f);
311 if (!composite_is_ok(c)) return;
313 /* decide on preferred address type depending on DC type */
314 s->connect_name = s->f.out.dcs[0].name;
316 /* post monitor message */
317 if (s->monitor_fn) {
318 /* prepare a monitor message and post it */
319 data.domain_name = s->f.in.domain_name;
320 data.hostname = s->f.out.dcs[0].name;
321 data.address = s->f.out.dcs[0].address;
323 msg.type = mon_NetLookupDc;
324 msg.data = &data;
325 msg.data_size = sizeof(data);
326 s->monitor_fn(&msg);
329 /* ok, pdc has been found so do attempt to rpc connect */
330 s->r2.level = LIBNET_RPC_CONNECT_SERVER_ADDRESS;
332 /* this will cause yet another name resolution, but at least
333 * we pass the right name down the stack now */
334 s->r2.in.name = talloc_strdup(s, s->connect_name);
335 s->r2.in.address = talloc_steal(s, s->f.out.dcs[0].address);
336 s->r2.in.dcerpc_iface = s->r.in.dcerpc_iface;
337 s->r2.in.dcerpc_flags = s->r.in.dcerpc_flags;
339 /* send rpc connect request to the server */
340 rpc_connect_req = libnet_RpcConnectSrv_send(s->ctx, c, &s->r2, s->monitor_fn);
341 if (composite_nomem(rpc_connect_req, c)) return;
343 composite_continue(c, rpc_connect_req, continue_rpc_connect, c);
348 Step 3 of RpcConnectDC: get rpc connection to the server
350 static void continue_rpc_connect(struct composite_context *ctx)
352 struct composite_context *c;
353 struct rpc_connect_dc_state *s;
355 c = talloc_get_type(ctx->async.private_data, struct composite_context);
356 s = talloc_get_type(c->private_data, struct rpc_connect_dc_state);
358 c->status = libnet_RpcConnectSrv_recv(ctx, s->ctx, c, &s->r2);
360 /* error string is to be passed anyway */
361 s->r.out.error_string = s->r2.out.error_string;
362 if (!composite_is_ok(c)) return;
364 s->r.out.dcerpc_pipe = s->r2.out.dcerpc_pipe;
366 /* post monitor message */
367 if (s->monitor_fn) {
368 struct monitor_msg msg;
369 struct msg_net_rpc_connect data;
370 const struct dcerpc_binding *b = s->r.out.dcerpc_pipe->binding;
372 data.host = dcerpc_binding_get_string_option(b, "host");
373 data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
374 data.transport = dcerpc_binding_get_transport(b);
375 data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");
377 msg.type = mon_NetRpcConnect;
378 msg.data = (void*)&data;
379 msg.data_size = sizeof(data);
380 s->monitor_fn(&msg);
383 composite_done(c);
388 * Receives result of connection to rpc pipe on domain pdc
390 * @param c composite context
391 * @param ctx initialised libnet context
392 * @param mem_ctx memory context of this call
393 * @param r data structure containing necessary parameters and return values
394 * @return nt status of rpc connection
397 static NTSTATUS libnet_RpcConnectDC_recv(struct composite_context *c,
398 struct libnet_context *ctx,
399 TALLOC_CTX *mem_ctx,
400 struct libnet_RpcConnect *r)
402 NTSTATUS status;
403 struct rpc_connect_dc_state *s = talloc_get_type(c->private_data,
404 struct rpc_connect_dc_state);
406 status = composite_wait(c);
407 if (NT_STATUS_IS_OK(status)) {
408 /* move connected rpc pipe between memory contexts
410 The use of talloc_reparent(talloc_parent(), ...) is
411 bizarre, but it is needed because of the absolutely
412 atrocious use of talloc in this code. We need to
413 force the original parent to change, but finding
414 the original parent is well nigh impossible at this
415 point in the code (yes, I tried).
417 r->out.dcerpc_pipe = talloc_reparent(talloc_parent(s->r.out.dcerpc_pipe),
418 mem_ctx, s->r.out.dcerpc_pipe);
420 /* reference created pipe structure to long-term libnet_context
421 so that it can be used by other api functions even after short-term
422 mem_ctx is freed */
423 if (r->in.dcerpc_iface == &ndr_table_samr) {
424 ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
425 ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;
426 } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
427 ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
428 ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
431 } else {
432 r->out.error_string = talloc_asprintf(mem_ctx,
433 "Failed to rpc connect: %s",
434 nt_errstr(status));
437 talloc_free(c);
438 return status;
443 struct rpc_connect_dci_state {
444 struct libnet_context *ctx;
445 struct libnet_RpcConnect r;
446 struct libnet_RpcConnect rpc_conn;
447 struct policy_handle lsa_handle;
448 struct lsa_QosInfo qos;
449 struct lsa_ObjectAttribute attr;
450 struct lsa_OpenPolicy2 lsa_open_policy;
451 struct dcerpc_pipe *lsa_pipe;
452 struct lsa_QueryInfoPolicy2 lsa_query_info2;
453 struct lsa_QueryInfoPolicy lsa_query_info;
454 struct dcerpc_binding *final_binding;
455 struct dcerpc_pipe *final_pipe;
457 /* information about the progress */
458 void (*monitor_fn)(struct monitor_msg*);
462 static void continue_dci_rpc_connect(struct composite_context *ctx);
463 static void continue_lsa_policy(struct tevent_req *subreq);
464 static void continue_lsa_query_info(struct tevent_req *subreq);
465 static void continue_lsa_query_info2(struct tevent_req *subreq);
466 static void continue_epm_map_binding(struct composite_context *ctx);
467 static void continue_secondary_conn(struct composite_context *ctx);
468 static void continue_epm_map_binding_send(struct composite_context *c);
472 * Initiates connection to rpc pipe on remote server or pdc. Received result
473 * contains info on the domain name, domain sid and realm.
475 * @param ctx initialised libnet context
476 * @param mem_ctx memory context of this call
477 * @param r data structure containing necessary parameters and return values. Must be a talloc context
478 * @return composite context of this call
481 static struct composite_context* libnet_RpcConnectDCInfo_send(struct libnet_context *ctx,
482 TALLOC_CTX *mem_ctx,
483 struct libnet_RpcConnect *r,
484 void (*monitor)(struct monitor_msg*))
486 struct composite_context *c, *conn_req;
487 struct rpc_connect_dci_state *s;
489 /* composite context allocation and setup */
490 c = composite_create(ctx, ctx->event_ctx);
491 if (c == NULL) return c;
493 s = talloc_zero(c, struct rpc_connect_dci_state);
494 if (composite_nomem(s, c)) return c;
496 c->private_data = s;
497 s->monitor_fn = monitor;
499 s->ctx = ctx;
500 s->r = *r;
501 ZERO_STRUCT(s->r.out);
504 /* proceed to pure rpc connection if the binding string is provided,
505 otherwise try to connect domain controller */
506 if (r->in.binding == NULL) {
507 /* Pass on any binding flags (such as anonymous fallback) that have been set */
508 s->rpc_conn.in.dcerpc_flags = r->in.dcerpc_flags;
510 s->rpc_conn.in.name = r->in.name;
511 s->rpc_conn.level = LIBNET_RPC_CONNECT_DC;
512 } else {
513 s->rpc_conn.in.binding = r->in.binding;
514 s->rpc_conn.level = LIBNET_RPC_CONNECT_BINDING;
517 /* we need to query information on lsarpc interface first */
518 s->rpc_conn.in.dcerpc_iface = &ndr_table_lsarpc;
520 /* request connection to the lsa pipe on the pdc */
521 conn_req = libnet_RpcConnect_send(ctx, c, &s->rpc_conn, s->monitor_fn);
522 if (composite_nomem(c, conn_req)) return c;
524 composite_continue(c, conn_req, continue_dci_rpc_connect, c);
525 return c;
530 Step 2 of RpcConnectDCInfo: receive opened rpc pipe and open
531 lsa policy handle
533 static void continue_dci_rpc_connect(struct composite_context *ctx)
535 struct composite_context *c;
536 struct rpc_connect_dci_state *s;
537 struct tevent_req *subreq;
538 enum dcerpc_transport_t transport;
540 c = talloc_get_type(ctx->async.private_data, struct composite_context);
541 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
543 c->status = libnet_RpcConnect_recv(ctx, s->ctx, c, &s->rpc_conn);
544 if (!NT_STATUS_IS_OK(c->status)) {
545 composite_error(c, c->status);
546 return;
549 /* post monitor message */
550 if (s->monitor_fn) {
551 struct monitor_msg msg;
552 struct msg_net_rpc_connect data;
553 const struct dcerpc_binding *b = s->r.out.dcerpc_pipe->binding;
555 data.host = dcerpc_binding_get_string_option(b, "host");
556 data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
557 data.transport = dcerpc_binding_get_transport(b);
558 data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");
560 msg.type = mon_NetRpcConnect;
561 msg.data = (void*)&data;
562 msg.data_size = sizeof(data);
563 s->monitor_fn(&msg);
566 /* prepare to open a policy handle on lsa pipe */
567 s->lsa_pipe = s->ctx->lsa.pipe;
569 s->qos.len = 0;
570 s->qos.impersonation_level = 2;
571 s->qos.context_mode = 1;
572 s->qos.effective_only = 0;
574 s->attr.sec_qos = &s->qos;
576 transport = dcerpc_binding_get_transport(s->lsa_pipe->binding);
577 if (transport == NCACN_IP_TCP) {
579 * Skip to creating the actual connection. We can't open a
580 * policy handle over tcpip.
582 continue_epm_map_binding_send(c);
583 return;
586 s->lsa_open_policy.in.attr = &s->attr;
587 s->lsa_open_policy.in.system_name = talloc_asprintf(c, "\\");
588 if (composite_nomem(s->lsa_open_policy.in.system_name, c)) return;
590 s->lsa_open_policy.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED;
591 s->lsa_open_policy.out.handle = &s->lsa_handle;
593 subreq = dcerpc_lsa_OpenPolicy2_r_send(s, c->event_ctx,
594 s->lsa_pipe->binding_handle,
595 &s->lsa_open_policy);
596 if (composite_nomem(subreq, c)) return;
598 tevent_req_set_callback(subreq, continue_lsa_policy, c);
603 Step 3 of RpcConnectDCInfo: Get policy handle and query lsa info
604 for kerberos realm (dns name) and guid. The query may fail.
606 static void continue_lsa_policy(struct tevent_req *subreq)
608 struct composite_context *c;
609 struct rpc_connect_dci_state *s;
611 c = tevent_req_callback_data(subreq, struct composite_context);
612 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
614 c->status = dcerpc_lsa_OpenPolicy2_r_recv(subreq, s);
615 TALLOC_FREE(subreq);
616 if (!NT_STATUS_IS_OK(c->status)) {
617 composite_error(c, c->status);
618 return;
621 if (NT_STATUS_EQUAL(s->lsa_open_policy.out.result, NT_STATUS_RPC_PROTSEQ_NOT_SUPPORTED)) {
622 s->r.out.realm = NULL;
623 s->r.out.guid = NULL;
624 s->r.out.domain_name = NULL;
625 s->r.out.domain_sid = NULL;
627 /* Skip to the creating the actual connection, no info available on this transport */
628 continue_epm_map_binding_send(c);
629 return;
631 } else if (!NT_STATUS_IS_OK(s->lsa_open_policy.out.result)) {
632 composite_error(c, s->lsa_open_policy.out.result);
633 return;
636 /* post monitor message */
637 if (s->monitor_fn) {
638 struct monitor_msg msg;
640 msg.type = mon_LsaOpenPolicy;
641 msg.data = NULL;
642 msg.data_size = 0;
643 s->monitor_fn(&msg);
646 /* query lsa info for dns domain name and guid */
647 s->lsa_query_info2.in.handle = &s->lsa_handle;
648 s->lsa_query_info2.in.level = LSA_POLICY_INFO_DNS;
649 s->lsa_query_info2.out.info = talloc_zero(c, union lsa_PolicyInformation *);
650 if (composite_nomem(s->lsa_query_info2.out.info, c)) return;
652 subreq = dcerpc_lsa_QueryInfoPolicy2_r_send(s, c->event_ctx,
653 s->lsa_pipe->binding_handle,
654 &s->lsa_query_info2);
655 if (composite_nomem(subreq, c)) return;
657 tevent_req_set_callback(subreq, continue_lsa_query_info2, c);
662 Step 4 of RpcConnectDCInfo: Get realm and guid if provided (rpc call
663 may result in failure) and query lsa info for domain name and sid.
665 static void continue_lsa_query_info2(struct tevent_req *subreq)
667 struct composite_context *c;
668 struct rpc_connect_dci_state *s;
670 c = tevent_req_callback_data(subreq, struct composite_context);
671 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
673 c->status = dcerpc_lsa_QueryInfoPolicy2_r_recv(subreq, s);
674 TALLOC_FREE(subreq);
676 /* In case of error just null the realm and guid and proceed
677 to the next step. After all, it doesn't have to be AD domain
678 controller we talking to - NT-style PDC also counts */
680 if (NT_STATUS_EQUAL(c->status, NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE)) {
681 s->r.out.realm = NULL;
682 s->r.out.guid = NULL;
684 } else {
685 if (!NT_STATUS_IS_OK(c->status)) {
686 s->r.out.error_string = talloc_asprintf(c,
687 "lsa_QueryInfoPolicy2 failed: %s",
688 nt_errstr(c->status));
689 composite_error(c, c->status);
690 return;
693 if (!NT_STATUS_IS_OK(s->lsa_query_info2.out.result)) {
694 s->r.out.error_string = talloc_asprintf(c,
695 "lsa_QueryInfoPolicy2 failed: %s",
696 nt_errstr(s->lsa_query_info2.out.result));
697 composite_error(c, s->lsa_query_info2.out.result);
698 return;
701 /* Copy the dns domain name and guid from the query result */
703 /* this should actually be a conversion from lsa_StringLarge */
704 s->r.out.realm = (*s->lsa_query_info2.out.info)->dns.dns_domain.string;
705 s->r.out.guid = talloc(c, struct GUID);
706 if (composite_nomem(s->r.out.guid, c)) {
707 s->r.out.error_string = NULL;
708 return;
710 *s->r.out.guid = (*s->lsa_query_info2.out.info)->dns.domain_guid;
713 /* post monitor message */
714 if (s->monitor_fn) {
715 struct monitor_msg msg;
717 msg.type = mon_LsaQueryPolicy;
718 msg.data = NULL;
719 msg.data_size = 0;
720 s->monitor_fn(&msg);
723 /* query lsa info for domain name and sid */
724 s->lsa_query_info.in.handle = &s->lsa_handle;
725 s->lsa_query_info.in.level = LSA_POLICY_INFO_DOMAIN;
726 s->lsa_query_info.out.info = talloc_zero(c, union lsa_PolicyInformation *);
727 if (composite_nomem(s->lsa_query_info.out.info, c)) return;
729 subreq = dcerpc_lsa_QueryInfoPolicy_r_send(s, c->event_ctx,
730 s->lsa_pipe->binding_handle,
731 &s->lsa_query_info);
732 if (composite_nomem(subreq, c)) return;
734 tevent_req_set_callback(subreq, continue_lsa_query_info, c);
739 Step 5 of RpcConnectDCInfo: Get domain name and sid
741 static void continue_lsa_query_info(struct tevent_req *subreq)
743 struct composite_context *c;
744 struct rpc_connect_dci_state *s;
746 c = tevent_req_callback_data(subreq, struct composite_context);
747 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
749 c->status = dcerpc_lsa_QueryInfoPolicy_r_recv(subreq, s);
750 TALLOC_FREE(subreq);
751 if (!NT_STATUS_IS_OK(c->status)) {
752 s->r.out.error_string = talloc_asprintf(c,
753 "lsa_QueryInfoPolicy failed: %s",
754 nt_errstr(c->status));
755 composite_error(c, c->status);
756 return;
759 /* post monitor message */
760 if (s->monitor_fn) {
761 struct monitor_msg msg;
763 msg.type = mon_LsaQueryPolicy;
764 msg.data = NULL;
765 msg.data_size = 0;
766 s->monitor_fn(&msg);
769 /* Copy the domain name and sid from the query result */
770 s->r.out.domain_sid = (*s->lsa_query_info.out.info)->domain.sid;
771 s->r.out.domain_name = (*s->lsa_query_info.out.info)->domain.name.string;
773 continue_epm_map_binding_send(c);
777 Step 5 (continued) of RpcConnectDCInfo: request endpoint
778 map binding.
780 We may short-cut to this step if we don't support LSA OpenPolicy on this transport
782 static void continue_epm_map_binding_send(struct composite_context *c)
784 struct rpc_connect_dci_state *s;
785 struct composite_context *epm_map_req;
786 struct cli_credentials *epm_creds = NULL;
788 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
790 /* prepare to get endpoint mapping for the requested interface */
791 s->final_binding = dcerpc_binding_dup(s, s->lsa_pipe->binding);
792 if (composite_nomem(s->final_binding, c)) return;
795 * We don't want to inherit the assoc_group_id from the
796 * lsa_pipe here!
798 dcerpc_binding_set_assoc_group_id(s->final_binding, 0);
800 epm_creds = cli_credentials_init_anon(s);
801 if (composite_nomem(epm_creds, c)) return;
803 epm_map_req = dcerpc_epm_map_binding_send(c, s->final_binding, s->r.in.dcerpc_iface,
804 epm_creds,
805 s->ctx->event_ctx, s->ctx->lp_ctx);
806 if (composite_nomem(epm_map_req, c)) return;
808 composite_continue(c, epm_map_req, continue_epm_map_binding, c);
812 Step 6 of RpcConnectDCInfo: Receive endpoint mapping and create secondary
813 rpc connection derived from already used pipe but connected to the requested
814 one (as specified in libnet_RpcConnect structure)
816 static void continue_epm_map_binding(struct composite_context *ctx)
818 struct composite_context *c, *sec_conn_req;
819 struct rpc_connect_dci_state *s;
821 c = talloc_get_type(ctx->async.private_data, struct composite_context);
822 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
824 c->status = dcerpc_epm_map_binding_recv(ctx);
825 if (!NT_STATUS_IS_OK(c->status)) {
826 s->r.out.error_string = talloc_asprintf(c,
827 "failed to map pipe with endpoint mapper - %s",
828 nt_errstr(c->status));
829 composite_error(c, c->status);
830 return;
833 /* create secondary connection derived from lsa pipe */
834 sec_conn_req = dcerpc_secondary_auth_connection_send(s->lsa_pipe,
835 s->final_binding,
836 s->r.in.dcerpc_iface,
837 s->ctx->cred,
838 s->ctx->lp_ctx);
839 if (composite_nomem(sec_conn_req, c)) return;
841 composite_continue(c, sec_conn_req, continue_secondary_conn, c);
846 Step 7 of RpcConnectDCInfo: Get actual pipe to be returned
847 and complete this composite call
849 static void continue_secondary_conn(struct composite_context *ctx)
851 struct composite_context *c;
852 struct rpc_connect_dci_state *s;
854 c = talloc_get_type(ctx->async.private_data, struct composite_context);
855 s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
857 c->status = dcerpc_secondary_auth_connection_recv(ctx, s->lsa_pipe,
858 &s->final_pipe);
859 if (!NT_STATUS_IS_OK(c->status)) {
860 s->r.out.error_string = talloc_asprintf(c,
861 "secondary connection failed: %s",
862 nt_errstr(c->status));
864 composite_error(c, c->status);
865 return;
868 s->r.out.dcerpc_pipe = s->final_pipe;
870 /* post monitor message */
871 if (s->monitor_fn) {
872 struct monitor_msg msg;
873 struct msg_net_rpc_connect data;
874 const struct dcerpc_binding *b = s->r.out.dcerpc_pipe->binding;
876 /* prepare monitor message and post it */
877 data.host = dcerpc_binding_get_string_option(b, "host");
878 data.endpoint = dcerpc_binding_get_string_option(b, "endpoint");
879 data.transport = dcerpc_binding_get_transport(b);
880 data.domain_name = dcerpc_binding_get_string_option(b, "target_hostname");
882 msg.type = mon_NetRpcConnect;
883 msg.data = (void*)&data;
884 msg.data_size = sizeof(data);
885 s->monitor_fn(&msg);
888 composite_done(c);
893 * Receives result of connection to rpc pipe and gets basic
894 * domain info (name, sid, realm, guid)
896 * @param c composite context
897 * @param ctx initialised libnet context
898 * @param mem_ctx memory context of this call
899 * @param r data structure containing return values
900 * @return nt status of rpc connection
903 static NTSTATUS libnet_RpcConnectDCInfo_recv(struct composite_context *c, struct libnet_context *ctx,
904 TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
906 NTSTATUS status;
907 struct rpc_connect_dci_state *s = talloc_get_type(c->private_data,
908 struct rpc_connect_dci_state);
910 status = composite_wait(c);
911 if (NT_STATUS_IS_OK(status)) {
912 r->out.realm = talloc_steal(mem_ctx, s->r.out.realm);
913 r->out.guid = talloc_steal(mem_ctx, s->r.out.guid);
914 r->out.domain_name = talloc_steal(mem_ctx, s->r.out.domain_name);
915 r->out.domain_sid = talloc_steal(mem_ctx, s->r.out.domain_sid);
917 r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);
919 /* reference created pipe structure to long-term libnet_context
920 so that it can be used by other api functions even after short-term
921 mem_ctx is freed */
922 if (r->in.dcerpc_iface == &ndr_table_samr) {
923 ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
924 ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;
926 } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
927 ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
928 ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
931 } else {
932 if (s->r.out.error_string) {
933 r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
934 } else if (r->in.binding == NULL) {
935 r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC failed: %s", nt_errstr(status));
936 } else {
937 r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC %s failed: %s",
938 r->in.binding, nt_errstr(status));
942 talloc_free(c);
943 return status;
948 * Initiates connection to rpc pipe on remote server or pdc, optionally
949 * providing domain info
951 * @param ctx initialised libnet context
952 * @param mem_ctx memory context of this call
953 * @param r data structure containing necessary parameters and return values
954 * @return composite context of this call
957 struct composite_context* libnet_RpcConnect_send(struct libnet_context *ctx,
958 TALLOC_CTX *mem_ctx,
959 struct libnet_RpcConnect *r,
960 void (*monitor)(struct monitor_msg*))
962 struct composite_context *c;
964 switch (r->level) {
965 case LIBNET_RPC_CONNECT_SERVER:
966 case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
967 case LIBNET_RPC_CONNECT_BINDING:
968 c = libnet_RpcConnectSrv_send(ctx, mem_ctx, r, monitor);
969 break;
971 case LIBNET_RPC_CONNECT_PDC:
972 case LIBNET_RPC_CONNECT_DC:
973 c = libnet_RpcConnectDC_send(ctx, mem_ctx, r, monitor);
974 break;
976 case LIBNET_RPC_CONNECT_DC_INFO:
977 c = libnet_RpcConnectDCInfo_send(ctx, mem_ctx, r, monitor);
978 break;
980 default:
981 c = talloc_zero(mem_ctx, struct composite_context);
982 composite_error(c, NT_STATUS_INVALID_LEVEL);
985 return c;
990 * Receives result of connection to rpc pipe on remote server or pdc
992 * @param c composite context
993 * @param ctx initialised libnet context
994 * @param mem_ctx memory context of this call
995 * @param r data structure containing necessary parameters and return values
996 * @return nt status of rpc connection
999 NTSTATUS libnet_RpcConnect_recv(struct composite_context *c, struct libnet_context *ctx,
1000 TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
1002 switch (r->level) {
1003 case LIBNET_RPC_CONNECT_SERVER:
1004 case LIBNET_RPC_CONNECT_BINDING:
1005 return libnet_RpcConnectSrv_recv(c, ctx, mem_ctx, r);
1007 case LIBNET_RPC_CONNECT_PDC:
1008 case LIBNET_RPC_CONNECT_DC:
1009 return libnet_RpcConnectDC_recv(c, ctx, mem_ctx, r);
1011 case LIBNET_RPC_CONNECT_DC_INFO:
1012 return libnet_RpcConnectDCInfo_recv(c, ctx, mem_ctx, r);
1014 default:
1015 ZERO_STRUCT(r->out);
1016 return NT_STATUS_INVALID_LEVEL;
1022 * Connect to a rpc pipe on a remote server - sync version
1024 * @param ctx initialised libnet context
1025 * @param mem_ctx memory context of this call
1026 * @param r data structure containing necessary parameters and return values
1027 * @return nt status of rpc connection
1030 NTSTATUS libnet_RpcConnect(struct libnet_context *ctx, TALLOC_CTX *mem_ctx,
1031 struct libnet_RpcConnect *r)
1033 struct composite_context *c;
1035 c = libnet_RpcConnect_send(ctx, mem_ctx, r, NULL);
1036 return libnet_RpcConnect_recv(c, ctx, mem_ctx, r);