s3-rpc_client: add cli_rpc_pipe_open_noauth_transport.
[Samba.git] / source3 / rpc_client / cli_pipe.c
blob 75ce7cb32bc7ea6b964b268d4829c842abb0f2c0
1 /*
2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &ndr_table_spoolss.syntax_id },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
77 { NULL, NULL }
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
86 char *guid_str;
87 const char *result;
88 int i;
89 for (i = 0; pipe_names[i].client_pipe; i++) {
90 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91 interface)) {
92 return &pipe_names[i].client_pipe[5];
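				/*
				 * Note: the [5] offset above skips the leading
				 * "\PIPE", so the short form with a single
				 * backslash (e.g. "\srvsvc") is what callers get.
				 */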
97 * Here we should ask \\epmapper, but for now our code is only
98 * interested in the known pipes mentioned in pipe_names[]
101 guid_str = GUID_string(talloc_tos(), &interface->uuid);
102 if (guid_str == NULL) {
103 return NULL;
105 result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106 (int)interface->if_version);
107 TALLOC_FREE(guid_str);
109 if (result == NULL) {
110 return "PIPE";
112 return result;
115 /********************************************************************
116 Map internal value to wire value.
117 ********************************************************************/
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
121 switch (auth_type) {
123 case PIPE_AUTH_TYPE_NONE:
124 return RPC_ANONYMOUS_AUTH_TYPE;
126 case PIPE_AUTH_TYPE_NTLMSSP:
127 return RPC_NTLMSSP_AUTH_TYPE;
129 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131 return RPC_SPNEGO_AUTH_TYPE;
133 case PIPE_AUTH_TYPE_SCHANNEL:
134 return RPC_SCHANNEL_AUTH_TYPE;
136 case PIPE_AUTH_TYPE_KRB5:
137 return RPC_KRB5_AUTH_TYPE;
139 default:
140 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
141 "auth type %u\n",
142 (unsigned int)auth_type ));
143 break;
145 return -1;
148 /********************************************************************
 149  Pipe description for DEBUG output.
150 ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152 struct rpc_pipe_client *cli)
154 char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155 if (result == NULL) {
156 return "pipe";
158 return result;
161 /********************************************************************
162 Rpc pipe call id.
163 ********************************************************************/
165 static uint32 get_rpc_call_id(void)
167 static uint32 call_id = 0;
168 return ++call_id;
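/*
 * Note: a call id only has to distinguish outstanding requests on one
 * connection, so a simple static counter (which may eventually wrap)
 * is sufficient here.
 */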
 172  * Realloc pdu to have at least "size" bytes
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
177 size_t extra_size;
179 if (prs_data_size(pdu) >= size) {
180 return true;
183 extra_size = size - prs_data_size(pdu);
185 if (!prs_force_grow(pdu, extra_size)) {
186 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187 "%d bytes.\n", (int)extra_size));
188 return false;
191 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192 (int)extra_size, prs_data_size(pdu)));
193 return true;
197 /*******************************************************************
 198  Use SMBreadX to get the rest of one fragment's worth of rpc data.
 199  Reads the whole size or gives an error message.
200 ********************************************************************/
202 struct rpc_read_state {
203 struct event_context *ev;
204 struct rpc_cli_transport *transport;
205 uint8_t *data;
206 size_t size;
207 size_t num_read;
210 static void rpc_read_done(struct async_req *subreq);
212 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213 struct event_context *ev,
214 struct rpc_cli_transport *transport,
215 uint8_t *data, size_t size)
217 struct async_req *result, *subreq;
218 struct rpc_read_state *state;
220 if (!async_req_setup(mem_ctx, &result, &state,
221 struct rpc_read_state)) {
222 return NULL;
224 state->ev = ev;
225 state->transport = transport;
226 state->data = data;
227 state->size = size;
228 state->num_read = 0;
230 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232 subreq = transport->read_send(state, ev, (uint8_t *)data, size,
233 transport->priv);
234 if (subreq == NULL) {
235 goto fail;
237 subreq->async.fn = rpc_read_done;
238 subreq->async.priv = result;
239 return result;
241 fail:
242 TALLOC_FREE(result);
243 return NULL;
246 static void rpc_read_done(struct async_req *subreq)
248 struct async_req *req = talloc_get_type_abort(
249 subreq->async.priv, struct async_req);
250 struct rpc_read_state *state = talloc_get_type_abort(
251 req->private_data, struct rpc_read_state);
252 NTSTATUS status;
253 ssize_t received;
255 status = state->transport->read_recv(subreq, &received);
256 TALLOC_FREE(subreq);
257 if (!NT_STATUS_IS_OK(status)) {
258 async_req_nterror(req, status);
259 return;
262 state->num_read += received;
263 if (state->num_read == state->size) {
264 async_req_done(req);
265 return;
268 subreq = state->transport->read_send(state, state->ev,
269 state->data + state->num_read,
270 state->size - state->num_read,
271 state->transport->priv);
272 if (async_req_nomem(subreq, req)) {
273 return;
275 subreq->async.fn = rpc_read_done;
276 subreq->async.priv = req;
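/*
 * rpc_read_done() keeps re-issuing transport->read_send() until exactly
 * state->size bytes have arrived, so a caller of rpc_read_send() either
 * gets the complete buffer or an error - never a short read.
 */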
279 static NTSTATUS rpc_read_recv(struct async_req *req)
281 return async_req_simple_recv_ntstatus(req);
284 struct rpc_write_state {
285 struct event_context *ev;
286 struct rpc_cli_transport *transport;
287 const uint8_t *data;
288 size_t size;
289 size_t num_written;
292 static void rpc_write_done(struct async_req *subreq);
294 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
295 struct event_context *ev,
296 struct rpc_cli_transport *transport,
297 const uint8_t *data, size_t size)
299 struct async_req *result, *subreq;
300 struct rpc_write_state *state;
302 if (!async_req_setup(mem_ctx, &result, &state,
303 struct rpc_write_state)) {
304 return NULL;
306 state->ev = ev;
307 state->transport = transport;
308 state->data = data;
309 state->size = size;
310 state->num_written = 0;
312 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
314 subreq = transport->write_send(state, ev, data, size, transport->priv);
315 if (subreq == NULL) {
316 goto fail;
318 subreq->async.fn = rpc_write_done;
319 subreq->async.priv = result;
320 return result;
321 fail:
322 TALLOC_FREE(result);
323 return NULL;
326 static void rpc_write_done(struct async_req *subreq)
328 struct async_req *req = talloc_get_type_abort(
329 subreq->async.priv, struct async_req);
330 struct rpc_write_state *state = talloc_get_type_abort(
331 req->private_data, struct rpc_write_state);
332 NTSTATUS status;
333 ssize_t written;
335 status = state->transport->write_recv(subreq, &written);
336 TALLOC_FREE(subreq);
337 if (!NT_STATUS_IS_OK(status)) {
338 async_req_nterror(req, status);
339 return;
342 state->num_written += written;
344 if (state->num_written == state->size) {
345 async_req_done(req);
346 return;
349 subreq = state->transport->write_send(state, state->ev,
350 state->data + state->num_written,
351 state->size - state->num_written,
352 state->transport->priv);
353 if (async_req_nomem(subreq, req)) {
354 return;
356 subreq->async.fn = rpc_write_done;
357 subreq->async.priv = req;
360 static NTSTATUS rpc_write_recv(struct async_req *req)
362 return async_req_simple_recv_ntstatus(req);
366 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
367 struct rpc_hdr_info *prhdr,
368 prs_struct *pdu)
371 * This next call sets the endian bit correctly in current_pdu. We
372 * will propagate this to rbuf later.
375 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
376 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
377 return NT_STATUS_BUFFER_TOO_SMALL;
380 if (prhdr->frag_len > cli->max_recv_frag) {
381 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
382 " we only allow %d\n", (int)prhdr->frag_len,
383 (int)cli->max_recv_frag));
384 return NT_STATUS_BUFFER_TOO_SMALL;
387 return NT_STATUS_OK;
390 /****************************************************************************
391 Try and get a PDU's worth of data from current_pdu. If not, then read more
392 from the wire.
393 ****************************************************************************/
395 struct get_complete_frag_state {
396 struct event_context *ev;
397 struct rpc_pipe_client *cli;
398 struct rpc_hdr_info *prhdr;
399 prs_struct *pdu;
402 static void get_complete_frag_got_header(struct async_req *subreq);
403 static void get_complete_frag_got_rest(struct async_req *subreq);
405 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
406 struct event_context *ev,
407 struct rpc_pipe_client *cli,
408 struct rpc_hdr_info *prhdr,
409 prs_struct *pdu)
411 struct async_req *result, *subreq;
412 struct get_complete_frag_state *state;
413 uint32_t pdu_len;
414 NTSTATUS status;
416 if (!async_req_setup(mem_ctx, &result, &state,
417 struct get_complete_frag_state)) {
418 return NULL;
420 state->ev = ev;
421 state->cli = cli;
422 state->prhdr = prhdr;
423 state->pdu = pdu;
425 pdu_len = prs_data_size(pdu);
426 if (pdu_len < RPC_HEADER_LEN) {
427 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
428 status = NT_STATUS_NO_MEMORY;
429 goto post_status;
431 subreq = rpc_read_send(
432 state, state->ev,
433 state->cli->transport,
434 (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
435 RPC_HEADER_LEN - pdu_len);
436 if (subreq == NULL) {
437 status = NT_STATUS_NO_MEMORY;
438 goto post_status;
440 subreq->async.fn = get_complete_frag_got_header;
441 subreq->async.priv = result;
442 return result;
445 status = parse_rpc_header(cli, prhdr, pdu);
446 if (!NT_STATUS_IS_OK(status)) {
447 goto post_status;
451 * Ensure we have frag_len bytes of data.
453 if (pdu_len < prhdr->frag_len) {
454 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
455 status = NT_STATUS_NO_MEMORY;
456 goto post_status;
458 subreq = rpc_read_send(state, state->ev,
459 state->cli->transport,
460 (uint8_t *)(prs_data_p(pdu) + pdu_len),
461 prhdr->frag_len - pdu_len);
462 if (subreq == NULL) {
463 status = NT_STATUS_NO_MEMORY;
464 goto post_status;
466 subreq->async.fn = get_complete_frag_got_rest;
467 subreq->async.priv = result;
468 return result;
471 status = NT_STATUS_OK;
472 post_status:
473 if (async_post_ntstatus(result, ev, status)) {
474 return result;
476 TALLOC_FREE(result);
477 return NULL;
480 static void get_complete_frag_got_header(struct async_req *subreq)
482 struct async_req *req = talloc_get_type_abort(
483 subreq->async.priv, struct async_req);
484 struct get_complete_frag_state *state = talloc_get_type_abort(
485 req->private_data, struct get_complete_frag_state);
486 NTSTATUS status;
488 status = rpc_read_recv(subreq);
489 TALLOC_FREE(subreq);
490 if (!NT_STATUS_IS_OK(status)) {
491 async_req_nterror(req, status);
492 return;
495 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
496 if (!NT_STATUS_IS_OK(status)) {
497 async_req_nterror(req, status);
498 return;
501 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
502 async_req_nterror(req, NT_STATUS_NO_MEMORY);
503 return;
507 * We're here in this piece of code because we've read exactly
508 * RPC_HEADER_LEN bytes into state->pdu.
511 subreq = rpc_read_send(
512 state, state->ev, state->cli->transport,
513 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
514 state->prhdr->frag_len - RPC_HEADER_LEN);
515 if (async_req_nomem(subreq, req)) {
516 return;
518 subreq->async.fn = get_complete_frag_got_rest;
519 subreq->async.priv = req;
522 static void get_complete_frag_got_rest(struct async_req *subreq)
524 struct async_req *req = talloc_get_type_abort(
525 subreq->async.priv, struct async_req);
526 NTSTATUS status;
528 status = rpc_read_recv(subreq);
529 TALLOC_FREE(subreq);
530 if (!NT_STATUS_IS_OK(status)) {
531 async_req_nterror(req, status);
532 return;
534 async_req_done(req);
537 static NTSTATUS get_complete_frag_recv(struct async_req *req)
539 return async_req_simple_recv_ntstatus(req);
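/*
 * On success the pdu buffer now holds at least prhdr->frag_len bytes,
 * i.e. one complete fragment including its RPC header. Any surplus
 * bytes belong to the next fragment and are dealt with later by
 * cli_pipe_reset_current_pdu().
 */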
542 /****************************************************************************
543 NTLMSSP specific sign/seal.
544 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
545 In fact I should probably abstract these into identical pieces of code... JRA.
546 ****************************************************************************/
548 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
549 prs_struct *current_pdu,
550 uint8 *p_ss_padding_len)
552 RPC_HDR_AUTH auth_info;
553 uint32 save_offset = prs_offset(current_pdu);
554 uint32 auth_len = prhdr->auth_len;
555 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
556 unsigned char *data = NULL;
557 size_t data_len;
558 unsigned char *full_packet_data = NULL;
559 size_t full_packet_data_len;
560 DATA_BLOB auth_blob;
561 NTSTATUS status;
563 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
564 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
565 return NT_STATUS_OK;
568 if (!ntlmssp_state) {
569 return NT_STATUS_INVALID_PARAMETER;
572 /* Ensure there's enough data for an authenticated response. */
573 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
574 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
575 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
576 (unsigned int)auth_len ));
577 return NT_STATUS_BUFFER_TOO_SMALL;
581 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
582 * after the RPC header.
583 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
584 * functions as NTLMv2 checks the rpc headers also.
587 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
588 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
590 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
591 full_packet_data_len = prhdr->frag_len - auth_len;
593 /* Pull the auth header and the following data into a blob. */
594 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
595 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
596 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
597 return NT_STATUS_BUFFER_TOO_SMALL;
600 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
601 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
602 return NT_STATUS_BUFFER_TOO_SMALL;
605 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
606 auth_blob.length = auth_len;
608 switch (cli->auth->auth_level) {
609 case PIPE_AUTH_LEVEL_PRIVACY:
610 /* Data is encrypted. */
611 status = ntlmssp_unseal_packet(ntlmssp_state,
612 data, data_len,
613 full_packet_data,
614 full_packet_data_len,
615 &auth_blob);
616 if (!NT_STATUS_IS_OK(status)) {
617 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
618 "packet from %s. Error was %s.\n",
619 rpccli_pipe_txt(debug_ctx(), cli),
620 nt_errstr(status) ));
621 return status;
623 break;
624 case PIPE_AUTH_LEVEL_INTEGRITY:
625 /* Data is signed. */
626 status = ntlmssp_check_packet(ntlmssp_state,
627 data, data_len,
628 full_packet_data,
629 full_packet_data_len,
630 &auth_blob);
631 if (!NT_STATUS_IS_OK(status)) {
632 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
633 "packet from %s. Error was %s.\n",
634 rpccli_pipe_txt(debug_ctx(), cli),
635 nt_errstr(status) ));
636 return status;
638 break;
639 default:
640 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
641 "auth level %d\n", cli->auth->auth_level));
642 return NT_STATUS_INVALID_INFO_CLASS;
646 * Return the current pointer to the data offset.
649 if(!prs_set_offset(current_pdu, save_offset)) {
650 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
651 (unsigned int)save_offset ));
652 return NT_STATUS_BUFFER_TOO_SMALL;
656 * Remember the padding length. We must remove it from the real data
657 * stream once the sign/seal is done.
660 *p_ss_padding_len = auth_info.auth_pad_len;
662 return NT_STATUS_OK;
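/*
 * The pad length returned here is consumed by
 * cli_pipe_validate_current_pdu(), which strips both the padding and
 * the auth footer before handing the NDR payload back to the caller.
 */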
665 /****************************************************************************
666 schannel specific sign/seal.
667 ****************************************************************************/
669 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
670 prs_struct *current_pdu,
671 uint8 *p_ss_padding_len)
673 RPC_HDR_AUTH auth_info;
674 RPC_AUTH_SCHANNEL_CHK schannel_chk;
675 uint32 auth_len = prhdr->auth_len;
676 uint32 save_offset = prs_offset(current_pdu);
677 struct schannel_auth_struct *schannel_auth =
678 cli->auth->a_u.schannel_auth;
679 uint32 data_len;
681 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
682 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
683 return NT_STATUS_OK;
686 if (auth_len < RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
687 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
688 return NT_STATUS_INVALID_PARAMETER;
691 if (!schannel_auth) {
692 return NT_STATUS_INVALID_PARAMETER;
695 /* Ensure there's enough data for an authenticated response. */
696 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
697 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
698 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
699 (unsigned int)auth_len ));
700 return NT_STATUS_INVALID_PARAMETER;
703 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
705 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
706 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
707 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
708 return NT_STATUS_BUFFER_TOO_SMALL;
711 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
712 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
713 return NT_STATUS_BUFFER_TOO_SMALL;
716 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
717 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
718 auth_info.auth_type));
719 return NT_STATUS_BUFFER_TOO_SMALL;
722 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
723 &schannel_chk, current_pdu, 0)) {
724 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
725 return NT_STATUS_BUFFER_TOO_SMALL;
728 if (!schannel_decode(schannel_auth,
729 cli->auth->auth_level,
730 SENDER_IS_ACCEPTOR,
731 &schannel_chk,
732 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
733 data_len)) {
734 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
735 "Connection to %s.\n",
736 rpccli_pipe_txt(debug_ctx(), cli)));
737 return NT_STATUS_INVALID_PARAMETER;
740 /* The sequence number gets incremented on both send and receive. */
741 schannel_auth->seq_num++;
744 * Return the current pointer to the data offset.
747 if(!prs_set_offset(current_pdu, save_offset)) {
748 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
749 (unsigned int)save_offset ));
750 return NT_STATUS_BUFFER_TOO_SMALL;
754 * Remember the padding length. We must remove it from the real data
755 * stream once the sign/seal is done.
758 *p_ss_padding_len = auth_info.auth_pad_len;
760 return NT_STATUS_OK;
763 /****************************************************************************
764 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
765 ****************************************************************************/
767 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
768 prs_struct *current_pdu,
769 uint8 *p_ss_padding_len)
771 NTSTATUS ret = NT_STATUS_OK;
 773          /* Paranoia checks for auth_len. */
774 if (prhdr->auth_len) {
775 if (prhdr->auth_len > prhdr->frag_len) {
776 return NT_STATUS_INVALID_PARAMETER;
779 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
780 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
781 /* Integer wrap attempt. */
782 return NT_STATUS_INVALID_PARAMETER;
787 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
790 switch(cli->auth->auth_type) {
791 case PIPE_AUTH_TYPE_NONE:
792 if (prhdr->auth_len) {
793 DEBUG(3, ("cli_pipe_validate_rpc_response: "
794 "Connection to %s - got non-zero "
795 "auth len %u.\n",
796 rpccli_pipe_txt(debug_ctx(), cli),
797 (unsigned int)prhdr->auth_len ));
798 return NT_STATUS_INVALID_PARAMETER;
800 break;
802 case PIPE_AUTH_TYPE_NTLMSSP:
803 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
804 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
805 if (!NT_STATUS_IS_OK(ret)) {
806 return ret;
808 break;
810 case PIPE_AUTH_TYPE_SCHANNEL:
811 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
812 if (!NT_STATUS_IS_OK(ret)) {
813 return ret;
815 break;
817 case PIPE_AUTH_TYPE_KRB5:
818 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
819 default:
820 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
821 "to %s - unknown internal auth type %u.\n",
822 rpccli_pipe_txt(debug_ctx(), cli),
823 cli->auth->auth_type ));
824 return NT_STATUS_INVALID_INFO_CLASS;
827 return NT_STATUS_OK;
830 /****************************************************************************
831 Do basic authentication checks on an incoming pdu.
832 ****************************************************************************/
834 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
835 prs_struct *current_pdu,
836 uint8 expected_pkt_type,
837 char **ppdata,
838 uint32 *pdata_len,
839 prs_struct *return_data)
842 NTSTATUS ret = NT_STATUS_OK;
843 uint32 current_pdu_len = prs_data_size(current_pdu);
845 if (current_pdu_len != prhdr->frag_len) {
846 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
847 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
848 return NT_STATUS_INVALID_PARAMETER;
852 * Point the return values at the real data including the RPC
853 * header. Just in case the caller wants it.
855 *ppdata = prs_data_p(current_pdu);
856 *pdata_len = current_pdu_len;
858 /* Ensure we have the correct type. */
859 switch (prhdr->pkt_type) {
860 case RPC_ALTCONTRESP:
861 case RPC_BINDACK:
863 /* Alter context and bind ack share the same packet definitions. */
864 break;
867 case RPC_RESPONSE:
869 RPC_HDR_RESP rhdr_resp;
870 uint8 ss_padding_len = 0;
872 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
873 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
874 return NT_STATUS_BUFFER_TOO_SMALL;
877 /* Here's where we deal with incoming sign/seal. */
878 ret = cli_pipe_validate_rpc_response(cli, prhdr,
879 current_pdu, &ss_padding_len);
880 if (!NT_STATUS_IS_OK(ret)) {
881 return ret;
884 /* Point the return values at the NDR data. Remember to remove any ss padding. */
885 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
887 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
888 return NT_STATUS_BUFFER_TOO_SMALL;
891 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
893 /* Remember to remove the auth footer. */
894 if (prhdr->auth_len) {
895 /* We've already done integer wrap tests on auth_len in
896 cli_pipe_validate_rpc_response(). */
897 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
898 return NT_STATUS_BUFFER_TOO_SMALL;
900 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
903 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
904 current_pdu_len, *pdata_len, ss_padding_len ));
 907                   * If this is the first reply, and the allocation hint is reasonable, try and
908 * set up the return_data parse_struct to the correct size.
911 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
912 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
913 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
914 "too large to allocate\n",
915 (unsigned int)rhdr_resp.alloc_hint ));
916 return NT_STATUS_NO_MEMORY;
920 break;
923 case RPC_BINDNACK:
924 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
925 "received from %s!\n",
926 rpccli_pipe_txt(debug_ctx(), cli)));
927 /* Use this for now... */
928 return NT_STATUS_NETWORK_ACCESS_DENIED;
930 case RPC_FAULT:
932 RPC_HDR_RESP rhdr_resp;
933 RPC_HDR_FAULT fault_resp;
935 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
936 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
937 return NT_STATUS_BUFFER_TOO_SMALL;
940 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
941 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
942 return NT_STATUS_BUFFER_TOO_SMALL;
945 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
946 "code %s received from %s!\n",
947 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
948 rpccli_pipe_txt(debug_ctx(), cli)));
949 if (NT_STATUS_IS_OK(fault_resp.status)) {
950 return NT_STATUS_UNSUCCESSFUL;
951 } else {
952 return fault_resp.status;
956 default:
957 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
958 "from %s!\n",
959 (unsigned int)prhdr->pkt_type,
960 rpccli_pipe_txt(debug_ctx(), cli)));
961 return NT_STATUS_INVALID_INFO_CLASS;
964 if (prhdr->pkt_type != expected_pkt_type) {
965 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
966 "got an unexpected RPC packet type - %u, not %u\n",
967 rpccli_pipe_txt(debug_ctx(), cli),
968 prhdr->pkt_type,
969 expected_pkt_type));
970 return NT_STATUS_INVALID_INFO_CLASS;
973 /* Do this just before return - we don't want to modify any rpc header
974 data before now as we may have needed to do cryptographic actions on
975 it before. */
977 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
978 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
979 "setting fragment first/last ON.\n"));
980 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
983 return NT_STATUS_OK;
986 /****************************************************************************
987 Ensure we eat the just processed pdu from the current_pdu prs_struct.
988 Normally the frag_len and buffer size will match, but on the first trans
989 reply there is a theoretical chance that buffer size > frag_len, so we must
990 deal with that.
991 ****************************************************************************/
993 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
995 uint32 current_pdu_len = prs_data_size(current_pdu);
997 if (current_pdu_len < prhdr->frag_len) {
998 return NT_STATUS_BUFFER_TOO_SMALL;
1001 /* Common case. */
1002 if (current_pdu_len == (uint32)prhdr->frag_len) {
1003 prs_mem_free(current_pdu);
1004 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1005 /* Make current_pdu dynamic with no memory. */
1006 prs_give_memory(current_pdu, 0, 0, True);
1007 return NT_STATUS_OK;
1011 * Oh no ! More data in buffer than we processed in current pdu.
1012 * Cheat. Move the data down and shrink the buffer.
1015 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1016 current_pdu_len - prhdr->frag_len);
1018 /* Remember to set the read offset back to zero. */
1019 prs_set_offset(current_pdu, 0);
1021 /* Shrink the buffer. */
1022 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1023 return NT_STATUS_BUFFER_TOO_SMALL;
1026 return NT_STATUS_OK;
1029 /****************************************************************************
1030  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1031 ****************************************************************************/
1033 struct cli_api_pipe_state {
1034 struct event_context *ev;
1035 struct rpc_cli_transport *transport;
1036 uint8_t *rdata;
1037 uint32_t rdata_len;
1040 static void cli_api_pipe_trans_done(struct async_req *subreq);
1041 static void cli_api_pipe_write_done(struct async_req *subreq);
1042 static void cli_api_pipe_read_done(struct async_req *subreq);
1044 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1045 struct event_context *ev,
1046 struct rpc_cli_transport *transport,
1047 uint8_t *data, size_t data_len,
1048 uint32_t max_rdata_len)
1050 struct async_req *result, *subreq;
1051 struct cli_api_pipe_state *state;
1052 NTSTATUS status;
1054 if (!async_req_setup(mem_ctx, &result, &state,
1055 struct cli_api_pipe_state)) {
1056 return NULL;
1058 state->ev = ev;
1059 state->transport = transport;
1061 if (max_rdata_len < RPC_HEADER_LEN) {
1063            * For an RPC reply we always need at least RPC_HEADER_LEN
1064 * bytes. We check this here because we will receive
1065 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1067 status = NT_STATUS_INVALID_PARAMETER;
1068 goto post_status;
1071 if (transport->trans_send != NULL) {
1072 subreq = transport->trans_send(state, ev, data, data_len,
1073 max_rdata_len, transport->priv);
1074 if (subreq == NULL) {
1075 status = NT_STATUS_NO_MEMORY;
1076 goto post_status;
1078 subreq->async.fn = cli_api_pipe_trans_done;
1079 subreq->async.priv = result;
1080 return result;
1084 * If the transport does not provide a "trans" routine, i.e. for
1085 * example the ncacn_ip_tcp transport, do the write/read step here.
1088 subreq = rpc_write_send(state, ev, transport, data, data_len);
1089 if (subreq == NULL) {
1090 goto fail;
1092 subreq->async.fn = cli_api_pipe_write_done;
1093 subreq->async.priv = result;
1094 return result;
1096 status = NT_STATUS_INVALID_PARAMETER;
1098 post_status:
1099 if (async_post_ntstatus(result, ev, status)) {
1100 return result;
1102 fail:
1103 TALLOC_FREE(result);
1104 return NULL;
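/*
 * Summary: transports with a trans_send routine (the SMB named pipe
 * case) get the whole reply in one round trip via trans_recv. Stream
 * transports fall back to rpc_write_send() followed by a read of the
 * first RPC_HEADER_LEN bytes; the rest of the fragment is fetched
 * later by get_complete_frag_send().
 */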
1107 static void cli_api_pipe_trans_done(struct async_req *subreq)
1109 struct async_req *req = talloc_get_type_abort(
1110 subreq->async.priv, struct async_req);
1111 struct cli_api_pipe_state *state = talloc_get_type_abort(
1112 req->private_data, struct cli_api_pipe_state);
1113 NTSTATUS status;
1115 status = state->transport->trans_recv(subreq, state, &state->rdata,
1116 &state->rdata_len);
1117 TALLOC_FREE(subreq);
1118 if (!NT_STATUS_IS_OK(status)) {
1119 async_req_nterror(req, status);
1120 return;
1122 async_req_done(req);
1125 static void cli_api_pipe_write_done(struct async_req *subreq)
1127 struct async_req *req = talloc_get_type_abort(
1128 subreq->async.priv, struct async_req);
1129 struct cli_api_pipe_state *state = talloc_get_type_abort(
1130 req->private_data, struct cli_api_pipe_state);
1131 NTSTATUS status;
1133 status = rpc_write_recv(subreq);
1134 TALLOC_FREE(subreq);
1135 if (!NT_STATUS_IS_OK(status)) {
1136 async_req_nterror(req, status);
1137 return;
1140 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1141 if (async_req_nomem(state->rdata, req)) {
1142 return;
1146           * We don't need to use rpc_read_send here; the upper layer will cope
1147           * with a short read. transport->trans_send could also return less
1148           * than state->max_rdata_len.
1150 subreq = state->transport->read_send(state, state->ev, state->rdata,
1151 RPC_HEADER_LEN,
1152 state->transport->priv);
1153 if (async_req_nomem(subreq, req)) {
1154 return;
1156 subreq->async.fn = cli_api_pipe_read_done;
1157 subreq->async.priv = req;
1160 static void cli_api_pipe_read_done(struct async_req *subreq)
1162 struct async_req *req = talloc_get_type_abort(
1163 subreq->async.priv, struct async_req);
1164 struct cli_api_pipe_state *state = talloc_get_type_abort(
1165 req->private_data, struct cli_api_pipe_state);
1166 NTSTATUS status;
1167 ssize_t received;
1169 status = state->transport->read_recv(subreq, &received);
1170 TALLOC_FREE(subreq);
1171 if (!NT_STATUS_IS_OK(status)) {
1172 async_req_nterror(req, status);
1173 return;
1175 state->rdata_len = received;
1176 async_req_done(req);
1179 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1180 uint8_t **prdata, uint32_t *prdata_len)
1182 struct cli_api_pipe_state *state = talloc_get_type_abort(
1183 req->private_data, struct cli_api_pipe_state);
1184 NTSTATUS status;
1186 if (async_req_is_nterror(req, &status)) {
1187 return status;
1190 *prdata = talloc_move(mem_ctx, &state->rdata);
1191 *prdata_len = state->rdata_len;
1192 return NT_STATUS_OK;
1195 /****************************************************************************
1196 Send data on an rpc pipe via trans. The prs_struct data must be the last
1197 pdu fragment of an NDR data stream.
1199 Receive response data from an rpc pipe, which may be large...
1201  Read the first fragment: unfortunately we have to use SMBtrans for the first
1202  bit, then SMBreadX for subsequent bits.
1204  If the first fragment received wasn't also the last fragment, continue
1205  getting fragments until we _do_ receive the last fragment.
1207  Request/Response PDUs look like the following...
1209 |<------------------PDU len----------------------------------------------->|
1210 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1212 +------------+-----------------+-------------+---------------+-------------+
1213 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1214 +------------+-----------------+-------------+---------------+-------------+
1216 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1217 signing & sealing being negotiated.
1219 ****************************************************************************/
1221 struct rpc_api_pipe_state {
1222 struct event_context *ev;
1223 struct rpc_pipe_client *cli;
1224 uint8_t expected_pkt_type;
1226 prs_struct incoming_frag;
1227 struct rpc_hdr_info rhdr;
1229 prs_struct incoming_pdu; /* Incoming reply */
1230 uint32_t incoming_pdu_offset;
1233 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1235 prs_mem_free(&state->incoming_frag);
1236 prs_mem_free(&state->incoming_pdu);
1237 return 0;
1240 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1241 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1243 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1244 struct event_context *ev,
1245 struct rpc_pipe_client *cli,
1246 prs_struct *data, /* Outgoing PDU */
1247 uint8_t expected_pkt_type)
1249 struct async_req *result, *subreq;
1250 struct rpc_api_pipe_state *state;
1251 uint16_t max_recv_frag;
1252 NTSTATUS status;
1254 if (!async_req_setup(mem_ctx, &result, &state,
1255 struct rpc_api_pipe_state)) {
1256 return NULL;
1258 state->ev = ev;
1259 state->cli = cli;
1260 state->expected_pkt_type = expected_pkt_type;
1261 state->incoming_pdu_offset = 0;
1263 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1265 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1266 /* Make incoming_pdu dynamic with no memory. */
1267 prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1269 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1272 * Ensure we're not sending too much.
1274 if (prs_offset(data) > cli->max_xmit_frag) {
1275 status = NT_STATUS_INVALID_PARAMETER;
1276 goto post_status;
1279 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1281 max_recv_frag = cli->max_recv_frag;
1283 #ifdef DEVELOPER
1284 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1285 #endif
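	/*
	 * In DEVELOPER builds the maximum fragment size we ask for is
	 * shrunk to a small random value, so the multi-fragment
	 * reassembly path below gets exercised even when a reply would
	 * normally fit in a single fragment.
	 */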
1287 subreq = cli_api_pipe_send(state, ev, cli->transport,
1288 (uint8_t *)prs_data_p(data),
1289 prs_offset(data), max_recv_frag);
1290 if (subreq == NULL) {
1291 status = NT_STATUS_NO_MEMORY;
1292 goto post_status;
1294 subreq->async.fn = rpc_api_pipe_trans_done;
1295 subreq->async.priv = result;
1296 return result;
1298 post_status:
1299 if (async_post_ntstatus(result, ev, status)) {
1300 return result;
1302 TALLOC_FREE(result);
1303 return NULL;
1306 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1308 struct async_req *req = talloc_get_type_abort(
1309 subreq->async.priv, struct async_req);
1310 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1311 req->private_data, struct rpc_api_pipe_state);
1312 NTSTATUS status;
1313 uint8_t *rdata = NULL;
1314 uint32_t rdata_len = 0;
1315 char *rdata_copy;
1317 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1318 TALLOC_FREE(subreq);
1319 if (!NT_STATUS_IS_OK(status)) {
1320 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1321 async_req_nterror(req, status);
1322 return;
1325 if (rdata == NULL) {
1326 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1327 rpccli_pipe_txt(debug_ctx(), state->cli)));
1328 async_req_done(req);
1329 return;
1333 * Give the memory received from cli_trans as dynamic to the current
1334 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1335 * :-(
1337 rdata_copy = (char *)memdup(rdata, rdata_len);
1338 TALLOC_FREE(rdata);
1339 if (async_req_nomem(rdata_copy, req)) {
1340 return;
1342 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1344 /* Ensure we have enough data for a pdu. */
1345 subreq = get_complete_frag_send(state, state->ev, state->cli,
1346 &state->rhdr, &state->incoming_frag);
1347 if (async_req_nomem(subreq, req)) {
1348 return;
1350 subreq->async.fn = rpc_api_pipe_got_pdu;
1351 subreq->async.priv = req;
1354 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1356 struct async_req *req = talloc_get_type_abort(
1357 subreq->async.priv, struct async_req);
1358 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1359 req->private_data, struct rpc_api_pipe_state);
1360 NTSTATUS status;
1361 char *rdata = NULL;
1362 uint32_t rdata_len = 0;
1364 status = get_complete_frag_recv(subreq);
1365 TALLOC_FREE(subreq);
1366 if (!NT_STATUS_IS_OK(status)) {
1367 DEBUG(5, ("get_complete_frag failed: %s\n",
1368 nt_errstr(status)));
1369 async_req_nterror(req, status);
1370 return;
1373 status = cli_pipe_validate_current_pdu(
1374 state->cli, &state->rhdr, &state->incoming_frag,
1375 state->expected_pkt_type, &rdata, &rdata_len,
1376 &state->incoming_pdu);
1378 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1379 (unsigned)prs_data_size(&state->incoming_frag),
1380 (unsigned)state->incoming_pdu_offset,
1381 nt_errstr(status)));
1383 if (!NT_STATUS_IS_OK(status)) {
1384 async_req_nterror(req, status);
1385 return;
1388 if ((state->rhdr.flags & RPC_FLG_FIRST)
1389 && (state->rhdr.pack_type[0] == 0)) {
1391 * Set the data type correctly for big-endian data on the
1392 * first packet.
1394 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1395 "big-endian.\n",
1396 rpccli_pipe_txt(debug_ctx(), state->cli)));
1397 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1400 * Check endianness on subsequent packets.
1402 if (state->incoming_frag.bigendian_data
1403 != state->incoming_pdu.bigendian_data) {
1404 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1405 "%s\n",
1406 state->incoming_pdu.bigendian_data?"big":"little",
1407 state->incoming_frag.bigendian_data?"big":"little"));
1408 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1409 return;
1412 /* Now copy the data portion out of the pdu into rbuf. */
1413 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1414 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1415 return;
1418 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1419 rdata, (size_t)rdata_len);
1420 state->incoming_pdu_offset += rdata_len;
1422 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1423 &state->incoming_frag);
1424 if (!NT_STATUS_IS_OK(status)) {
1425 async_req_nterror(req, status);
1426 return;
1429 if (state->rhdr.flags & RPC_FLG_LAST) {
1430 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1431 rpccli_pipe_txt(debug_ctx(), state->cli),
1432 (unsigned)prs_data_size(&state->incoming_pdu)));
1433 async_req_done(req);
1434 return;
1437 subreq = get_complete_frag_send(state, state->ev, state->cli,
1438 &state->rhdr, &state->incoming_frag);
1439 if (async_req_nomem(subreq, req)) {
1440 return;
1442 subreq->async.fn = rpc_api_pipe_got_pdu;
1443 subreq->async.priv = req;
1446 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1447 prs_struct *reply_pdu)
1449 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1450 req->private_data, struct rpc_api_pipe_state);
1451 NTSTATUS status;
1453 if (async_req_is_nterror(req, &status)) {
1454 return status;
1457 *reply_pdu = state->incoming_pdu;
1458 reply_pdu->mem_ctx = mem_ctx;
1461 * Prevent state->incoming_pdu from being freed in
1462 * rpc_api_pipe_state_destructor()
1464 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1466 return NT_STATUS_OK;
1469 /*******************************************************************
1470 Creates krb5 auth bind.
1471 ********************************************************************/
1473 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1474 enum pipe_auth_level auth_level,
1475 RPC_HDR_AUTH *pauth_out,
1476 prs_struct *auth_data)
1478 #ifdef HAVE_KRB5
1479 int ret;
1480 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1481 DATA_BLOB tkt = data_blob_null;
1482 DATA_BLOB tkt_wrapped = data_blob_null;
1484 /* We may change the pad length before marshalling. */
1485 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1487 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1488 a->service_principal ));
1490 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1492 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1493 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1495 if (ret) {
1496 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1497 "failed with %s\n",
1498 a->service_principal,
1499 error_message(ret) ));
1501 data_blob_free(&tkt);
1502 prs_mem_free(auth_data);
1503 return NT_STATUS_INVALID_PARAMETER;
1506 /* wrap that up in a nice GSS-API wrapping */
1507 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1509 data_blob_free(&tkt);
1511 /* Auth len in the rpc header doesn't include auth_header. */
1512 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1513 data_blob_free(&tkt_wrapped);
1514 prs_mem_free(auth_data);
1515 return NT_STATUS_NO_MEMORY;
1518 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1519 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1521 data_blob_free(&tkt_wrapped);
1522 return NT_STATUS_OK;
1523 #else
1524 return NT_STATUS_INVALID_PARAMETER;
1525 #endif
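/* Without HAVE_KRB5 this is a stub: a kerberos bind simply fails with
   NT_STATUS_INVALID_PARAMETER. */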
1528 /*******************************************************************
1529 Creates SPNEGO NTLMSSP auth bind.
1530 ********************************************************************/
1532 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1533 enum pipe_auth_level auth_level,
1534 RPC_HDR_AUTH *pauth_out,
1535 prs_struct *auth_data)
1537 NTSTATUS nt_status;
1538 DATA_BLOB null_blob = data_blob_null;
1539 DATA_BLOB request = data_blob_null;
1540 DATA_BLOB spnego_msg = data_blob_null;
1542 /* We may change the pad length before marshalling. */
1543 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1545 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1546 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1547 null_blob,
1548 &request);
1550 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1551 data_blob_free(&request);
1552 prs_mem_free(auth_data);
1553 return nt_status;
1556 /* Wrap this in SPNEGO. */
1557 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1559 data_blob_free(&request);
1561 /* Auth len in the rpc header doesn't include auth_header. */
1562 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1563 data_blob_free(&spnego_msg);
1564 prs_mem_free(auth_data);
1565 return NT_STATUS_NO_MEMORY;
1568 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1569 dump_data(5, spnego_msg.data, spnego_msg.length);
1571 data_blob_free(&spnego_msg);
1572 return NT_STATUS_OK;
1575 /*******************************************************************
1576 Creates NTLMSSP auth bind.
1577 ********************************************************************/
1579 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1580 enum pipe_auth_level auth_level,
1581 RPC_HDR_AUTH *pauth_out,
1582 prs_struct *auth_data)
1584 NTSTATUS nt_status;
1585 DATA_BLOB null_blob = data_blob_null;
1586 DATA_BLOB request = data_blob_null;
1588 /* We may change the pad length before marshalling. */
1589 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1591 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1592 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1593 null_blob,
1594 &request);
1596 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1597 data_blob_free(&request);
1598 prs_mem_free(auth_data);
1599 return nt_status;
1602 /* Auth len in the rpc header doesn't include auth_header. */
1603 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1604 data_blob_free(&request);
1605 prs_mem_free(auth_data);
1606 return NT_STATUS_NO_MEMORY;
1609 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1610 dump_data(5, request.data, request.length);
1612 data_blob_free(&request);
1613 return NT_STATUS_OK;
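/*
 * This only carries the NTLMSSP NEGOTIATE token. The server's CHALLENGE
 * comes back in the bind_ack, and the final AUTHENTICATE message is
 * sent in the follow-up auth3/alter-context step of the bind.
 */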
1616 /*******************************************************************
1617 Creates schannel auth bind.
1618 ********************************************************************/
1620 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1621 enum pipe_auth_level auth_level,
1622 RPC_HDR_AUTH *pauth_out,
1623 prs_struct *auth_data)
1625 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1627 /* We may change the pad length before marshalling. */
1628 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1630 /* Use lp_workgroup() if domain not specified */
1632 if (!cli->auth->domain || !cli->auth->domain[0]) {
1633 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1634 if (cli->auth->domain == NULL) {
1635 return NT_STATUS_NO_MEMORY;
1639 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1640 global_myname());
1643 * Now marshall the data into the auth parse_struct.
1646 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1647 &schannel_neg, auth_data, 0)) {
1648 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1649 prs_mem_free(auth_data);
1650 return NT_STATUS_NO_MEMORY;
1653 return NT_STATUS_OK;
1656 /*******************************************************************
1657 Creates the internals of a DCE/RPC bind request or alter context PDU.
1658 ********************************************************************/
1660 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1661 prs_struct *rpc_out,
1662 uint32 rpc_call_id,
1663 const RPC_IFACE *abstract,
1664 const RPC_IFACE *transfer,
1665 RPC_HDR_AUTH *phdr_auth,
1666 prs_struct *pauth_info)
1668 RPC_HDR hdr;
1669 RPC_HDR_RB hdr_rb;
1670 RPC_CONTEXT rpc_ctx;
1671 uint16 auth_len = prs_offset(pauth_info);
1672 uint8 ss_padding_len = 0;
1673 uint16 frag_len = 0;
1675 /* create the RPC context. */
1676 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1678 /* create the bind request RPC_HDR_RB */
1679 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1681 /* Start building the frag length. */
1682 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1684 /* Do we need to pad ? */
1685 if (auth_len) {
1686 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1687 if (data_len % 8) {
1688 ss_padding_len = 8 - (data_len % 8);
1689 phdr_auth->auth_pad_len = ss_padding_len;
1691 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
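		/*
		 * Worked example: if RPC_HEADER_LEN + RPC_HDR_RB_LEN come to
		 * 70 bytes, ss_padding_len becomes 2 so the auth header
		 * starts on an 8 byte boundary; at 72 bytes no padding is
		 * needed.
		 */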
1694 /* Create the request RPC_HDR */
1695 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1697 /* Marshall the RPC header */
1698 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1699 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1700 return NT_STATUS_NO_MEMORY;
1703 /* Marshall the bind request data */
1704 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1705 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1706 return NT_STATUS_NO_MEMORY;
1710 * Grow the outgoing buffer to store any auth info.
1713 if(auth_len != 0) {
1714 if (ss_padding_len) {
1715 char pad[8];
1716 memset(pad, '\0', 8);
1717 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1718 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1719 return NT_STATUS_NO_MEMORY;
1723 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1724 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1725 return NT_STATUS_NO_MEMORY;
1729 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1730 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1731 return NT_STATUS_NO_MEMORY;
1735 return NT_STATUS_OK;
1738 /*******************************************************************
1739 Creates a DCE/RPC bind request.
1740 ********************************************************************/
1742 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1743 prs_struct *rpc_out,
1744 uint32 rpc_call_id,
1745 const RPC_IFACE *abstract,
1746 const RPC_IFACE *transfer,
1747 enum pipe_auth_type auth_type,
1748 enum pipe_auth_level auth_level)
1750 RPC_HDR_AUTH hdr_auth;
1751 prs_struct auth_info;
1752 NTSTATUS ret = NT_STATUS_OK;
1754 ZERO_STRUCT(hdr_auth);
1755 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1756 return NT_STATUS_NO_MEMORY;
1758 switch (auth_type) {
1759 case PIPE_AUTH_TYPE_SCHANNEL:
1760 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1761 if (!NT_STATUS_IS_OK(ret)) {
1762 prs_mem_free(&auth_info);
1763 return ret;
1765 break;
1767 case PIPE_AUTH_TYPE_NTLMSSP:
1768 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1769 if (!NT_STATUS_IS_OK(ret)) {
1770 prs_mem_free(&auth_info);
1771 return ret;
1773 break;
1775 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1776 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1777 if (!NT_STATUS_IS_OK(ret)) {
1778 prs_mem_free(&auth_info);
1779 return ret;
1781 break;
1783 case PIPE_AUTH_TYPE_KRB5:
1784 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1785 if (!NT_STATUS_IS_OK(ret)) {
1786 prs_mem_free(&auth_info);
1787 return ret;
1789 break;
1791 case PIPE_AUTH_TYPE_NONE:
1792 break;
1794 default:
1795 /* "Can't" happen. */
1796 return NT_STATUS_INVALID_INFO_CLASS;
1799 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1800 rpc_out,
1801 rpc_call_id,
1802 abstract,
1803 transfer,
1804 &hdr_auth,
1805 &auth_info);
1807 prs_mem_free(&auth_info);
1808 return ret;
1811 /*******************************************************************
1812 Create and add the NTLMSSP sign/seal auth header and data.
1813 ********************************************************************/
1815 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1816 RPC_HDR *phdr,
1817 uint32 ss_padding_len,
1818 prs_struct *outgoing_pdu)
1820 RPC_HDR_AUTH auth_info;
1821 NTSTATUS status;
1822 DATA_BLOB auth_blob = data_blob_null;
1823 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1825 if (!cli->auth->a_u.ntlmssp_state) {
1826 return NT_STATUS_INVALID_PARAMETER;
1829 /* Init and marshall the auth header. */
1830 init_rpc_hdr_auth(&auth_info,
1831 map_pipe_auth_type_to_rpc_auth_type(
1832 cli->auth->auth_type),
1833 cli->auth->auth_level,
1834 ss_padding_len,
1835 1 /* context id. */);
1837 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1838 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1839 data_blob_free(&auth_blob);
1840 return NT_STATUS_NO_MEMORY;
1843 switch (cli->auth->auth_level) {
1844 case PIPE_AUTH_LEVEL_PRIVACY:
1845 /* Data portion is encrypted. */
1846 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1847 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1848 data_and_pad_len,
1849 (unsigned char *)prs_data_p(outgoing_pdu),
1850 (size_t)prs_offset(outgoing_pdu),
1851 &auth_blob);
1852 if (!NT_STATUS_IS_OK(status)) {
1853 data_blob_free(&auth_blob);
1854 return status;
1856 break;
1858 case PIPE_AUTH_LEVEL_INTEGRITY:
1859 /* Data is signed. */
1860 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1861 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1862 data_and_pad_len,
1863 (unsigned char *)prs_data_p(outgoing_pdu),
1864 (size_t)prs_offset(outgoing_pdu),
1865 &auth_blob);
1866 if (!NT_STATUS_IS_OK(status)) {
1867 data_blob_free(&auth_blob);
1868 return status;
1870 break;
1872 default:
1873 /* Can't happen. */
1874 smb_panic("bad auth level");
1875 /* Notreached. */
1876 return NT_STATUS_INVALID_PARAMETER;
1879 /* Finally marshall the blob. */
1881 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1882 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1883 (unsigned int)NTLMSSP_SIG_SIZE));
1884 data_blob_free(&auth_blob);
1885 return NT_STATUS_NO_MEMORY;
1888 data_blob_free(&auth_blob);
1889 return NT_STATUS_OK;
1892 /*******************************************************************
1893 Create and add the schannel sign/seal auth header and data.
1894 ********************************************************************/
1896 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1897 RPC_HDR *phdr,
1898 uint32 ss_padding_len,
1899 prs_struct *outgoing_pdu)
1901 RPC_HDR_AUTH auth_info;
1902 RPC_AUTH_SCHANNEL_CHK verf;
1903 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1904 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1905 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1907 if (!sas) {
1908 return NT_STATUS_INVALID_PARAMETER;
1911 /* Init and marshall the auth header. */
1912 init_rpc_hdr_auth(&auth_info,
1913 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1914 cli->auth->auth_level,
1915 ss_padding_len,
1916 1 /* context id. */);
1918 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1919 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1920 return NT_STATUS_NO_MEMORY;
1923 switch (cli->auth->auth_level) {
1924 case PIPE_AUTH_LEVEL_PRIVACY:
1925 case PIPE_AUTH_LEVEL_INTEGRITY:
1926 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1927 sas->seq_num));
1929 schannel_encode(sas,
1930 cli->auth->auth_level,
1931 SENDER_IS_INITIATOR,
1932 &verf,
1933 data_p,
1934 data_and_pad_len);
1936 sas->seq_num++;
1937 break;
1939 default:
1940 /* Can't happen. */
1941 smb_panic("bad auth level");
1942 /* Notreached. */
1943 return NT_STATUS_INVALID_PARAMETER;
1946 /* Finally marshall the blob. */
1947 smb_io_rpc_auth_schannel_chk("",
1948 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1949 &verf,
1950 outgoing_pdu,
1953 return NT_STATUS_OK;
1956 /*******************************************************************
1957 Calculate how much data we're going to send in this packet, and also
1958 work out any sign/seal padding length.
1959 ********************************************************************/
1961 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1962 uint32 data_left,
1963 uint16 *p_frag_len,
1964 uint16 *p_auth_len,
1965 uint32 *p_ss_padding)
1967 uint32 data_space, data_len;
1969 #ifdef DEVELOPER
1970 if ((data_left > 0) && (sys_random() % 2)) {
1971 data_left = MAX(data_left/2, 1);
1973 #endif
1975 switch (cli->auth->auth_level) {
1976 case PIPE_AUTH_LEVEL_NONE:
1977 case PIPE_AUTH_LEVEL_CONNECT:
1978 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1979 data_len = MIN(data_space, data_left);
1980 *p_ss_padding = 0;
1981 *p_auth_len = 0;
1982 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1983 return data_len;
1985 case PIPE_AUTH_LEVEL_INTEGRITY:
1986 case PIPE_AUTH_LEVEL_PRIVACY:
1987 /* Treat the same for all authenticated rpc requests. */
1988 switch(cli->auth->auth_type) {
1989 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1990 case PIPE_AUTH_TYPE_NTLMSSP:
1991 *p_auth_len = NTLMSSP_SIG_SIZE;
1992 break;
1993 case PIPE_AUTH_TYPE_SCHANNEL:
1994 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1995 break;
1996 default:
1997 smb_panic("bad auth type");
1998 break;
2001 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2002 RPC_HDR_AUTH_LEN - *p_auth_len;
2004 data_len = MIN(data_space, data_left);
2005 *p_ss_padding = 0;
2006 if (data_len % 8) {
2007 *p_ss_padding = 8 - (data_len % 8);
2009 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
2010 data_len + *p_ss_padding + /* data plus padding. */
2011 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2012 return data_len;
2014 default:
2015 smb_panic("bad auth level");
2016 /* Notreached. */
2017 return 0;
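#if 0
/* Editor's sketch (not compiled into the build): spells out the fragment
 * layout that calculate_data_len_tosend() above computes for the
 * signed/sealed case. All identifiers are the existing constants and
 * out-parameters of that function; nothing new is assumed. */
static void example_check_frag_layout(uint16 frag_len, uint32 data_len,
				      uint32 ss_padding, uint16 auth_len)
{
	/* header | request header | stub data | sign/seal pad |
	 * auth header | auth signature/checksum */
	SMB_ASSERT(frag_len == RPC_HEADER_LEN + RPC_HDR_REQ_LEN +
			data_len + ss_padding +
			RPC_HDR_AUTH_LEN + auth_len);
}
#endif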
2021 /*******************************************************************
2022 External interface.
2023 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2024 Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2025 and deals with signing/sealing details.
2026 ********************************************************************/
2028 struct rpc_api_pipe_req_state {
2029 struct event_context *ev;
2030 struct rpc_pipe_client *cli;
2031 uint8_t op_num;
2032 uint32_t call_id;
2033 prs_struct *req_data;
2034 uint32_t req_data_sent;
2035 prs_struct outgoing_frag;
2036 prs_struct reply_pdu;
2039 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2041 prs_mem_free(&s->outgoing_frag);
2042 prs_mem_free(&s->reply_pdu);
2043 return 0;
2046 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2047 static void rpc_api_pipe_req_done(struct async_req *subreq);
2048 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2049 bool *is_last_frag);
2051 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2052 struct event_context *ev,
2053 struct rpc_pipe_client *cli,
2054 uint8_t op_num,
2055 prs_struct *req_data)
2057 struct async_req *result, *subreq;
2058 struct rpc_api_pipe_req_state *state;
2059 NTSTATUS status;
2060 bool is_last_frag;
2062 if (!async_req_setup(mem_ctx, &result, &state,
2063 struct rpc_api_pipe_req_state)) {
2064 return NULL;
2066 state->ev = ev;
2067 state->cli = cli;
2068 state->op_num = op_num;
2069 state->req_data = req_data;
2070 state->req_data_sent = 0;
2071 state->call_id = get_rpc_call_id();
2073 if (cli->max_xmit_frag
2074 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2075 /* Server is screwed up ! */
2076 status = NT_STATUS_INVALID_PARAMETER;
2077 goto post_status;
2080 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2082 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2083 state, MARSHALL)) {
2084 status = NT_STATUS_NO_MEMORY;
2085 goto post_status;
2088 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2090 status = prepare_next_frag(state, &is_last_frag);
2091 if (!NT_STATUS_IS_OK(status)) {
2092 goto post_status;
2095 if (is_last_frag) {
2096 subreq = rpc_api_pipe_send(state, ev, state->cli,
2097 &state->outgoing_frag,
2098 RPC_RESPONSE);
2099 if (subreq == NULL) {
2100 status = NT_STATUS_NO_MEMORY;
2101 goto post_status;
2103 subreq->async.fn = rpc_api_pipe_req_done;
2104 subreq->async.priv = result;
2105 } else {
2106 subreq = rpc_write_send(
2107 state, ev, cli->transport,
2108 (uint8_t *)prs_data_p(&state->outgoing_frag),
2109 prs_offset(&state->outgoing_frag));
2110 if (subreq == NULL) {
2111 status = NT_STATUS_NO_MEMORY;
2112 goto post_status;
2114 subreq->async.fn = rpc_api_pipe_req_write_done;
2115 subreq->async.priv = result;
2117 return result;
2119 post_status:
2120 if (async_post_ntstatus(result, ev, status)) {
2121 return result;
2123 TALLOC_FREE(result);
2124 return NULL;
2127 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2128 bool *is_last_frag)
2130 RPC_HDR hdr;
2131 RPC_HDR_REQ hdr_req;
2132 uint32_t data_sent_thistime;
2133 uint16_t auth_len;
2134 uint16_t frag_len;
2135 uint8_t flags = 0;
2136 uint32_t ss_padding;
2137 uint32_t data_left;
2138 char pad[8] = { 0, };
2139 NTSTATUS status;
2141 data_left = prs_offset(state->req_data) - state->req_data_sent;
2143 data_sent_thistime = calculate_data_len_tosend(
2144 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2146 if (state->req_data_sent == 0) {
2147 flags = RPC_FLG_FIRST;
2150 if (data_sent_thistime == data_left) {
2151 flags |= RPC_FLG_LAST;
2154 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2155 return NT_STATUS_NO_MEMORY;
2158 /* Create and marshall the header and request header. */
2159 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2160 auth_len);
2162 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2163 return NT_STATUS_NO_MEMORY;
2166 /* Create the rpc request RPC_HDR_REQ */
2167 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2168 state->op_num);
2170 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2171 &state->outgoing_frag, 0)) {
2172 return NT_STATUS_NO_MEMORY;
2175 /* Copy in the data, plus any ss padding. */
2176 if (!prs_append_some_prs_data(&state->outgoing_frag,
2177 state->req_data, state->req_data_sent,
2178 data_sent_thistime)) {
2179 return NT_STATUS_NO_MEMORY;
2182 /* Copy the sign/seal padding data. */
2183 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2184 return NT_STATUS_NO_MEMORY;
2187 /* Generate any auth sign/seal and add the auth footer. */
2188 switch (state->cli->auth->auth_type) {
2189 case PIPE_AUTH_TYPE_NONE:
2190 status = NT_STATUS_OK;
2191 break;
2192 case PIPE_AUTH_TYPE_NTLMSSP:
2193 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2194 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2195 &state->outgoing_frag);
2196 break;
2197 case PIPE_AUTH_TYPE_SCHANNEL:
2198 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2199 &state->outgoing_frag);
2200 break;
2201 default:
2202 status = NT_STATUS_INVALID_PARAMETER;
2203 break;
2206 state->req_data_sent += data_sent_thistime;
2207 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2209 return status;
2212 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2214 struct async_req *req = talloc_get_type_abort(
2215 subreq->async.priv, struct async_req);
2216 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2217 req->private_data, struct rpc_api_pipe_req_state);
2218 NTSTATUS status;
2219 bool is_last_frag;
2221 status = rpc_write_recv(subreq);
2222 TALLOC_FREE(subreq);
2223 if (!NT_STATUS_IS_OK(status)) {
2224 async_req_nterror(req, status);
2225 return;
2228 status = prepare_next_frag(state, &is_last_frag);
2229 if (!NT_STATUS_IS_OK(status)) {
2230 async_req_nterror(req, status);
2231 return;
2234 if (is_last_frag) {
2235 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2236 &state->outgoing_frag,
2237 RPC_RESPONSE);
2238 if (async_req_nomem(subreq, req)) {
2239 return;
2241 subreq->async.fn = rpc_api_pipe_req_done;
2242 subreq->async.priv = req;
2243 } else {
2244 subreq = rpc_write_send(
2245 state, state->ev,
2246 state->cli->transport,
2247 (uint8_t *)prs_data_p(&state->outgoing_frag),
2248 prs_offset(&state->outgoing_frag));
2249 if (async_req_nomem(subreq, req)) {
2250 return;
2252 subreq->async.fn = rpc_api_pipe_req_write_done;
2253 subreq->async.priv = req;
2257 static void rpc_api_pipe_req_done(struct async_req *subreq)
2259 struct async_req *req = talloc_get_type_abort(
2260 subreq->async.priv, struct async_req);
2261 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2262 req->private_data, struct rpc_api_pipe_req_state);
2263 NTSTATUS status;
2265 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2266 TALLOC_FREE(subreq);
2267 if (!NT_STATUS_IS_OK(status)) {
2268 async_req_nterror(req, status);
2269 return;
2271 async_req_done(req);
2274 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2275 prs_struct *reply_pdu)
2277 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2278 req->private_data, struct rpc_api_pipe_req_state);
2279 NTSTATUS status;
2281 if (async_req_is_nterror(req, &status)) {
2283 * We always have to initialize the reply pdu, even if there is
2284 * none. The rpccli_* caller routines expect this.
2286 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2287 return status;
2290 *reply_pdu = state->reply_pdu;
2291 reply_pdu->mem_ctx = mem_ctx;
2294 * Prevent state->reply_pdu from being freed in
2295 * rpc_api_pipe_req_state_destructor()
2297 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2299 return NT_STATUS_OK;
2302 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2303 uint8 op_num,
2304 prs_struct *in_data,
2305 prs_struct *out_data)
2307 TALLOC_CTX *frame = talloc_stackframe();
2308 struct event_context *ev;
2309 struct async_req *req;
2310 NTSTATUS status = NT_STATUS_NO_MEMORY;
2312 ev = event_context_init(frame);
2313 if (ev == NULL) {
2314 goto fail;
2317 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2318 if (req == NULL) {
2319 goto fail;
2322 while (req->state < ASYNC_REQ_DONE) {
2323 event_loop_once(ev);
2326 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2327 fail:
2328 TALLOC_FREE(frame);
2329 return status;
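#if 0
/* Editor's usage sketch (not compiled into the build): drives one raw
 * request through the synchronous wrapper above. The opnum value (0) and
 * the marshalling step are placeholders; real callers normally go through
 * the generated rpccli_* NDR wrappers instead of calling this directly. */
static NTSTATUS example_raw_request(TALLOC_CTX *mem_ctx,
				    struct rpc_pipe_client *cli)
{
	prs_struct in_data, out_data;
	NTSTATUS status;

	if (!prs_init(&in_data, RPC_MAX_PDU_FRAG_LEN, mem_ctx, MARSHALL)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* ... marshall the request arguments into in_data here ... */

	status = rpc_api_pipe_req(mem_ctx, cli, 0 /* opnum */,
				  &in_data, &out_data);
	prs_mem_free(&in_data);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* ... unmarshall the reply from out_data here ... */
	prs_mem_free(&out_data);
	return NT_STATUS_OK;
}
#endif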
2332 #if 0
2333 /****************************************************************************
2334 Set the handle state.
2335 ****************************************************************************/
2337 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2338 const char *pipe_name, uint16 device_state)
2340 bool state_set = False;
2341 char param[2];
2342 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2343 char *rparam = NULL;
2344 char *rdata = NULL;
2345 uint32 rparam_len, rdata_len;
2347 if (pipe_name == NULL)
2348 return False;
2350 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2351 cli->fnum, pipe_name, device_state));
2353 /* create parameters: device state */
2354 SSVAL(param, 0, device_state);
2356 /* create setup parameters. */
2357 setup[0] = 0x0001;
2358 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2360 /* send the data on \PIPE\ */
2361 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2362 setup, 2, 0, /* setup, length, max */
2363 param, 2, 0, /* param, length, max */
2364 NULL, 0, 1024, /* data, length, max */
2365 &rparam, &rparam_len, /* return param, length */
2366 &rdata, &rdata_len)) /* return data, length */
2368 DEBUG(5, ("Set Handle state: return OK\n"));
2369 state_set = True;
2372 SAFE_FREE(rparam);
2373 SAFE_FREE(rdata);
2375 return state_set;
2377 #endif
2379 /****************************************************************************
2380 Check the rpc bind acknowledge response.
2381 ****************************************************************************/
2383 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2385 if ( hdr_ba->addr.len == 0) {
2386 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2389 /* check the transfer syntax */
2390 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2391 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2392 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2393 return False;
2396 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2397 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2398 hdr_ba->res.num_results, hdr_ba->res.reason));
2401 DEBUG(5,("check_bind_response: accepted!\n"));
2402 return True;
2405 /*******************************************************************
2406 Creates a DCE/RPC bind authentication response.
2407 This is the packet that is sent back to the server once we
2408 have received a BIND-ACK, to finish the third leg of
2409 the authentication handshake.
2410 ********************************************************************/
2412 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2413 uint32 rpc_call_id,
2414 enum pipe_auth_type auth_type,
2415 enum pipe_auth_level auth_level,
2416 DATA_BLOB *pauth_blob,
2417 prs_struct *rpc_out)
2419 RPC_HDR hdr;
2420 RPC_HDR_AUTH hdr_auth;
2421 uint32 pad = 0;
2423 /* Create the request RPC_HDR */
2424 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2425 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2426 pauth_blob->length );
2428 /* Marshall it. */
2429 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2430 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2431 return NT_STATUS_NO_MEMORY;
2435 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2436 about padding - shouldn't this pad to length 8 ? JRA.
2439 /* 4 bytes padding. */
2440 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2441 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2442 return NT_STATUS_NO_MEMORY;
2445 /* Create the request RPC_HDR_AUTHA */
2446 init_rpc_hdr_auth(&hdr_auth,
2447 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2448 auth_level, 0, 1);
2450 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2451 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2452 return NT_STATUS_NO_MEMORY;
2456 * Append the auth data to the outgoing buffer.
2459 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2460 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2461 return NT_STATUS_NO_MEMORY;
2464 return NT_STATUS_OK;
2467 /*******************************************************************
2468 Creates a DCE/RPC bind alter context authentication request which
2469 may contain a SPNEGO auth blob.
2470 ********************************************************************/
2472 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2473 const RPC_IFACE *abstract,
2474 const RPC_IFACE *transfer,
2475 enum pipe_auth_level auth_level,
2476 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2477 prs_struct *rpc_out)
2479 RPC_HDR_AUTH hdr_auth;
2480 prs_struct auth_info;
2481 NTSTATUS ret = NT_STATUS_OK;
2483 ZERO_STRUCT(hdr_auth);
2484 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2485 return NT_STATUS_NO_MEMORY;
2487 /* We may change the pad length before marshalling. */
2488 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2490 if (pauth_blob->length) {
2491 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2492 prs_mem_free(&auth_info);
2493 return NT_STATUS_NO_MEMORY;
2497 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2498 rpc_out,
2499 rpc_call_id,
2500 abstract,
2501 transfer,
2502 &hdr_auth,
2503 &auth_info);
2504 prs_mem_free(&auth_info);
2505 return ret;
2508 /****************************************************************************
2509 Do an rpc bind.
2510 ****************************************************************************/
2512 struct rpc_pipe_bind_state {
2513 struct event_context *ev;
2514 struct rpc_pipe_client *cli;
2515 prs_struct rpc_out;
2516 uint32_t rpc_call_id;
2519 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2521 prs_mem_free(&state->rpc_out);
2522 return 0;
2525 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2526 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2527 struct rpc_pipe_bind_state *state,
2528 struct rpc_hdr_info *phdr,
2529 prs_struct *reply_pdu);
2530 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2531 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2532 struct rpc_pipe_bind_state *state,
2533 struct rpc_hdr_info *phdr,
2534 prs_struct *reply_pdu);
2535 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2537 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2538 struct event_context *ev,
2539 struct rpc_pipe_client *cli,
2540 struct cli_pipe_auth_data *auth)
2542 struct async_req *result, *subreq;
2543 struct rpc_pipe_bind_state *state;
2544 NTSTATUS status;
2546 if (!async_req_setup(mem_ctx, &result, &state,
2547 struct rpc_pipe_bind_state)) {
2548 return NULL;
2551 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2552 rpccli_pipe_txt(debug_ctx(), cli),
2553 (unsigned int)auth->auth_type,
2554 (unsigned int)auth->auth_level ));
2556 state->ev = ev;
2557 state->cli = cli;
2558 state->rpc_call_id = get_rpc_call_id();
2560 prs_init_empty(&state->rpc_out, state, MARSHALL);
2561 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2563 cli->auth = talloc_move(cli, &auth);
2565 /* Marshall the outgoing data. */
2566 status = create_rpc_bind_req(cli, &state->rpc_out,
2567 state->rpc_call_id,
2568 &cli->abstract_syntax,
2569 &cli->transfer_syntax,
2570 cli->auth->auth_type,
2571 cli->auth->auth_level);
2573 if (!NT_STATUS_IS_OK(status)) {
2574 goto post_status;
2577 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2578 RPC_BINDACK);
2579 if (subreq == NULL) {
2580 status = NT_STATUS_NO_MEMORY;
2581 goto post_status;
2583 subreq->async.fn = rpc_pipe_bind_step_one_done;
2584 subreq->async.priv = result;
2585 return result;
2587 post_status:
2588 if (async_post_ntstatus(result, ev, status)) {
2589 return result;
2591 TALLOC_FREE(result);
2592 return NULL;
2595 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2597 struct async_req *req = talloc_get_type_abort(
2598 subreq->async.priv, struct async_req);
2599 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2600 req->private_data, struct rpc_pipe_bind_state);
2601 prs_struct reply_pdu;
2602 struct rpc_hdr_info hdr;
2603 struct rpc_hdr_ba_info hdr_ba;
2604 NTSTATUS status;
2606 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2607 TALLOC_FREE(subreq);
2608 if (!NT_STATUS_IS_OK(status)) {
2609 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2610 rpccli_pipe_txt(debug_ctx(), state->cli),
2611 nt_errstr(status)));
2612 async_req_nterror(req, status);
2613 return;
2616 /* Unmarshall the RPC header */
2617 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2618 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2619 prs_mem_free(&reply_pdu);
2620 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2621 return;
2624 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2625 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2626 "RPC_HDR_BA.\n"));
2627 prs_mem_free(&reply_pdu);
2628 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2629 return;
2632 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2633 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2634 prs_mem_free(&reply_pdu);
2635 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2636 return;
2639 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2640 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2643 * For authenticated binds we may need to do 3 or 4 leg binds.
2646 switch(state->cli->auth->auth_type) {
2648 case PIPE_AUTH_TYPE_NONE:
2649 case PIPE_AUTH_TYPE_SCHANNEL:
2650 /* Bind complete. */
2651 prs_mem_free(&reply_pdu);
2652 async_req_done(req);
2653 break;
2655 case PIPE_AUTH_TYPE_NTLMSSP:
2656 /* Need to send AUTH3 packet - no reply. */
2657 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2658 &reply_pdu);
2659 prs_mem_free(&reply_pdu);
2660 if (!NT_STATUS_IS_OK(status)) {
2661 async_req_nterror(req, status);
2663 break;
2665 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2666 /* Need to send alter context request and reply. */
2667 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2668 &reply_pdu);
2669 prs_mem_free(&reply_pdu);
2670 if (!NT_STATUS_IS_OK(status)) {
2671 async_req_nterror(req, status);
2673 break;
2675 case PIPE_AUTH_TYPE_KRB5:
2676 /* KRB5 bind completion is not implemented - fall through to the error below. */
2678 default:
2679 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2680 (unsigned int)state->cli->auth->auth_type));
2681 prs_mem_free(&reply_pdu);
2682 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2686 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2687 struct rpc_pipe_bind_state *state,
2688 struct rpc_hdr_info *phdr,
2689 prs_struct *reply_pdu)
2691 DATA_BLOB server_response = data_blob_null;
2692 DATA_BLOB client_reply = data_blob_null;
2693 struct rpc_hdr_auth_info hdr_auth;
2694 struct async_req *subreq;
2695 NTSTATUS status;
2697 if ((phdr->auth_len == 0)
2698 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2699 return NT_STATUS_INVALID_PARAMETER;
2702 if (!prs_set_offset(
2703 reply_pdu,
2704 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2705 return NT_STATUS_INVALID_PARAMETER;
2708 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2709 return NT_STATUS_INVALID_PARAMETER;
2712 /* TODO - check auth_type/auth_level match. */
2714 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2715 prs_copy_data_out((char *)server_response.data, reply_pdu,
2716 phdr->auth_len);
2718 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2719 server_response, &client_reply);
2721 if (!NT_STATUS_IS_OK(status)) {
2722 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2723 "blob failed: %s.\n", nt_errstr(status)));
2724 return status;
2727 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2729 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2730 state->cli->auth->auth_type,
2731 state->cli->auth->auth_level,
2732 &client_reply, &state->rpc_out);
2733 data_blob_free(&client_reply);
2735 if (!NT_STATUS_IS_OK(status)) {
2736 return status;
2739 subreq = rpc_write_send(state, state->ev, state->cli->transport,
2740 (uint8_t *)prs_data_p(&state->rpc_out),
2741 prs_offset(&state->rpc_out));
2742 if (subreq == NULL) {
2743 return NT_STATUS_NO_MEMORY;
2745 subreq->async.fn = rpc_bind_auth3_write_done;
2746 subreq->async.priv = req;
2747 return NT_STATUS_OK;
2750 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2752 struct async_req *req = talloc_get_type_abort(
2753 subreq->async.priv, struct async_req);
2754 NTSTATUS status;
2756 status = rpc_write_recv(subreq);
2757 TALLOC_FREE(subreq);
2758 if (!NT_STATUS_IS_OK(status)) {
2759 async_req_nterror(req, status);
2760 return;
2762 async_req_done(req);
2765 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2766 struct rpc_pipe_bind_state *state,
2767 struct rpc_hdr_info *phdr,
2768 prs_struct *reply_pdu)
2770 DATA_BLOB server_spnego_response = data_blob_null;
2771 DATA_BLOB server_ntlm_response = data_blob_null;
2772 DATA_BLOB client_reply = data_blob_null;
2773 DATA_BLOB tmp_blob = data_blob_null;
2774 RPC_HDR_AUTH hdr_auth;
2775 struct async_req *subreq;
2776 NTSTATUS status;
2778 if ((phdr->auth_len == 0)
2779 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2780 return NT_STATUS_INVALID_PARAMETER;
2783 /* Process the returned NTLMSSP blob first. */
2784 if (!prs_set_offset(
2785 reply_pdu,
2786 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2787 return NT_STATUS_INVALID_PARAMETER;
2790 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2791 return NT_STATUS_INVALID_PARAMETER;
2794 server_spnego_response = data_blob(NULL, phdr->auth_len);
2795 prs_copy_data_out((char *)server_spnego_response.data,
2796 reply_pdu, phdr->auth_len);
2799 * The server might give us back two challenges - tmp_blob is for the
2800 * second.
2802 if (!spnego_parse_challenge(server_spnego_response,
2803 &server_ntlm_response, &tmp_blob)) {
2804 data_blob_free(&server_spnego_response);
2805 data_blob_free(&server_ntlm_response);
2806 data_blob_free(&tmp_blob);
2807 return NT_STATUS_INVALID_PARAMETER;
2810 /* We're finished with the server spnego response and the tmp_blob. */
2811 data_blob_free(&server_spnego_response);
2812 data_blob_free(&tmp_blob);
2814 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2815 server_ntlm_response, &client_reply);
2817 /* Finished with the server_ntlm response */
2818 data_blob_free(&server_ntlm_response);
2820 if (!NT_STATUS_IS_OK(status)) {
2821 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2822 "using server blob failed.\n"));
2823 data_blob_free(&client_reply);
2824 return status;
2827 /* SPNEGO wrap the client reply. */
2828 tmp_blob = spnego_gen_auth(client_reply);
2829 data_blob_free(&client_reply);
2830 client_reply = tmp_blob;
2831 tmp_blob = data_blob_null;
2833 /* Now prepare the alter context pdu. */
2834 prs_init_empty(&state->rpc_out, state, MARSHALL);
2836 status = create_rpc_alter_context(state->rpc_call_id,
2837 &state->cli->abstract_syntax,
2838 &state->cli->transfer_syntax,
2839 state->cli->auth->auth_level,
2840 &client_reply,
2841 &state->rpc_out);
2842 data_blob_free(&client_reply);
2844 if (!NT_STATUS_IS_OK(status)) {
2845 return status;
2848 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2849 &state->rpc_out, RPC_ALTCONTRESP);
2850 if (subreq == NULL) {
2851 return NT_STATUS_NO_MEMORY;
2853 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2854 subreq->async.priv = req;
2855 return NT_STATUS_OK;
2858 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2860 struct async_req *req = talloc_get_type_abort(
2861 subreq->async.priv, struct async_req);
2862 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2863 req->private_data, struct rpc_pipe_bind_state);
2864 DATA_BLOB server_spnego_response = data_blob_null;
2865 DATA_BLOB tmp_blob = data_blob_null;
2866 prs_struct reply_pdu;
2867 struct rpc_hdr_info hdr;
2868 struct rpc_hdr_auth_info hdr_auth;
2869 NTSTATUS status;
2871 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2872 TALLOC_FREE(subreq);
2873 if (!NT_STATUS_IS_OK(status)) {
2874 async_req_nterror(req, status);
2875 return;
2878 /* Get the auth blob from the reply. */
2879 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2880 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2881 "unmarshall RPC_HDR.\n"));
2882 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2883 return;
2886 if (!prs_set_offset(
2887 &reply_pdu,
2888 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2889 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2890 return;
2893 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2894 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2895 return;
2898 server_spnego_response = data_blob(NULL, hdr.auth_len);
2899 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2900 hdr.auth_len);
2902 /* Check we got a valid auth response. */
2903 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2904 OID_NTLMSSP, &tmp_blob)) {
2905 data_blob_free(&server_spnego_response);
2906 data_blob_free(&tmp_blob);
2907 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2908 return;
2911 data_blob_free(&server_spnego_response);
2912 data_blob_free(&tmp_blob);
2914 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2915 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2916 async_req_done(req);
2919 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2921 return async_req_simple_recv_ntstatus(req);
2924 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2925 struct cli_pipe_auth_data *auth)
2927 TALLOC_CTX *frame = talloc_stackframe();
2928 struct event_context *ev;
2929 struct async_req *req;
2930 NTSTATUS status = NT_STATUS_NO_MEMORY;
2932 ev = event_context_init(frame);
2933 if (ev == NULL) {
2934 goto fail;
2937 req = rpc_pipe_bind_send(frame, ev, cli, auth);
2938 if (req == NULL) {
2939 goto fail;
2942 while (req->state < ASYNC_REQ_DONE) {
2943 event_loop_once(ev);
2946 status = rpc_pipe_bind_recv(req);
2947 fail:
2948 TALLOC_FREE(frame);
2949 return status;
2952 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2953 unsigned int timeout)
2955 struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2957 if (cli == NULL) {
2958 return 0;
2960 return cli_set_timeout(cli, timeout);
2963 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2965 struct cli_state *cli;
2967 if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2968 || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2969 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2970 return true;
2973 cli = rpc_pipe_np_smb_conn(rpc_cli);
2974 if (cli == NULL) {
2975 return false;
2977 E_md4hash(cli->password ? cli->password : "", nt_hash);
2978 return true;
2981 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2982 struct cli_pipe_auth_data **presult)
2984 struct cli_pipe_auth_data *result;
2986 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2987 if (result == NULL) {
2988 return NT_STATUS_NO_MEMORY;
2991 result->auth_type = PIPE_AUTH_TYPE_NONE;
2992 result->auth_level = PIPE_AUTH_LEVEL_NONE;
2994 result->user_name = talloc_strdup(result, "");
2995 result->domain = talloc_strdup(result, "");
2996 if ((result->user_name == NULL) || (result->domain == NULL)) {
2997 TALLOC_FREE(result);
2998 return NT_STATUS_NO_MEMORY;
3001 *presult = result;
3002 return NT_STATUS_OK;
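#if 0
/* Editor's usage sketch (not compiled into the build): the typical pairing
 * of the helper above with rpc_pipe_bind() on a pipe that has been opened
 * but not yet bound, e.g. by one of the rpc_pipe_open_* routines below. */
static NTSTATUS example_anon_bind(struct rpc_pipe_client *pipe_cli)
{
	struct cli_pipe_auth_data *auth;
	NTSTATUS status;

	status = rpccli_anon_bind_data(pipe_cli, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	return rpc_pipe_bind(pipe_cli, auth);
}
#endif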
3005 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3007 ntlmssp_end(&auth->a_u.ntlmssp_state);
3008 return 0;
3011 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3012 enum pipe_auth_type auth_type,
3013 enum pipe_auth_level auth_level,
3014 const char *domain,
3015 const char *username,
3016 const char *password,
3017 struct cli_pipe_auth_data **presult)
3019 struct cli_pipe_auth_data *result;
3020 NTSTATUS status;
3022 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3023 if (result == NULL) {
3024 return NT_STATUS_NO_MEMORY;
3027 result->auth_type = auth_type;
3028 result->auth_level = auth_level;
3030 result->user_name = talloc_strdup(result, username);
3031 result->domain = talloc_strdup(result, domain);
3032 if ((result->user_name == NULL) || (result->domain == NULL)) {
3033 status = NT_STATUS_NO_MEMORY;
3034 goto fail;
3037 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3038 if (!NT_STATUS_IS_OK(status)) {
3039 goto fail;
3042 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3044 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3045 if (!NT_STATUS_IS_OK(status)) {
3046 goto fail;
3049 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3050 if (!NT_STATUS_IS_OK(status)) {
3051 goto fail;
3054 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3055 if (!NT_STATUS_IS_OK(status)) {
3056 goto fail;
3060 * Turn off sign+seal to allow selected auth level to turn it back on.
3062 result->a_u.ntlmssp_state->neg_flags &=
3063 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3065 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3066 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3067 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3068 result->a_u.ntlmssp_state->neg_flags
3069 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3072 *presult = result;
3073 return NT_STATUS_OK;
3075 fail:
3076 TALLOC_FREE(result);
3077 return status;
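#if 0
/* Editor's usage sketch (not compiled into the build): bind an already
 * opened pipe with NTLMSSP integrity protection (signing). The
 * domain/username/password values are caller-supplied placeholders. */
static NTSTATUS example_ntlmssp_bind(struct rpc_pipe_client *pipe_cli,
				     const char *domain,
				     const char *username,
				     const char *password)
{
	struct cli_pipe_auth_data *auth;
	NTSTATUS status;

	status = rpccli_ntlmssp_bind_data(pipe_cli,
					  PIPE_AUTH_TYPE_NTLMSSP,
					  PIPE_AUTH_LEVEL_INTEGRITY,
					  domain, username, password,
					  &auth);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	return rpc_pipe_bind(pipe_cli, auth);
}
#endif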
3080 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3081 enum pipe_auth_level auth_level,
3082 const uint8_t sess_key[16],
3083 struct cli_pipe_auth_data **presult)
3085 struct cli_pipe_auth_data *result;
3087 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3088 if (result == NULL) {
3089 return NT_STATUS_NO_MEMORY;
3092 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3093 result->auth_level = auth_level;
3095 result->user_name = talloc_strdup(result, "");
3096 result->domain = talloc_strdup(result, domain);
3097 if ((result->user_name == NULL) || (result->domain == NULL)) {
3098 goto fail;
3101 result->a_u.schannel_auth = talloc(result,
3102 struct schannel_auth_struct);
3103 if (result->a_u.schannel_auth == NULL) {
3104 goto fail;
3107 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3108 sizeof(result->a_u.schannel_auth->sess_key));
3109 result->a_u.schannel_auth->seq_num = 0;
3111 *presult = result;
3112 return NT_STATUS_OK;
3114 fail:
3115 TALLOC_FREE(result);
3116 return NT_STATUS_NO_MEMORY;
3119 #ifdef HAVE_KRB5
3120 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3122 data_blob_free(&auth->session_key);
3123 return 0;
3125 #endif
3127 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3128 enum pipe_auth_level auth_level,
3129 const char *service_princ,
3130 const char *username,
3131 const char *password,
3132 struct cli_pipe_auth_data **presult)
3134 #ifdef HAVE_KRB5
3135 struct cli_pipe_auth_data *result;
3137 if ((username != NULL) && (password != NULL)) {
3138 int ret = kerberos_kinit_password(username, password, 0, NULL);
3139 if (ret != 0) {
3140 return NT_STATUS_ACCESS_DENIED;
3144 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3145 if (result == NULL) {
3146 return NT_STATUS_NO_MEMORY;
3149 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3150 result->auth_level = auth_level;
3153 * Username / domain need fixing!
3155 result->user_name = talloc_strdup(result, "");
3156 result->domain = talloc_strdup(result, "");
3157 if ((result->user_name == NULL) || (result->domain == NULL)) {
3158 goto fail;
3161 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3162 result, struct kerberos_auth_struct);
3163 if (result->a_u.kerberos_auth == NULL) {
3164 goto fail;
3166 talloc_set_destructor(result->a_u.kerberos_auth,
3167 cli_auth_kerberos_data_destructor);
3169 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3170 result, service_princ);
3171 if (result->a_u.kerberos_auth->service_principal == NULL) {
3172 goto fail;
3175 *presult = result;
3176 return NT_STATUS_OK;
3178 fail:
3179 TALLOC_FREE(result);
3180 return NT_STATUS_NO_MEMORY;
3181 #else
3182 return NT_STATUS_NOT_SUPPORTED;
3183 #endif
3187 * Create an rpc pipe client struct, connecting to a tcp port.
3189 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3190 uint16_t port,
3191 const struct ndr_syntax_id *abstract_syntax,
3192 struct rpc_pipe_client **presult)
3194 struct rpc_pipe_client *result;
3195 struct sockaddr_storage addr;
3196 NTSTATUS status;
3197 int fd;
3199 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3200 if (result == NULL) {
3201 return NT_STATUS_NO_MEMORY;
3204 result->abstract_syntax = *abstract_syntax;
3205 result->transfer_syntax = ndr_transfer_syntax;
3206 result->dispatch = cli_do_rpc_ndr;
3208 result->desthost = talloc_strdup(result, host);
3209 result->srv_name_slash = talloc_asprintf_strupper_m(
3210 result, "\\\\%s", result->desthost);
3211 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3212 status = NT_STATUS_NO_MEMORY;
3213 goto fail;
3216 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3217 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3219 if (!resolve_name(host, &addr, 0)) {
3220 status = NT_STATUS_NOT_FOUND;
3221 goto fail;
3224 status = open_socket_out(&addr, port, 60, &fd);
3225 if (!NT_STATUS_IS_OK(status)) {
3226 goto fail;
3228 set_socket_options(fd, lp_socket_options());
3230 status = rpc_transport_sock_init(result, fd, &result->transport);
3231 if (!NT_STATUS_IS_OK(status)) {
3232 close(fd);
3233 goto fail;
3236 result->transport->transport = NCACN_IP_TCP;
3238 *presult = result;
3239 return NT_STATUS_OK;
3241 fail:
3242 TALLOC_FREE(result);
3243 return status;
3247 * Determine the tcp port on which a dcerpc interface is listening
3248 * for the ncacn_ip_tcp transport via the endpoint mapper of the
3249 * target host.
3251 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3252 const struct ndr_syntax_id *abstract_syntax,
3253 uint16_t *pport)
3255 NTSTATUS status;
3256 struct rpc_pipe_client *epm_pipe = NULL;
3257 struct cli_pipe_auth_data *auth = NULL;
3258 struct dcerpc_binding *map_binding = NULL;
3259 struct dcerpc_binding *res_binding = NULL;
3260 struct epm_twr_t *map_tower = NULL;
3261 struct epm_twr_t *res_towers = NULL;
3262 struct policy_handle *entry_handle = NULL;
3263 uint32_t num_towers = 0;
3264 uint32_t max_towers = 1;
3265 struct epm_twr_p_t towers;
3266 TALLOC_CTX *tmp_ctx = talloc_stackframe();
3268 if (pport == NULL) {
3269 status = NT_STATUS_INVALID_PARAMETER;
3270 goto done;
3273 /* open the connection to the endpoint mapper */
3274 status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3275 &ndr_table_epmapper.syntax_id,
3276 &epm_pipe);
3278 if (!NT_STATUS_IS_OK(status)) {
3279 goto done;
3282 status = rpccli_anon_bind_data(tmp_ctx, &auth);
3283 if (!NT_STATUS_IS_OK(status)) {
3284 goto done;
3287 status = rpc_pipe_bind(epm_pipe, auth);
3288 if (!NT_STATUS_IS_OK(status)) {
3289 goto done;
3292 /* create tower for asking the epmapper */
3294 map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3295 if (map_binding == NULL) {
3296 status = NT_STATUS_NO_MEMORY;
3297 goto done;
3300 map_binding->transport = NCACN_IP_TCP;
3301 map_binding->object = *abstract_syntax;
3302 map_binding->host = host; /* needed? */
3303 map_binding->endpoint = "0"; /* correct? needed? */
3305 map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3306 if (map_tower == NULL) {
3307 status = NT_STATUS_NO_MEMORY;
3308 goto done;
3311 status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3312 &(map_tower->tower));
3313 if (!NT_STATUS_IS_OK(status)) {
3314 goto done;
3317 /* allocate further parameters for the epm_Map call */
3319 res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3320 if (res_towers == NULL) {
3321 status = NT_STATUS_NO_MEMORY;
3322 goto done;
3324 towers.twr = res_towers;
3326 entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3327 if (entry_handle == NULL) {
3328 status = NT_STATUS_NO_MEMORY;
3329 goto done;
3332 /* ask the endpoint mapper for the port */
3334 status = rpccli_epm_Map(epm_pipe,
3335 tmp_ctx,
3336 CONST_DISCARD(struct GUID *,
3337 &(abstract_syntax->uuid)),
3338 map_tower,
3339 entry_handle,
3340 max_towers,
3341 &num_towers,
3342 &towers);
3344 if (!NT_STATUS_IS_OK(status)) {
3345 goto done;
3348 if (num_towers != 1) {
3349 status = NT_STATUS_UNSUCCESSFUL;
3350 goto done;
3353 /* extract the port from the answer */
3355 status = dcerpc_binding_from_tower(tmp_ctx,
3356 &(towers.twr->tower),
3357 &res_binding);
3358 if (!NT_STATUS_IS_OK(status)) {
3359 goto done;
3362 /* are further checks here necessary? */
3363 if (res_binding->transport != NCACN_IP_TCP) {
3364 status = NT_STATUS_UNSUCCESSFUL;
3365 goto done;
3368 *pport = (uint16_t)atoi(res_binding->endpoint);
3370 done:
3371 TALLOC_FREE(tmp_ctx);
3372 return status;
3376 * Create an rpc pipe client struct, connecting to a host via tcp.
3377 * The port is determined by asking the endpoint mapper on the given
3378 * host.
3380 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3381 const struct ndr_syntax_id *abstract_syntax,
3382 struct rpc_pipe_client **presult)
3384 NTSTATUS status;
3385 uint16_t port = 0;
3387 *presult = NULL;
3389 status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3390 if (!NT_STATUS_IS_OK(status)) {
3391 goto done;
3394 status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3395 abstract_syntax, presult);
3397 done:
3398 return status;
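#if 0
/* Editor's usage sketch (not compiled into the build): open an
 * ncacn_ip_tcp connection to the lsarpc interface on a host (port looked
 * up via the endpoint mapper by the routine above) and bind anonymously.
 * The host name is a caller-supplied placeholder. */
static NTSTATUS example_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
				 struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *pipe_cli;
	struct cli_pipe_auth_data *auth;
	NTSTATUS status;

	status = rpc_pipe_open_tcp(mem_ctx, host,
				   &ndr_table_lsarpc.syntax_id, &pipe_cli);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	status = rpccli_anon_bind_data(pipe_cli, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(pipe_cli);
		return status;
	}
	status = rpc_pipe_bind(pipe_cli, auth);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(pipe_cli);
		return status;
	}
	*presult = pipe_cli;
	return NT_STATUS_OK;
}
#endif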
3401 /********************************************************************
3402 Create an rpc pipe client struct, connecting to a unix domain socket.
3403 ********************************************************************/
3404 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3405 const struct ndr_syntax_id *abstract_syntax,
3406 struct rpc_pipe_client **presult)
3408 struct rpc_pipe_client *result;
3409 struct sockaddr_un addr;
3410 NTSTATUS status;
3411 int fd;
3413 result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3414 if (result == NULL) {
3415 return NT_STATUS_NO_MEMORY;
3418 result->abstract_syntax = *abstract_syntax;
3419 result->transfer_syntax = ndr_transfer_syntax;
3420 result->dispatch = cli_do_rpc_ndr;
3422 result->desthost = get_myname(result);
3423 result->srv_name_slash = talloc_asprintf_strupper_m(
3424 result, "\\\\%s", result->desthost);
3425 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3426 status = NT_STATUS_NO_MEMORY;
3427 goto fail;
3430 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3431 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3433 fd = socket(AF_UNIX, SOCK_STREAM, 0);
3434 if (fd == -1) {
3435 status = map_nt_error_from_unix(errno);
3436 goto fail;
3439 ZERO_STRUCT(addr);
3440 addr.sun_family = AF_UNIX;
3441 strlcpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3443 if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
 status = map_nt_error_from_unix(errno);
3444 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3445 strerror(errno)));
3446 close(fd);
 goto fail;
 }
3450 status = rpc_transport_sock_init(result, fd, &result->transport);
3451 if (!NT_STATUS_IS_OK(status)) {
3452 close(fd);
3453 goto fail;
3456 result->transport->transport = NCALRPC;
3458 *presult = result;
3459 return NT_STATUS_OK;
3461 fail:
3462 TALLOC_FREE(result);
3463 return status;
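#if 0
/* Editor's usage sketch (not compiled into the build): connect to a local
 * RPC service over a unix domain socket; the socket path here is purely a
 * placeholder. The returned pipe still has to be bound, e.g. as in the
 * anonymous bind sketch further above. */
static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
				     struct rpc_pipe_client **presult)
{
	return rpc_pipe_open_ncalrpc(mem_ctx, "/tmp/rpc_socket",
				     &ndr_table_winreg.syntax_id,
				     presult);
}
#endif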
3466 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3468 struct cli_state *cli;
3470 cli = rpc_pipe_np_smb_conn(p);
3471 if (cli != NULL) {
3472 DLIST_REMOVE(cli->pipe_list, p);
3474 return 0;
3477 /****************************************************************************
3478 Open a named pipe over SMB to a remote server.
3480 * CAVEAT CALLER OF THIS FUNCTION:
3481 * The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3482 * so be sure that this function is called AFTER any structure (vs pointer)
3483 * assignment of the cli. In particular, libsmbclient does structure
3484 * assignments of cli, which invalidates the data in the returned
3485 * rpc_pipe_client if this function is called before the structure assignment
3486 * of cli.
3488 ****************************************************************************/
3490 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3491 const struct ndr_syntax_id *abstract_syntax,
3492 struct rpc_pipe_client **presult)
3494 struct rpc_pipe_client *result;
3495 NTSTATUS status;
3497 /* sanity check to protect against crashes */
3499 if ( !cli ) {
3500 return NT_STATUS_INVALID_HANDLE;
3503 result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3504 if (result == NULL) {
3505 return NT_STATUS_NO_MEMORY;
3508 result->abstract_syntax = *abstract_syntax;
3509 result->transfer_syntax = ndr_transfer_syntax;
3510 result->dispatch = cli_do_rpc_ndr;
3511 result->desthost = talloc_strdup(result, cli->desthost);
3512 result->srv_name_slash = talloc_asprintf_strupper_m(
3513 result, "\\\\%s", result->desthost);
3515 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3516 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3518 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3519 TALLOC_FREE(result);
3520 return NT_STATUS_NO_MEMORY;
3523 status = rpc_transport_np_init(result, cli, abstract_syntax,
3524 &result->transport);
3525 if (!NT_STATUS_IS_OK(status)) {
3526 TALLOC_FREE(result);
3527 return status;
3530 result->transport->transport = NCACN_NP;
3532 DLIST_ADD(cli->pipe_list, result);
3533 talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3535 *presult = result;
3536 return NT_STATUS_OK;
3539 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3540 struct rpc_cli_smbd_conn *conn,
3541 const struct ndr_syntax_id *syntax,
3542 struct rpc_pipe_client **presult)
3544 struct rpc_pipe_client *result;
3545 struct cli_pipe_auth_data *auth;
3546 NTSTATUS status;
3548 result = talloc(mem_ctx, struct rpc_pipe_client);
3549 if (result == NULL) {
3550 return NT_STATUS_NO_MEMORY;
3552 result->abstract_syntax = *syntax;
3553 result->transfer_syntax = ndr_transfer_syntax;
3554 result->dispatch = cli_do_rpc_ndr;
3555 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3556 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3558 result->desthost = talloc_strdup(result, global_myname());
3559 result->srv_name_slash = talloc_asprintf_strupper_m(
3560 result, "\\\\%s", global_myname());
3561 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3562 TALLOC_FREE(result);
3563 return NT_STATUS_NO_MEMORY;
3566 status = rpc_transport_smbd_init(result, conn, syntax,
3567 &result->transport);
3568 if (!NT_STATUS_IS_OK(status)) {
3569 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3570 nt_errstr(status)));
3571 TALLOC_FREE(result);
3572 return status;
3575 status = rpccli_anon_bind_data(result, &auth);
3576 if (!NT_STATUS_IS_OK(status)) {
3577 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3578 nt_errstr(status)));
3579 TALLOC_FREE(result);
3580 return status;
3583 status = rpc_pipe_bind(result, auth);
3584 if (!NT_STATUS_IS_OK(status)) {
3585 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3586 TALLOC_FREE(result);
3587 return status;
3590 result->transport->transport = NCACN_INTERNAL;
3592 *presult = result;
3593 return NT_STATUS_OK;
3596 /****************************************************************************
3597 Open a pipe to a remote server.
3598 ****************************************************************************/
3600 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3601 enum dcerpc_transport_t transport,
3602 const struct ndr_syntax_id *interface,
3603 struct rpc_pipe_client **presult)
3605 switch (transport) {
3606 case NCACN_IP_TCP:
3607 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3608 presult);
3609 case NCACN_NP:
3610 return rpc_pipe_open_np(cli, interface, presult);
3611 default:
3612 return NT_STATUS_NOT_IMPLEMENTED;
3616 /****************************************************************************
3617 Open a named pipe to an SMB server and bind anonymously.
3618 ****************************************************************************/
3620 NTSTATUS cli_rpc_pipe_open_noauth_transport(struct cli_state *cli,
3621 enum dcerpc_transport_t transport,
3622 const struct ndr_syntax_id *interface,
3623 struct rpc_pipe_client **presult)
3625 struct rpc_pipe_client *result;
3626 struct cli_pipe_auth_data *auth;
3627 NTSTATUS status;
3629 status = cli_rpc_pipe_open(cli, transport, interface, &result);
3630 if (!NT_STATUS_IS_OK(status)) {
3631 return status;
3634 status = rpccli_anon_bind_data(result, &auth);
3635 if (!NT_STATUS_IS_OK(status)) {
3636 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3637 nt_errstr(status)));
3638 TALLOC_FREE(result);
3639 return status;
3643 * This is a bit of an abstraction violation: an anonymous bind on an
3644 * authenticated SMB connection inherits the user/domain from the
3645 * enclosing SMB credentials.
3648 TALLOC_FREE(auth->user_name);
3649 TALLOC_FREE(auth->domain);
3651 auth->user_name = talloc_strdup(auth, cli->user_name);
3652 auth->domain = talloc_strdup(auth, cli->domain);
3653 auth->user_session_key = data_blob_talloc(auth,
3654 cli->user_session_key.data,
3655 cli->user_session_key.length);
3657 if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3658 TALLOC_FREE(result);
3659 return NT_STATUS_NO_MEMORY;
3662 status = rpc_pipe_bind(result, auth);
3663 if (!NT_STATUS_IS_OK(status)) {
3664 int lvl = 0;
3665 if (ndr_syntax_id_equal(interface,
3666 &ndr_table_dssetup.syntax_id)) {
3667 /* non AD domains just don't have this pipe, avoid
3668 * level 0 statement in that case - gd */
3669 lvl = 3;
3671 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3672 "%s failed with error %s\n",
3673 get_pipe_name_from_iface(interface),
3674 nt_errstr(status) ));
3675 TALLOC_FREE(result);
3676 return status;
3679 DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3680 "%s and bound anonymously.\n",
3681 get_pipe_name_from_iface(interface), cli->desthost));
3683 *presult = result;
3684 return NT_STATUS_OK;
3687 /****************************************************************************
 Convenience wrapper: open the pipe over ncacn_np and bind anonymously.
3688 ****************************************************************************/
3690 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3691 const struct ndr_syntax_id *interface,
3692 struct rpc_pipe_client **presult)
3694 return cli_rpc_pipe_open_noauth_transport(cli, NCACN_NP,
3695 interface, presult);
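#if 0
/* Editor's usage sketch (not compiled into the build): the two entry
 * points above. The first opens winreg over the classic SMB named pipe
 * transport; the second uses the new _transport variant to reach the same
 * interface over ncacn_ip_tcp via the endpoint mapper. "cli" is an
 * already connected cli_state supplied by the caller. */
static NTSTATUS example_open_noauth(struct cli_state *cli,
				    struct rpc_pipe_client **np_pipe,
				    struct rpc_pipe_client **tcp_pipe)
{
	NTSTATUS status;

	status = cli_rpc_pipe_open_noauth(cli, &ndr_table_winreg.syntax_id,
					  np_pipe);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	return cli_rpc_pipe_open_noauth_transport(cli, NCACN_IP_TCP,
						  &ndr_table_winreg.syntax_id,
						  tcp_pipe);
}
#endif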
3698 /****************************************************************************
3699 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3700 ****************************************************************************/
3702 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3703 const struct ndr_syntax_id *interface,
3704 enum pipe_auth_type auth_type,
3705 enum pipe_auth_level auth_level,
3706 const char *domain,
3707 const char *username,
3708 const char *password,
3709 struct rpc_pipe_client **presult)
3711 struct rpc_pipe_client *result;
3712 struct cli_pipe_auth_data *auth;
3713 NTSTATUS status;
3715 status = cli_rpc_pipe_open(cli, NCACN_NP, interface, &result);
3716 if (!NT_STATUS_IS_OK(status)) {
3717 return status;
3720 status = rpccli_ntlmssp_bind_data(
3721 result, auth_type, auth_level, domain, username,
3722 password, &auth);
3723 if (!NT_STATUS_IS_OK(status)) {
3724 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3725 nt_errstr(status)));
3726 goto err;
3729 status = rpc_pipe_bind(result, auth);
3730 if (!NT_STATUS_IS_OK(status)) {
3731 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3732 nt_errstr(status) ));
3733 goto err;
3736 DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3737 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3738 get_pipe_name_from_iface(interface), cli->desthost, domain,
3739 username ));
3741 *presult = result;
3742 return NT_STATUS_OK;
3744 err:
3746 TALLOC_FREE(result);
3747 return status;
3750 /****************************************************************************
3751 External interface.
3752 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3753 ****************************************************************************/
3755 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3756 const struct ndr_syntax_id *interface,
3757 enum pipe_auth_level auth_level,
3758 const char *domain,
3759 const char *username,
3760 const char *password,
3761 struct rpc_pipe_client **presult)
3763 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3764 interface,
3765 PIPE_AUTH_TYPE_NTLMSSP,
3766 auth_level,
3767 domain,
3768 username,
3769 password,
3770 presult);
3773 /****************************************************************************
3774 External interface.
3775 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3776 ****************************************************************************/
3778 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3779 const struct ndr_syntax_id *interface,
3780 enum pipe_auth_level auth_level,
3781 const char *domain,
3782 const char *username,
3783 const char *password,
3784 struct rpc_pipe_client **presult)
3786 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3787 interface,
3788 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3789 auth_level,
3790 domain,
3791 username,
3792 password,
3793 presult);
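#if 0
/* Editor's usage sketch (not compiled into the build): open the samr pipe
 * with SPNEGO-wrapped NTLMSSP and privacy (sign+seal). The credentials are
 * caller-supplied placeholders. */
static NTSTATUS example_open_samr_spnego(struct cli_state *cli,
					 const char *domain,
					 const char *username,
					 const char *password,
					 struct rpc_pipe_client **presult)
{
	return cli_rpc_pipe_open_spnego_ntlmssp(cli,
						&ndr_table_samr.syntax_id,
						PIPE_AUTH_LEVEL_PRIVACY,
						domain, username, password,
						presult);
}
#endif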
3796 /****************************************************************************
3797 Get the schannel session key out of an already opened netlogon pipe.
3798 ****************************************************************************/
3799 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3800 struct cli_state *cli,
3801 const char *domain,
3802 uint32 *pneg_flags)
3804 uint32 sec_chan_type = 0;
3805 unsigned char machine_pwd[16];
3806 const char *machine_account;
3807 NTSTATUS status;
3809 /* Get the machine account credentials from secrets.tdb. */
3810 if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3811 &sec_chan_type))
3813 DEBUG(0, ("get_schannel_session_key: could not fetch "
3814 "trust account password for domain '%s'\n",
3815 domain));
3816 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3819 status = rpccli_netlogon_setup_creds(netlogon_pipe,
3820 cli->desthost, /* server name */
3821 domain, /* domain */
3822 global_myname(), /* client name */
3823 machine_account, /* machine account name */
3824 machine_pwd,
3825 sec_chan_type,
3826 pneg_flags);
3828 if (!NT_STATUS_IS_OK(status)) {
3829 DEBUG(3, ("get_schannel_session_key_common: "
3830 "rpccli_netlogon_setup_creds failed with result %s "
3831 "to server %s, domain %s, machine account %s.\n",
3832 nt_errstr(status), cli->desthost, domain,
3833 machine_account ));
3834 return status;
3837 if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3838 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3839 cli->desthost));
3840 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3843 return NT_STATUS_OK;
3846 /****************************************************************************
3847 Open a netlogon pipe and get the schannel session key.
3848 Now exposed to external callers.
3849 ****************************************************************************/
3852 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3853 const char *domain,
3854 uint32 *pneg_flags,
3855 struct rpc_pipe_client **presult)
3857 struct rpc_pipe_client *netlogon_pipe = NULL;
3858 NTSTATUS status;
3860 status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3861 &netlogon_pipe);
3862 if (!NT_STATUS_IS_OK(status)) {
3863 return status;
3866 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3867 pneg_flags);
3868 if (!NT_STATUS_IS_OK(status)) {
3869 TALLOC_FREE(netlogon_pipe);
3870 return status;
3873 *presult = netlogon_pipe;
3874 return NT_STATUS_OK;
3877 /****************************************************************************
3878 External interface.
3879 Open a named pipe to an SMB server and bind using schannel (bind type 68),
3880 using the session key from the passed-in dcinfo. Sign or seal according to auth_level.
3881 ****************************************************************************/
3883 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3884 const struct ndr_syntax_id *interface,
3885 enum pipe_auth_level auth_level,
3886 const char *domain,
3887 const struct dcinfo *pdc,
3888 struct rpc_pipe_client **presult)
3890 struct rpc_pipe_client *result;
3891 struct cli_pipe_auth_data *auth;
3892 NTSTATUS status;
3894 status = cli_rpc_pipe_open(cli, NCACN_NP, interface, &result);
3895 if (!NT_STATUS_IS_OK(status)) {
3896 return status;
3899 status = rpccli_schannel_bind_data(result, domain, auth_level,
3900 pdc->sess_key, &auth);
3901 if (!NT_STATUS_IS_OK(status)) {
3902 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3903 nt_errstr(status)));
3904 TALLOC_FREE(result);
3905 return status;
3908 status = rpc_pipe_bind(result, auth);
3909 if (!NT_STATUS_IS_OK(status)) {
3910 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3911 "cli_rpc_pipe_bind failed with error %s\n",
3912 nt_errstr(status) ));
3913 TALLOC_FREE(result);
3914 return status;
3918 * The credentials on a new netlogon pipe are the ones we are passed
3919 * in - copy them over.
3921 result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3922 if (result->dc == NULL) {
3923 DEBUG(0, ("talloc failed\n"));
3924 TALLOC_FREE(result);
3925 return NT_STATUS_NO_MEMORY;
3928 DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3929 "for domain %s and bound using schannel.\n",
3930 get_pipe_name_from_iface(interface),
3931 cli->desthost, domain ));
3933 *presult = result;
3934 return NT_STATUS_OK;
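/*
 * Illustrative sketch only (not part of the original file): reusing the
 * credentials negotiated on an existing netlogon pipe to open a second,
 * schannel sign-and-sealed pipe. "cli", "domain" and "netlogon_pipe" are
 * assumed to be supplied by the caller.
 */
#if 0
	struct rpc_pipe_client *schannel_pipe = NULL;
	NTSTATUS status;

	status = cli_rpc_pipe_open_schannel_with_key(cli,
						&ndr_table_netlogon.syntax_id,
						PIPE_AUTH_LEVEL_PRIVACY,
						domain,
						netlogon_pipe->dc,
						&schannel_pipe);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
#endif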
3937 /****************************************************************************
3938 Fetch the schannel session key ourselves, using a temporary netlogon pipe
3939 bound with spnego/NTLMSSP authentication. Helper for
3940 cli_rpc_pipe_open_ntlmssp_auth_schannel() below.
3941 ****************************************************************************/
3943 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3944 const char *domain,
3945 const char *username,
3946 const char *password,
3947 uint32 *pneg_flags,
3948 struct rpc_pipe_client **presult)
3950 struct rpc_pipe_client *netlogon_pipe = NULL;
3951 NTSTATUS status;
3953 status = cli_rpc_pipe_open_spnego_ntlmssp(
3954 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3955 domain, username, password, &netlogon_pipe);
3956 if (!NT_STATUS_IS_OK(status)) {
3957 return status;
3960 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3961 pneg_flags);
3962 if (!NT_STATUS_IS_OK(status)) {
3963 TALLOC_FREE(netlogon_pipe);
3964 return status;
3967 *presult = netlogon_pipe;
3968 return NT_STATUS_OK;
3971 /****************************************************************************
3972 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3973 Fetch the session key ourselves using a temporary netlogon pipe. This version
3974 uses an ntlmssp bind to get the session key.
3975 ****************************************************************************/
3977 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3978 const struct ndr_syntax_id *interface,
3979 enum pipe_auth_level auth_level,
3980 const char *domain,
3981 const char *username,
3982 const char *password,
3983 struct rpc_pipe_client **presult)
3985 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3986 struct rpc_pipe_client *netlogon_pipe = NULL;
3987 struct rpc_pipe_client *result = NULL;
3988 NTSTATUS status;
3990 status = get_schannel_session_key_auth_ntlmssp(
3991 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3992 if (!NT_STATUS_IS_OK(status)) {
3993 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3994 "key from server %s for domain %s.\n",
3995 cli->desthost, domain ));
3996 return status;
3999 status = cli_rpc_pipe_open_schannel_with_key(
4000 cli, interface, auth_level, domain, netlogon_pipe->dc,
4001 &result);
4003 /* Now that we've bound using the session key, we can close the netlogon pipe. */
4004 TALLOC_FREE(netlogon_pipe);
4006 if (NT_STATUS_IS_OK(status)) {
4007 *presult = result;
4009 return status;
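/*
 * Illustrative sketch only (not part of the original file): opening winreg
 * over schannel when the session key still has to be negotiated through an
 * NTLMSSP-authenticated netlogon pipe. The credential strings are
 * placeholders.
 */
#if 0
	struct rpc_pipe_client *winreg_pipe = NULL;
	NTSTATUS status;

	status = cli_rpc_pipe_open_ntlmssp_auth_schannel(cli,
						&ndr_table_winreg.syntax_id,
						PIPE_AUTH_LEVEL_PRIVACY,
						"EXAMPLEDOM",	/* placeholder domain */
						"exampleuser",	/* placeholder username */
						"examplepass",	/* placeholder password */
						&winreg_pipe);
#endif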
4012 /****************************************************************************
4013 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4014 Fetch the session key ourselves using a temporary, anonymously bound netlogon pipe.
4015 ****************************************************************************/
4017 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4018 const struct ndr_syntax_id *interface,
4019 enum pipe_auth_level auth_level,
4020 const char *domain,
4021 struct rpc_pipe_client **presult)
4023 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4024 struct rpc_pipe_client *netlogon_pipe = NULL;
4025 struct rpc_pipe_client *result = NULL;
4026 NTSTATUS status;
4028 status = get_schannel_session_key(cli, domain, &neg_flags,
4029 &netlogon_pipe);
4030 if (!NT_STATUS_IS_OK(status)) {
4031 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4032 "key from server %s for domain %s.\n",
4033 cli->desthost, domain ));
4034 return status;
4037 status = cli_rpc_pipe_open_schannel_with_key(
4038 cli, interface, auth_level, domain, netlogon_pipe->dc,
4039 &result);
4041 /* Now that we've bound using the session key, we can close the netlogon pipe. */
4042 TALLOC_FREE(netlogon_pipe);
4044 if (NT_STATUS_IS_OK(status)) {
4045 *presult = result;
4048 return status;
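/*
 * Illustrative sketch only (not part of the original file): the common case
 * where the machine account password in secrets.tdb is enough to set up the
 * schannel bind. "cli" and "domain" are assumed to be supplied by the
 * caller.
 */
#if 0
	struct rpc_pipe_client *netlogon_pipe = NULL;
	NTSTATUS status;

	status = cli_rpc_pipe_open_schannel(cli,
					&ndr_table_netlogon.syntax_id,
					PIPE_AUTH_LEVEL_PRIVACY,
					domain,
					&netlogon_pipe);
#endif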
4051 /****************************************************************************
4052 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4053 The idea is that this can be called with service_princ, username and password
4054 all NULL, so long as the caller already has a TGT.
4055 ****************************************************************************/
4057 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4058 const struct ndr_syntax_id *interface,
4059 enum pipe_auth_level auth_level,
4060 const char *service_princ,
4061 const char *username,
4062 const char *password,
4063 struct rpc_pipe_client **presult)
4065 #ifdef HAVE_KRB5
4066 struct rpc_pipe_client *result;
4067 struct cli_pipe_auth_data *auth;
4068 NTSTATUS status;
4070 status = cli_rpc_pipe_open(cli, NCACN_NP, interface, &result);
4071 if (!NT_STATUS_IS_OK(status)) {
4072 return status;
4075 status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4076 username, password, &auth);
4077 if (!NT_STATUS_IS_OK(status)) {
4078 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4079 nt_errstr(status)));
4080 TALLOC_FREE(result);
4081 return status;
4084 status = rpc_pipe_bind(result, auth);
4085 if (!NT_STATUS_IS_OK(status)) {
4086 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4087 "with error %s\n", nt_errstr(status)));
4088 TALLOC_FREE(result);
4089 return status;
4092 *presult = result;
4093 return NT_STATUS_OK;
4094 #else
4095 DEBUG(0,("cli_rpc_pipe_open_krb5: Kerberos support not compiled in.\n"));
4096 return NT_STATUS_NOT_IMPLEMENTED;
4097 #endif
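/*
 * Illustrative sketch only (not part of the original file): per the comment
 * above, service_princ, username and password may all be NULL when the
 * caller already holds a valid TGT in its credential cache. "cli" is
 * assumed to be supplied by the caller.
 */
#if 0
	struct rpc_pipe_client *lsa_pipe = NULL;
	NTSTATUS status;

	status = cli_rpc_pipe_open_krb5(cli,
					&ndr_table_lsarpc.syntax_id,
					PIPE_AUTH_LEVEL_PRIVACY,
					NULL,	/* service_princ */
					NULL,	/* username */
					NULL,	/* password */
					&lsa_pipe);
#endif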
4100 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4101 struct rpc_pipe_client *cli,
4102 DATA_BLOB *session_key)
4104 if (!session_key || !cli) {
4105 return NT_STATUS_INVALID_PARAMETER;
4108 if (!cli->auth) {
4109 return NT_STATUS_INVALID_PARAMETER;
4112 switch (cli->auth->auth_type) {
4113 case PIPE_AUTH_TYPE_SCHANNEL:
4114 *session_key = data_blob_talloc(mem_ctx,
4115 cli->auth->a_u.schannel_auth->sess_key, 16);
4116 break;
4117 case PIPE_AUTH_TYPE_NTLMSSP:
4118 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4119 *session_key = data_blob_talloc(mem_ctx,
4120 cli->auth->a_u.ntlmssp_state->session_key.data,
4121 cli->auth->a_u.ntlmssp_state->session_key.length);
4122 break;
4123 case PIPE_AUTH_TYPE_KRB5:
4124 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4125 *session_key = data_blob_talloc(mem_ctx,
4126 cli->auth->a_u.kerberos_auth->session_key.data,
4127 cli->auth->a_u.kerberos_auth->session_key.length);
4128 break;
4129 case PIPE_AUTH_TYPE_NONE:
4130 *session_key = data_blob_talloc(mem_ctx,
4131 cli->auth->user_session_key.data,
4132 cli->auth->user_session_key.length);
4133 break;
4134 default:
4135 return NT_STATUS_NO_USER_SESSION_KEY;
4138 return NT_STATUS_OK;
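/*
 * Illustrative sketch only (not part of the original file): fetching the
 * session key off a bound pipe, e.g. for encrypting a password blob.
 * "mem_ctx" and "rpc_pipe" are assumed to be supplied by the caller.
 */
#if 0
	DATA_BLOB session_key;
	NTSTATUS status;

	status = cli_get_session_key(mem_ctx, rpc_pipe, &session_key);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
	/* ... use session_key.data / session_key.length ... */
	data_blob_free(&session_key);
#endif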