Abstract away the transport in cli_pipe.c
[Samba.git] / source3 / rpc_client / cli_pipe.c
1 /*
2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &syntax_spoolss },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
77 { NULL, NULL }
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85 const struct ndr_syntax_id *interface)
87 int i;
88 for (i = 0; pipe_names[i].client_pipe; i++) {
89 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
90 interface)) {
91 return &pipe_names[i].client_pipe[5];
96 * Here we should ask \\epmapper, but for now our code is only
97 * interested in the known pipes mentioned in pipe_names[]
100 return NULL;
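/*
 * Note on the lookup above: the table stores names of the form
 * "\PIPE\srvsvc", and returning &client_pipe[5] skips the "\PIPE"
 * prefix, so callers get the pipe name with a single leading
 * backslash (e.g. "\srvsvc"). The mem_ctx argument is currently
 * unused here; the returned string points at static table data.
 */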
103 /********************************************************************
104 Map internal value to wire value.
105 ********************************************************************/
107 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
109 switch (auth_type) {
111 case PIPE_AUTH_TYPE_NONE:
112 return RPC_ANONYMOUS_AUTH_TYPE;
114 case PIPE_AUTH_TYPE_NTLMSSP:
115 return RPC_NTLMSSP_AUTH_TYPE;
117 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
118 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
119 return RPC_SPNEGO_AUTH_TYPE;
121 case PIPE_AUTH_TYPE_SCHANNEL:
122 return RPC_SCHANNEL_AUTH_TYPE;
124 case PIPE_AUTH_TYPE_KRB5:
125 return RPC_KRB5_AUTH_TYPE;
127 default:
128 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
129 "auth type %u\n",
130 (unsigned int)auth_type ));
131 break;
133 return -1;
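/*
 * The RPC_*_AUTH_TYPE values returned above are the DCE/RPC auth type
 * numbers carried on the wire in the auth trailer (if memory serves:
 * none = 0, SPNEGO = 9, NTLMSSP = 10, Kerberos = 16, schannel = 0x44);
 * the authoritative definitions live in the rpc header files.
 */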
136 /********************************************************************
137 Pipe description string for DEBUG messages.
138 ********************************************************************/
139 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
140 struct rpc_pipe_client *cli)
142 char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
143 if (result == NULL) {
144 return "pipe";
146 return result;
149 /********************************************************************
150 Rpc pipe call id.
151 ********************************************************************/
153 static uint32 get_rpc_call_id(void)
155 static uint32 call_id = 0;
156 return ++call_id;
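/*
 * The call id is a simple per-process counter starting at 1. Nothing
 * here guards against wrap-around or concurrent use, which is
 * presumably fine for the single-threaded callers of this code.
 */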
160 * Realloc pdu to have at least "size" bytes
163 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
165 size_t extra_size;
167 if (prs_data_size(pdu) >= size) {
168 return true;
171 extra_size = size - prs_data_size(pdu);
173 if (!prs_force_grow(pdu, extra_size)) {
174 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
175 "%d bytes.\n", (int)extra_size));
176 return false;
179 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
180 (int)extra_size, prs_data_size(pdu)));
181 return true;
185 /*******************************************************************
186 Use SMBreadX to get the rest of one fragment's worth of rpc data.
187 Reads the whole size or gives an error message
188 ********************************************************************/
190 struct rpc_read_state {
191 struct event_context *ev;
192 struct rpc_cli_transport *transport;
193 uint8_t *data;
194 size_t size;
195 size_t num_read;
198 static void rpc_read_done(struct async_req *subreq);
200 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
201 struct event_context *ev,
202 struct rpc_cli_transport *transport,
203 uint8_t *data, size_t size)
205 struct async_req *result, *subreq;
206 struct rpc_read_state *state;
208 if (!async_req_setup(mem_ctx, &result, &state,
209 struct rpc_read_state)) {
210 return NULL;
212 state->ev = ev;
213 state->transport = transport;
214 state->data = data;
215 state->size = size;
216 state->num_read = 0;
218 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
220 subreq = transport->read_send(state, ev, (uint8_t *)data, size,
221 transport->priv);
222 if (subreq == NULL) {
223 goto fail;
225 subreq->async.fn = rpc_read_done;
226 subreq->async.priv = result;
227 return result;
229 fail:
230 TALLOC_FREE(result);
231 return NULL;
234 static void rpc_read_done(struct async_req *subreq)
236 struct async_req *req = talloc_get_type_abort(
237 subreq->async.priv, struct async_req);
238 struct rpc_read_state *state = talloc_get_type_abort(
239 req->private_data, struct rpc_read_state);
240 NTSTATUS status;
241 ssize_t received;
243 status = state->transport->read_recv(subreq, &received);
244 TALLOC_FREE(subreq);
245 if (!NT_STATUS_IS_OK(status)) {
246 async_req_error(req, status);
247 return;
250 state->num_read += received;
251 if (state->num_read == state->size) {
252 async_req_done(req);
253 return;
256 subreq = state->transport->read_send(state, state->ev,
257 state->data + state->num_read,
258 state->size - state->num_read,
259 state->transport->priv);
260 if (async_req_nomem(subreq, req)) {
261 return;
263 subreq->async.fn = rpc_read_done;
264 subreq->async.priv = req;
267 static NTSTATUS rpc_read_recv(struct async_req *req)
269 return async_req_simple_recv(req);
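/*
 * rpc_read_send/rpc_read_done/rpc_read_recv follow the async pattern
 * used throughout this file: the _send function issues the first
 * transport->read_send, the _done callback keeps re-issuing reads
 * until state->size bytes have arrived, and the _recv function just
 * maps the final async_req status. A caller typically chains it like
 * this (sketch only; "my_continuation", "buf" and "len" are
 * hypothetical placeholders):
 *
 *	subreq = rpc_read_send(state, ev, cli->transport, buf, len);
 *	if (async_req_nomem(subreq, req)) {
 *		return;
 *	}
 *	subreq->async.fn = my_continuation;
 *	subreq->async.priv = req;
 */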
272 struct rpc_write_state {
273 struct event_context *ev;
274 struct rpc_cli_transport *transport;
275 const uint8_t *data;
276 size_t size;
277 size_t num_written;
280 static void rpc_write_done(struct async_req *subreq);
282 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
283 struct event_context *ev,
284 struct rpc_cli_transport *transport,
285 const uint8_t *data, size_t size)
287 struct async_req *result, *subreq;
288 struct rpc_write_state *state;
290 if (!async_req_setup(mem_ctx, &result, &state,
291 struct rpc_write_state)) {
292 return NULL;
294 state->ev = ev;
295 state->transport = transport;
296 state->data = data;
297 state->size = size;
298 state->num_written = 0;
300 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
302 subreq = transport->write_send(state, ev, data, size, transport->priv);
303 if (subreq == NULL) {
304 goto fail;
306 subreq->async.fn = rpc_write_done;
307 subreq->async.priv = result;
308 return result;
309 fail:
310 TALLOC_FREE(result);
311 return NULL;
314 static void rpc_write_done(struct async_req *subreq)
316 struct async_req *req = talloc_get_type_abort(
317 subreq->async.priv, struct async_req);
318 struct rpc_write_state *state = talloc_get_type_abort(
319 req->private_data, struct rpc_write_state);
320 NTSTATUS status;
321 ssize_t written;
323 status = state->transport->write_recv(subreq, &written);
324 TALLOC_FREE(subreq);
325 if (!NT_STATUS_IS_OK(status)) {
326 async_req_error(req, status);
327 return;
330 state->num_written += written;
332 if (state->num_written == state->size) {
333 async_req_done(req);
334 return;
337 subreq = state->transport->write_send(state, state->ev,
338 state->data + state->num_written,
339 state->size - state->num_written,
340 state->transport->priv);
341 if (async_req_nomem(subreq, req)) {
342 return;
344 subreq->async.fn = rpc_write_done;
345 subreq->async.priv = req;
348 static NTSTATUS rpc_write_recv(struct async_req *req)
350 return async_req_simple_recv(req);
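/*
 * rpc_write_send/_done/_recv are the mirror image of the read helpers
 * above: keep calling transport->write_send until all "size" bytes of
 * the outgoing PDU have been pushed down the transport.
 */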
354 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
355 struct rpc_hdr_info *prhdr,
356 prs_struct *pdu)
359 * This next call sets the endian bit correctly in current_pdu. We
360 * will propagate this to rbuf later.
363 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
364 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
365 return NT_STATUS_BUFFER_TOO_SMALL;
368 if (prhdr->frag_len > cli->max_recv_frag) {
369 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
370 " we only allow %d\n", (int)prhdr->frag_len,
371 (int)cli->max_recv_frag));
372 return NT_STATUS_BUFFER_TOO_SMALL;
375 return NT_STATUS_OK;
378 /****************************************************************************
379 Try and get a PDU's worth of data from current_pdu. If not, then read more
380 from the wire.
381 ****************************************************************************/
383 struct get_complete_frag_state {
384 struct event_context *ev;
385 struct rpc_pipe_client *cli;
386 struct rpc_hdr_info *prhdr;
387 prs_struct *pdu;
390 static void get_complete_frag_got_header(struct async_req *subreq);
391 static void get_complete_frag_got_rest(struct async_req *subreq);
393 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
394 struct event_context *ev,
395 struct rpc_pipe_client *cli,
396 struct rpc_hdr_info *prhdr,
397 prs_struct *pdu)
399 struct async_req *result, *subreq;
400 struct get_complete_frag_state *state;
401 uint32_t pdu_len;
402 NTSTATUS status;
404 if (!async_req_setup(mem_ctx, &result, &state,
405 struct get_complete_frag_state)) {
406 return NULL;
408 state->ev = ev;
409 state->cli = cli;
410 state->prhdr = prhdr;
411 state->pdu = pdu;
413 pdu_len = prs_data_size(pdu);
414 if (pdu_len < RPC_HEADER_LEN) {
415 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
416 status = NT_STATUS_NO_MEMORY;
417 goto post_status;
419 subreq = rpc_read_send(
420 state, state->ev,
421 state->cli->transport,
422 (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
423 RPC_HEADER_LEN - pdu_len);
424 if (subreq == NULL) {
425 status = NT_STATUS_NO_MEMORY;
426 goto post_status;
428 subreq->async.fn = get_complete_frag_got_header;
429 subreq->async.priv = result;
430 return result;
433 status = parse_rpc_header(cli, prhdr, pdu);
434 if (!NT_STATUS_IS_OK(status)) {
435 goto post_status;
439 * Ensure we have frag_len bytes of data.
441 if (pdu_len < prhdr->frag_len) {
442 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
443 status = NT_STATUS_NO_MEMORY;
444 goto post_status;
446 subreq = rpc_read_send(state, state->ev,
447 state->cli->transport,
448 (uint8_t *)(prs_data_p(pdu) + pdu_len),
449 prhdr->frag_len - pdu_len);
450 if (subreq == NULL) {
451 status = NT_STATUS_NO_MEMORY;
452 goto post_status;
454 subreq->async.fn = get_complete_frag_got_rest;
455 subreq->async.priv = result;
456 return result;
459 status = NT_STATUS_OK;
460 post_status:
461 if (async_post_status(result, ev, status)) {
462 return result;
464 TALLOC_FREE(result);
465 return NULL;
468 static void get_complete_frag_got_header(struct async_req *subreq)
470 struct async_req *req = talloc_get_type_abort(
471 subreq->async.priv, struct async_req);
472 struct get_complete_frag_state *state = talloc_get_type_abort(
473 req->private_data, struct get_complete_frag_state);
474 NTSTATUS status;
476 status = rpc_read_recv(subreq);
477 TALLOC_FREE(subreq);
478 if (!NT_STATUS_IS_OK(status)) {
479 async_req_error(req, status);
480 return;
483 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
484 if (!NT_STATUS_IS_OK(status)) {
485 async_req_error(req, status);
486 return;
489 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
490 async_req_error(req, NT_STATUS_NO_MEMORY);
491 return;
495 * We're here in this piece of code because we've read exactly
496 * RPC_HEADER_LEN bytes into state->pdu.
499 subreq = rpc_read_send(
500 state, state->ev, state->cli->transport,
501 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
502 state->prhdr->frag_len - RPC_HEADER_LEN);
503 if (async_req_nomem(subreq, req)) {
504 return;
506 subreq->async.fn = get_complete_frag_got_rest;
507 subreq->async.priv = req;
510 static void get_complete_frag_got_rest(struct async_req *subreq)
512 struct async_req *req = talloc_get_type_abort(
513 subreq->async.priv, struct async_req);
514 NTSTATUS status;
516 status = rpc_read_recv(subreq);
517 TALLOC_FREE(subreq);
518 if (!NT_STATUS_IS_OK(status)) {
519 async_req_error(req, status);
520 return;
522 async_req_done(req);
525 static NTSTATUS get_complete_frag_recv(struct async_req *req)
527 return async_req_simple_recv(req);
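/*
 * To summarise the flow above: first make sure at least RPC_HEADER_LEN
 * bytes are in "pdu", parse the header to learn frag_len, then keep
 * reading until the whole fragment (frag_len bytes) is in the buffer.
 * On success the caller can rely on prs_data_size(pdu) >= frag_len.
 */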
530 /****************************************************************************
531 NTLMSSP specific sign/seal.
532 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
533 In fact I should probably abstract these into identical pieces of code... JRA.
534 ****************************************************************************/
536 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
537 prs_struct *current_pdu,
538 uint8 *p_ss_padding_len)
540 RPC_HDR_AUTH auth_info;
541 uint32 save_offset = prs_offset(current_pdu);
542 uint32 auth_len = prhdr->auth_len;
543 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
544 unsigned char *data = NULL;
545 size_t data_len;
546 unsigned char *full_packet_data = NULL;
547 size_t full_packet_data_len;
548 DATA_BLOB auth_blob;
549 NTSTATUS status;
551 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
552 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
553 return NT_STATUS_OK;
556 if (!ntlmssp_state) {
557 return NT_STATUS_INVALID_PARAMETER;
560 /* Ensure there's enough data for an authenticated response. */
561 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
562 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
563 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
564 (unsigned int)auth_len ));
565 return NT_STATUS_BUFFER_TOO_SMALL;
569 * We need the full packet data + length (minus the auth trailer) as well as the data + length
570 * that follows the RPC header.
571 * The full packet (minus the auth trailer) must be passed to the NTLMSSP unseal and check
572 * functions, as NTLMv2 signs the rpc headers as well.
575 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
576 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
578 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
579 full_packet_data_len = prhdr->frag_len - auth_len;
581 /* Pull the auth header and the following data into a blob. */
582 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
583 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
584 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
585 return NT_STATUS_BUFFER_TOO_SMALL;
588 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
589 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
590 return NT_STATUS_BUFFER_TOO_SMALL;
593 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
594 auth_blob.length = auth_len;
596 switch (cli->auth->auth_level) {
597 case PIPE_AUTH_LEVEL_PRIVACY:
598 /* Data is encrypted. */
599 status = ntlmssp_unseal_packet(ntlmssp_state,
600 data, data_len,
601 full_packet_data,
602 full_packet_data_len,
603 &auth_blob);
604 if (!NT_STATUS_IS_OK(status)) {
605 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
606 "packet from %s. Error was %s.\n",
607 rpccli_pipe_txt(debug_ctx(), cli),
608 nt_errstr(status) ));
609 return status;
611 break;
612 case PIPE_AUTH_LEVEL_INTEGRITY:
613 /* Data is signed. */
614 status = ntlmssp_check_packet(ntlmssp_state,
615 data, data_len,
616 full_packet_data,
617 full_packet_data_len,
618 &auth_blob);
619 if (!NT_STATUS_IS_OK(status)) {
620 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
621 "packet from %s. Error was %s.\n",
622 rpccli_pipe_txt(debug_ctx(), cli),
623 nt_errstr(status) ));
624 return status;
626 break;
627 default:
628 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
629 "auth level %d\n", cli->auth->auth_level));
630 return NT_STATUS_INVALID_INFO_CLASS;
634 * Restore the parse offset to where it was on entry (the data offset).
637 if(!prs_set_offset(current_pdu, save_offset)) {
638 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
639 (unsigned int)save_offset ));
640 return NT_STATUS_BUFFER_TOO_SMALL;
644 * Remember the padding length. We must remove it from the real data
645 * stream once the sign/seal is done.
648 *p_ss_padding_len = auth_info.auth_pad_len;
650 return NT_STATUS_OK;
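/*
 * Layout assumed by the verification above, for a response fragment
 * carrying an NTLMSSP auth trailer:
 *
 * [RPC_HDR][RPC_HDR_RESP][data + pad][RPC_HDR_AUTH][auth_len bytes]
 *
 * The unseal/check covers the whole fragment minus the auth blob
 * itself (full_packet_data_len = frag_len - auth_len), which is why
 * both the full packet pointer and the payload pointer are handed to
 * the ntlmssp_* routines.
 */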
653 /****************************************************************************
654 schannel specific sign/seal.
655 ****************************************************************************/
657 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
658 prs_struct *current_pdu,
659 uint8 *p_ss_padding_len)
661 RPC_HDR_AUTH auth_info;
662 RPC_AUTH_SCHANNEL_CHK schannel_chk;
663 uint32 auth_len = prhdr->auth_len;
664 uint32 save_offset = prs_offset(current_pdu);
665 struct schannel_auth_struct *schannel_auth =
666 cli->auth->a_u.schannel_auth;
667 uint32 data_len;
669 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
670 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
671 return NT_STATUS_OK;
674 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
675 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
676 return NT_STATUS_INVALID_PARAMETER;
679 if (!schannel_auth) {
680 return NT_STATUS_INVALID_PARAMETER;
683 /* Ensure there's enough data for an authenticated response. */
684 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
685 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
686 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
687 (unsigned int)auth_len ));
688 return NT_STATUS_INVALID_PARAMETER;
691 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
693 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
694 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
695 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
696 return NT_STATUS_BUFFER_TOO_SMALL;
699 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
700 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
701 return NT_STATUS_BUFFER_TOO_SMALL;
704 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
705 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
706 auth_info.auth_type));
707 return NT_STATUS_BUFFER_TOO_SMALL;
710 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
711 &schannel_chk, current_pdu, 0)) {
712 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
713 return NT_STATUS_BUFFER_TOO_SMALL;
716 if (!schannel_decode(schannel_auth,
717 cli->auth->auth_level,
718 SENDER_IS_ACCEPTOR,
719 &schannel_chk,
720 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
721 data_len)) {
722 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
723 "Connection to %s.\n",
724 rpccli_pipe_txt(debug_ctx(), cli)));
725 return NT_STATUS_INVALID_PARAMETER;
728 /* The sequence number gets incremented on both send and receive. */
729 schannel_auth->seq_num++;
732 * Restore the parse offset to where it was on entry (the data offset).
735 if(!prs_set_offset(current_pdu, save_offset)) {
736 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
737 (unsigned int)save_offset ));
738 return NT_STATUS_BUFFER_TOO_SMALL;
742 * Remember the padding length. We must remove it from the real data
743 * stream once the sign/seal is done.
746 *p_ss_padding_len = auth_info.auth_pad_len;
748 return NT_STATUS_OK;
751 /****************************************************************************
752 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
753 ****************************************************************************/
755 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
756 prs_struct *current_pdu,
757 uint8 *p_ss_padding_len)
759 NTSTATUS ret = NT_STATUS_OK;
761 /* Paranoia checks for auth_len. */
762 if (prhdr->auth_len) {
763 if (prhdr->auth_len > prhdr->frag_len) {
764 return NT_STATUS_INVALID_PARAMETER;
767 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
768 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
769 /* Integer wrap attempt. */
770 return NT_STATUS_INVALID_PARAMETER;
775 * Now we have a complete RPC response PDU fragment, try and verify any auth data.
778 switch(cli->auth->auth_type) {
779 case PIPE_AUTH_TYPE_NONE:
780 if (prhdr->auth_len) {
781 DEBUG(3, ("cli_pipe_validate_rpc_response: "
782 "Connection to %s - got non-zero "
783 "auth len %u.\n",
784 rpccli_pipe_txt(debug_ctx(), cli),
785 (unsigned int)prhdr->auth_len ));
786 return NT_STATUS_INVALID_PARAMETER;
788 break;
790 case PIPE_AUTH_TYPE_NTLMSSP:
791 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
792 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
793 if (!NT_STATUS_IS_OK(ret)) {
794 return ret;
796 break;
798 case PIPE_AUTH_TYPE_SCHANNEL:
799 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
800 if (!NT_STATUS_IS_OK(ret)) {
801 return ret;
803 break;
805 case PIPE_AUTH_TYPE_KRB5:
806 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
807 default:
808 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
809 "to %s - unknown internal auth type %u.\n",
810 rpccli_pipe_txt(debug_ctx(), cli),
811 cli->auth->auth_type ));
812 return NT_STATUS_INVALID_INFO_CLASS;
815 return NT_STATUS_OK;
818 /****************************************************************************
819 Do basic authentication checks on an incoming pdu.
820 ****************************************************************************/
822 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
823 prs_struct *current_pdu,
824 uint8 expected_pkt_type,
825 char **ppdata,
826 uint32 *pdata_len,
827 prs_struct *return_data)
830 NTSTATUS ret = NT_STATUS_OK;
831 uint32 current_pdu_len = prs_data_size(current_pdu);
833 if (current_pdu_len != prhdr->frag_len) {
834 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
835 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
836 return NT_STATUS_INVALID_PARAMETER;
840 * Point the return values at the real data including the RPC
841 * header. Just in case the caller wants it.
843 *ppdata = prs_data_p(current_pdu);
844 *pdata_len = current_pdu_len;
846 /* Ensure we have the correct type. */
847 switch (prhdr->pkt_type) {
848 case RPC_ALTCONTRESP:
849 case RPC_BINDACK:
851 /* Alter context and bind ack share the same packet definitions. */
852 break;
855 case RPC_RESPONSE:
857 RPC_HDR_RESP rhdr_resp;
858 uint8 ss_padding_len = 0;
860 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
861 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
862 return NT_STATUS_BUFFER_TOO_SMALL;
865 /* Here's where we deal with incoming sign/seal. */
866 ret = cli_pipe_validate_rpc_response(cli, prhdr,
867 current_pdu, &ss_padding_len);
868 if (!NT_STATUS_IS_OK(ret)) {
869 return ret;
872 /* Point the return values at the NDR data. Remember to remove any ss padding. */
873 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
875 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
876 return NT_STATUS_BUFFER_TOO_SMALL;
879 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
881 /* Remember to remove the auth footer. */
882 if (prhdr->auth_len) {
883 /* We've already done integer wrap tests on auth_len in
884 cli_pipe_validate_rpc_response(). */
885 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
886 return NT_STATUS_BUFFER_TOO_SMALL;
888 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
891 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
892 current_pdu_len, *pdata_len, ss_padding_len ));
895 * If this is the first reply, and the allocation hint is reasonable, try to
896 * set up the return_data parse_struct to the correct size.
899 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
900 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
901 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
902 "too large to allocate\n",
903 (unsigned int)rhdr_resp.alloc_hint ));
904 return NT_STATUS_NO_MEMORY;
908 break;
911 case RPC_BINDNACK:
912 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
913 "received from %s!\n",
914 rpccli_pipe_txt(debug_ctx(), cli)));
915 /* Use this for now... */
916 return NT_STATUS_NETWORK_ACCESS_DENIED;
918 case RPC_FAULT:
920 RPC_HDR_RESP rhdr_resp;
921 RPC_HDR_FAULT fault_resp;
923 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
924 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
925 return NT_STATUS_BUFFER_TOO_SMALL;
928 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
929 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
930 return NT_STATUS_BUFFER_TOO_SMALL;
933 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
934 "code %s received from %s!\n",
935 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
936 rpccli_pipe_txt(debug_ctx(), cli)));
937 if (NT_STATUS_IS_OK(fault_resp.status)) {
938 return NT_STATUS_UNSUCCESSFUL;
939 } else {
940 return fault_resp.status;
944 default:
945 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
946 "from %s!\n",
947 (unsigned int)prhdr->pkt_type,
948 rpccli_pipe_txt(debug_ctx(), cli)));
949 return NT_STATUS_INVALID_INFO_CLASS;
952 if (prhdr->pkt_type != expected_pkt_type) {
953 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
954 "got an unexpected RPC packet type - %u, not %u\n",
955 rpccli_pipe_txt(debug_ctx(), cli),
956 prhdr->pkt_type,
957 expected_pkt_type));
958 return NT_STATUS_INVALID_INFO_CLASS;
961 /* Do this just before return - we don't want to modify any rpc header
962 data before now as we may have needed to do cryptographic actions on
963 it before. */
965 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
966 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
967 "setting fragment first/last ON.\n"));
968 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
971 return NT_STATUS_OK;
974 /****************************************************************************
975 Ensure we eat the just processed pdu from the current_pdu prs_struct.
976 Normally the frag_len and buffer size will match, but on the first trans
977 reply there is a theoretical chance that buffer size > frag_len, so we must
978 deal with that.
979 ****************************************************************************/
981 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
983 uint32 current_pdu_len = prs_data_size(current_pdu);
985 if (current_pdu_len < prhdr->frag_len) {
986 return NT_STATUS_BUFFER_TOO_SMALL;
989 /* Common case. */
990 if (current_pdu_len == (uint32)prhdr->frag_len) {
991 prs_mem_free(current_pdu);
992 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
993 /* Make current_pdu dynamic with no memory. */
994 prs_give_memory(current_pdu, 0, 0, True);
995 return NT_STATUS_OK;
999 * Oh no ! More data in buffer than we processed in current pdu.
1000 * Cheat. Move the data down and shrink the buffer.
1003 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1004 current_pdu_len - prhdr->frag_len);
1006 /* Remember to set the read offset back to zero. */
1007 prs_set_offset(current_pdu, 0);
1009 /* Shrink the buffer. */
1010 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1011 return NT_STATUS_BUFFER_TOO_SMALL;
1014 return NT_STATUS_OK;
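/*
 * Worked example of the uncommon path above: if the transport handed
 * us 4280 bytes but the fragment header said frag_len = 4096, the
 * trailing 184 bytes belong to the next fragment, so they are copied
 * to the front of the buffer and the buffer is shrunk to 184 bytes
 * before the next get_complete_frag round.
 */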
1017 /****************************************************************************
1018 Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1019 ****************************************************************************/
1021 struct cli_api_pipe_state {
1022 struct event_context *ev;
1023 struct rpc_cli_transport *transport;
1024 uint8_t *rdata;
1025 uint32_t rdata_len;
1028 static void cli_api_pipe_trans_done(struct async_req *subreq);
1029 static void cli_api_pipe_write_done(struct async_req *subreq);
1030 static void cli_api_pipe_read_done(struct async_req *subreq);
1032 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1033 struct event_context *ev,
1034 struct rpc_cli_transport *transport,
1035 uint8_t *data, size_t data_len,
1036 uint32_t max_rdata_len)
1038 struct async_req *result, *subreq;
1039 struct cli_api_pipe_state *state;
1040 NTSTATUS status;
1042 if (!async_req_setup(mem_ctx, &result, &state,
1043 struct cli_api_pipe_state)) {
1044 return NULL;
1046 state->ev = ev;
1047 state->transport = transport;
1049 if (max_rdata_len < RPC_HEADER_LEN) {
1051 * For an RPC reply we always need at least RPC_HEADER_LEN
1052 * bytes. We check this here because we will receive
1053 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1055 status = NT_STATUS_INVALID_PARAMETER;
1056 goto post_status;
1059 if (transport->trans_send != NULL) {
1060 subreq = transport->trans_send(state, ev, data, data_len,
1061 max_rdata_len, transport->priv);
1062 if (subreq == NULL) {
1063 status = NT_STATUS_NO_MEMORY;
1064 goto post_status;
1066 subreq->async.fn = cli_api_pipe_trans_done;
1067 subreq->async.priv = result;
1068 return result;
1072 * If the transport does not provide a "trans" routine, for
1073 * example the ncacn_ip_tcp transport, do the write/read step here.
1076 subreq = rpc_write_send(state, ev, transport, data, data_len);
1077 if (subreq == NULL) {
1078 goto fail;
1080 subreq->async.fn = cli_api_pipe_write_done;
1081 subreq->async.priv = result;
1082 return result;
1084 status = NT_STATUS_INVALID_PARAMETER;
1086 post_status:
1087 if (async_post_status(result, ev, status)) {
1088 return result;
1090 fail:
1091 TALLOC_FREE(result);
1092 return NULL;
1095 static void cli_api_pipe_trans_done(struct async_req *subreq)
1097 struct async_req *req = talloc_get_type_abort(
1098 subreq->async.priv, struct async_req);
1099 struct cli_api_pipe_state *state = talloc_get_type_abort(
1100 req->private_data, struct cli_api_pipe_state);
1101 NTSTATUS status;
1103 status = state->transport->trans_recv(subreq, state, &state->rdata,
1104 &state->rdata_len);
1105 TALLOC_FREE(subreq);
1106 if (!NT_STATUS_IS_OK(status)) {
1107 async_req_error(req, status);
1108 return;
1110 async_req_done(req);
1113 static void cli_api_pipe_write_done(struct async_req *subreq)
1115 struct async_req *req = talloc_get_type_abort(
1116 subreq->async.priv, struct async_req);
1117 struct cli_api_pipe_state *state = talloc_get_type_abort(
1118 req->private_data, struct cli_api_pipe_state);
1119 NTSTATUS status;
1121 status = rpc_write_recv(subreq);
1122 TALLOC_FREE(subreq);
1123 if (!NT_STATUS_IS_OK(status)) {
1124 async_req_error(req, status);
1125 return;
1128 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1129 if (async_req_nomem(state->rdata, req)) {
1130 return;
1134 * We don't need to use rpc_read_send here; the upper layer will cope
1135 * with a short read, just as transport->trans_send could return less
1136 * than state->max_rdata_len.
1138 subreq = state->transport->read_send(state, state->ev, state->rdata,
1139 RPC_HEADER_LEN,
1140 state->transport->priv);
1141 if (async_req_nomem(subreq, req)) {
1142 return;
1144 subreq->async.fn = cli_api_pipe_read_done;
1145 subreq->async.priv = req;
1148 static void cli_api_pipe_read_done(struct async_req *subreq)
1150 struct async_req *req = talloc_get_type_abort(
1151 subreq->async.priv, struct async_req);
1152 struct cli_api_pipe_state *state = talloc_get_type_abort(
1153 req->private_data, struct cli_api_pipe_state);
1154 NTSTATUS status;
1155 ssize_t received;
1157 status = state->transport->read_recv(subreq, &received);
1158 TALLOC_FREE(subreq);
1159 if (!NT_STATUS_IS_OK(status)) {
1160 async_req_error(req, status);
1161 return;
1163 state->rdata_len = received;
1164 async_req_done(req);
1167 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1168 uint8_t **prdata, uint32_t *prdata_len)
1170 struct cli_api_pipe_state *state = talloc_get_type_abort(
1171 req->private_data, struct cli_api_pipe_state);
1172 NTSTATUS status;
1174 if (async_req_is_error(req, &status)) {
1175 return status;
1178 *prdata = talloc_move(mem_ctx, &state->rdata);
1179 *prdata_len = state->rdata_len;
1180 return NT_STATUS_OK;
1183 /****************************************************************************
1184 Send data on an rpc pipe via trans. The prs_struct data must be the last
1185 pdu fragment of an NDR data stream.
1187 Receive response data from an rpc pipe, which may be large...
1189 Read the first fragment: unfortunately have to use SMBtrans for the first
1190 bit, then SMBreadX for subsequent bits.
1192 If the first fragment received wasn't also the last fragment, continue
1193 getting fragments until we _do_ receive the last fragment.
1195 Request/Response PDU's look like the following...
1197 |<------------------PDU len----------------------------------------------->|
1198 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1200 +------------+-----------------+-------------+---------------+-------------+
1201 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1202 +------------+-----------------+-------------+---------------+-------------+
1204 Where the presence of the AUTH_HDR and AUTH DATA is dependent on the
1205 signing & sealing being negotiated.
1207 ****************************************************************************/
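/*
 * As a reading aid, the usual sizes of the fixed parts (as defined in
 * the rpc headers; listed here from memory): RPC_HEADER_LEN = 16,
 * RPC_HDR_REQ_LEN = RPC_HDR_RESP_LEN = 8 and RPC_HDR_AUTH_LEN = 8, so
 * an unauthenticated request fragment carrying 100 bytes of NDR data
 * is 16 + 8 + 100 = 124 bytes on the wire.
 */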
1209 struct rpc_api_pipe_state {
1210 struct event_context *ev;
1211 struct rpc_pipe_client *cli;
1212 uint8_t expected_pkt_type;
1214 prs_struct incoming_frag;
1215 struct rpc_hdr_info rhdr;
1217 prs_struct incoming_pdu; /* Incoming reply */
1218 uint32_t incoming_pdu_offset;
1221 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1223 prs_mem_free(&state->incoming_frag);
1224 prs_mem_free(&state->incoming_pdu);
1225 return 0;
1228 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1229 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1231 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1232 struct event_context *ev,
1233 struct rpc_pipe_client *cli,
1234 prs_struct *data, /* Outgoing PDU */
1235 uint8_t expected_pkt_type)
1237 struct async_req *result, *subreq;
1238 struct rpc_api_pipe_state *state;
1239 uint16_t max_recv_frag;
1240 NTSTATUS status;
1242 if (!async_req_setup(mem_ctx, &result, &state,
1243 struct rpc_api_pipe_state)) {
1244 return NULL;
1246 state->ev = ev;
1247 state->cli = cli;
1248 state->expected_pkt_type = expected_pkt_type;
1249 state->incoming_pdu_offset = 0;
1251 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1253 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1254 /* Make incoming_pdu dynamic with no memory. */
1255 prs_give_memory(&state->incoming_pdu, 0, 0, true);
1257 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1260 * Ensure we're not sending too much.
1262 if (prs_offset(data) > cli->max_xmit_frag) {
1263 status = NT_STATUS_INVALID_PARAMETER;
1264 goto post_status;
1267 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1269 max_recv_frag = cli->max_recv_frag;
1271 #ifdef DEVELOPER
1272 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1273 #endif
1275 subreq = cli_api_pipe_send(state, ev, cli->transport,
1276 (uint8_t *)prs_data_p(data),
1277 prs_offset(data), max_recv_frag);
1278 if (subreq == NULL) {
1279 status = NT_STATUS_NO_MEMORY;
1280 goto post_status;
1282 subreq->async.fn = rpc_api_pipe_trans_done;
1283 subreq->async.priv = result;
1284 return result;
1286 post_status:
1287 if (async_post_status(result, ev, status)) {
1288 return result;
1290 TALLOC_FREE(result);
1291 return NULL;
1294 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1296 struct async_req *req = talloc_get_type_abort(
1297 subreq->async.priv, struct async_req);
1298 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1299 req->private_data, struct rpc_api_pipe_state);
1300 NTSTATUS status;
1301 uint8_t *rdata = NULL;
1302 uint32_t rdata_len = 0;
1303 char *rdata_copy;
1305 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1306 TALLOC_FREE(subreq);
1307 if (!NT_STATUS_IS_OK(status)) {
1308 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1309 async_req_error(req, status);
1310 return;
1313 if (rdata == NULL) {
1314 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1315 rpccli_pipe_txt(debug_ctx(), state->cli)));
1316 async_req_done(req);
1317 return;
1321 * Give the memory received from cli_trans as dynamic to the current
1322 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1323 * :-(
1325 rdata_copy = (char *)memdup(rdata, rdata_len);
1326 TALLOC_FREE(rdata);
1327 if (async_req_nomem(rdata_copy, req)) {
1328 return;
1330 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1332 /* Ensure we have enough data for a pdu. */
1333 subreq = get_complete_frag_send(state, state->ev, state->cli,
1334 &state->rhdr, &state->incoming_frag);
1335 if (async_req_nomem(subreq, req)) {
1336 return;
1338 subreq->async.fn = rpc_api_pipe_got_pdu;
1339 subreq->async.priv = req;
1342 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1344 struct async_req *req = talloc_get_type_abort(
1345 subreq->async.priv, struct async_req);
1346 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1347 req->private_data, struct rpc_api_pipe_state);
1348 NTSTATUS status;
1349 char *rdata = NULL;
1350 uint32_t rdata_len = 0;
1352 status = get_complete_frag_recv(subreq);
1353 TALLOC_FREE(subreq);
1354 if (!NT_STATUS_IS_OK(status)) {
1355 DEBUG(5, ("get_complete_frag failed: %s\n",
1356 nt_errstr(status)));
1357 async_req_error(req, status);
1358 return;
1361 status = cli_pipe_validate_current_pdu(
1362 state->cli, &state->rhdr, &state->incoming_frag,
1363 state->expected_pkt_type, &rdata, &rdata_len,
1364 &state->incoming_pdu);
1366 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1367 (unsigned)prs_data_size(&state->incoming_frag),
1368 (unsigned)state->incoming_pdu_offset,
1369 nt_errstr(status)));
1371 if (!NT_STATUS_IS_OK(status)) {
1372 async_req_error(req, status);
1373 return;
1376 if ((state->rhdr.flags & RPC_FLG_FIRST)
1377 && (state->rhdr.pack_type[0] == 0)) {
1379 * Set the data type correctly for big-endian data on the
1380 * first packet.
1382 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1383 "big-endian.\n",
1384 rpccli_pipe_txt(debug_ctx(), state->cli)));
1385 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1388 * Check endianness on subsequent packets.
1390 if (state->incoming_frag.bigendian_data
1391 != state->incoming_pdu.bigendian_data) {
1392 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1393 "%s\n",
1394 state->incoming_pdu.bigendian_data?"big":"little",
1395 state->incoming_frag.bigendian_data?"big":"little"));
1396 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1397 return;
1400 /* Now copy the data portion out of the fragment into the incoming pdu. */
1401 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1402 async_req_error(req, NT_STATUS_NO_MEMORY);
1403 return;
1406 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1407 rdata, (size_t)rdata_len);
1408 state->incoming_pdu_offset += rdata_len;
1410 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1411 &state->incoming_frag);
1412 if (!NT_STATUS_IS_OK(status)) {
1413 async_req_error(req, status);
1414 return;
1417 if (state->rhdr.flags & RPC_FLG_LAST) {
1418 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1419 rpccli_pipe_txt(debug_ctx(), state->cli),
1420 (unsigned)prs_data_size(&state->incoming_pdu)));
1421 async_req_done(req);
1422 return;
1425 subreq = get_complete_frag_send(state, state->ev, state->cli,
1426 &state->rhdr, &state->incoming_frag);
1427 if (async_req_nomem(subreq, req)) {
1428 return;
1430 subreq->async.fn = rpc_api_pipe_got_pdu;
1431 subreq->async.priv = req;
1434 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1435 prs_struct *reply_pdu)
1437 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1438 req->private_data, struct rpc_api_pipe_state);
1439 NTSTATUS status;
1441 if (async_req_is_error(req, &status)) {
1442 return status;
1445 *reply_pdu = state->incoming_pdu;
1446 reply_pdu->mem_ctx = mem_ctx;
1449 * Prevent state->incoming_pdu from being freed in
1450 * rpc_api_pipe_state_destructor()
1452 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1454 return NT_STATUS_OK;
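/*
 * Ownership note: the assembled incoming_pdu is handed to the caller
 * by structure assignment (with its mem_ctx repointed), and
 * state->incoming_pdu is re-initialised above precisely so that the
 * state destructor cannot free the buffer we have just given away.
 */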
1457 /*******************************************************************
1458 Creates krb5 auth bind.
1459 ********************************************************************/
1461 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1462 enum pipe_auth_level auth_level,
1463 RPC_HDR_AUTH *pauth_out,
1464 prs_struct *auth_data)
1466 #ifdef HAVE_KRB5
1467 int ret;
1468 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1469 DATA_BLOB tkt = data_blob_null;
1470 DATA_BLOB tkt_wrapped = data_blob_null;
1472 /* We may change the pad length before marshalling. */
1473 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1475 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1476 a->service_principal ));
1478 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1480 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1481 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1483 if (ret) {
1484 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1485 "failed with %s\n",
1486 a->service_principal,
1487 error_message(ret) ));
1489 data_blob_free(&tkt);
1490 prs_mem_free(auth_data);
1491 return NT_STATUS_INVALID_PARAMETER;
1494 /* wrap that up in a nice GSS-API wrapping */
1495 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1497 data_blob_free(&tkt);
1499 /* Auth len in the rpc header doesn't include auth_header. */
1500 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1501 data_blob_free(&tkt_wrapped);
1502 prs_mem_free(auth_data);
1503 return NT_STATUS_NO_MEMORY;
1506 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1507 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1509 data_blob_free(&tkt_wrapped);
1510 return NT_STATUS_OK;
1511 #else
1512 return NT_STATUS_INVALID_PARAMETER;
1513 #endif
1516 /*******************************************************************
1517 Creates SPNEGO NTLMSSP auth bind.
1518 ********************************************************************/
1520 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1521 enum pipe_auth_level auth_level,
1522 RPC_HDR_AUTH *pauth_out,
1523 prs_struct *auth_data)
1525 NTSTATUS nt_status;
1526 DATA_BLOB null_blob = data_blob_null;
1527 DATA_BLOB request = data_blob_null;
1528 DATA_BLOB spnego_msg = data_blob_null;
1530 /* We may change the pad length before marshalling. */
1531 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1533 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1534 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1535 null_blob,
1536 &request);
1538 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1539 data_blob_free(&request);
1540 prs_mem_free(auth_data);
1541 return nt_status;
1544 /* Wrap this in SPNEGO. */
1545 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1547 data_blob_free(&request);
1549 /* Auth len in the rpc header doesn't include auth_header. */
1550 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1551 data_blob_free(&spnego_msg);
1552 prs_mem_free(auth_data);
1553 return NT_STATUS_NO_MEMORY;
1556 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1557 dump_data(5, spnego_msg.data, spnego_msg.length);
1559 data_blob_free(&spnego_msg);
1560 return NT_STATUS_OK;
1563 /*******************************************************************
1564 Creates NTLMSSP auth bind.
1565 ********************************************************************/
1567 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1568 enum pipe_auth_level auth_level,
1569 RPC_HDR_AUTH *pauth_out,
1570 prs_struct *auth_data)
1572 NTSTATUS nt_status;
1573 DATA_BLOB null_blob = data_blob_null;
1574 DATA_BLOB request = data_blob_null;
1576 /* We may change the pad length before marshalling. */
1577 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1579 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1580 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1581 null_blob,
1582 &request);
1584 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1585 data_blob_free(&request);
1586 prs_mem_free(auth_data);
1587 return nt_status;
1590 /* Auth len in the rpc header doesn't include auth_header. */
1591 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1592 data_blob_free(&request);
1593 prs_mem_free(auth_data);
1594 return NT_STATUS_NO_MEMORY;
1597 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1598 dump_data(5, request.data, request.length);
1600 data_blob_free(&request);
1601 return NT_STATUS_OK;
1604 /*******************************************************************
1605 Creates schannel auth bind.
1606 ********************************************************************/
1608 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1609 enum pipe_auth_level auth_level,
1610 RPC_HDR_AUTH *pauth_out,
1611 prs_struct *auth_data)
1613 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1615 /* We may change the pad length before marshalling. */
1616 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1618 /* Use lp_workgroup() if domain not specified */
1620 if (!cli->auth->domain || !cli->auth->domain[0]) {
1621 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1622 if (cli->auth->domain == NULL) {
1623 return NT_STATUS_NO_MEMORY;
1627 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1628 global_myname());
1631 * Now marshall the data into the auth parse_struct.
1634 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1635 &schannel_neg, auth_data, 0)) {
1636 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1637 prs_mem_free(auth_data);
1638 return NT_STATUS_NO_MEMORY;
1641 return NT_STATUS_OK;
1644 /*******************************************************************
1645 Creates the internals of a DCE/RPC bind request or alter context PDU.
1646 ********************************************************************/
1648 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1649 prs_struct *rpc_out,
1650 uint32 rpc_call_id,
1651 const RPC_IFACE *abstract,
1652 const RPC_IFACE *transfer,
1653 RPC_HDR_AUTH *phdr_auth,
1654 prs_struct *pauth_info)
1656 RPC_HDR hdr;
1657 RPC_HDR_RB hdr_rb;
1658 RPC_CONTEXT rpc_ctx;
1659 uint16 auth_len = prs_offset(pauth_info);
1660 uint8 ss_padding_len = 0;
1661 uint16 frag_len = 0;
1663 /* create the RPC context. */
1664 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1666 /* create the bind request RPC_HDR_RB */
1667 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1669 /* Start building the frag length. */
1670 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1672 /* Do we need to pad ? */
1673 if (auth_len) {
1674 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1675 if (data_len % 8) {
1676 ss_padding_len = 8 - (data_len % 8);
1677 phdr_auth->auth_pad_len = ss_padding_len;
1679 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1682 /* Create the request RPC_HDR */
1683 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1685 /* Marshall the RPC header */
1686 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1687 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1688 return NT_STATUS_NO_MEMORY;
1691 /* Marshall the bind request data */
1692 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1693 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1694 return NT_STATUS_NO_MEMORY;
1698 * Grow the outgoing buffer to store any auth info.
1701 if(auth_len != 0) {
1702 if (ss_padding_len) {
1703 char pad[8];
1704 memset(pad, '\0', 8);
1705 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1706 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1707 return NT_STATUS_NO_MEMORY;
1711 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1712 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1713 return NT_STATUS_NO_MEMORY;
1717 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1718 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1719 return NT_STATUS_NO_MEMORY;
1723 return NT_STATUS_OK;
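/*
 * The PDU produced above ends up as
 *
 * [RPC_HDR][RPC_HDR_RB (one context)][pad to 8][RPC_HDR_AUTH][auth data]
 *
 * where the pad and the auth trailer are only present when pauth_info
 * carries any marshalled auth data.
 */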
1726 /*******************************************************************
1727 Creates a DCE/RPC bind request.
1728 ********************************************************************/
1730 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1731 prs_struct *rpc_out,
1732 uint32 rpc_call_id,
1733 const RPC_IFACE *abstract,
1734 const RPC_IFACE *transfer,
1735 enum pipe_auth_type auth_type,
1736 enum pipe_auth_level auth_level)
1738 RPC_HDR_AUTH hdr_auth;
1739 prs_struct auth_info;
1740 NTSTATUS ret = NT_STATUS_OK;
1742 ZERO_STRUCT(hdr_auth);
1743 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1744 return NT_STATUS_NO_MEMORY;
1746 switch (auth_type) {
1747 case PIPE_AUTH_TYPE_SCHANNEL:
1748 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1749 if (!NT_STATUS_IS_OK(ret)) {
1750 prs_mem_free(&auth_info);
1751 return ret;
1753 break;
1755 case PIPE_AUTH_TYPE_NTLMSSP:
1756 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1757 if (!NT_STATUS_IS_OK(ret)) {
1758 prs_mem_free(&auth_info);
1759 return ret;
1761 break;
1763 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1764 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1765 if (!NT_STATUS_IS_OK(ret)) {
1766 prs_mem_free(&auth_info);
1767 return ret;
1769 break;
1771 case PIPE_AUTH_TYPE_KRB5:
1772 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1773 if (!NT_STATUS_IS_OK(ret)) {
1774 prs_mem_free(&auth_info);
1775 return ret;
1777 break;
1779 case PIPE_AUTH_TYPE_NONE:
1780 break;
1782 default:
1783 /* "Can't" happen. */
1784 return NT_STATUS_INVALID_INFO_CLASS;
1787 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1788 rpc_out,
1789 rpc_call_id,
1790 abstract,
1791 transfer,
1792 &hdr_auth,
1793 &auth_info);
1795 prs_mem_free(&auth_info);
1796 return ret;
1799 /*******************************************************************
1800 Create and add the NTLMSSP sign/seal auth header and data.
1801 ********************************************************************/
1803 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1804 RPC_HDR *phdr,
1805 uint32 ss_padding_len,
1806 prs_struct *outgoing_pdu)
1808 RPC_HDR_AUTH auth_info;
1809 NTSTATUS status;
1810 DATA_BLOB auth_blob = data_blob_null;
1811 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1813 if (!cli->auth->a_u.ntlmssp_state) {
1814 return NT_STATUS_INVALID_PARAMETER;
1817 /* Init and marshall the auth header. */
1818 init_rpc_hdr_auth(&auth_info,
1819 map_pipe_auth_type_to_rpc_auth_type(
1820 cli->auth->auth_type),
1821 cli->auth->auth_level,
1822 ss_padding_len,
1823 1 /* context id. */);
1825 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1826 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1827 data_blob_free(&auth_blob);
1828 return NT_STATUS_NO_MEMORY;
1831 switch (cli->auth->auth_level) {
1832 case PIPE_AUTH_LEVEL_PRIVACY:
1833 /* Data portion is encrypted. */
1834 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1835 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1836 data_and_pad_len,
1837 (unsigned char *)prs_data_p(outgoing_pdu),
1838 (size_t)prs_offset(outgoing_pdu),
1839 &auth_blob);
1840 if (!NT_STATUS_IS_OK(status)) {
1841 data_blob_free(&auth_blob);
1842 return status;
1844 break;
1846 case PIPE_AUTH_LEVEL_INTEGRITY:
1847 /* Data is signed. */
1848 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1849 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1850 data_and_pad_len,
1851 (unsigned char *)prs_data_p(outgoing_pdu),
1852 (size_t)prs_offset(outgoing_pdu),
1853 &auth_blob);
1854 if (!NT_STATUS_IS_OK(status)) {
1855 data_blob_free(&auth_blob);
1856 return status;
1858 break;
1860 default:
1861 /* Can't happen. */
1862 smb_panic("bad auth level");
1863 /* Notreached. */
1864 return NT_STATUS_INVALID_PARAMETER;
1867 /* Finally marshall the blob. */
1869 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1870 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1871 (unsigned int)NTLMSSP_SIG_SIZE));
1872 data_blob_free(&auth_blob);
1873 return NT_STATUS_NO_MEMORY;
1876 data_blob_free(&auth_blob);
1877 return NT_STATUS_OK;
1880 /*******************************************************************
1881 Create and add the schannel sign/seal auth header and data.
1882 ********************************************************************/
1884 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1885 RPC_HDR *phdr,
1886 uint32 ss_padding_len,
1887 prs_struct *outgoing_pdu)
1889 RPC_HDR_AUTH auth_info;
1890 RPC_AUTH_SCHANNEL_CHK verf;
1891 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1892 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1893 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1895 if (!sas) {
1896 return NT_STATUS_INVALID_PARAMETER;
1899 /* Init and marshall the auth header. */
1900 init_rpc_hdr_auth(&auth_info,
1901 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1902 cli->auth->auth_level,
1903 ss_padding_len,
1904 1 /* context id. */);
1906 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1907 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1908 return NT_STATUS_NO_MEMORY;
1911 switch (cli->auth->auth_level) {
1912 case PIPE_AUTH_LEVEL_PRIVACY:
1913 case PIPE_AUTH_LEVEL_INTEGRITY:
1914 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1915 sas->seq_num));
1917 schannel_encode(sas,
1918 cli->auth->auth_level,
1919 SENDER_IS_INITIATOR,
1920 &verf,
1921 data_p,
1922 data_and_pad_len);
1924 sas->seq_num++;
1925 break;
1927 default:
1928 /* Can't happen. */
1929 smb_panic("bad auth level");
1930 /* Notreached. */
1931 return NT_STATUS_INVALID_PARAMETER;
1934 /* Finally marshall the blob. */
1935 smb_io_rpc_auth_schannel_chk("",
1936 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1937 &verf,
1938 outgoing_pdu,
1941 return NT_STATUS_OK;
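/*
 * Rough sketch (not a wire-format reference) of an authenticated request
 * fragment as assembled by the two footer routines above and sized by
 * calculate_data_len_tosend() below:
 *
 *   [ RPC header ][ request header ][ NDR data ][ sign/seal pad to 8 bytes ]
 *   [ RPC_HDR_AUTH ][ auth blob: NTLMSSP signature or schannel verifier ]
 *
 * With PIPE_AUTH_LEVEL_PRIVACY the NDR data plus padding is encrypted in
 * place; with PIPE_AUTH_LEVEL_INTEGRITY it is only signed.
 */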
1944 /*******************************************************************
1945 Calculate how much data we're going to send in this packet, and
1946 work out any sign/seal padding length.
1947 ********************************************************************/
1949 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1950 uint32 data_left,
1951 uint16 *p_frag_len,
1952 uint16 *p_auth_len,
1953 uint32 *p_ss_padding)
1955 uint32 data_space, data_len;
1957 #ifdef DEVELOPER
1958 if ((data_left > 0) && (sys_random() % 2)) {
1959 data_left = MAX(data_left/2, 1);
1961 #endif
1963 switch (cli->auth->auth_level) {
1964 case PIPE_AUTH_LEVEL_NONE:
1965 case PIPE_AUTH_LEVEL_CONNECT:
1966 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1967 data_len = MIN(data_space, data_left);
1968 *p_ss_padding = 0;
1969 *p_auth_len = 0;
1970 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1971 return data_len;
1973 case PIPE_AUTH_LEVEL_INTEGRITY:
1974 case PIPE_AUTH_LEVEL_PRIVACY:
1975 /* Treat the same for all authenticated rpc requests. */
1976 switch(cli->auth->auth_type) {
1977 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1978 case PIPE_AUTH_TYPE_NTLMSSP:
1979 *p_auth_len = NTLMSSP_SIG_SIZE;
1980 break;
1981 case PIPE_AUTH_TYPE_SCHANNEL:
1982 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1983 break;
1984 default:
1985 smb_panic("bad auth type");
1986 break;
1989 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
1990 RPC_HDR_AUTH_LEN - *p_auth_len;
1992 data_len = MIN(data_space, data_left);
1993 *p_ss_padding = 0;
1994 if (data_len % 8) {
1995 *p_ss_padding = 8 - (data_len % 8);
1997 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
1998 data_len + *p_ss_padding + /* data plus padding. */
1999 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2000 return data_len;
2002 default:
2003 smb_panic("bad auth level");
2004 /* Notreached. */
2005 return 0;
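/*
 * Worked example for the authenticated case above, assuming the usual sizes
 * in this tree (16 byte RPC header, 8 byte request header, 8 byte auth
 * header, 16 byte NTLMSSP signature) and a max_xmit_frag of 4280:
 *
 *   data_space = 4280 - 16 - 8 - 8 - 16 = 4232
 *   data_len   = MIN(4232, data_left)
 *   ss_padding = (8 - (data_len % 8)) % 8
 *   frag_len   = 16 + 8 + data_len + ss_padding + 8 + 16
 *
 * so a full NTLMSSP-protected fragment carries 4232 bytes of NDR data and
 * comes out at exactly max_xmit_frag bytes on the wire.
 */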
2009 /*******************************************************************
2010 External interface.
2011 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2012 Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2013 and deals with signing/sealing details.
2014 ********************************************************************/
2016 struct rpc_api_pipe_req_state {
2017 struct event_context *ev;
2018 struct rpc_pipe_client *cli;
2019 uint8_t op_num;
2020 uint32_t call_id;
2021 prs_struct *req_data;
2022 uint32_t req_data_sent;
2023 prs_struct outgoing_frag;
2024 prs_struct reply_pdu;
2027 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2029 prs_mem_free(&s->outgoing_frag);
2030 prs_mem_free(&s->reply_pdu);
2031 return 0;
2034 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2035 static void rpc_api_pipe_req_done(struct async_req *subreq);
2036 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2037 bool *is_last_frag);
2039 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2040 struct event_context *ev,
2041 struct rpc_pipe_client *cli,
2042 uint8_t op_num,
2043 prs_struct *req_data)
2045 struct async_req *result, *subreq;
2046 struct rpc_api_pipe_req_state *state;
2047 NTSTATUS status;
2048 bool is_last_frag;
2050 if (!async_req_setup(mem_ctx, &result, &state,
2051 struct rpc_api_pipe_req_state)) {
2052 return NULL;
2054 state->ev = ev;
2055 state->cli = cli;
2056 state->op_num = op_num;
2057 state->req_data = req_data;
2058 state->req_data_sent = 0;
2059 state->call_id = get_rpc_call_id();
2061 if (cli->max_xmit_frag
2062 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2063 /* Server is screwed up ! */
2064 status = NT_STATUS_INVALID_PARAMETER;
2065 goto post_status;
2068 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2070 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2071 state, MARSHALL)) {
2072 status = NT_STATUS_NO_MEMORY;
2073 goto post_status;
2076 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2078 status = prepare_next_frag(state, &is_last_frag);
2079 if (!NT_STATUS_IS_OK(status)) {
2080 goto post_status;
2083 if (is_last_frag) {
2084 subreq = rpc_api_pipe_send(state, ev, state->cli,
2085 &state->outgoing_frag,
2086 RPC_RESPONSE);
2087 if (subreq == NULL) {
2088 status = NT_STATUS_NO_MEMORY;
2089 goto post_status;
2091 subreq->async.fn = rpc_api_pipe_req_done;
2092 subreq->async.priv = result;
2093 } else {
2094 subreq = rpc_write_send(
2095 state, ev, cli->transport,
2096 (uint8_t *)prs_data_p(&state->outgoing_frag),
2097 prs_offset(&state->outgoing_frag));
2098 if (subreq == NULL) {
2099 status = NT_STATUS_NO_MEMORY;
2100 goto post_status;
2102 subreq->async.fn = rpc_api_pipe_req_write_done;
2103 subreq->async.priv = result;
2105 return result;
2107 post_status:
2108 if (async_post_status(result, ev, status)) {
2109 return result;
2111 TALLOC_FREE(result);
2112 return NULL;
2115 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2116 bool *is_last_frag)
2118 RPC_HDR hdr;
2119 RPC_HDR_REQ hdr_req;
2120 uint32_t data_sent_thistime;
2121 uint16_t auth_len;
2122 uint16_t frag_len;
2123 uint8_t flags = 0;
2124 uint32_t ss_padding;
2125 uint32_t data_left;
2126 char pad[8] = { 0, };
2127 NTSTATUS status;
2129 data_left = prs_offset(state->req_data) - state->req_data_sent;
2131 data_sent_thistime = calculate_data_len_tosend(
2132 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2134 if (state->req_data_sent == 0) {
2135 flags = RPC_FLG_FIRST;
2138 if (data_sent_thistime == data_left) {
2139 flags |= RPC_FLG_LAST;
2142 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2143 return NT_STATUS_NO_MEMORY;
2146 /* Create and marshall the header and request header. */
2147 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2148 auth_len);
2150 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2151 return NT_STATUS_NO_MEMORY;
2154 /* Create the rpc request RPC_HDR_REQ */
2155 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2156 state->op_num);
2158 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2159 &state->outgoing_frag, 0)) {
2160 return NT_STATUS_NO_MEMORY;
2163 /* Copy in the data, plus any ss padding. */
2164 if (!prs_append_some_prs_data(&state->outgoing_frag,
2165 state->req_data, state->req_data_sent,
2166 data_sent_thistime)) {
2167 return NT_STATUS_NO_MEMORY;
2170 /* Copy the sign/seal padding data. */
2171 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2172 return NT_STATUS_NO_MEMORY;
2175 /* Generate any auth sign/seal and add the auth footer. */
2176 switch (state->cli->auth->auth_type) {
2177 case PIPE_AUTH_TYPE_NONE:
2178 status = NT_STATUS_OK;
2179 break;
2180 case PIPE_AUTH_TYPE_NTLMSSP:
2181 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2182 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2183 &state->outgoing_frag);
2184 break;
2185 case PIPE_AUTH_TYPE_SCHANNEL:
2186 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2187 &state->outgoing_frag);
2188 break;
2189 default:
2190 status = NT_STATUS_INVALID_PARAMETER;
2191 break;
2194 state->req_data_sent += data_sent_thistime;
2195 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2197 return status;
2200 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2202 struct async_req *req = talloc_get_type_abort(
2203 subreq->async.priv, struct async_req);
2204 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2205 req->private_data, struct rpc_api_pipe_req_state);
2206 NTSTATUS status;
2207 bool is_last_frag;
2209 status = rpc_write_recv(subreq);
2210 TALLOC_FREE(subreq);
2211 if (!NT_STATUS_IS_OK(status)) {
2212 async_req_error(req, status);
2213 return;
2216 status = prepare_next_frag(state, &is_last_frag);
2217 if (!NT_STATUS_IS_OK(status)) {
2218 async_req_error(req, status);
2219 return;
2222 if (is_last_frag) {
2223 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2224 &state->outgoing_frag,
2225 RPC_RESPONSE);
2226 if (async_req_nomem(subreq, req)) {
2227 return;
2229 subreq->async.fn = rpc_api_pipe_req_done;
2230 subreq->async.priv = req;
2231 } else {
2232 subreq = rpc_write_send(
2233 state, state->ev,
2234 state->cli->transport,
2235 (uint8_t *)prs_data_p(&state->outgoing_frag),
2236 prs_offset(&state->outgoing_frag));
2237 if (async_req_nomem(subreq, req)) {
2238 return;
2240 subreq->async.fn = rpc_api_pipe_req_write_done;
2241 subreq->async.priv = req;
2245 static void rpc_api_pipe_req_done(struct async_req *subreq)
2247 struct async_req *req = talloc_get_type_abort(
2248 subreq->async.priv, struct async_req);
2249 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2250 req->private_data, struct rpc_api_pipe_req_state);
2251 NTSTATUS status;
2253 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2254 TALLOC_FREE(subreq);
2255 if (!NT_STATUS_IS_OK(status)) {
2256 async_req_error(req, status);
2257 return;
2259 async_req_done(req);
2262 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2263 prs_struct *reply_pdu)
2265 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2266 req->private_data, struct rpc_api_pipe_req_state);
2267 NTSTATUS status;
2269 if (async_req_is_error(req, &status)) {
2270 /*
2271 * We always have to initialize the reply pdu, even if there is
2272 * none. The rpccli_* caller routines expect this.
2273 */
2274 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2275 return status;
2278 *reply_pdu = state->reply_pdu;
2279 reply_pdu->mem_ctx = mem_ctx;
2281 /*
2282 * Prevent state->reply_pdu from being freed in
2283 * rpc_api_pipe_req_state_destructor()
2284 */
2285 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2287 return NT_STATUS_OK;
2290 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2291 uint8 op_num,
2292 prs_struct *in_data,
2293 prs_struct *out_data)
2295 TALLOC_CTX *frame = talloc_stackframe();
2296 struct event_context *ev;
2297 struct async_req *req;
2298 NTSTATUS status = NT_STATUS_NO_MEMORY;
2300 ev = event_context_init(frame);
2301 if (ev == NULL) {
2302 goto fail;
2305 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2306 if (req == NULL) {
2307 goto fail;
2310 while (req->state < ASYNC_REQ_DONE) {
2311 event_loop_once(ev);
2314 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2315 fail:
2316 TALLOC_FREE(frame);
2317 return status;
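/*
 * Minimal usage sketch for the synchronous wrapper above. Illustration only,
 * not compiled; example_rpc_call() and its arguments are placeholders. Real
 * callers are the generated rpccli_* stubs, which NDR-push the arguments
 * into in_data and NDR-pull the result out of out_data.
 */
#if 0
static NTSTATUS example_rpc_call(struct rpc_pipe_client *cli, uint8 op_num)
{
	prs_struct in_data, out_data;
	NTSTATUS status;

	if (!prs_init(&in_data, RPC_MAX_PDU_FRAG_LEN, talloc_tos(), MARSHALL)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Marshall the request arguments into in_data here. */

	status = rpc_api_pipe_req(talloc_tos(), cli, op_num,
				  &in_data, &out_data);
	prs_mem_free(&in_data);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Unmarshall the reply from out_data here. */

	prs_mem_free(&out_data);
	return NT_STATUS_OK;
}
#endif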
2320 #if 0
2321 /****************************************************************************
2322 Set the handle state.
2323 ****************************************************************************/
2325 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2326 const char *pipe_name, uint16 device_state)
2328 bool state_set = False;
2329 char param[2];
2330 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2331 char *rparam = NULL;
2332 char *rdata = NULL;
2333 uint32 rparam_len, rdata_len;
2335 if (pipe_name == NULL)
2336 return False;
2338 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2339 cli->fnum, pipe_name, device_state));
2341 /* create parameters: device state */
2342 SSVAL(param, 0, device_state);
2344 /* create setup parameters. */
2345 setup[0] = 0x0001;
2346 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2348 /* send the data on \PIPE\ */
2349 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2350 setup, 2, 0, /* setup, length, max */
2351 param, 2, 0, /* param, length, max */
2352 NULL, 0, 1024, /* data, length, max */
2353 &rparam, &rparam_len, /* return param, length */
2354 &rdata, &rdata_len)) /* return data, length */
2356 DEBUG(5, ("Set Handle state: return OK\n"));
2357 state_set = True;
2360 SAFE_FREE(rparam);
2361 SAFE_FREE(rdata);
2363 return state_set;
2365 #endif
2367 /****************************************************************************
2368 Check the rpc bind acknowledge response.
2369 ****************************************************************************/
2371 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2373 if ( hdr_ba->addr.len == 0) {
2374 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2377 /* check the transfer syntax */
2378 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2379 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2380 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2381 return False;
2384 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2385 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2386 hdr_ba->res.num_results, hdr_ba->res.reason));
2389 DEBUG(5,("check_bind_response: accepted!\n"));
2390 return True;
2393 /*******************************************************************
2394 Creates a DCE/RPC bind authentication response.
2395 This is the packet that is sent back to the server once we
2396 have received a BIND-ACK, to finish the third leg of
2397 the authentication handshake.
2398 ********************************************************************/
2400 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2401 uint32 rpc_call_id,
2402 enum pipe_auth_type auth_type,
2403 enum pipe_auth_level auth_level,
2404 DATA_BLOB *pauth_blob,
2405 prs_struct *rpc_out)
2407 RPC_HDR hdr;
2408 RPC_HDR_AUTH hdr_auth;
2409 uint32 pad = 0;
2411 /* Create the request RPC_HDR */
2412 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2413 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2414 pauth_blob->length );
2416 /* Marshall it. */
2417 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2418 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2419 return NT_STATUS_NO_MEMORY;
2420 }
2422 /*
2423 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2424 about padding - shouldn't this pad to length 8 ? JRA.
2425 */
2427 /* 4 bytes padding. */
2428 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2429 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2430 return NT_STATUS_NO_MEMORY;
2433 /* Create the request RPC_HDR_AUTHA */
2434 init_rpc_hdr_auth(&hdr_auth,
2435 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2436 auth_level, 0, 1);
2438 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2439 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2440 return NT_STATUS_NO_MEMORY;
2444 * Append the auth data to the outgoing buffer.
2447 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2448 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2449 return NT_STATUS_NO_MEMORY;
2452 return NT_STATUS_OK;
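/*
 * Sketch of the resulting AUTH3 PDU, per the marshalling above:
 *
 *   [ RPC header, type RPC_AUTH3 ][ 4 byte pad ][ RPC_HDR_AUTH ][ auth blob ]
 *
 * The server sends no reply to this packet; writing it completes the
 * NTLMSSP three-leg bind.
 */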
2455 /*******************************************************************
2456 Creates a DCE/RPC bind alter context authentication request which
2457 may contain a spnego auth blob.
2458 ********************************************************************/
2460 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2461 const RPC_IFACE *abstract,
2462 const RPC_IFACE *transfer,
2463 enum pipe_auth_level auth_level,
2464 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2465 prs_struct *rpc_out)
2467 RPC_HDR_AUTH hdr_auth;
2468 prs_struct auth_info;
2469 NTSTATUS ret = NT_STATUS_OK;
2471 ZERO_STRUCT(hdr_auth);
2472 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2473 return NT_STATUS_NO_MEMORY;
2475 /* We may change the pad length before marshalling. */
2476 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2478 if (pauth_blob->length) {
2479 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2480 prs_mem_free(&auth_info);
2481 return NT_STATUS_NO_MEMORY;
2485 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2486 rpc_out,
2487 rpc_call_id,
2488 abstract,
2489 transfer,
2490 &hdr_auth,
2491 &auth_info);
2492 prs_mem_free(&auth_info);
2493 return ret;
2496 /****************************************************************************
2497 Do an rpc bind.
2498 ****************************************************************************/
2500 struct rpc_pipe_bind_state {
2501 struct event_context *ev;
2502 struct rpc_pipe_client *cli;
2503 prs_struct rpc_out;
2504 uint32_t rpc_call_id;
2507 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2509 prs_mem_free(&state->rpc_out);
2510 return 0;
2513 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2514 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2515 struct rpc_pipe_bind_state *state,
2516 struct rpc_hdr_info *phdr,
2517 prs_struct *reply_pdu);
2518 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2519 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2520 struct rpc_pipe_bind_state *state,
2521 struct rpc_hdr_info *phdr,
2522 prs_struct *reply_pdu);
2523 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2525 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2526 struct event_context *ev,
2527 struct rpc_pipe_client *cli,
2528 struct cli_pipe_auth_data *auth)
2530 struct async_req *result, *subreq;
2531 struct rpc_pipe_bind_state *state;
2532 NTSTATUS status;
2534 if (!async_req_setup(mem_ctx, &result, &state,
2535 struct rpc_pipe_bind_state)) {
2536 return NULL;
2539 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2540 rpccli_pipe_txt(debug_ctx(), cli),
2541 (unsigned int)auth->auth_type,
2542 (unsigned int)auth->auth_level ));
2544 state->ev = ev;
2545 state->cli = cli;
2546 state->rpc_call_id = get_rpc_call_id();
2548 prs_init_empty(&state->rpc_out, state, MARSHALL);
2549 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2551 cli->auth = talloc_move(cli, &auth);
2553 /* Marshall the outgoing data. */
2554 status = create_rpc_bind_req(cli, &state->rpc_out,
2555 state->rpc_call_id,
2556 &cli->abstract_syntax,
2557 &cli->transfer_syntax,
2558 cli->auth->auth_type,
2559 cli->auth->auth_level);
2561 if (!NT_STATUS_IS_OK(status)) {
2562 goto post_status;
2565 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2566 RPC_BINDACK);
2567 if (subreq == NULL) {
2568 status = NT_STATUS_NO_MEMORY;
2569 goto post_status;
2571 subreq->async.fn = rpc_pipe_bind_step_one_done;
2572 subreq->async.priv = result;
2573 return result;
2575 post_status:
2576 if (async_post_status(result, ev, status)) {
2577 return result;
2579 TALLOC_FREE(result);
2580 return NULL;
2583 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2585 struct async_req *req = talloc_get_type_abort(
2586 subreq->async.priv, struct async_req);
2587 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2588 req->private_data, struct rpc_pipe_bind_state);
2589 prs_struct reply_pdu;
2590 struct rpc_hdr_info hdr;
2591 struct rpc_hdr_ba_info hdr_ba;
2592 NTSTATUS status;
2594 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2595 TALLOC_FREE(subreq);
2596 if (!NT_STATUS_IS_OK(status)) {
2597 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2598 rpccli_pipe_txt(debug_ctx(), state->cli),
2599 nt_errstr(status)));
2600 async_req_error(req, status);
2601 return;
2604 /* Unmarshall the RPC header */
2605 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2606 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2607 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2608 return;
2611 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2612 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2613 "RPC_HDR_BA.\n"));
2614 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2615 return;
2618 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2619 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2620 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2621 return;
2624 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2625 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2628 * For authenticated binds we may need to do 3 or 4 leg binds.
2631 switch(state->cli->auth->auth_type) {
2633 case PIPE_AUTH_TYPE_NONE:
2634 case PIPE_AUTH_TYPE_SCHANNEL:
2635 /* Bind complete. */
2636 async_req_done(req);
2637 break;
2639 case PIPE_AUTH_TYPE_NTLMSSP:
2640 /* Need to send AUTH3 packet - no reply. */
2641 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2642 &reply_pdu);
2643 if (!NT_STATUS_IS_OK(status)) {
2644 async_req_error(req, status);
2646 break;
2648 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2649 /* Need to send alter context request and reply. */
2650 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2651 &reply_pdu);
2652 if (!NT_STATUS_IS_OK(status)) {
2653 async_req_error(req, status);
2655 break;
2657 case PIPE_AUTH_TYPE_KRB5:
2658 /* Fall through - KRB5 bind completion is not handled here. */
2660 default:
2661 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2662 (unsigned int)state->cli->auth->auth_type));
2663 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
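/*
 * Summary of the bind continuations dispatched above (sketch):
 *
 *   NONE, SCHANNEL  : BIND -> BIND_ACK, nothing more to do.
 *   NTLMSSP         : BIND -> BIND_ACK -> AUTH3 (no reply), three legs.
 *   SPNEGO_NTLMSSP  : BIND -> BIND_ACK -> ALTER_CONTEXT -> ALTER_CONTEXT_RESP,
 *                     four legs.
 *   KRB5            : not handled here, falls through to the error case.
 */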
2667 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2668 struct rpc_pipe_bind_state *state,
2669 struct rpc_hdr_info *phdr,
2670 prs_struct *reply_pdu)
2672 DATA_BLOB server_response = data_blob_null;
2673 DATA_BLOB client_reply = data_blob_null;
2674 struct rpc_hdr_auth_info hdr_auth;
2675 struct async_req *subreq;
2676 NTSTATUS status;
2678 if ((phdr->auth_len == 0)
2679 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2680 return NT_STATUS_INVALID_PARAMETER;
2683 if (!prs_set_offset(
2684 reply_pdu,
2685 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2686 return NT_STATUS_INVALID_PARAMETER;
2689 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2690 return NT_STATUS_INVALID_PARAMETER;
2693 /* TODO - check auth_type/auth_level match. */
2695 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2696 prs_copy_data_out((char *)server_response.data, reply_pdu,
2697 phdr->auth_len);
2699 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2700 server_response, &client_reply);
2702 if (!NT_STATUS_IS_OK(status)) {
2703 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2704 "blob failed: %s.\n", nt_errstr(status)));
2705 return status;
2708 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2710 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2711 state->cli->auth->auth_type,
2712 state->cli->auth->auth_level,
2713 &client_reply, &state->rpc_out);
2714 data_blob_free(&client_reply);
2716 if (!NT_STATUS_IS_OK(status)) {
2717 return status;
2720 subreq = rpc_write_send(state, state->ev, state->cli->transport,
2721 (uint8_t *)prs_data_p(&state->rpc_out),
2722 prs_offset(&state->rpc_out));
2723 if (subreq == NULL) {
2724 return NT_STATUS_NO_MEMORY;
2726 subreq->async.fn = rpc_bind_auth3_write_done;
2727 subreq->async.priv = req;
2728 return NT_STATUS_OK;
2731 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2733 struct async_req *req = talloc_get_type_abort(
2734 subreq->async.priv, struct async_req);
2735 NTSTATUS status;
2737 status = rpc_write_recv(subreq);
2738 TALLOC_FREE(subreq);
2739 if (!NT_STATUS_IS_OK(status)) {
2740 async_req_error(req, status);
2741 return;
2743 async_req_done(req);
2746 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2747 struct rpc_pipe_bind_state *state,
2748 struct rpc_hdr_info *phdr,
2749 prs_struct *reply_pdu)
2751 DATA_BLOB server_spnego_response = data_blob_null;
2752 DATA_BLOB server_ntlm_response = data_blob_null;
2753 DATA_BLOB client_reply = data_blob_null;
2754 DATA_BLOB tmp_blob = data_blob_null;
2755 RPC_HDR_AUTH hdr_auth;
2756 struct async_req *subreq;
2757 NTSTATUS status;
2759 if ((phdr->auth_len == 0)
2760 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2761 return NT_STATUS_INVALID_PARAMETER;
2764 /* Process the returned NTLMSSP blob first. */
2765 if (!prs_set_offset(
2766 reply_pdu,
2767 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2768 return NT_STATUS_INVALID_PARAMETER;
2771 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2772 return NT_STATUS_INVALID_PARAMETER;
2775 server_spnego_response = data_blob(NULL, phdr->auth_len);
2776 prs_copy_data_out((char *)server_spnego_response.data,
2777 reply_pdu, phdr->auth_len);
2780 * The server might give us back two challenges - tmp_blob is for the
2781 * second.
2783 if (!spnego_parse_challenge(server_spnego_response,
2784 &server_ntlm_response, &tmp_blob)) {
2785 data_blob_free(&server_spnego_response);
2786 data_blob_free(&server_ntlm_response);
2787 data_blob_free(&tmp_blob);
2788 return NT_STATUS_INVALID_PARAMETER;
2791 /* We're finished with the server spnego response and the tmp_blob. */
2792 data_blob_free(&server_spnego_response);
2793 data_blob_free(&tmp_blob);
2795 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2796 server_ntlm_response, &client_reply);
2798 /* Finished with the server_ntlm response */
2799 data_blob_free(&server_ntlm_response);
2801 if (!NT_STATUS_IS_OK(status)) {
2802 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2803 "using server blob failed.\n"));
2804 data_blob_free(&client_reply);
2805 return status;
2808 /* SPNEGO wrap the client reply. */
2809 tmp_blob = spnego_gen_auth(client_reply);
2810 data_blob_free(&client_reply);
2811 client_reply = tmp_blob;
2812 tmp_blob = data_blob_null;
2814 /* Now prepare the alter context pdu. */
2815 prs_init_empty(&state->rpc_out, state, MARSHALL);
2817 status = create_rpc_alter_context(state->rpc_call_id,
2818 &state->cli->abstract_syntax,
2819 &state->cli->transfer_syntax,
2820 state->cli->auth->auth_level,
2821 &client_reply,
2822 &state->rpc_out);
2823 data_blob_free(&client_reply);
2825 if (!NT_STATUS_IS_OK(status)) {
2826 return status;
2829 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2830 &state->rpc_out, RPC_ALTCONTRESP);
2831 if (subreq == NULL) {
2832 return NT_STATUS_NO_MEMORY;
2834 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2835 subreq->async.priv = req;
2836 return NT_STATUS_OK;
2839 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2841 struct async_req *req = talloc_get_type_abort(
2842 subreq->async.priv, struct async_req);
2843 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2844 req->private_data, struct rpc_pipe_bind_state);
2845 DATA_BLOB server_spnego_response = data_blob_null;
2846 DATA_BLOB tmp_blob = data_blob_null;
2847 prs_struct reply_pdu;
2848 struct rpc_hdr_info hdr;
2849 struct rpc_hdr_auth_info hdr_auth;
2850 NTSTATUS status;
2852 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2853 TALLOC_FREE(subreq);
2854 if (!NT_STATUS_IS_OK(status)) {
2855 async_req_error(req, status);
2856 return;
2859 /* Get the auth blob from the reply. */
2860 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2861 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2862 "unmarshall RPC_HDR.\n"));
2863 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2864 return;
2867 if (!prs_set_offset(
2868 &reply_pdu,
2869 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2870 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2871 return;
2874 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2875 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2876 return;
2879 server_spnego_response = data_blob(NULL, hdr.auth_len);
2880 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2881 hdr.auth_len);
2883 /* Check we got a valid auth response. */
2884 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2885 OID_NTLMSSP, &tmp_blob)) {
2886 data_blob_free(&server_spnego_response);
2887 data_blob_free(&tmp_blob);
2888 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2889 return;
2892 data_blob_free(&server_spnego_response);
2893 data_blob_free(&tmp_blob);
2895 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2896 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2897 async_req_done(req);
2900 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2902 return async_req_simple_recv(req);
2905 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2906 struct cli_pipe_auth_data *auth)
2908 TALLOC_CTX *frame = talloc_stackframe();
2909 struct event_context *ev;
2910 struct async_req *req;
2911 NTSTATUS status = NT_STATUS_NO_MEMORY;
2913 ev = event_context_init(frame);
2914 if (ev == NULL) {
2915 goto fail;
2918 req = rpc_pipe_bind_send(frame, ev, cli, auth);
2919 if (req == NULL) {
2920 goto fail;
2923 while (req->state < ASYNC_REQ_DONE) {
2924 event_loop_once(ev);
2927 status = rpc_pipe_bind_recv(req);
2928 fail:
2929 TALLOC_FREE(frame);
2930 return status;
2933 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2934 unsigned int timeout)
2936 struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2938 if (cli == NULL) {
2939 return 0;
2941 return cli_set_timeout(cli, timeout);
2944 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2946 struct cli_state *cli;
2948 if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2949 || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2950 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2951 return true;
2954 cli = rpc_pipe_np_smb_conn(rpc_cli);
2955 if (cli == NULL) {
2956 return false;
2958 E_md4hash(cli->pwd.password, nt_hash);
2959 return true;
2962 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2963 struct cli_pipe_auth_data **presult)
2965 struct cli_pipe_auth_data *result;
2967 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2968 if (result == NULL) {
2969 return NT_STATUS_NO_MEMORY;
2972 result->auth_type = PIPE_AUTH_TYPE_NONE;
2973 result->auth_level = PIPE_AUTH_LEVEL_NONE;
2975 result->user_name = talloc_strdup(result, "");
2976 result->domain = talloc_strdup(result, "");
2977 if ((result->user_name == NULL) || (result->domain == NULL)) {
2978 TALLOC_FREE(result);
2979 return NT_STATUS_NO_MEMORY;
2982 *presult = result;
2983 return NT_STATUS_OK;
2986 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
2988 ntlmssp_end(&auth->a_u.ntlmssp_state);
2989 return 0;
2992 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
2993 enum pipe_auth_type auth_type,
2994 enum pipe_auth_level auth_level,
2995 const char *domain,
2996 const char *username,
2997 const char *password,
2998 struct cli_pipe_auth_data **presult)
3000 struct cli_pipe_auth_data *result;
3001 NTSTATUS status;
3003 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3004 if (result == NULL) {
3005 return NT_STATUS_NO_MEMORY;
3008 result->auth_type = auth_type;
3009 result->auth_level = auth_level;
3011 result->user_name = talloc_strdup(result, username);
3012 result->domain = talloc_strdup(result, domain);
3013 if ((result->user_name == NULL) || (result->domain == NULL)) {
3014 status = NT_STATUS_NO_MEMORY;
3015 goto fail;
3018 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3019 if (!NT_STATUS_IS_OK(status)) {
3020 goto fail;
3023 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3025 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3026 if (!NT_STATUS_IS_OK(status)) {
3027 goto fail;
3030 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3031 if (!NT_STATUS_IS_OK(status)) {
3032 goto fail;
3035 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3036 if (!NT_STATUS_IS_OK(status)) {
3037 goto fail;
3041 * Turn off sign+seal to allow selected auth level to turn it back on.
3043 result->a_u.ntlmssp_state->neg_flags &=
3044 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3046 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3047 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3048 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3049 result->a_u.ntlmssp_state->neg_flags
3050 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3053 *presult = result;
3054 return NT_STATUS_OK;
3056 fail:
3057 TALLOC_FREE(result);
3058 return status;
3061 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3062 enum pipe_auth_level auth_level,
3063 const uint8_t sess_key[16],
3064 struct cli_pipe_auth_data **presult)
3066 struct cli_pipe_auth_data *result;
3068 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3069 if (result == NULL) {
3070 return NT_STATUS_NO_MEMORY;
3073 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3074 result->auth_level = auth_level;
3076 result->user_name = talloc_strdup(result, "");
3077 result->domain = talloc_strdup(result, domain);
3078 if ((result->user_name == NULL) || (result->domain == NULL)) {
3079 goto fail;
3082 result->a_u.schannel_auth = talloc(result,
3083 struct schannel_auth_struct);
3084 if (result->a_u.schannel_auth == NULL) {
3085 goto fail;
3088 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3089 sizeof(result->a_u.schannel_auth->sess_key));
3090 result->a_u.schannel_auth->seq_num = 0;
3092 *presult = result;
3093 return NT_STATUS_OK;
3095 fail:
3096 TALLOC_FREE(result);
3097 return NT_STATUS_NO_MEMORY;
3100 #ifdef HAVE_KRB5
3101 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3103 data_blob_free(&auth->session_key);
3104 return 0;
3106 #endif
3108 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3109 enum pipe_auth_level auth_level,
3110 const char *service_princ,
3111 const char *username,
3112 const char *password,
3113 struct cli_pipe_auth_data **presult)
3115 #ifdef HAVE_KRB5
3116 struct cli_pipe_auth_data *result;
3118 if ((username != NULL) && (password != NULL)) {
3119 int ret = kerberos_kinit_password(username, password, 0, NULL);
3120 if (ret != 0) {
3121 return NT_STATUS_ACCESS_DENIED;
3125 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3126 if (result == NULL) {
3127 return NT_STATUS_NO_MEMORY;
3130 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3131 result->auth_level = auth_level;
3134 * Username / domain need fixing!
3136 result->user_name = talloc_strdup(result, "");
3137 result->domain = talloc_strdup(result, "");
3138 if ((result->user_name == NULL) || (result->domain == NULL)) {
3139 goto fail;
3142 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3143 result, struct kerberos_auth_struct);
3144 if (result->a_u.kerberos_auth == NULL) {
3145 goto fail;
3147 talloc_set_destructor(result->a_u.kerberos_auth,
3148 cli_auth_kerberos_data_destructor);
3150 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3151 result, service_princ);
3152 if (result->a_u.kerberos_auth->service_principal == NULL) {
3153 goto fail;
3156 *presult = result;
3157 return NT_STATUS_OK;
3159 fail:
3160 TALLOC_FREE(result);
3161 return NT_STATUS_NO_MEMORY;
3162 #else
3163 return NT_STATUS_NOT_SUPPORTED;
3164 #endif
3168 * Create an rpc pipe client struct, connecting to a tcp port.
3170 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3171 uint16_t port,
3172 const struct ndr_syntax_id *abstract_syntax,
3173 struct rpc_pipe_client **presult)
3175 struct rpc_pipe_client *result;
3176 struct sockaddr_storage addr;
3177 NTSTATUS status;
3178 int fd;
3180 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3181 if (result == NULL) {
3182 return NT_STATUS_NO_MEMORY;
3185 result->abstract_syntax = *abstract_syntax;
3186 result->transfer_syntax = ndr_transfer_syntax;
3187 result->dispatch = cli_do_rpc_ndr;
3189 result->desthost = talloc_strdup(result, host);
3190 result->srv_name_slash = talloc_asprintf_strupper_m(
3191 result, "\\\\%s", result->desthost);
3192 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3193 status = NT_STATUS_NO_MEMORY;
3194 goto fail;
3197 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3198 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3200 if (!resolve_name(host, &addr, 0)) {
3201 status = NT_STATUS_NOT_FOUND;
3202 goto fail;
3205 status = open_socket_out(&addr, port, 60, &fd);
3206 if (!NT_STATUS_IS_OK(status)) {
3207 goto fail;
3209 set_socket_options(fd, lp_socket_options());
3211 status = rpc_transport_sock_init(result, fd, &result->transport);
3212 if (!NT_STATUS_IS_OK(status)) {
3213 close(fd);
3214 goto fail;
3217 *presult = result;
3218 return NT_STATUS_OK;
3220 fail:
3221 TALLOC_FREE(result);
3222 return status;
3226 * Determine the tcp port on which a dcerpc interface is listening
3227 * for the ncacn_ip_tcp transport via the endpoint mapper of the
3228 * target host.
3230 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3231 const struct ndr_syntax_id *abstract_syntax,
3232 uint16_t *pport)
3234 NTSTATUS status;
3235 struct rpc_pipe_client *epm_pipe = NULL;
3236 struct cli_pipe_auth_data *auth = NULL;
3237 struct dcerpc_binding *map_binding = NULL;
3238 struct dcerpc_binding *res_binding = NULL;
3239 struct epm_twr_t *map_tower = NULL;
3240 struct epm_twr_t *res_towers = NULL;
3241 struct policy_handle *entry_handle = NULL;
3242 uint32_t num_towers = 0;
3243 uint32_t max_towers = 1;
3244 struct epm_twr_p_t towers;
3245 TALLOC_CTX *tmp_ctx = talloc_stackframe();
3247 if (pport == NULL) {
3248 status = NT_STATUS_INVALID_PARAMETER;
3249 goto done;
3252 /* open the connection to the endpoint mapper */
3253 status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3254 &ndr_table_epmapper.syntax_id,
3255 &epm_pipe);
3257 if (!NT_STATUS_IS_OK(status)) {
3258 goto done;
3261 status = rpccli_anon_bind_data(tmp_ctx, &auth);
3262 if (!NT_STATUS_IS_OK(status)) {
3263 goto done;
3266 status = rpc_pipe_bind(epm_pipe, auth);
3267 if (!NT_STATUS_IS_OK(status)) {
3268 goto done;
3271 /* create tower for asking the epmapper */
3273 map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3274 if (map_binding == NULL) {
3275 status = NT_STATUS_NO_MEMORY;
3276 goto done;
3279 map_binding->transport = NCACN_IP_TCP;
3280 map_binding->object = *abstract_syntax;
3281 map_binding->host = host; /* needed? */
3282 map_binding->endpoint = "0"; /* correct? needed? */
3284 map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3285 if (map_tower == NULL) {
3286 status = NT_STATUS_NO_MEMORY;
3287 goto done;
3290 status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3291 &(map_tower->tower));
3292 if (!NT_STATUS_IS_OK(status)) {
3293 goto done;
3296 /* allocate further parameters for the epm_Map call */
3298 res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3299 if (res_towers == NULL) {
3300 status = NT_STATUS_NO_MEMORY;
3301 goto done;
3303 towers.twr = res_towers;
3305 entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3306 if (entry_handle == NULL) {
3307 status = NT_STATUS_NO_MEMORY;
3308 goto done;
3311 /* ask the endpoint mapper for the port */
3313 status = rpccli_epm_Map(epm_pipe,
3314 tmp_ctx,
3315 CONST_DISCARD(struct GUID *,
3316 &(abstract_syntax->uuid)),
3317 map_tower,
3318 entry_handle,
3319 max_towers,
3320 &num_towers,
3321 &towers);
3323 if (!NT_STATUS_IS_OK(status)) {
3324 goto done;
3327 if (num_towers != 1) {
3328 status = NT_STATUS_UNSUCCESSFUL;
3329 goto done;
3332 /* extract the port from the answer */
3334 status = dcerpc_binding_from_tower(tmp_ctx,
3335 &(towers.twr->tower),
3336 &res_binding);
3337 if (!NT_STATUS_IS_OK(status)) {
3338 goto done;
3341 /* are further checks here necessary? */
3342 if (res_binding->transport != NCACN_IP_TCP) {
3343 status = NT_STATUS_UNSUCCESSFUL;
3344 goto done;
3347 *pport = (uint16_t)atoi(res_binding->endpoint);
3349 done:
3350 TALLOC_FREE(tmp_ctx);
3351 return status;
3355 * Create an rpc pipe client struct, connecting to a host via tcp.
3356 * The port is determined by asking the endpoint mapper on the given
3357 * host.
3359 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3360 const struct ndr_syntax_id *abstract_syntax,
3361 struct rpc_pipe_client **presult)
3363 NTSTATUS status;
3364 uint16_t port = 0;
3366 *presult = NULL;
3368 status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3369 if (!NT_STATUS_IS_OK(status)) {
3370 goto done;
3373 status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3374 abstract_syntax, presult);
3376 done:
3377 return status;
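/*
 * Usage sketch (illustration only, not compiled): open an ncacn_ip_tcp
 * connection for an interface and bind anonymously, mirroring what
 * rpc_pipe_get_tcp_port() does for the endpoint mapper above.
 * example_open_tcp_and_bind() is a placeholder name.
 */
#if 0
static NTSTATUS example_open_tcp_and_bind(TALLOC_CTX *mem_ctx,
					  const char *host,
					  const struct ndr_syntax_id *interface,
					  struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result = NULL;
	struct cli_pipe_auth_data *auth = NULL;
	NTSTATUS status;

	status = rpc_pipe_open_tcp(mem_ctx, host, interface, &result);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	status = rpccli_anon_bind_data(result, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(result);
		return status;
	}

	status = rpc_pipe_bind(result, auth);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(result);
		return status;
	}

	*presult = result;
	return NT_STATUS_OK;
}
#endif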
3380 /********************************************************************
3381 Create an rpc pipe client struct, connecting to a unix domain socket.
3382 ********************************************************************/
3383 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3384 const struct ndr_syntax_id *abstract_syntax,
3385 struct rpc_pipe_client **presult)
3387 struct rpc_pipe_client *result;
3388 struct sockaddr_un addr;
3389 NTSTATUS status;
3390 int fd;
3392 result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3393 if (result == NULL) {
3394 return NT_STATUS_NO_MEMORY;
3397 result->abstract_syntax = *abstract_syntax;
3398 result->transfer_syntax = ndr_transfer_syntax;
3399 result->dispatch = cli_do_rpc_ndr;
3401 result->desthost = talloc_get_myname(result);
3402 result->srv_name_slash = talloc_asprintf_strupper_m(
3403 result, "\\\\%s", result->desthost);
3404 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3405 status = NT_STATUS_NO_MEMORY;
3406 goto fail;
3409 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3410 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3412 fd = socket(AF_UNIX, SOCK_STREAM, 0);
3413 if (fd == -1) {
3414 status = map_nt_error_from_unix(errno);
3415 goto fail;
3418 ZERO_STRUCT(addr);
3419 addr.sun_family = AF_UNIX;
3420 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3422 if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3423 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3424 strerror(errno)));
3425 close(fd);
3426 return map_nt_error_from_unix(errno);
3429 status = rpc_transport_sock_init(result, fd, &result->transport);
3430 if (!NT_STATUS_IS_OK(status)) {
3431 close(fd);
3432 goto fail;
3435 *presult = result;
3436 return NT_STATUS_OK;
3438 fail:
3439 TALLOC_FREE(result);
3440 return status;
3443 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3445 struct cli_state *cli;
3447 cli = rpc_pipe_np_smb_conn(p);
3448 if (cli != NULL) {
3449 DLIST_REMOVE(cli->pipe_list, p);
3451 return 0;
3454 /****************************************************************************
3455 Open a named pipe over SMB to a remote server.
3457 * CAVEAT CALLER OF THIS FUNCTION:
3458 * The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3459 * so be sure that this function is called AFTER any structure (vs pointer)
3460 * assignment of the cli. In particular, libsmbclient does structure
3461 * assignments of cli, which invalidates the data in the returned
3462 * rpc_pipe_client if this function is called before the structure assignment
3463 * of cli.
3465 ****************************************************************************/
3467 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3468 const struct ndr_syntax_id *abstract_syntax,
3469 struct rpc_pipe_client **presult)
3471 struct rpc_pipe_client *result;
3472 NTSTATUS status;
3474 /* sanity check to protect against crashes */
3476 if ( !cli ) {
3477 return NT_STATUS_INVALID_HANDLE;
3480 result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3481 if (result == NULL) {
3482 return NT_STATUS_NO_MEMORY;
3485 result->abstract_syntax = *abstract_syntax;
3486 result->transfer_syntax = ndr_transfer_syntax;
3487 result->dispatch = cli_do_rpc_ndr;
3488 result->desthost = talloc_strdup(result, cli->desthost);
3489 result->srv_name_slash = talloc_asprintf_strupper_m(
3490 result, "\\\\%s", result->desthost);
3492 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3493 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3495 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3496 TALLOC_FREE(result);
3497 return NT_STATUS_NO_MEMORY;
3500 status = rpc_transport_np_init(result, cli, abstract_syntax,
3501 &result->transport);
3502 if (!NT_STATUS_IS_OK(status)) {
3503 TALLOC_FREE(result);
3504 return status;
3507 DLIST_ADD(cli->pipe_list, result);
3508 talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3510 *presult = result;
3511 return NT_STATUS_OK;
3514 /****************************************************************************
3515 Open a pipe to a remote server.
3516 ****************************************************************************/
3518 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3519 const struct ndr_syntax_id *interface,
3520 struct rpc_pipe_client **presult)
3522 if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3524 * We should have a better way to figure out this drsuapi
3525 * speciality...
3527 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3528 presult);
3531 return rpc_pipe_open_np(cli, interface, presult);
3534 /****************************************************************************
3535 Open a named pipe to an SMB server and bind anonymously.
3536 ****************************************************************************/
3538 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3539 const struct ndr_syntax_id *interface,
3540 struct rpc_pipe_client **presult)
3542 struct rpc_pipe_client *result;
3543 struct cli_pipe_auth_data *auth;
3544 NTSTATUS status;
3546 status = cli_rpc_pipe_open(cli, interface, &result);
3547 if (!NT_STATUS_IS_OK(status)) {
3548 return status;
3551 status = rpccli_anon_bind_data(result, &auth);
3552 if (!NT_STATUS_IS_OK(status)) {
3553 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3554 nt_errstr(status)));
3555 TALLOC_FREE(result);
3556 return status;
3560 * This is a bit of an abstraction violation due to the fact that an
3561 * anonymous bind on an authenticated SMB inherits the user/domain
3562 * from the enclosing SMB creds
3565 TALLOC_FREE(auth->user_name);
3566 TALLOC_FREE(auth->domain);
3568 auth->user_name = talloc_strdup(auth, cli->user_name);
3569 auth->domain = talloc_strdup(auth, cli->domain);
3570 auth->user_session_key = data_blob_talloc(auth,
3571 cli->user_session_key.data,
3572 cli->user_session_key.length);
3574 if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3575 TALLOC_FREE(result);
3576 return NT_STATUS_NO_MEMORY;
3579 status = rpc_pipe_bind(result, auth);
3580 if (!NT_STATUS_IS_OK(status)) {
3581 int lvl = 0;
3582 if (ndr_syntax_id_equal(interface,
3583 &ndr_table_dssetup.syntax_id)) {
3584 /* non AD domains just don't have this pipe, avoid
3585 * level 0 statement in that case - gd */
3586 lvl = 3;
3588 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3589 "%s failed with error %s\n",
3590 cli_get_pipe_name_from_iface(debug_ctx(),
3591 interface),
3592 nt_errstr(status) ));
3593 TALLOC_FREE(result);
3594 return status;
3597 DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3598 "%s and bound anonymously.\n",
3599 cli_get_pipe_name_from_iface(debug_ctx(), interface),
3600 cli->desthost ));
3602 *presult = result;
3603 return NT_STATUS_OK;
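/*
 * Usage sketch (illustration only, not compiled): open the netlogon pipe on
 * an existing SMB connection with an anonymous bind. ndr_table_netlogon is
 * used elsewhere in this file; any IDL-generated table works the same way.
 * example_open_netlogon_noauth() is a placeholder name.
 */
#if 0
static NTSTATUS example_open_netlogon_noauth(struct cli_state *cli)
{
	struct rpc_pipe_client *netlogon_pipe = NULL;
	NTSTATUS status;

	status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
					  &netlogon_pipe);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* ... issue calls via the generated rpccli_* stubs here ... */

	TALLOC_FREE(netlogon_pipe);
	return NT_STATUS_OK;
}
#endif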
3606 /****************************************************************************
3607 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3608 ****************************************************************************/
3610 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3611 const struct ndr_syntax_id *interface,
3612 enum pipe_auth_type auth_type,
3613 enum pipe_auth_level auth_level,
3614 const char *domain,
3615 const char *username,
3616 const char *password,
3617 struct rpc_pipe_client **presult)
3619 struct rpc_pipe_client *result;
3620 struct cli_pipe_auth_data *auth;
3621 NTSTATUS status;
3623 status = cli_rpc_pipe_open(cli, interface, &result);
3624 if (!NT_STATUS_IS_OK(status)) {
3625 return status;
3628 status = rpccli_ntlmssp_bind_data(
3629 result, auth_type, auth_level, domain, username,
3630 cli->pwd.null_pwd ? NULL : password, &auth);
3631 if (!NT_STATUS_IS_OK(status)) {
3632 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3633 nt_errstr(status)));
3634 goto err;
3637 status = rpc_pipe_bind(result, auth);
3638 if (!NT_STATUS_IS_OK(status)) {
3639 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3640 nt_errstr(status) ));
3641 goto err;
3644 DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3645 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3646 cli_get_pipe_name_from_iface(debug_ctx(), interface),
3647 cli->desthost, domain, username ));
3649 *presult = result;
3650 return NT_STATUS_OK;
3652 err:
3654 TALLOC_FREE(result);
3655 return status;
3658 /****************************************************************************
3659 External interface.
3660 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3661 ****************************************************************************/
3663 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3664 const struct ndr_syntax_id *interface,
3665 enum pipe_auth_level auth_level,
3666 const char *domain,
3667 const char *username,
3668 const char *password,
3669 struct rpc_pipe_client **presult)
3671 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3672 interface,
3673 PIPE_AUTH_TYPE_NTLMSSP,
3674 auth_level,
3675 domain,
3676 username,
3677 password,
3678 presult);
3681 /****************************************************************************
3682 External interface.
3683 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3684 ****************************************************************************/
3686 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3687 const struct ndr_syntax_id *interface,
3688 enum pipe_auth_level auth_level,
3689 const char *domain,
3690 const char *username,
3691 const char *password,
3692 struct rpc_pipe_client **presult)
3694 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3695 interface,
3696 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3697 auth_level,
3698 domain,
3699 username,
3700 password,
3701 presult);
3704 /****************************************************************************
3705 Get the schannel session key out of an already opened netlogon pipe.
3706 ****************************************************************************/
3707 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3708 struct cli_state *cli,
3709 const char *domain,
3710 uint32 *pneg_flags)
3712 uint32 sec_chan_type = 0;
3713 unsigned char machine_pwd[16];
3714 const char *machine_account;
3715 NTSTATUS status;
3717 /* Get the machine account credentials from secrets.tdb. */
3718 if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3719 &sec_chan_type))
3721 DEBUG(0, ("get_schannel_session_key: could not fetch "
3722 "trust account password for domain '%s'\n",
3723 domain));
3724 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3727 status = rpccli_netlogon_setup_creds(netlogon_pipe,
3728 cli->desthost, /* server name */
3729 domain, /* domain */
3730 global_myname(), /* client name */
3731 machine_account, /* machine account name */
3732 machine_pwd,
3733 sec_chan_type,
3734 pneg_flags);
3736 if (!NT_STATUS_IS_OK(status)) {
3737 DEBUG(3, ("get_schannel_session_key_common: "
3738 "rpccli_netlogon_setup_creds failed with result %s "
3739 "to server %s, domain %s, machine account %s.\n",
3740 nt_errstr(status), cli->desthost, domain,
3741 machine_account ));
3742 return status;
3745 if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3746 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3747 cli->desthost));
3748 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3751 return NT_STATUS_OK;
3754 /****************************************************************************
3755 Open a netlogon pipe and get the schannel session key.
3756 Now exposed to external callers.
3757 ****************************************************************************/
3760 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3761 const char *domain,
3762 uint32 *pneg_flags,
3763 struct rpc_pipe_client **presult)
3765 struct rpc_pipe_client *netlogon_pipe = NULL;
3766 NTSTATUS status;
3768 status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3769 &netlogon_pipe);
3770 if (!NT_STATUS_IS_OK(status)) {
3771 return status;
3774 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3775 pneg_flags);
3776 if (!NT_STATUS_IS_OK(status)) {
3777 TALLOC_FREE(netlogon_pipe);
3778 return status;
3781 *presult = netlogon_pipe;
3782 return NT_STATUS_OK;
3785 /****************************************************************************
3786 External interface.
3787 Open a named pipe to an SMB server and bind using schannel (bind type 68)
3788 using session_key. sign and seal.
3789 ****************************************************************************/
3791 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3792 const struct ndr_syntax_id *interface,
3793 enum pipe_auth_level auth_level,
3794 const char *domain,
3795 const struct dcinfo *pdc,
3796 struct rpc_pipe_client **presult)
3798 struct rpc_pipe_client *result;
3799 struct cli_pipe_auth_data *auth;
3800 NTSTATUS status;
3802 status = cli_rpc_pipe_open(cli, interface, &result);
3803 if (!NT_STATUS_IS_OK(status)) {
3804 return status;
3807 status = rpccli_schannel_bind_data(result, domain, auth_level,
3808 pdc->sess_key, &auth);
3809 if (!NT_STATUS_IS_OK(status)) {
3810 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3811 nt_errstr(status)));
3812 TALLOC_FREE(result);
3813 return status;
3816 status = rpc_pipe_bind(result, auth);
3817 if (!NT_STATUS_IS_OK(status)) {
3818 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3819 "cli_rpc_pipe_bind failed with error %s\n",
3820 nt_errstr(status) ));
3821 TALLOC_FREE(result);
3822 return status;
3826 * The credentials on a new netlogon pipe are the ones we are passed
3827 * in - copy them over.
3829 result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3830 if (result->dc == NULL) {
3831 DEBUG(0, ("talloc failed\n"));
3832 TALLOC_FREE(result);
3833 return NT_STATUS_NO_MEMORY;
3836 DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3837 "for domain %s and bound using schannel.\n",
3838 cli_get_pipe_name_from_iface(debug_ctx(), interface),
3839 cli->desthost, domain ));
3841 *presult = result;
3842 return NT_STATUS_OK;
3845 /****************************************************************************
3846 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3847 Fetch the session key ourselves using a temporary netlogon pipe. This
3848 version uses an ntlmssp auth bound netlogon pipe to get the key.
3849 ****************************************************************************/
3851 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3852 const char *domain,
3853 const char *username,
3854 const char *password,
3855 uint32 *pneg_flags,
3856 struct rpc_pipe_client **presult)
3858 struct rpc_pipe_client *netlogon_pipe = NULL;
3859 NTSTATUS status;
3861 status = cli_rpc_pipe_open_spnego_ntlmssp(
3862 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3863 domain, username, password, &netlogon_pipe);
3864 if (!NT_STATUS_IS_OK(status)) {
3865 return status;
3868 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3869 pneg_flags);
3870 if (!NT_STATUS_IS_OK(status)) {
3871 TALLOC_FREE(netlogon_pipe);
3872 return status;
3875 *presult = netlogon_pipe;
3876 return NT_STATUS_OK;
3879 /****************************************************************************
3880 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3881 Fetch the session key ourselves using a temporary netlogon pipe. This version
3882 uses an ntlmssp bind to get the session key.
3883 ****************************************************************************/
3885 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3886 const struct ndr_syntax_id *interface,
3887 enum pipe_auth_level auth_level,
3888 const char *domain,
3889 const char *username,
3890 const char *password,
3891 struct rpc_pipe_client **presult)
3892 {
3893 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3894 struct rpc_pipe_client *netlogon_pipe = NULL;
3895 struct rpc_pipe_client *result = NULL;
3896 NTSTATUS status;
3898 status = get_schannel_session_key_auth_ntlmssp(
3899 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3900 if (!NT_STATUS_IS_OK(status)) {
3901 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3902 "key from server %s for domain %s.\n",
3903 cli->desthost, domain ));
3904 return status;
3905 }
3907 status = cli_rpc_pipe_open_schannel_with_key(
3908 cli, interface, auth_level, domain, netlogon_pipe->dc,
3909 &result);
3911 /* Now that we've bound using the session key we can close the netlogon pipe. */
3912 TALLOC_FREE(netlogon_pipe);
3914 if (NT_STATUS_IS_OK(status)) {
3915 *presult = result;
3916 }
3917 return status;
3918 }
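/*
 * Illustrative usage sketch: identical in shape to cli_rpc_pipe_open_schannel
 * below, except that the temporary netlogon pipe used to fetch the session
 * key is itself bound with SPNEGO/NTLMSSP at privacy level. domain, username
 * and password are placeholders supplied by the caller.
 *
 *	struct rpc_pipe_client *samr = NULL;
 *	NTSTATUS status;
 *
 *	status = cli_rpc_pipe_open_ntlmssp_auth_schannel(
 *		cli, &ndr_table_samr.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
 *		domain, username, password, &samr);
 */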
3920 /****************************************************************************
3921 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3922 Fetch the session key ourselves using a temporary netlogon pipe.
3923 ****************************************************************************/
3925 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
3926 const struct ndr_syntax_id *interface,
3927 enum pipe_auth_level auth_level,
3928 const char *domain,
3929 struct rpc_pipe_client **presult)
3930 {
3931 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3932 struct rpc_pipe_client *netlogon_pipe = NULL;
3933 struct rpc_pipe_client *result = NULL;
3934 NTSTATUS status;
3936 status = get_schannel_session_key(cli, domain, &neg_flags,
3937 &netlogon_pipe);
3938 if (!NT_STATUS_IS_OK(status)) {
3939 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
3940 "key from server %s for domain %s.\n",
3941 cli->desthost, domain ));
3942 return status;
3943 }
3945 status = cli_rpc_pipe_open_schannel_with_key(
3946 cli, interface, auth_level, domain, netlogon_pipe->dc,
3947 &result);
3949 /* Now that we've bound using the session key we can close the netlogon pipe. */
3950 TALLOC_FREE(netlogon_pipe);
3952 if (NT_STATUS_IS_OK(status)) {
3953 *presult = result;
3954 }
3956 return status;
3957 }
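/*
 * Illustrative usage sketch: the common domain-member path, where an
 * anonymous netlogon pipe is enough to run the credential chain. The target
 * domain is assumed here to come from lp_workgroup(); any domain name the
 * caller trusts would do.
 *
 *	struct rpc_pipe_client *netlogon = NULL;
 *	NTSTATUS status;
 *
 *	status = cli_rpc_pipe_open_schannel(
 *		cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
 *		lp_workgroup(), &netlogon);
 */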
3959 /****************************************************************************
3960 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
3961 The idea is this can be called with service_princ, username and password all
3962 NULL so long as the caller has a TGT.
3963 ****************************************************************************/
3965 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
3966 const struct ndr_syntax_id *interface,
3967 enum pipe_auth_level auth_level,
3968 const char *service_princ,
3969 const char *username,
3970 const char *password,
3971 struct rpc_pipe_client **presult)
3972 {
3973 #ifdef HAVE_KRB5
3974 struct rpc_pipe_client *result;
3975 struct cli_pipe_auth_data *auth;
3976 NTSTATUS status;
3978 status = cli_rpc_pipe_open(cli, interface, &result);
3979 if (!NT_STATUS_IS_OK(status)) {
3980 return status;
3981 }
3983 status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
3984 username, password, &auth);
3985 if (!NT_STATUS_IS_OK(status)) {
3986 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
3987 nt_errstr(status)));
3988 TALLOC_FREE(result);
3989 return status;
3990 }
3992 status = rpc_pipe_bind(result, auth);
3993 if (!NT_STATUS_IS_OK(status)) {
3994 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
3995 "with error %s\n", nt_errstr(status)));
3996 TALLOC_FREE(result);
3997 return status;
3998 }
4000 *presult = result;
4001 return NT_STATUS_OK;
4002 #else
4003 DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4004 return NT_STATUS_NOT_IMPLEMENTED;
4005 #endif
4006 }
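/*
 * Illustrative usage sketch: as the comment above says, all three credential
 * arguments may be NULL when the caller already holds a valid TGT; the
 * kerberos layer is then left to pick the target principal (presumably from
 * the connected server name).
 *
 *	struct rpc_pipe_client *lsa = NULL;
 *	NTSTATUS status;
 *
 *	status = cli_rpc_pipe_open_krb5(
 *		cli, &ndr_table_lsarpc.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
 *		NULL, NULL, NULL, &lsa);
 */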
4008 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4009 struct rpc_pipe_client *cli,
4010 DATA_BLOB *session_key)
4011 {
4012 if (!session_key || !cli) {
4013 return NT_STATUS_INVALID_PARAMETER;
4014 }
4016 if (!cli->auth) {
4017 return NT_STATUS_INVALID_PARAMETER;
4018 }
4020 switch (cli->auth->auth_type) {
4021 case PIPE_AUTH_TYPE_SCHANNEL:
4022 *session_key = data_blob_talloc(mem_ctx,
4023 cli->auth->a_u.schannel_auth->sess_key, 16);
4024 break;
4025 case PIPE_AUTH_TYPE_NTLMSSP:
4026 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4027 *session_key = data_blob_talloc(mem_ctx,
4028 cli->auth->a_u.ntlmssp_state->session_key.data,
4029 cli->auth->a_u.ntlmssp_state->session_key.length);
4030 break;
4031 case PIPE_AUTH_TYPE_KRB5:
4032 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4033 *session_key = data_blob_talloc(mem_ctx,
4034 cli->auth->a_u.kerberos_auth->session_key.data,
4035 cli->auth->a_u.kerberos_auth->session_key.length);
4036 break;
4037 case PIPE_AUTH_TYPE_NONE:
4038 *session_key = data_blob_talloc(mem_ctx,
4039 cli->auth->user_session_key.data,
4040 cli->auth->user_session_key.length);
4041 break;
4042 default:
4043 return NT_STATUS_NO_USER_SESSION_KEY;
4044 }
4046 return NT_STATUS_OK;
4047 }
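/*
 * Illustrative usage sketch: callers that must encrypt material with the
 * pipe's transport session key (SAMR password buffers, for example) only
 * need the blob; mem_ctx and rpc_cli are placeholders for the caller's
 * talloc context and an already-bound rpc_pipe_client.
 *
 *	DATA_BLOB session_key;
 *	NTSTATUS status;
 *
 *	status = cli_get_session_key(mem_ctx, rpc_cli, &session_key);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *	... encrypt using session_key.data / session_key.length ...
 */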
4049 /**
4050 * Create a new RPC client context which uses a local dispatch function.
4051 */
4052 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax,
4053 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4054 struct auth_serversupplied_info *serversupplied_info,
4055 struct rpc_pipe_client **presult)
4056 {
4057 struct rpc_pipe_client *result;
4059 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4060 if (result == NULL) {
4061 return NT_STATUS_NO_MEMORY;
4062 }
4064 result->abstract_syntax = *abstract_syntax;
4065 result->transfer_syntax = ndr_transfer_syntax;
4066 result->dispatch = dispatch;
4068 result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4069 if (result->pipes_struct == NULL) {
4070 TALLOC_FREE(result);
4071 return NT_STATUS_NO_MEMORY;
4072 }
4073 result->pipes_struct->mem_ctx = mem_ctx;
4074 result->pipes_struct->server_info = serversupplied_info;
4075 result->pipes_struct->pipe_bound = true;
4077 result->max_xmit_frag = -1;
4078 result->max_recv_frag = -1;
4080 *presult = result;
4081 return NT_STATUS_OK;
4082 }
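/*
 * Illustrative usage sketch: an in-process caller wires a generated
 * server-side dispatch routine straight into the client context, so requests
 * never touch a socket. rpc_winreg_dispatch stands in for whatever pidl
 * generated dispatcher and server_info the caller actually has available.
 *
 *	struct rpc_pipe_client *winreg = NULL;
 *	NTSTATUS status;
 *
 *	status = rpc_pipe_open_internal(mem_ctx, &ndr_table_winreg.syntax_id,
 *					rpc_winreg_dispatch, server_info,
 *					&winreg);
 */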