2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
28 #define DBGC_CLASS DBGC_RPC_SRV
/* Count of currently-open internal pipes; reported in the DEBUG output of
 * make_internal_rpc_pipe_p() below. NOTE(review): the increment/decrement
 * sites are not visible in this (corrupted) extraction — confirm in the
 * full source. */
30 static int pipes_open
;
/* Head of the doubly-linked list of all internal pipes_structs; maintained
 * via DLIST_ADD/DLIST_REMOVE in make_internal_rpc_pipe_p() and
 * close_internal_rpc_pipe_hnd(). */
32 static pipes_struct
*InternalPipes
;
35 * the following prototypes are declared here to avoid
36 * code being moved about too much for a patch to be
37 * disrupted / less obvious.
39 * these functions, and associated functions that they
40 * call, should be moved behind a .so module-loading
41 * system _anyway_. so that's the next step...
/* Forward declaration: talloc destructor for a pipes_struct, installed by
 * make_internal_rpc_pipe_p() via talloc_set_destructor(). Defined near the
 * bottom of this file. */
44 static int close_internal_rpc_pipe_hnd(struct pipes_struct
*p
);
46 /****************************************************************************
47 Internal Pipe iterator functions.
48 ****************************************************************************/
/* Iterator over the InternalPipes list: return the list head.
 * NOTE(review): function bodies are missing from this extraction (original
 * line numbers jump from 50 to 55) — presumably these just walk
 * InternalPipes / p->next; verify against the full source. */
50 pipes_struct
*get_first_internal_pipe(void)
/* Iterator step: return the pipe following 'p' in the InternalPipes list. */
55 pipes_struct
*get_next_internal_pipe(pipes_struct
*p
)
60 /****************************************************************************
61 Initialise an outgoing packet.
62 ****************************************************************************/
/*
 * Reset p->out_data so a fresh reply can be marshalled: zero the sent-byte
 * counters, free the current fragment and return-data buffers, and
 * re-initialise the rdata prs_struct for MARSHALL-ing.
 * Returns false on allocation failure (the success path's "return true" and
 * the closing brace were dropped by this extraction — original lines 83-88
 * are missing; confirm in the full source).
 */
64 static bool pipe_init_outgoing_data(pipes_struct
*p
)
66 output_data
*o_data
= &p
->out_data
;
68 /* Reset the offset counters. */
69 o_data
->data_sent_length
= 0;
70 o_data
->current_pdu_sent
= 0;
/* Free the partially-sent outgoing PDU fragment, if any. */
72 prs_mem_free(&o_data
->frag
);
74 /* Free any memory in the current return data buffer. */
75 prs_mem_free(&o_data
->rdata
);
78 * Initialize the outgoing RPC data buffer.
79 * we will use this as the raw data area for replying to rpc requests.
/* 128 bytes is just the initial size; the prs buffer auto-grows. */
81 if(!prs_init(&o_data
->rdata
, 128, p
->mem_ctx
, MARSHALL
)) {
82 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
89 /****************************************************************************
90 Make an internal namedpipes structure
91 ****************************************************************************/
/*
 * Allocate and initialise an in-process (internal) named-pipe state object
 * for the RPC interface identified by 'syntax'.
 *
 *  mem_ctx        - talloc parent for the new pipes_struct
 *  syntax         - NDR syntax id naming the RPC interface
 *  client_address - copied into p->client_address
 *  server_info    - authenticated user info, duplicated onto the pipe
 *
 * On success the pipe is linked into InternalPipes and given
 * close_internal_rpc_pipe_hnd as its talloc destructor; returns the new
 * pipe (the explicit "return p;" / error "return NULL;" lines were dropped
 * by this extraction — confirm in the full source).
 */
93 static struct pipes_struct
*make_internal_rpc_pipe_p(TALLOC_CTX
*mem_ctx
,
94 const struct ndr_syntax_id
*syntax
,
95 const char *client_address
,
96 struct auth_serversupplied_info
*server_info
)
100 DEBUG(4,("Create pipe requested %s\n",
101 get_pipe_name_from_syntax(talloc_tos(), syntax
)));
103 p
= TALLOC_ZERO_P(mem_ctx
, struct pipes_struct
);
106 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
/* Per-call scratch pool; torn down and re-created by free_pipe_context(). */
110 p
->mem_ctx
= talloc_named(p
, 0, "pipe %s %p",
111 get_pipe_name_from_syntax(talloc_tos(),
113 if (p
->mem_ctx
== NULL
) {
114 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
/* Set up the policy-handle list for this interface. */
119 if (!init_pipe_handle_list(p
, syntax
)) {
120 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
126 * Initialize the incoming RPC data buffer with one PDU worth of memory.
127 * We cheat here and say we're marshalling, as we intend to add incoming
128 * data directly into the prs_struct and we want it to auto grow. We will
129 * change the type to UNMARSALLING before processing the stream.
132 if(!prs_init(&p
->in_data
.data
, 128, p
->mem_ctx
, MARSHALL
)) {
133 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
134 close_policy_by_pipe(p
);
139 p
->server_info
= copy_serverinfo(p
, server_info
);
140 if (p
->server_info
== NULL
) {
141 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
142 close_policy_by_pipe(p
);
/* Visible to the get_*_internal_pipe() iterators from here on. */
147 DLIST_ADD(InternalPipes
, p
);
149 memcpy(p
->client_address
, client_address
, sizeof(p
->client_address
));
/* Default until the first PDU tells us otherwise (see
 * unmarshall_rpc_header). */
151 p
->endian
= RPC_LITTLE_ENDIAN
;
154 * Initialize the outgoing RPC data buffer with no memory.
156 prs_init_empty(&p
->out_data
.rdata
, p
->mem_ctx
, MARSHALL
);
160 DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
161 get_pipe_name_from_syntax(talloc_tos(), syntax
), pipes_open
));
/* Ensure cleanup (buffer frees, list unlink) runs when p is freed. */
163 talloc_set_destructor(p
, close_internal_rpc_pipe_hnd
);
168 /****************************************************************************
169 Sets the fault state on incoming packets.
170 ****************************************************************************/
/*
 * Put the pipe into the fault state: discard all partially-received input
 * (the accumulated stream and the PDU byte counters) and set
 * p->fault_state. Subsequent complete PDUs are answered with a fault PDU
 * by process_complete_pdu().
 */
172 static void set_incoming_fault(pipes_struct
*p
)
174 prs_mem_free(&p
->in_data
.data
);
175 p
->in_data
.pdu_needed_len
= 0;
176 p
->in_data
.pdu_received_len
= 0;
177 p
->fault_state
= True
;
178 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
179 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
)));
182 /****************************************************************************
183 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
184 ****************************************************************************/
/*
 * Copy up to RPC_HEADER_LEN bytes from 'data' into p->in_data.current_in_pdu
 * until a full DCE/RPC header has been accumulated.
 * Returns the number of bytes consumed from 'data' (possibly fewer than
 * data_to_copy); the caller loops until the header is complete. The error
 * return on the talloc failure path was dropped by this extraction —
 * presumably -1; confirm in the full source.
 */
186 static ssize_t
fill_rpc_header(pipes_struct
*p
, char *data
, size_t data_to_copy
)
188 size_t len_needed_to_complete_hdr
= MIN(data_to_copy
, RPC_HEADER_LEN
- p
->in_data
.pdu_received_len
);
190 DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
191 (unsigned int)data_to_copy
, (unsigned int)len_needed_to_complete_hdr
,
192 (unsigned int)p
->in_data
.pdu_received_len
));
/* Lazily allocate the PDU receive buffer on first use (the allocation
 * size argument was dropped by this extraction). */
194 if (p
->in_data
.current_in_pdu
== NULL
) {
195 p
->in_data
.current_in_pdu
= talloc_array(p
, uint8_t,
198 if (p
->in_data
.current_in_pdu
== NULL
) {
199 DEBUG(0, ("talloc failed\n"));
/* Append at the current receive offset. */
203 memcpy((char *)&p
->in_data
.current_in_pdu
[p
->in_data
.pdu_received_len
], data
, len_needed_to_complete_hdr
);
204 p
->in_data
.pdu_received_len
+= len_needed_to_complete_hdr
;
206 return (ssize_t
)len_needed_to_complete_hdr
;
209 /****************************************************************************
210 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
211 ****************************************************************************/
/*
 * Parse the RPC_HEADER_LEN bytes sitting in current_in_pdu into p->hdr,
 * validate it (version 5.0, sane frag_len, FIRST-flag/endianness
 * consistency with any data already accumulated), then set
 * p->in_data.pdu_needed_len to the remaining fragment size and grow
 * current_in_pdu to hold the whole fragment.
 * Returns 0 on success (no extra bytes consumed); the error paths fault
 * the pipe (their "return -1;" lines were dropped by this extraction —
 * confirm in the full source).
 */
213 static ssize_t
unmarshall_rpc_header(pipes_struct
*p
)
216 * Unmarshall the header to determine the needed length.
/* Caller must hand us exactly one header's worth of bytes. */
221 if(p
->in_data
.pdu_received_len
!= RPC_HEADER_LEN
) {
222 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
223 set_incoming_fault(p
);
/* Wrap the raw header bytes in a non-owning UNMARSHALL prs_struct. */
227 prs_init_empty( &rpc_in
, p
->mem_ctx
, UNMARSHALL
);
228 prs_set_endian_data( &rpc_in
, p
->endian
);
230 prs_give_memory( &rpc_in
, (char *)&p
->in_data
.current_in_pdu
[0],
231 p
->in_data
.pdu_received_len
, False
);
234 * Unmarshall the header as this will tell us how much
235 * data we need to read to get the complete pdu.
236 * This also sets the endian flag in rpc_in.
239 if(!smb_io_rpc_hdr("", &p
->hdr
, &rpc_in
, 0)) {
240 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
241 set_incoming_fault(p
);
242 prs_mem_free(&rpc_in
);
247 * Validate the RPC header.
/* NOTE(review): '&&' here means only major!=5 AND minor!=0 together are
 * rejected — looks like it should be '||', but it is preserved verbatim
 * (this matches the historical Samba source). */
250 if(p
->hdr
.major
!= 5 && p
->hdr
.minor
!= 0) {
251 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
252 set_incoming_fault(p
);
253 prs_mem_free(&rpc_in
);
258 * If there's not data in the incoming buffer this should be the start of a new RPC.
261 if(prs_offset(&p
->in_data
.data
) == 0) {
264 * AS/U doesn't set FIRST flag in a BIND packet it seems.
267 if ((p
->hdr
.pkt_type
== DCERPC_PKT_REQUEST
) && !(p
->hdr
.flags
& DCERPC_PFC_FLAG_FIRST
)) {
269 * Ensure that the FIRST flag is set. If not then we have
270 * a stream missmatch.
273 DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
274 set_incoming_fault(p
);
275 prs_mem_free(&rpc_in
);
280 * If this is the first PDU then set the endianness
281 * flag in the pipe. We will need this when parsing all
/* Latch the client's endianness for the rest of the association. */
285 p
->endian
= rpc_in
.bigendian_data
;
287 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
288 p
->endian
== RPC_LITTLE_ENDIAN
? "little-" : "big-" ));
293 * If this is *NOT* the first PDU then check the endianness
294 * flag in the pipe is the same as that in the PDU.
297 if (p
->endian
!= rpc_in
.bigendian_data
) {
298 DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p
->endian
));
299 set_incoming_fault(p
);
300 prs_mem_free(&rpc_in
);
306 * Ensure that the pdu length is sane.
309 if((p
->hdr
.frag_len
< RPC_HEADER_LEN
) || (p
->hdr
.frag_len
> RPC_MAX_PDU_FRAG_LEN
)) {
310 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
311 set_incoming_fault(p
);
312 prs_mem_free(&rpc_in
);
316 DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p
->hdr
.pkt_type
,
317 (unsigned int)p
->hdr
.flags
));
/* Bytes still to arrive before this fragment is complete. */
319 p
->in_data
.pdu_needed_len
= (uint32
)p
->hdr
.frag_len
- RPC_HEADER_LEN
;
321 prs_mem_free(&rpc_in
);
/* Grow the receive buffer to hold the full fragment. */
323 p
->in_data
.current_in_pdu
= TALLOC_REALLOC_ARRAY(
324 p
, p
->in_data
.current_in_pdu
, uint8_t, p
->hdr
.frag_len
);
325 if (p
->in_data
.current_in_pdu
== NULL
) {
326 DEBUG(0, ("talloc failed\n"));
327 set_incoming_fault(p
);
331 return 0; /* No extra data processed. */
334 /****************************************************************************
335 Call this to free any talloc'ed memory. Do this before and after processing
337 ****************************************************************************/
/*
 * Free all per-call scratch allocations by destroying the children of the
 * pipe's talloc pool, then re-create the named pool for the next call.
 * Called before and after processing a complete RPC request. On allocation
 * failure the pipe is put into the fault state.
 */
339 static void free_pipe_context(pipes_struct
*p
)
342 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
343 "%lu\n", (unsigned long)talloc_total_size(p
->mem_ctx
) ));
344 talloc_free_children(p
->mem_ctx
);
/* Recreate the pool with the same name format used in
 * make_internal_rpc_pipe_p (trailing arguments dropped by this
 * extraction). */
346 p
->mem_ctx
= talloc_named(p
, 0, "pipe %s %p",
347 get_pipe_name_from_syntax(talloc_tos(),
349 if (p
->mem_ctx
== NULL
) {
350 p
->fault_state
= True
;
355 /****************************************************************************
356 Processes a request pdu. This will do auth processing if needed, and
357 appends the data into the complete stream if the LAST flag is not set.
358 ****************************************************************************/
/*
 * Handle one DCERPC_PKT_REQUEST fragment held in *rpc_in_p:
 *  - reject if no bind has happened yet,
 *  - unmarshall the request sub-header,
 *  - run the per-auth-type verify/unseal step (NTLMSSP, schannel, or none),
 *  - strip sign/seal padding,
 *  - append the payload to p->in_data.data,
 *  - and if this fragment carries DCERPC_PFC_FLAG_LAST, dispatch the
 *    assembled stream via api_pipe_request().
 * Returns true on success / false after faulting the pipe (most explicit
 * "return" lines were dropped by this extraction — confirm in the full
 * source).
 */
360 static bool process_request_pdu(pipes_struct
*p
, prs_struct
*rpc_in_p
)
362 uint32 ss_padding_len
= 0;
/* Payload size = fragment minus headers minus any auth trailer. */
363 size_t data_len
= p
->hdr
.frag_len
- RPC_HEADER_LEN
- RPC_HDR_REQ_LEN
-
364 (p
->hdr
.auth_len
? RPC_HDR_AUTH_LEN
: 0) - p
->hdr
.auth_len
;
367 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
368 set_incoming_fault(p
);
373 * Check if we need to do authentication processing.
374 * This is only done on requests, not binds.
378 * Read the RPC request header.
381 if(!smb_io_rpc_hdr_req("req", &p
->hdr_req
, rpc_in_p
, 0)) {
382 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
383 set_incoming_fault(p
);
387 switch(p
->auth
.auth_type
) {
388 case PIPE_AUTH_TYPE_NONE
:
391 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP
:
392 case PIPE_AUTH_TYPE_NTLMSSP
:
/* Verify signature / unseal in place; yields padding length. */
395 if(!api_pipe_ntlmssp_auth_process(p
, rpc_in_p
, &ss_padding_len
, &status
)) {
396 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
397 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status
) ));
398 set_incoming_fault(p
);
404 case PIPE_AUTH_TYPE_SCHANNEL
:
405 if (!api_pipe_schannel_process(p
, rpc_in_p
, &ss_padding_len
)) {
406 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
407 set_incoming_fault(p
);
413 DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p
->auth
.auth_type
));
414 set_incoming_fault(p
);
418 /* Now we've done the sign/seal we can remove any padding data. */
419 if (data_len
> ss_padding_len
) {
420 data_len
-= ss_padding_len
;
424 * Check the data length doesn't go over the 15Mb limit.
425 * increased after observing a bug in the Windows NT 4.0 SP6a
426 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
427 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
430 if(prs_offset(&p
->in_data
.data
) + data_len
> MAX_RPC_DATA_SIZE
) {
431 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
432 (unsigned int)prs_data_size(&p
->in_data
.data
), (unsigned int)data_len
));
433 set_incoming_fault(p
);
438 * Append the data portion into the buffer and return.
441 if(!prs_append_some_prs_data(&p
->in_data
.data
, rpc_in_p
, prs_offset(rpc_in_p
), data_len
)) {
442 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
443 (unsigned int)data_len
, (unsigned int)prs_data_size(&p
->in_data
.data
) ));
444 set_incoming_fault(p
);
448 if(p
->hdr
.flags
& DCERPC_PFC_FLAG_LAST
) {
451 * Ok - we finally have a complete RPC stream.
452 * Call the rpc command to process it.
456 * Ensure the internal prs buffer size is *exactly* the same
457 * size as the current offset.
460 if(!prs_set_buffer_size(&p
->in_data
.data
, prs_offset(&p
->in_data
.data
))) {
461 DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
462 set_incoming_fault(p
);
467 * Set the parse offset to the start of the data and set the
468 * prs_struct to UNMARSHALL.
471 prs_set_offset(&p
->in_data
.data
, 0);
472 prs_switch_type(&p
->in_data
.data
, UNMARSHALL
);
475 * Process the complete data stream here.
/* Reset the per-call talloc pool both before and after dispatch. */
478 free_pipe_context(p
);
480 if(pipe_init_outgoing_data(p
)) {
481 ret
= api_pipe_request(p
);
484 free_pipe_context(p
);
487 * We have consumed the whole data stream. Set back to
488 * marshalling and set the offset back to the start of
489 * the buffer to re-use it (we could also do a prs_mem_free()
490 * and then re_init on the next start of PDU. Not sure which
491 * is best here.... JRA.
494 prs_switch_type(&p
->in_data
.data
, MARSHALL
);
495 prs_set_offset(&p
->in_data
.data
, 0);
502 /****************************************************************************
503 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
504 already been parsed and stored in p->hdr.
505 ****************************************************************************/
/*
 * Dispatch a fully-received PDU (header already parsed into p->hdr, raw
 * fragment in current_in_pdu). Wraps the payload in a non-owning
 * UNMARSHALL prs_struct and switches on the packet type: REQUEST / BIND /
 * ALTER / AUTH3 are processed; connectionless and server->client types are
 * rejected; CO_CANCEL and ORPHANED are logged and ignored. On any failed
 * dispatch the pipe is faulted and a fault PDU is queued. Finally the
 * receive counters are reset ready for the next PDU.
 */
507 static void process_complete_pdu(pipes_struct
*p
)
510 size_t data_len
= p
->in_data
.pdu_received_len
- RPC_HEADER_LEN
;
511 char *data_p
= (char *)&p
->in_data
.current_in_pdu
[RPC_HEADER_LEN
];
/* Already faulted: answer every further PDU with a fault. */
515 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
516 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
)));
517 set_incoming_fault(p
);
518 setup_fault_pdu(p
, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR
));
522 prs_init_empty( &rpc_in
, p
->mem_ctx
, UNMARSHALL
);
525 * Ensure we're using the corrent endianness for both the
526 * RPC header flags and the raw data we will be reading from.
529 prs_set_endian_data( &rpc_in
, p
->endian
);
530 prs_set_endian_data( &p
->in_data
.data
, p
->endian
);
/* Non-owning view over the payload bytes (False = don't free). */
532 prs_give_memory( &rpc_in
, data_p
, (uint32
)data_len
, False
);
534 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
535 (unsigned int)p
->hdr
.pkt_type
));
537 switch (p
->hdr
.pkt_type
) {
538 case DCERPC_PKT_REQUEST
:
539 reply
= process_request_pdu(p
, &rpc_in
);
542 case DCERPC_PKT_PING
: /* CL request - ignore... */
543 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
544 (unsigned int)p
->hdr
.pkt_type
,
545 get_pipe_name_from_syntax(talloc_tos(),
549 case DCERPC_PKT_RESPONSE
: /* No responses here. */
550 DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_RESPONSE received from client on pipe %s.\n",
551 get_pipe_name_from_syntax(talloc_tos(),
555 case DCERPC_PKT_FAULT
:
556 case DCERPC_PKT_WORKING
: /* CL request - reply to a ping when a call in process. */
557 case DCERPC_PKT_NOCALL
: /* CL - server reply to a ping call. */
558 case DCERPC_PKT_REJECT
:
560 case DCERPC_PKT_CL_CANCEL
:
561 case DCERPC_PKT_FACK
:
562 case DCERPC_PKT_CANCEL_ACK
:
563 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
564 (unsigned int)p
->hdr
.pkt_type
,
565 get_pipe_name_from_syntax(talloc_tos(),
569 case DCERPC_PKT_BIND
:
571 * We assume that a pipe bind is only in one pdu.
573 if(pipe_init_outgoing_data(p
)) {
574 reply
= api_pipe_bind_req(p
, &rpc_in
);
578 case DCERPC_PKT_BIND_ACK
:
579 case DCERPC_PKT_BIND_NAK
:
580 DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK packet type %u received on pipe %s.\n",
581 (unsigned int)p
->hdr
.pkt_type
,
582 get_pipe_name_from_syntax(talloc_tos(),
587 case DCERPC_PKT_ALTER
:
589 * We assume that a pipe bind is only in one pdu.
591 if(pipe_init_outgoing_data(p
)) {
592 reply
= api_pipe_alter_context(p
, &rpc_in
);
596 case DCERPC_PKT_ALTER_RESP
:
597 DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_ALTER_RESP on pipe %s: Should only be server -> client.\n",
598 get_pipe_name_from_syntax(talloc_tos(),
602 case DCERPC_PKT_AUTH3
:
604 * The third packet in an NTLMSSP auth exchange.
606 if(pipe_init_outgoing_data(p
)) {
607 reply
= api_pipe_bind_auth3(p
, &rpc_in
);
611 case DCERPC_PKT_SHUTDOWN
:
612 DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_SHUTDOWN on pipe %s: Should only be server -> client.\n",
613 get_pipe_name_from_syntax(talloc_tos(),
617 case DCERPC_PKT_CO_CANCEL
:
618 /* For now just free all client data and continue processing. */
619 DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL. Abandoning rpc call.\n"));
620 /* As we never do asynchronous RPC serving, we can never cancel a
621 call (as far as I know). If we ever did we'd have to send a cancel_ack
622 reply. For now, just free all client data and continue processing. */
626 /* Enable this if we're doing async rpc. */
627 /* We must check the call-id matches the outstanding callid. */
628 if(pipe_init_outgoing_data(p
)) {
629 /* Send a cancel_ack PDU reply. */
630 /* We should probably check the auth-verifier here. */
631 reply
= setup_cancel_ack_reply(p
, &rpc_in
);
636 case DCERPC_PKT_ORPHANED
:
637 /* We should probably check the auth-verifier here.
638 For now just free all client data and continue processing. */
639 DEBUG(3,("process_complete_pdu: DCERPC_PKT_ORPHANED. Abandoning rpc call.\n"));
644 DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p
->hdr
.pkt_type
));
648 /* Reset to little endian. Probably don't need this but it won't hurt. */
649 prs_set_endian_data( &p
->in_data
.data
, RPC_LITTLE_ENDIAN
);
652 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
653 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
/* Dispatch failed: fault the pipe and queue a fault reply. */
655 set_incoming_fault(p
);
656 setup_fault_pdu(p
, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR
));
657 prs_mem_free(&rpc_in
);
660 * Reset the lengths. We're ready for a new pdu.
662 TALLOC_FREE(p
->in_data
.current_in_pdu
);
663 p
->in_data
.pdu_needed_len
= 0;
664 p
->in_data
.pdu_received_len
= 0;
667 prs_mem_free(&rpc_in
);
670 /****************************************************************************
671 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
672 ****************************************************************************/
/*
 * Consume up to 'n' bytes of incoming pipe data, accumulating them into the
 * current PDU: first fill the RPC header (fill_rpc_header), then parse it
 * (unmarshall_rpc_header), then copy payload bytes until pdu_needed_len
 * reaches zero, at which point process_complete_pdu() is invoked.
 * Returns the number of bytes consumed this call, or faults the pipe on
 * overflow (the "-1" error returns were dropped by this extraction —
 * confirm in the full source). The caller (write_to_internal_pipe) loops
 * until all input is consumed.
 */
674 static ssize_t
process_incoming_data(pipes_struct
*p
, char *data
, size_t n
)
676 size_t data_to_copy
= MIN(n
, RPC_MAX_PDU_FRAG_LEN
- p
->in_data
.pdu_received_len
);
678 DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
679 (unsigned int)p
->in_data
.pdu_received_len
, (unsigned int)p
->in_data
.pdu_needed_len
,
682 if(data_to_copy
== 0) {
684 * This is an error - data is being received and there is no
685 * space in the PDU. Free the received data and go into the fault state.
687 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
688 incoming data size = %u\n", (unsigned int)p
->in_data
.pdu_received_len
, (unsigned int)n
));
689 set_incoming_fault(p
);
694 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
695 * number of bytes before we can do anything.
698 if((p
->in_data
.pdu_needed_len
== 0) && (p
->in_data
.pdu_received_len
< RPC_HEADER_LEN
)) {
700 * Always return here. If we have more data then the RPC_HEADER
701 * will be processed the next time around the loop.
703 return fill_rpc_header(p
, data
, data_to_copy
);
707 * At this point we know we have at least an RPC_HEADER_LEN amount of data
708 * stored in current_in_pdu.
712 * If pdu_needed_len is zero this is a new pdu.
713 * Unmarshall the header so we know how much more
714 * data we need, then loop again.
717 if(p
->in_data
.pdu_needed_len
== 0) {
718 ssize_t rret
= unmarshall_rpc_header(p
);
719 if (rret
== -1 || p
->in_data
.pdu_needed_len
> 0) {
722 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
723 of an RPC_HEADER only. This is a DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or DCERPC_PKT_ORPHANED
724 pdu type. Deal with this in process_complete_pdu(). */
728 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
729 * Keep reading until we have a full pdu.
732 data_to_copy
= MIN(data_to_copy
, p
->in_data
.pdu_needed_len
);
735 * Copy as much of the data as we need into the current_in_pdu buffer.
736 * pdu_needed_len becomes zero when we have a complete pdu.
739 memcpy( (char *)&p
->in_data
.current_in_pdu
[p
->in_data
.pdu_received_len
], data
, data_to_copy
);
740 p
->in_data
.pdu_received_len
+= data_to_copy
;
741 p
->in_data
.pdu_needed_len
-= data_to_copy
;
744 * Do we have a complete PDU ?
745 * (return the number of bytes handled in the call)
748 if(p
->in_data
.pdu_needed_len
== 0) {
749 process_complete_pdu(p
);
753 DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
754 (unsigned int)p
->in_data
.pdu_received_len
, (unsigned int)p
->in_data
.pdu_needed_len
));
756 return (ssize_t
)data_to_copy
;
759 /****************************************************************************
760 Accepts incoming data on an internal rpc pipe.
761 ****************************************************************************/
/*
 * Entry point for writing 'n' bytes of client data to an internal pipe.
 * Loops calling process_incoming_data() until all bytes are consumed
 * (loop construct and final return were dropped by this extraction —
 * presumably returns n on success / -1 on error; confirm in the full
 * source).
 */
763 static ssize_t
write_to_internal_pipe(struct pipes_struct
*p
, char *data
, size_t n
)
765 size_t data_left
= n
;
770 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left
));
772 data_used
= process_incoming_data(p
, data
, data_left
);
774 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used
));
780 data_left
-= data_used
;
787 /****************************************************************************
788 Replies to a request to read data from a pipe.
790 Headers are interspersed with the data at PDU intervals. By the time
791 this function is called, the start of the data could possibly have been
792 read by an SMBtrans (file_offset != 0).
794 Calling create_rpc_reply() here is a hack. The data should already
795 have been prepared into arrays of headers + data stream sections.
796 ****************************************************************************/
/*
 * Read up to 'n' reply bytes from an internal pipe into 'data'.
 * Two-stage protocol: first drain any partially-sent PDU fragment in
 * p->out_data.frag; once that is exhausted, build the next PDU from
 * p->out_data.rdata via create_next_pdu() and start returning it.
 * *is_data_outstanding is set when more fragment data remains than fits in
 * this read. Returns the number of bytes copied (several early returns and
 * the memcpy destination arguments were dropped by this extraction —
 * confirm in the full source).
 */
798 static ssize_t
read_from_internal_pipe(struct pipes_struct
*p
, char *data
, size_t n
,
799 bool *is_data_outstanding
)
801 uint32 pdu_remaining
= 0;
802 ssize_t data_returned
= 0;
805 DEBUG(0,("read_from_pipe: pipe not open\n"));
809 DEBUG(6,(" name: %s len: %u\n",
810 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
814 * We cannot return more than one PDU length per
819 * This condition should result in the connection being closed.
820 * Netapp filers seem to set it to 0xffff which results in domain
821 * authentications failing. Just ignore it so things work.
/* Clamp oversized reads instead of erroring (Netapp workaround above). */
824 if(n
> RPC_MAX_PDU_FRAG_LEN
) {
825 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
826 "pipe %s. We can only service %d sized reads.\n",
828 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
829 RPC_MAX_PDU_FRAG_LEN
));
830 n
= RPC_MAX_PDU_FRAG_LEN
;
834 * Determine if there is still data to send in the
835 * pipe PDU buffer. Always send this first. Never
836 * send more than is left in the current PDU. The
837 * client should send a new read request for a new
841 pdu_remaining
= prs_offset(&p
->out_data
.frag
)
842 - p
->out_data
.current_pdu_sent
;
844 if (pdu_remaining
> 0) {
845 data_returned
= (ssize_t
)MIN(n
, pdu_remaining
);
847 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
848 "current_pdu_sent = %u returning %d bytes.\n",
849 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
850 (unsigned int)prs_offset(&p
->out_data
.frag
),
851 (unsigned int)p
->out_data
.current_pdu_sent
,
852 (int)data_returned
));
/* Copy from the fragment at the resume offset (the memcpy call's
 * leading arguments were dropped by this extraction). */
855 prs_data_p(&p
->out_data
.frag
)
856 + p
->out_data
.current_pdu_sent
,
859 p
->out_data
.current_pdu_sent
+= (uint32
)data_returned
;
864 * At this point p->current_pdu_len == p->current_pdu_sent (which
865 * may of course be zero if this is the first return fragment.
868 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
869 "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
870 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
872 (unsigned int)p
->out_data
.data_sent_length
,
873 (unsigned int)prs_offset(&p
->out_data
.rdata
) ));
875 if(p
->out_data
.data_sent_length
>= prs_offset(&p
->out_data
.rdata
)) {
877 * We have sent all possible data, return 0.
884 * We need to create a new PDU from the data left in p->rdata.
885 * Create the header/data/footers. This also sets up the fields
886 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
887 * and stores the outgoing PDU in p->current_pdu.
890 if(!create_next_pdu(p
)) {
891 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
892 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
)));
896 data_returned
= MIN(n
, prs_offset(&p
->out_data
.frag
));
898 memcpy( data
, prs_data_p(&p
->out_data
.frag
), (size_t)data_returned
);
899 p
->out_data
.current_pdu_sent
+= (uint32
)data_returned
;
902 (*is_data_outstanding
) = prs_offset(&p
->out_data
.frag
) > n
;
904 if (p
->out_data
.current_pdu_sent
== prs_offset(&p
->out_data
.frag
)) {
905 /* We've returned everything in the out_data.frag
906 * so we're done with this pdu. Free it and reset
907 * current_pdu_sent. */
908 p
->out_data
.current_pdu_sent
= 0;
909 prs_mem_free(&p
->out_data
.frag
);
911 return data_returned
;
914 /****************************************************************************
916 ****************************************************************************/
/*
 * Talloc destructor for a pipes_struct (installed by
 * make_internal_rpc_pipe_p): free all prs buffers, invoke the auth
 * backend's data-free hook, tear down RPC contexts and the policy-handle
 * database, and unlink the pipe from InternalPipes. Return value lines
 * were dropped by this extraction — talloc destructors return 0 on
 * success; confirm in the full source.
 */
918 static int close_internal_rpc_pipe_hnd(struct pipes_struct
*p
)
921 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
925 prs_mem_free(&p
->out_data
.frag
);
926 prs_mem_free(&p
->out_data
.rdata
);
927 prs_mem_free(&p
->in_data
.data
);
/* Give the auth mechanism a chance to free its private state. */
929 if (p
->auth
.auth_data_free_func
) {
930 (*p
->auth
.auth_data_free_func
)(&p
->auth
);
933 free_pipe_rpc_context( p
->contexts
);
935 /* Free the handles database. */
936 close_policy_by_pipe(p
);
938 DLIST_REMOVE(InternalPipes
, p
);
/*
 * Return true if 'fsp' is a named pipe (either an internal pipe or a
 * proxied external pipe), judged by its fake_file_handle type. NULL fsp or
 * missing handle yields the early-out path (its "return false;" was
 * dropped by this extraction).
 */
945 bool fsp_is_np(struct files_struct
*fsp
)
947 enum FAKE_FILE_TYPE type
;
949 if ((fsp
== NULL
) || (fsp
->fake_file_handle
== NULL
)) {
953 type
= fsp
->fake_file_handle
->type
;
955 return ((type
== FAKE_FILE_TYPE_NAMED_PIPE
)
956 || (type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
));
/*
 * State for a pipe proxied to an external RPC server over a local npa
 * tstream: negotiated device state / allocation size from the connect
 * handshake, the stream itself, and per-direction tevent queues that
 * serialise reads and writes. (The struct's closing brace was dropped by
 * this extraction.)
 */
959 struct np_proxy_state
{
961 uint16_t device_state
;
962 uint64_t allocation_size
;
963 struct tstream_context
*npipe
;
964 struct tevent_queue
*read_queue
;
965 struct tevent_queue
*write_queue
;
/*
 * Create a proxy connection to an external RPC pipe server listening on a
 * unix socket under <socket_dir>/np. Synchronously (via a private tevent
 * context polled to completion) runs the tstream_npa connect handshake,
 * passing the client/server tsocket addresses and the authenticated user's
 * SamInfo3 + session key. Returns the new np_proxy_state, or NULL on any
 * failure (cleanup path at the 'fail' label; several argument lines and
 * returns were dropped by this extraction — confirm in the full source).
 */
968 static struct np_proxy_state
*make_external_rpc_pipe_p(TALLOC_CTX
*mem_ctx
,
969 const char *pipe_name
,
970 const struct tsocket_address
*local_address
,
971 const struct tsocket_address
*remote_address
,
972 struct auth_serversupplied_info
*server_info
)
974 struct np_proxy_state
*result
;
976 const char *socket_dir
;
977 struct tevent_context
*ev
;
978 struct tevent_req
*subreq
;
979 struct netr_SamInfo3
*info3
;
985 result
= talloc(mem_ctx
, struct np_proxy_state
);
986 if (result
== NULL
) {
987 DEBUG(0, ("talloc failed\n"));
/* Per-direction queues serialise concurrent reads/writes on the stream. */
991 result
->read_queue
= tevent_queue_create(result
, "np_read");
992 if (result
->read_queue
== NULL
) {
993 DEBUG(0, ("tevent_queue_create failed\n"));
997 result
->write_queue
= tevent_queue_create(result
, "np_write");
998 if (result
->write_queue
== NULL
) {
999 DEBUG(0, ("tevent_queue_create failed\n"));
/* Short-lived event context used only to drive the connect to
 * completion synchronously. */
1003 ev
= s3_tevent_context_init(talloc_tos());
1005 DEBUG(0, ("s3_tevent_context_init failed\n"));
1009 socket_dir
= lp_parm_const_string(
1010 GLOBAL_SECTION_SNUM
, "external_rpc_pipe", "socket_dir",
1011 get_dyn_NCALRPCDIR());
1012 if (socket_dir
== NULL
) {
1013 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
1016 socket_np_dir
= talloc_asprintf(talloc_tos(), "%s/np", socket_dir
);
1017 if (socket_np_dir
== NULL
) {
1018 DEBUG(0, ("talloc_asprintf failed\n"));
1022 info3
= talloc_zero(talloc_tos(), struct netr_SamInfo3
);
1023 if (info3
== NULL
) {
1024 DEBUG(0, ("talloc failed\n"));
/* Convert the authenticated user info into wire SamInfo3 form for the
 * npa handshake. */
1028 status
= serverinfo_to_SamInfo3(server_info
, NULL
, 0, info3
);
1029 if (!NT_STATUS_IS_OK(status
)) {
1031 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1032 nt_errstr(status
)));
1037 subreq
= tstream_npa_connect_send(talloc_tos(), ev
,
1040 remote_address
, /* client_addr */
1041 NULL
, /* client_name */
1042 local_address
, /* server_addr */
1043 NULL
, /* server_name */
1045 server_info
->user_session_key
,
1046 data_blob_null
/* delegated_creds */);
1047 if (subreq
== NULL
) {
1049 DEBUG(0, ("tstream_npa_connect_send failed\n"));
/* Block until the async connect completes. */
1052 ok
= tevent_req_poll(subreq
, ev
);
1055 DEBUG(0, ("tevent_req_poll failed for tstream_npa_connect: %s\n",
1060 ret
= tstream_npa_connect_recv(subreq
, &sys_errno
,
1064 &result
->device_state
,
1065 &result
->allocation_size
);
1066 TALLOC_FREE(subreq
);
1068 DEBUG(0, ("tstream_npa_connect_recv failed: %s\n",
1069 strerror(sys_errno
)));
1076 TALLOC_FREE(result
);
/*
 * Open a named pipe by name, producing a fake_file_handle in *phandle.
 * If the pipe name is listed in the "np:proxy" smb.conf parameter it is
 * proxied to an external RPC server (make_external_rpc_pipe_p); otherwise,
 * for known pipe names, an internal in-process pipe is created
 * (make_internal_rpc_pipe_p). Returns NT_STATUS_OK on success,
 * OBJECT_NAME_NOT_FOUND for unknown pipes, NO_MEMORY, or
 * PIPE_NOT_AVAILABLE if pipe creation failed. (Some argument lines, e.g.
 * the trailing arguments of the make_*_pipe calls and the *phandle
 * assignment, were dropped by this extraction.)
 */
1080 NTSTATUS
np_open(TALLOC_CTX
*mem_ctx
, const char *name
,
1081 const struct tsocket_address
*local_address
,
1082 const struct tsocket_address
*remote_address
,
1083 struct auth_serversupplied_info
*server_info
,
1084 struct fake_file_handle
**phandle
)
1086 const char **proxy_list
;
1087 struct fake_file_handle
*handle
;
1089 proxy_list
= lp_parm_string_list(-1, "np", "proxy", NULL
);
1091 handle
= talloc(mem_ctx
, struct fake_file_handle
);
1092 if (handle
== NULL
) {
1093 return NT_STATUS_NO_MEMORY
;
/* Proxy path: this pipe is served by an external RPC daemon. */
1096 if ((proxy_list
!= NULL
) && str_list_check_ci(proxy_list
, name
)) {
1097 struct np_proxy_state
*p
;
1099 p
= make_external_rpc_pipe_p(handle
, name
,
1104 handle
->type
= FAKE_FILE_TYPE_NAMED_PIPE_PROXY
;
1105 handle
->private_data
= p
;
/* Internal path: served in-process by this smbd. */
1107 struct pipes_struct
*p
;
1108 struct ndr_syntax_id syntax
;
1109 const char *client_address
;
1111 if (!is_known_pipename(name
, &syntax
)) {
1112 TALLOC_FREE(handle
);
1113 return NT_STATUS_OBJECT_NAME_NOT_FOUND
;
/* Only IP transports yield a printable client address; others get "". */
1116 if (tsocket_address_is_inet(remote_address
, "ip")) {
1117 client_address
= tsocket_address_inet_addr_string(
1120 if (client_address
== NULL
) {
1121 TALLOC_FREE(handle
);
1122 return NT_STATUS_NO_MEMORY
;
1125 client_address
= "";
1128 p
= make_internal_rpc_pipe_p(handle
, &syntax
, client_address
,
1131 handle
->type
= FAKE_FILE_TYPE_NAMED_PIPE
;
1132 handle
->private_data
= p
;
/* Either branch stores NULL here on failure. */
1135 if (handle
->private_data
== NULL
) {
1136 TALLOC_FREE(handle
);
1137 return NT_STATUS_PIPE_NOT_AVAILABLE
;
1142 return NT_STATUS_OK
;
/*
 * Report whether a read is currently pending on this pipe handle.
 * Internal pipes are synchronous, so the NAMED_PIPE branch presumably
 * returns false; proxy pipes check the length of the read tevent queue
 * (the return statements were dropped by this extraction — confirm in the
 * full source).
 */
1145 bool np_read_in_progress(struct fake_file_handle
*handle
)
1147 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE
) {
1151 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
) {
1152 struct np_proxy_state
*p
= talloc_get_type_abort(
1153 handle
->private_data
, struct np_proxy_state
);
1156 read_count
= tevent_queue_length(p
->read_queue
);
1157 if (read_count
> 0) {
/*
 * Per-request state for the async np_write_send/np_write_recv pair.
 * Further members (an iovec and the nwritten result, referenced by
 * np_write_send below) were dropped by this extraction along with the
 * closing brace.
 */
1167 struct np_write_state
{
1168 struct event_context
*ev
;
1169 struct np_proxy_state
*p
;
/* Completion callback for the proxy (tstream) write path. */
1174 static void np_write_done(struct tevent_req
*subreq
);
/*
 * Async write of 'len' bytes to a pipe handle (tevent_req pattern).
 * Internal pipes are written synchronously via write_to_internal_pipe and
 * the request completes immediately; proxy pipes queue a
 * tstream_writev_queue_send and complete in np_write_done. Unknown handle
 * types finish with NT_STATUS_INVALID_HANDLE. Returns the tevent_req
 * (posted if already finished), or NULL on out-of-memory (that early
 * return was dropped by this extraction).
 */
1176 struct tevent_req
*np_write_send(TALLOC_CTX
*mem_ctx
, struct event_context
*ev
,
1177 struct fake_file_handle
*handle
,
1178 const uint8_t *data
, size_t len
)
1180 struct tevent_req
*req
;
1181 struct np_write_state
*state
;
1184 DEBUG(6, ("np_write_send: len: %d\n", (int)len
));
1185 dump_data(50, data
, len
);
1187 req
= tevent_req_create(mem_ctx
, &state
, struct np_write_state
);
1193 state
->nwritten
= 0;
1194 status
= NT_STATUS_OK
;
/* Synchronous path: internal pipe, completes before return. */
1198 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE
) {
1199 struct pipes_struct
*p
= talloc_get_type_abort(
1200 handle
->private_data
, struct pipes_struct
);
1202 state
->nwritten
= write_to_internal_pipe(p
, (char *)data
, len
);
1204 status
= (state
->nwritten
>= 0)
1205 ? NT_STATUS_OK
: NT_STATUS_UNEXPECTED_IO_ERROR
;
/* Async path: proxy pipe, writev queued on the write_queue. */
1209 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
) {
1210 struct np_proxy_state
*p
= talloc_get_type_abort(
1211 handle
->private_data
, struct np_proxy_state
);
1212 struct tevent_req
*subreq
;
1216 state
->iov
.iov_base
= CONST_DISCARD(void *, data
);
1217 state
->iov
.iov_len
= len
;
1219 subreq
= tstream_writev_queue_send(state
, ev
,
1223 if (subreq
== NULL
) {
1226 tevent_req_set_callback(subreq
, np_write_done
, req
);
1230 status
= NT_STATUS_INVALID_HANDLE
;
1232 if (NT_STATUS_IS_OK(status
)) {
1233 tevent_req_done(req
);
1235 tevent_req_nterror(req
, status
);
1237 return tevent_req_post(req
, ev
);
1243 static void np_write_done(struct tevent_req
*subreq
)
1245 struct tevent_req
*req
= tevent_req_callback_data(
1246 subreq
, struct tevent_req
);
1247 struct np_write_state
*state
= tevent_req_data(
1248 req
, struct np_write_state
);
1252 received
= tstream_writev_queue_recv(subreq
, &err
);
1254 tevent_req_nterror(req
, map_nt_error_from_unix(err
));
1257 state
->nwritten
= received
;
1258 tevent_req_done(req
);
1261 NTSTATUS
np_write_recv(struct tevent_req
*req
, ssize_t
*pnwritten
)
1263 struct np_write_state
*state
= tevent_req_data(
1264 req
, struct np_write_state
);
1267 if (tevent_req_is_nterror(req
, &status
)) {
1270 *pnwritten
= state
->nwritten
;
1271 return NT_STATUS_OK
;
1274 struct np_ipc_readv_next_vector_state
{
1281 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state
*s
,
1282 uint8_t *buf
, size_t len
)
1287 s
->len
= MIN(len
, UINT16_MAX
);
1290 static int np_ipc_readv_next_vector(struct tstream_context
*stream
,
1292 TALLOC_CTX
*mem_ctx
,
1293 struct iovec
**_vector
,
1296 struct np_ipc_readv_next_vector_state
*state
=
1297 (struct np_ipc_readv_next_vector_state
*)private_data
;
1298 struct iovec
*vector
;
1302 if (state
->ofs
== state
->len
) {
1308 pending
= tstream_pending_bytes(stream
);
1309 if (pending
== -1) {
1313 if (pending
== 0 && state
->ofs
!= 0) {
1314 /* return a short read */
1321 /* we want at least one byte and recheck again */
1324 size_t missing
= state
->len
- state
->ofs
;
1325 if (pending
> missing
) {
1326 /* there's more available */
1327 state
->remaining
= pending
- missing
;
1330 /* read what we can get and recheck in the next cycle */
1335 vector
= talloc_array(mem_ctx
, struct iovec
, 1);
1340 vector
[0].iov_base
= state
->buf
+ state
->ofs
;
1341 vector
[0].iov_len
= wanted
;
1343 state
->ofs
+= wanted
;
1350 struct np_read_state
{
1351 struct np_proxy_state
*p
;
1352 struct np_ipc_readv_next_vector_state next_vector
;
1355 bool is_data_outstanding
;
1358 static void np_read_done(struct tevent_req
*subreq
);
1360 struct tevent_req
*np_read_send(TALLOC_CTX
*mem_ctx
, struct event_context
*ev
,
1361 struct fake_file_handle
*handle
,
1362 uint8_t *data
, size_t len
)
1364 struct tevent_req
*req
;
1365 struct np_read_state
*state
;
1368 req
= tevent_req_create(mem_ctx
, &state
, struct np_read_state
);
1373 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE
) {
1374 struct pipes_struct
*p
= talloc_get_type_abort(
1375 handle
->private_data
, struct pipes_struct
);
1377 state
->nread
= read_from_internal_pipe(
1378 p
, (char *)data
, len
, &state
->is_data_outstanding
);
1380 status
= (state
->nread
>= 0)
1381 ? NT_STATUS_OK
: NT_STATUS_UNEXPECTED_IO_ERROR
;
1385 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
) {
1386 struct np_proxy_state
*p
= talloc_get_type_abort(
1387 handle
->private_data
, struct np_proxy_state
);
1388 struct tevent_req
*subreq
;
1390 np_ipc_readv_next_vector_init(&state
->next_vector
,
1393 subreq
= tstream_readv_pdu_queue_send(state
,
1397 np_ipc_readv_next_vector
,
1398 &state
->next_vector
);
1399 if (subreq
== NULL
) {
1402 tevent_req_set_callback(subreq
, np_read_done
, req
);
1406 status
= NT_STATUS_INVALID_HANDLE
;
1408 if (NT_STATUS_IS_OK(status
)) {
1409 tevent_req_done(req
);
1411 tevent_req_nterror(req
, status
);
1413 return tevent_req_post(req
, ev
);
1416 static void np_read_done(struct tevent_req
*subreq
)
1418 struct tevent_req
*req
= tevent_req_callback_data(
1419 subreq
, struct tevent_req
);
1420 struct np_read_state
*state
= tevent_req_data(
1421 req
, struct np_read_state
);
1425 ret
= tstream_readv_pdu_queue_recv(subreq
, &err
);
1426 TALLOC_FREE(subreq
);
1428 tevent_req_nterror(req
, map_nt_error_from_unix(err
));
1433 state
->is_data_outstanding
= (state
->next_vector
.remaining
> 0);
1435 tevent_req_done(req
);
1439 NTSTATUS
np_read_recv(struct tevent_req
*req
, ssize_t
*nread
,
1440 bool *is_data_outstanding
)
1442 struct np_read_state
*state
= tevent_req_data(
1443 req
, struct np_read_state
);
1446 if (tevent_req_is_nterror(req
, &status
)) {
1449 *nread
= state
->nread
;
1450 *is_data_outstanding
= state
->is_data_outstanding
;
1451 return NT_STATUS_OK
;
/**
 * @brief Create a new RPC client context which uses a local dispatch function.
 *
 * @param[in]  mem_ctx            The memory context to use.
 *
 * @param[in]  abstract_syntax    Normally the syntax_id of the autogenerated
 *                                ndr_table_<name>.
 *
 * @param[in]  dispatch           The corresponding autogenerated dispatch
 *                                function rpc_<name>_dispatch.
 *
 * @param[in]  serversupplied_info The server supplied authentication function.
 *
 * @param[out] presult            A pointer to store the connected rpc client
 *                                pipe.
 *
 * @return                        NT_STATUS_OK on success, a corresponding NT
 *                                status if an error occurred.
 *
 * @code
 *   struct rpc_pipe_client *winreg_pipe;
 *   NTSTATUS status;
 *
 *   status = rpc_pipe_open_internal(tmp_ctx,
 *                                   &ndr_table_winreg.syntax_id,
 *                                   rpc_winreg_dispatch,
 *                                   p->server_info,
 *                                   &winreg_pipe);
 * @endcode
 */
1483 NTSTATUS
rpc_pipe_open_internal(TALLOC_CTX
*mem_ctx
,
1484 const struct ndr_syntax_id
*abstract_syntax
,
1485 NTSTATUS (*dispatch
) (struct rpc_pipe_client
*cli
,
1486 TALLOC_CTX
*mem_ctx
,
1487 const struct ndr_interface_table
*table
,
1488 uint32_t opnum
, void *r
),
1489 struct auth_serversupplied_info
*serversupplied_info
,
1490 struct rpc_pipe_client
**presult
)
1492 struct rpc_pipe_client
*result
;
1494 result
= TALLOC_ZERO_P(mem_ctx
, struct rpc_pipe_client
);
1495 if (result
== NULL
) {
1496 return NT_STATUS_NO_MEMORY
;
1499 result
->abstract_syntax
= *abstract_syntax
;
1500 result
->transfer_syntax
= ndr_transfer_syntax
;
1501 result
->dispatch
= dispatch
;
1503 result
->pipes_struct
= make_internal_rpc_pipe_p(
1504 result
, abstract_syntax
, "", serversupplied_info
);
1505 if (result
->pipes_struct
== NULL
) {
1506 TALLOC_FREE(result
);
1507 return NT_STATUS_NO_MEMORY
;
1510 result
->max_xmit_frag
= -1;
1511 result
->max_recv_frag
= -1;
1514 return NT_STATUS_OK
;
/**
 * @brief Create a new RPC client context which uses a local dispatch function
 *        for the spoolss pipe.
 *
 * @param[in]  conn         The connection struct that will hold the pipe.
 *
 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
 *
 * @return                  NT_STATUS_OK on success, a corresponding NT status
 *                          if an error occurred.
 */
1527 NTSTATUS
rpc_connect_spoolss_pipe(connection_struct
*conn
,
1528 struct rpc_pipe_client
**spoolss_pipe
)
1532 /* TODO: check and handle disconnections */
1534 if (!conn
->spoolss_pipe
) {
1535 status
= rpc_pipe_open_internal(conn
,
1536 &ndr_table_spoolss
.syntax_id
,
1537 rpc_spoolss_dispatch
,
1539 &conn
->spoolss_pipe
);
1540 if (!NT_STATUS_IS_OK(status
)) {
1545 *spoolss_pipe
= conn
->spoolss_pipe
;
1546 return NT_STATUS_OK
;