make read/write to internal pipes available externally
[Samba.git] / source / rpc_server / srv_pipe_hnd.c
/*
 * Unix SMB/CIFS implementation.
 * RPC Pipe client / server routines
 * Copyright (C) Andrew Tridgell 1992-1998,
 * Largely re-written : 2005
 * Copyright (C) Jeremy Allison 1998 - 2005
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

#define PIPE		"\\PIPE\\"
#define PIPELEN		strlen(PIPE)

static smb_np_struct *chain_p;
static int pipes_open;

/*
 * Sometimes I can't decide if I hate Windows printer driver
 * writers more than I hate the Windows spooler service driver
 * writers. This gets around a combination of bugs in the spooler
 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
 *
 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
 * 2002 running on NT 4.x SP6
 * bumped up from 64 -> 256 after viewing traffic from con2prt
 * for lots of printers on a WinNT 4.x SP6 box.
 */

#ifndef MAX_OPEN_SPOOLSS_PIPES
#define MAX_OPEN_SPOOLSS_PIPES 256
#endif
static int current_spoolss_pipes_open;

static smb_np_struct *Pipes;
static pipes_struct *InternalPipes;
static struct bitmap *bmap;
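
/*
 * Descriptive note on the static state above: Pipes is the list of SMB-level
 * named pipe handles (smb_np_struct), InternalPipes is the list of
 * rpc_server-level state (pipes_struct) behind the internal implementation,
 * and bmap hands out the pnum slots, which are then offset by
 * pipe_handle_offset so pipe handles never collide with real file handles.
 */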

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_. so that's the next step...
 */

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);

/****************************************************************************
 Internal Pipe iterator functions.
****************************************************************************/

pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}

pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
	return p->next;
}
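
/*
 * Illustrative use of the iterators above (a sketch, not code from the Samba
 * tree; the DEBUG level is arbitrary):
 *
 *	pipes_struct *q;
 *	for (q = get_first_internal_pipe(); q; q = get_next_internal_pipe(q)) {
 *		DEBUG(10, ("internal pipe: %s\n", q->name));
 *	}
 */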

/* this must be larger than the sum of the open files and directories */
static int pipe_handle_offset;

/****************************************************************************
 Set the pipe_handle_offset. Called from smbd/files.c
****************************************************************************/

void set_pipe_handle_offset(int max_open_files)
{
	if(max_open_files < 0x7000) {
		pipe_handle_offset = 0x7000;
	} else {
		pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
	}
}

/****************************************************************************
 Reset pipe chain handle number.
****************************************************************************/

void reset_chain_p(void)
{
	chain_p = NULL;
}

/****************************************************************************
 Initialise pipe handle states.
****************************************************************************/

void init_rpc_pipe_hnd(void)
{
	bmap = bitmap_allocate(MAX_OPEN_PIPES);
	if (!bmap) {
		exit_server("out of memory in init_rpc_pipe_hnd");
	}
}

/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
	output_data *o_data = &p->out_data;

	/* Reset the offset counters. */
	o_data->data_sent_length = 0;
	o_data->current_pdu_len = 0;
	o_data->current_pdu_sent = 0;

	memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));

	/* Free any memory in the current return data buffer. */
	prs_mem_free(&o_data->rdata);

	/*
	 * Initialize the outgoing RPC data buffer.
	 * we will use this as the raw data area for replying to rpc requests.
	 */
	if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
		return False;
	}

	return True;
}
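
/*
 * Note on the two outgoing buffers reset above: out_data.rdata is the
 * growable prs_struct that receives the full marshalled response, while
 * out_data.current_pdu is the fixed-size staging area from which
 * read_from_internal_pipe() hands back at most one fragment per read.
 */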

/****************************************************************************
 Find first available pipe slot.
****************************************************************************/

smb_np_struct *open_rpc_pipe_p(const char *pipe_name,
			       connection_struct *conn, uint16 vuid)
{
	int i;
	smb_np_struct *p, *p_it;
	static int next_pipe;
	bool is_spoolss_pipe = False;

	DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	if (strstr(pipe_name, "spoolss")) {
		is_spoolss_pipe = True;
	}

	if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
		DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
			  pipe_name ));
		return NULL;
	}

	/* not repeating pipe numbers makes it easier to track things in
	   log files and prevents client bugs where pipe numbers are reused
	   over connection restarts */

	if (next_pipe == 0) {
		next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
	}

	i = bitmap_find(bmap, next_pipe);

	if (i == -1) {
		DEBUG(0,("ERROR! Out of pipe structures\n"));
		return NULL;
	}

	next_pipe = (i+1) % MAX_OPEN_PIPES;

	for (p = Pipes; p; p = p->next) {
		DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));
	}

	p = talloc(NULL, smb_np_struct);
	if (!p) {
		DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
		return NULL;
	}

	ZERO_STRUCTP(p);

	p->name = talloc_strdup(p, pipe_name);
	if (p->name == NULL) {
		TALLOC_FREE(p);
		DEBUG(0,("ERROR! no memory for pipe name!\n"));
		return NULL;
	}

	/* add a dso mechanism instead of this, here */

	p->namedpipe_create = make_internal_rpc_pipe_p;
	p->namedpipe_read = read_from_internal_pipe;
	p->namedpipe_write = write_to_internal_pipe;

	p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
					  conn->server_info, vuid);

	if (p->np_state == NULL) {
		DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(Pipes, p);

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSHALLING before processing the stream.
	 */

	bitmap_set(bmap, i);
	i += pipe_handle_offset;

	pipes_open++;

	p->pnum = i;

	p->open = True;
	p->device_state = 0;
	p->priority = 0;
	p->conn = conn;
	p->vuid = vuid;

	p->max_trans_reply = 0;

	DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
		 pipe_name, i, pipes_open));

	chain_p = p;

	/* Iterate over p_it as a temp variable, to display all open pipes */
	for (p_it = Pipes; p_it; p_it = p_it->next) {
		DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));
	}

	return chain_p;
}
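
/*
 * Illustrative caller sketch (assumed usage, not code from the Samba tree):
 * the SMB layer only needs the returned smb_np_struct and its read/write
 * entry points, while code that already holds the pipes_struct can call the
 * now-exported write_to_internal_pipe()/read_from_internal_pipe() directly.
 *
 *	smb_np_struct *np = open_rpc_pipe_p("lsarpc", conn, vuid);
 *	if (np != NULL) {
 *		bool pending = False;
 *		np->namedpipe_write(np->np_state, inbuf, in_len);
 *		np->namedpipe_read(np->np_state, outbuf, out_len, &pending);
 *	}
 *
 * (inbuf/in_len/outbuf/out_len are placeholders.)
 */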

/****************************************************************************
 Make an internal namedpipes structure
****************************************************************************/

struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
					      const char *client_address,
					      struct auth_serversupplied_info *server_info,
					      uint16_t vuid)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	p = TALLOC_ZERO_P(NULL, pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("make_internal_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("make_internal_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSHALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("make_internal_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("make_internal_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	memcpy(p->client_address, client_address, sizeof(p->client_address));

	p->endian = RPC_LITTLE_ENDIAN;

	ZERO_STRUCT(p->pipe_user);

	p->pipe_user.vuid = vuid;
	p->pipe_user.ut.uid = (uid_t)-1;
	p->pipe_user.ut.gid = (gid_t)-1;
	p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

	fstrcpy(p->name, pipe_name);

	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}
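
/*
 * Lifetime note: close_internal_rpc_pipe_hnd() is installed as the talloc
 * destructor above, so TALLOC_FREE(p->np_state) in close_rpc_pipe_hnd() is
 * all that is needed to tear the internal pipe state down; callers never
 * invoke the close routine directly.
 */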

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
	prs_mem_free(&p->in_data.data);
	p->in_data.pdu_needed_len = 0;
	p->in_data.pdu_received_len = 0;
	p->fault_state = True;
	DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
		   p->name));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
		  (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
		  (unsigned int)p->in_data.pdu_received_len ));

	memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
	p->in_data.pdu_received_len += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	prs_struct rpc_in;

	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
			 p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header.
	 */

	if(p->hdr.major != 5 && p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's no data in the incoming buffer this should be the start of a new RPC.
	 */

	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream mismatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
			 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane.
	 */

	if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
		  (unsigned int)p->hdr.flags ));

	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	return 0; /* No extra data processed. */
}
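
/*
 * For reference: the 16-byte DCE/RPC connection-oriented common header that
 * smb_io_rpc_hdr() parses above carries rpc_vers (5), rpc_vers_minor, the
 * packet type, the FIRST/LAST flags, the 4-byte data representation (the
 * source of the endianness handling above), frag_length, auth_length and
 * call_id; frag_length is what feeds pdu_needed_len.
 */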

/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
	if (p->mem_ctx) {
		DEBUG(3,("free_pipe_context: destroying talloc pool of size "
			 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
		talloc_free_children(p->mem_ctx);
	} else {
		p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
		if (p->mem_ctx == NULL) {
			p->fault_state = True;
		}
	}
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
	 */

	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
			 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
			 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}
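
/*
 * Worked example for the data_len calculation at the top of
 * process_request_pdu() (sizes invented for illustration): an NTLMSSP-sealed
 * request with frag_len = 1072 and auth_len = 16 leaves
 * 1072 - 16 (RPC_HEADER_LEN) - 8 (RPC_HDR_REQ_LEN) - 8 (RPC_HDR_AUTH_LEN)
 * - 16 = 1024 bytes of stub data plus sign/seal padding; ss_padding_len from
 * the auth processing is then subtracted before the stub data is appended
 * to in_data.data.
 */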

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
	bool reply = False;

	if(p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			  p->name ));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the correct endianness for both the
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
		  (unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;

		case RPC_PING: /* CL request - ignore... */
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				 (unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_RESPONSE: /* No responses here. */
			DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
				 p->name ));
			break;

		case RPC_FAULT:
		case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
		case RPC_NOCALL: /* CL - server reply to a ping call. */
		case RPC_REJECT:
		case RPC_ACK:
		case RPC_CL_CANCEL:
		case RPC_FACK:
		case RPC_CANCEL_ACK:
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				 (unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);
			}
			break;

		case RPC_BINDACK:
		case RPC_BINDNACK:
			DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
				 (unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_ALTCONT:
			/*
			 * We assume that a pipe alter context is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);
			}
			break;

		case RPC_ALTCONTRESP:
			DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
				 p->name));
			break;

		case RPC_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);
			}
			break;

		case RPC_SHUTDOWN:
			DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
				 p->name));
			break;

		case RPC_CO_CANCEL:
			/* For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can never cancel a
			   call (as far as I know). If we ever did we'd have to send a cancel_ack
			   reply. For now, just free all client data and continue processing. */
			reply = True;
			break;
#if 0
			/* Enable this if we're doing async rpc. */
			/* We must check the call-id matches the outstanding callid. */
			if(pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);
			}
			break;
#endif

		case RPC_ORPHANED:
			/* We should probably check the auth-verifier here.
			   For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			reply = True;
			break;

		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		prs_mem_free(&rpc_in);
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
}
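
/*
 * Note: a PDU that cannot be processed does not tear the pipe down here;
 * set_incoming_fault() marks the pipe as faulted and setup_fault_pdu()
 * queues a DCERPC_FAULT_OP_RNG_ERROR response, so the error travels back to
 * the client in-band on its next read of the pipe.
 */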

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		  (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		  (unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u "
			 "incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least an RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		  (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an rpc pipe.
****************************************************************************/

ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
{
	DEBUG(6,("write_to_pipe: %x", p->pnum));

	DEBUG(6,(" name: %s open: %s len: %d\n",
		 p->name, BOOLSTR(p->open), (int)n));

	dump_data(50, (uint8 *)data, n);

	return p->namedpipe_write(p->np_state, data, n);
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_left = n;

	while(data_left) {
		ssize_t data_used;

		DEBUG(10,("write_to_internal_pipe: data_left = %u\n", (unsigned int)data_left ));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10,("write_to_internal_pipe: data_used = %d\n", (int)data_used ));

		if(data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}
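
/*
 * Illustration of the loop above (sizes invented): a single 3000-byte write
 * covering one complete 1536-byte fragment plus the start of the next one is
 * consumed in pieces - 16 bytes of header, then 1520 bytes of body (at which
 * point the complete PDU is dispatched), then the next 16-byte header, then
 * whatever body bytes remain - until data_left reaches zero, so the caller
 * still sees one fully-consumed write of n bytes.
 */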

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
		       bool *is_data_outstanding)
{
	if (!p || !p->open) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,("read_from_pipe: %x", p->pnum));

	return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_internal_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_internal_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_internal_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u "
			  "returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
			  (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_internal_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_internal_pipe: %s: create_next_pdu failed.\n", p->name));
		return -1;
	}

	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	(*is_data_outstanding) = p->out_data.current_pdu_len > n;

	return data_returned;
}
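
/*
 * Illustrative reader pattern (a sketch only, not Samba code): since at most
 * one fragment comes back per call, a caller drains a response with repeated
 * reads; is_data_outstanding reports whether the current fragment was larger
 * than the buffer offered.
 *
 *	bool more = False;
 *	ssize_t got;
 *	do {
 *		got = read_from_internal_pipe(p, buf, sizeof(buf), &more);
 *		// hand "got" bytes (and the "more" hint) back to the SMB client
 *	} while (got > 0);
 */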

/****************************************************************************
 Wait device state on a pipe. Exactly what this is for is unknown...
****************************************************************************/

bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
{
	if (p == NULL) {
		return False;
	}

	if (p->open) {
		DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
			 priority, p->name));

		p->priority = priority;

		return True;
	}

	DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
		 priority, p->name));
	return False;
}

/****************************************************************************
 Set device state on a pipe. Exactly what this is for is unknown...
****************************************************************************/

bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
{
	if (p == NULL) {
		return False;
	}

	if (p->open) {
		DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
			 device_state, p->name));

		p->device_state = device_state;

		return True;
	}

	DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
		 device_state, p->name));
	return False;
}

/****************************************************************************
 Close an rpc pipe.
****************************************************************************/

bool close_rpc_pipe_hnd(smb_np_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
		return False;
	}

	TALLOC_FREE(p->np_state);

	bitmap_clear(bmap, p->pnum - pipe_handle_offset);

	pipes_open--;

	DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n",
		 p->name, p->pnum, pipes_open));

	DLIST_REMOVE(Pipes, p);

	/* TODO: Remove from pipe open db */

	if ( !delete_pipe_opendb( p ) ) {
		DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
			 "pipe from open db.\n", p->name));
	}

	TALLOC_FREE(p);

	return True;
}

/****************************************************************************
 Close all pipes on a connection.
****************************************************************************/

void pipe_close_conn(connection_struct *conn)
{
	smb_np_struct *p, *next;

	for (p = Pipes; p; p = next) {
		next = p->next;
		if (p->conn == conn) {
			close_rpc_pipe_hnd(p);
		}
	}
}

/****************************************************************************
 Close an internal rpc pipe. Used as the talloc destructor for pipes_struct.
****************************************************************************/

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	if (p->mem_ctx) {
		talloc_destroy(p->mem_ctx);
	}

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	TALLOC_FREE(p->pipe_user.nt_user_token);
	SAFE_FREE(p->pipe_user.ut.groups);

	DLIST_REMOVE(InternalPipes, p);

	ZERO_STRUCTP(p);

	return 0;
}

/****************************************************************************
 Find an rpc pipe given a pipe handle in a buffer and an offset.
****************************************************************************/

smb_np_struct *get_rpc_pipe_p(uint16 pnum)
{
	if (chain_p) {
		return chain_p;
	}

	return get_rpc_pipe(pnum);
}

/****************************************************************************
 Find an rpc pipe given a pipe handle.
****************************************************************************/

smb_np_struct *get_rpc_pipe(int pnum)
{
	smb_np_struct *p;

	DEBUG(4,("search for pipe pnum=%x\n", pnum));

	for (p = Pipes; p; p = p->next) {
		DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n",
			 p->name, p->pnum, pipes_open));
	}

	for (p = Pipes; p; p = p->next) {
		if (p->pnum == pnum) {
			chain_p = p;
			return p;
		}
	}

	return NULL;
}