Refactor make_internal_rpc_pipe_p: connection_struct is not needed
[Samba.git] / source / rpc_server / srv_pipe_hnd.c
1 /*
2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "includes.h"
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
27 #define PIPE "\\PIPE\\"
28 #define PIPELEN strlen(PIPE)
30 static smb_np_struct *chain_p;
31 static int pipes_open;
34 * Sometimes I can't decide if I hate Windows printer driver
35 * writers more than I hate the Windows spooler service driver
36 * writers. This gets around a combination of bugs in the spooler
37 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
39 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
40 * 2002 running on NT 4.0 SP6
41 * bumped up from 64 -> 256 after viewing traffic from con2prt
42 * for lots of printers on a WinNT 4.x SP6 box.
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
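/*
 * Pipes is the list of open SMB-level pipe handles (smb_np_struct),
 * InternalPipes the list of pipes_struct instances that back them, and
 * bmap records which pipe numbers are currently allocated.
 */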
54 /* TODO
55 * the following prototypes are declared here to avoid
56 * moving code about too much, which would make the patch
57 * disruptive / less obvious.
59 * these functions, and associated functions that they
60 * call, should be moved behind a .so module-loading
61 * system _anyway_. so that's the next step...
64 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
65 bool *is_data_outstanding);
66 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
67 static bool close_internal_rpc_pipe_hnd(void *np_conn);
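/*
 * These three functions, together with make_internal_rpc_pipe_p(), form the
 * internal transport that open_rpc_pipe_p() plugs into an smb_np_struct via
 * the namedpipe_create/read/write/close function pointers.
 */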
69 /****************************************************************************
70 Internal Pipe iterator functions.
71 ****************************************************************************/
73 pipes_struct *get_first_internal_pipe(void)
75 return InternalPipes;
78 pipes_struct *get_next_internal_pipe(pipes_struct *p)
80 return p->next;
83 /* this must be larger than the sum of the open files and directories */
84 static int pipe_handle_offset;
86 /****************************************************************************
87 Set the pipe_handle_offset. Called from smbd/files.c
88 ****************************************************************************/
90 void set_pipe_handle_offset(int max_open_files)
92 if(max_open_files < 0x7000) {
93 pipe_handle_offset = 0x7000;
94 } else {
95 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
99 /****************************************************************************
100 Reset pipe chain handle number.
101 ****************************************************************************/
103 void reset_chain_p(void)
105 chain_p = NULL;
108 /****************************************************************************
109 Initialise pipe handle states.
110 ****************************************************************************/
112 void init_rpc_pipe_hnd(void)
114 bmap = bitmap_allocate(MAX_OPEN_PIPES);
115 if (!bmap) {
116 exit_server("out of memory in init_rpc_pipe_hnd");
120 /****************************************************************************
121 Initialise an outgoing packet.
122 ****************************************************************************/
124 static bool pipe_init_outgoing_data(pipes_struct *p)
126 output_data *o_data = &p->out_data;
128 /* Reset the offset counters. */
129 o_data->data_sent_length = 0;
130 o_data->current_pdu_len = 0;
131 o_data->current_pdu_sent = 0;
133 memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
135 /* Free any memory in the current return data buffer. */
136 prs_mem_free(&o_data->rdata);
139 * Initialize the outgoing RPC data buffer.
140 * we will use this as the raw data area for replying to rpc requests.
142 if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
143 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
144 return False;
147 return True;
150 /****************************************************************************
151 Find first available pipe slot.
152 ****************************************************************************/
154 smb_np_struct *open_rpc_pipe_p(const char *pipe_name,
155 connection_struct *conn, uint16 vuid)
157 int i;
158 smb_np_struct *p, *p_it;
159 static int next_pipe;
160 bool is_spoolss_pipe = False;
162 DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
163 pipe_name, pipes_open));
165 if (strstr(pipe_name, "spoolss")) {
166 is_spoolss_pipe = True;
169 if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
170 DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
171 pipe_name ));
172 return NULL;
175 /* not repeating pipe numbers makes it easier to track things in
176 log files and prevents client bugs where pipe numbers are reused
177 over connection restarts */
179 if (next_pipe == 0) {
180 next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
183 i = bitmap_find(bmap, next_pipe);
185 if (i == -1) {
186 DEBUG(0,("ERROR! Out of pipe structures\n"));
187 return NULL;
190 next_pipe = (i+1) % MAX_OPEN_PIPES;
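/* bitmap_find() started its search at next_pipe; advance the hint past the
   slot just taken so pipe numbers rotate rather than being reused at once. */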
192 for (p = Pipes; p; p = p->next) {
193 DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));
196 p = talloc(NULL, smb_np_struct);
197 if (!p) {
198 DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
199 return NULL;
202 ZERO_STRUCTP(p);
204 p->name = talloc_strdup(p, pipe_name);
205 if (p->name == NULL) {
206 TALLOC_FREE(p);
207 DEBUG(0,("ERROR! no memory for pipe name!\n"));
208 return NULL;
211 /* add a dso mechanism instead of this, here */
213 p->namedpipe_create = make_internal_rpc_pipe_p;
214 p->namedpipe_read = read_from_internal_pipe;
215 p->namedpipe_write = write_to_internal_pipe;
216 p->namedpipe_close = close_internal_rpc_pipe_hnd;
218 p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
219 conn->server_info, vuid);
221 if (p->np_state == NULL) {
222 DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
223 TALLOC_FREE(p);
224 return NULL;
227 DLIST_ADD(Pipes, p);
230	 * Mark this pipe slot as allocated and turn the bitmap index into
231	 * the pnum we hand back to the client.
236 bitmap_set(bmap, i);
237 i += pipe_handle_offset;
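/* pipe_handle_offset keeps client-visible pipe numbers above the fnum range,
   so they can never clash with open file handles (see set_pipe_handle_offset). */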
239 pipes_open++;
241 p->pnum = i;
243 p->open = True;
244 p->device_state = 0;
245 p->priority = 0;
246 p->conn = conn;
247 p->vuid = vuid;
249 p->max_trans_reply = 0;
251 DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
252 pipe_name, i, pipes_open));
254 chain_p = p;
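/* Remember this pipe as the chained handle: get_rpc_pipe_p() returns it
   without a lookup until reset_chain_p() clears it. */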
256 /* Iterate over p_it as a temp variable, to display all open pipes */
257 for (p_it = Pipes; p_it; p_it = p_it->next) {
258 DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));
261 return chain_p;
264 /****************************************************************************
265 Make an internal namedpipes structure
266 ****************************************************************************/
268 struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
269 const char *client_address,
270 struct auth_serversupplied_info *server_info,
271 uint16_t vuid)
273 pipes_struct *p;
275 DEBUG(4,("Create pipe requested %s\n", pipe_name));
277 p = TALLOC_ZERO_P(NULL, pipes_struct);
279 if (!p) {
280 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
281 return NULL;
284 if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
285 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
286 TALLOC_FREE(p);
287 return NULL;
290 if (!init_pipe_handle_list(p, pipe_name)) {
291 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
292 talloc_destroy(p->mem_ctx);
293 TALLOC_FREE(p);
294 return NULL;
298 * Initialize the incoming RPC data buffer with one PDU worth of memory.
299 * We cheat here and say we're marshalling, as we intend to add incoming
300 * data directly into the prs_struct and we want it to auto grow. We will
301 * change the type to UNMARSHALLING before processing the stream.
304 if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
305 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
306 talloc_destroy(p->mem_ctx);
307 close_policy_by_pipe(p);
308 TALLOC_FREE(p);
309 return NULL;
312 p->server_info = copy_serverinfo(p, server_info);
313 if (p->server_info == NULL) {
314 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
315 talloc_destroy(p->mem_ctx);
316 close_policy_by_pipe(p);
317 TALLOC_FREE(p);
318 return NULL;
321 DLIST_ADD(InternalPipes, p);
323 memcpy(p->client_address, client_address, sizeof(p->client_address));
325 p->endian = RPC_LITTLE_ENDIAN;
327 ZERO_STRUCT(p->pipe_user);
329 p->pipe_user.vuid = vuid;
330 p->pipe_user.ut.uid = (uid_t)-1;
331 p->pipe_user.ut.gid = (gid_t)-1;
332 p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
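/* The Unix uid/gid stay at -1 here; presumably they are only filled in when
   the pipe impersonates the authenticated user to service a request. */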
335 * Initialize the outgoing RPC data buffer with no memory.
337 prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
339 fstrcpy(p->name, pipe_name);
341 DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
342 pipe_name, pipes_open));
344 return p;
347 /****************************************************************************
348 Sets the fault state on incoming packets.
349 ****************************************************************************/
351 static void set_incoming_fault(pipes_struct *p)
353 prs_mem_free(&p->in_data.data);
354 p->in_data.pdu_needed_len = 0;
355 p->in_data.pdu_received_len = 0;
356 p->fault_state = True;
357 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
358 p->name));
361 /****************************************************************************
362 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
363 ****************************************************************************/
365 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
367 size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
369 DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
370 (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
371 (unsigned int)p->in_data.pdu_received_len ));
373 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
374 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
376 return (ssize_t)len_needed_to_complete_hdr;
379 /****************************************************************************
380 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
381 ****************************************************************************/
383 static ssize_t unmarshall_rpc_header(pipes_struct *p)
386 * Unmarshall the header to determine the needed length.
389 prs_struct rpc_in;
391 if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
392 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
393 set_incoming_fault(p);
394 return -1;
397 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
398 prs_set_endian_data( &rpc_in, p->endian);
400 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
401 p->in_data.pdu_received_len, False);
404 * Unmarshall the header as this will tell us how much
405 * data we need to read to get the complete pdu.
406 * This also sets the endian flag in rpc_in.
409 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
410 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
411 set_incoming_fault(p);
412 prs_mem_free(&rpc_in);
413 return -1;
417 * Validate the RPC header.
420 if(p->hdr.major != 5 && p->hdr.minor != 0) {
421 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
422 set_incoming_fault(p);
423 prs_mem_free(&rpc_in);
424 return -1;
428 * If there's no data in the incoming buffer, this should be the start of a new RPC.
431 if(prs_offset(&p->in_data.data) == 0) {
434 * AS/U doesn't set FIRST flag in a BIND packet it seems.
437 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
439 * Ensure that the FIRST flag is set. If not then we have
440 * a stream mismatch.
443 DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
444 set_incoming_fault(p);
445 prs_mem_free(&rpc_in);
446 return -1;
450 * If this is the first PDU then set the endianness
451 * flag in the pipe. We will need this when parsing all
452 * data in this RPC.
455 p->endian = rpc_in.bigendian_data;
457 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
458 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
460 } else {
463 * If this is *NOT* the first PDU then check the endianness
464 * flag in the pipe is the same as that in the PDU.
467 if (p->endian != rpc_in.bigendian_data) {
468 DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
469 set_incoming_fault(p);
470 prs_mem_free(&rpc_in);
471 return -1;
476 * Ensure that the pdu length is sane.
479 if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
480 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
481 set_incoming_fault(p);
482 prs_mem_free(&rpc_in);
483 return -1;
486 DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
487 (unsigned int)p->hdr.flags ));
489 p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
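/* pdu_needed_len is how much of this fragment is still to be received,
   now that the header itself has arrived. */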
491 prs_mem_free(&rpc_in);
493 return 0; /* No extra data processed. */
496 /****************************************************************************
497 Call this to free any talloc'ed memory. Do this before and after processing
498 a complete PDU.
499 ****************************************************************************/
501 static void free_pipe_context(pipes_struct *p)
503 if (p->mem_ctx) {
504 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
505 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
506 talloc_free_children(p->mem_ctx);
507 } else {
508 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
509 if (p->mem_ctx == NULL) {
510 p->fault_state = True;
515 /****************************************************************************
516 Processes a request pdu. This will do auth processing if needed, and
517 appends the data into the complete stream if the LAST flag is not set.
518 ****************************************************************************/
520 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
522 uint32 ss_padding_len = 0;
523 size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
524 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
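/* data_len is the stub data carried by this fragment: the fragment length
   less the RPC header, the request header and, when authentication is in
   use, the auth header and trailer. Any sign/seal padding is still counted
   here and is removed further down. */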
526 if(!p->pipe_bound) {
527 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
528 set_incoming_fault(p);
529 return False;
533 * Check if we need to do authentication processing.
534 * This is only done on requests, not binds.
538 * Read the RPC request header.
541 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
542 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
543 set_incoming_fault(p);
544 return False;
547 switch(p->auth.auth_type) {
548 case PIPE_AUTH_TYPE_NONE:
549 break;
551 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
552 case PIPE_AUTH_TYPE_NTLMSSP:
554 NTSTATUS status;
555 if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
556 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
557 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
558 set_incoming_fault(p);
559 return False;
561 break;
564 case PIPE_AUTH_TYPE_SCHANNEL:
565 if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
566 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
567 set_incoming_fault(p);
568 return False;
570 break;
572 default:
573 DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
574 set_incoming_fault(p);
575 return False;
578 /* Now we've done the sign/seal we can remove any padding data. */
579 if (data_len > ss_padding_len) {
580 data_len -= ss_padding_len;
584 * Check the data length doesn't go over the 15Mb limit.
585 * The limit was increased after observing a bug in the Windows NT 4.0 SP6a
586 * spoolsv.exe where the response to a GETPRINTERDRIVER2 RPC
587 * does not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
590 if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
591 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
592 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
593 set_incoming_fault(p);
594 return False;
598 * Append the data portion into the buffer and return.
601 if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
602 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
603 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
604 set_incoming_fault(p);
605 return False;
608 if(p->hdr.flags & RPC_FLG_LAST) {
609 bool ret = False;
611 * Ok - we finally have a complete RPC stream.
612 * Call the rpc command to process it.
616 * Ensure the internal prs buffer size is *exactly* the same
617 * size as the current offset.
620 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
621 DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
622 set_incoming_fault(p);
623 return False;
627 * Set the parse offset to the start of the data and set the
628 * prs_struct to UNMARSHALL.
631 prs_set_offset(&p->in_data.data, 0);
632 prs_switch_type(&p->in_data.data, UNMARSHALL);
635 * Process the complete data stream here.
638 free_pipe_context(p);
640 if(pipe_init_outgoing_data(p)) {
641 ret = api_pipe_request(p);
644 free_pipe_context(p);
647 * We have consumed the whole data stream. Set back to
648 * marshalling and set the offset back to the start of
649 * the buffer to re-use it (we could also do a prs_mem_free()
650 * and then re_init on the next start of PDU. Not sure which
651 * is best here.... JRA.
654 prs_switch_type(&p->in_data.data, MARSHALL);
655 prs_set_offset(&p->in_data.data, 0);
656 return ret;
659 return True;
662 /****************************************************************************
663 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
664 already been parsed and stored in p->hdr.
665 ****************************************************************************/
667 static void process_complete_pdu(pipes_struct *p)
669 prs_struct rpc_in;
670 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
671 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
672 bool reply = False;
674 if(p->fault_state) {
675 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
676 p->name ));
677 set_incoming_fault(p);
678 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
679 return;
682 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
685 * Ensure we're using the correct endianness for both the
686 * RPC header flags and the raw data we will be reading from.
689 prs_set_endian_data( &rpc_in, p->endian);
690 prs_set_endian_data( &p->in_data.data, p->endian);
692 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
694 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
695 (unsigned int)p->hdr.pkt_type ));
697 switch (p->hdr.pkt_type) {
698 case RPC_REQUEST:
699 reply = process_request_pdu(p, &rpc_in);
700 break;
702 case RPC_PING: /* CL request - ignore... */
703 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
704 (unsigned int)p->hdr.pkt_type, p->name));
705 break;
707 case RPC_RESPONSE: /* No responses here. */
708 DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
709 p->name ));
710 break;
712 case RPC_FAULT:
713 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
714 case RPC_NOCALL: /* CL - server reply to a ping call. */
715 case RPC_REJECT:
716 case RPC_ACK:
717 case RPC_CL_CANCEL:
718 case RPC_FACK:
719 case RPC_CANCEL_ACK:
720 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
721 (unsigned int)p->hdr.pkt_type, p->name));
722 break;
724 case RPC_BIND:
726 * We assume that a pipe bind is only in one pdu.
728 if(pipe_init_outgoing_data(p)) {
729 reply = api_pipe_bind_req(p, &rpc_in);
731 break;
733 case RPC_BINDACK:
734 case RPC_BINDNACK:
735 DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
736 (unsigned int)p->hdr.pkt_type, p->name));
737 break;
740 case RPC_ALTCONT:
742 * We assume that a pipe bind is only in one pdu.
744 if(pipe_init_outgoing_data(p)) {
745 reply = api_pipe_alter_context(p, &rpc_in);
747 break;
749 case RPC_ALTCONTRESP:
750 DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
751 p->name));
752 break;
754 case RPC_AUTH3:
756 * The third packet in an NTLMSSP auth exchange.
758 if(pipe_init_outgoing_data(p)) {
759 reply = api_pipe_bind_auth3(p, &rpc_in);
761 break;
763 case RPC_SHUTDOWN:
764 DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
765 p->name));
766 break;
768 case RPC_CO_CANCEL:
769 /* For now just free all client data and continue processing. */
770 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
771 /* As we never do asynchronous RPC serving, we can never cancel a
772 call (as far as I know). If we ever did we'd have to send a cancel_ack
773 reply. For now, just free all client data and continue processing. */
774 reply = True;
775 break;
776 #if 0
777 /* Enable this if we're doing async rpc. */
778 /* We must check the call-id matches the outstanding callid. */
779 if(pipe_init_outgoing_data(p)) {
780 /* Send a cancel_ack PDU reply. */
781 /* We should probably check the auth-verifier here. */
782 reply = setup_cancel_ack_reply(p, &rpc_in);
784 break;
785 #endif
787 case RPC_ORPHANED:
788 /* We should probably check the auth-verifier here.
789 For now just free all client data and continue processing. */
790 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
791 reply = True;
792 break;
794 default:
795 DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
796 break;
799 /* Reset to little endian. Probably don't need this but it won't hurt. */
800 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
802 if (!reply) {
803 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
804 set_incoming_fault(p);
805 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
806 prs_mem_free(&rpc_in);
807 } else {
809 * Reset the lengths. We're ready for a new pdu.
811 p->in_data.pdu_needed_len = 0;
812 p->in_data.pdu_received_len = 0;
815 prs_mem_free(&rpc_in);
818 /****************************************************************************
819 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
820 ****************************************************************************/
822 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
824 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
826 DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
827 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
828 (unsigned int)n ));
830 if(data_to_copy == 0) {
832 * This is an error - data is being received and there is no
833 * space in the PDU. Free the received data and go into the fault state.
835 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
836 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
837 set_incoming_fault(p);
838 return -1;
842 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
843 * number of bytes before we can do anything.
846 if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
848 * Always return here. If we have more data then the RPC_HEADER
849 * will be processed the next time around the loop.
851 return fill_rpc_header(p, data, data_to_copy);
855 * At this point we know we have at least an RPC_HEADER_LEN amount of data
856 * stored in current_in_pdu.
860 * If pdu_needed_len is zero this is a new pdu.
861 * Unmarshall the header so we know how much more
862 * data we need, then loop again.
865 if(p->in_data.pdu_needed_len == 0) {
866 ssize_t rret = unmarshall_rpc_header(p);
867 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
868 return rret;
870 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
871 of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
872 pdu type. Deal with this in process_complete_pdu(). */
876 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
877 * Keep reading until we have a full pdu.
880 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
883 * Copy as much of the data as we need into the current_in_pdu buffer.
884 * pdu_needed_len becomes zero when we have a complete pdu.
887 memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
888 p->in_data.pdu_received_len += data_to_copy;
889 p->in_data.pdu_needed_len -= data_to_copy;
892 * Do we have a complete PDU ?
893 * (return the number of bytes handled in the call)
896 if(p->in_data.pdu_needed_len == 0) {
897 process_complete_pdu(p);
898 return data_to_copy;
901 DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
902 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
904 return (ssize_t)data_to_copy;
907 /****************************************************************************
908 Accepts incoming data on an rpc pipe.
909 ****************************************************************************/
911 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
913 DEBUG(6,("write_to_pipe: %x", p->pnum));
915 DEBUG(6,(" name: %s open: %s len: %d\n",
916 p->name, BOOLSTR(p->open), (int)n));
918 dump_data(50, (uint8 *)data, n);
920 return p->namedpipe_write(p->np_state, data, n);
923 /****************************************************************************
924 Accepts incoming data on an internal rpc pipe.
925 ****************************************************************************/
927 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
929 pipes_struct *p = (pipes_struct*)np_conn;
930 size_t data_left = n;
932 while(data_left) {
933 ssize_t data_used;
935 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
937 data_used = process_incoming_data(p, data, data_left);
939 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
941 if(data_used < 0) {
942 return -1;
945 data_left -= data_used;
946 data += data_used;
949 return n;
952 /****************************************************************************
953 Replies to a request to read data from a pipe.
955 Headers are interspersed with the data at PDU intervals. By the time
956 this function is called, the start of the data could possibly have been
957 read by an SMBtrans (file_offset != 0).
959 Calling create_rpc_reply() here is a hack. The data should already
960 have been prepared into arrays of headers + data stream sections.
961 ****************************************************************************/
963 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
964 bool *is_data_outstanding)
966 if (!p || !p->open) {
967 DEBUG(0,("read_from_pipe: pipe not open\n"));
968 return -1;
971 DEBUG(6,("read_from_pipe: %x", p->pnum));
973 return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
976 /****************************************************************************
977 Replies to a request to read data from a pipe.
979 Headers are interspersed with the data at PDU intervals. By the time
980 this function is called, the start of the data could possibly have been
981 read by an SMBtrans (file_offset != 0).
983 Calling create_rpc_reply() here is a hack. The data should already
984 have been prepared into arrays of headers + data stream sections.
985 ****************************************************************************/
987 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
988 bool *is_data_outstanding)
990 pipes_struct *p = (pipes_struct*)np_conn;
991 uint32 pdu_remaining = 0;
992 ssize_t data_returned = 0;
994 if (!p) {
995 DEBUG(0,("read_from_pipe: pipe not open\n"));
996 return -1;
999 DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1002 * We cannot return more than one PDU length per
1003 * read request.
1007 * This condition should result in the connection being closed.
1008 * Netapp filers seem to set it to 0xffff which results in domain
1009 * authentications failing. Just ignore it so things work.
1012 if(n > RPC_MAX_PDU_FRAG_LEN) {
1013 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1014 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1015 n = RPC_MAX_PDU_FRAG_LEN;
1019 * Determine if there is still data to send in the
1020 * pipe PDU buffer. Always send this first. Never
1021 * send more than is left in the current PDU. The
1022 * client should send a new read request for a new
1023 * PDU.
1026 if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1027 data_returned = (ssize_t)MIN(n, pdu_remaining);
1029 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1030 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
1031 (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1033 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1034 p->out_data.current_pdu_sent += (uint32)data_returned;
1035 goto out;
1039 * At this point p->current_pdu_len == p->current_pdu_sent (which
1040 * may of course be zero if this is the first return fragment).
1043 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1044 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1045 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1047 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1049 * We have sent all possible data, return 0.
1051 data_returned = 0;
1052 goto out;
1056 * We need to create a new PDU from the data left in p->rdata.
1057 * Create the header/data/footers. This also sets up the fields
1058 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1059 * and stores the outgoing PDU in p->current_pdu.
1062 if(!create_next_pdu(p)) {
1063 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1064 return -1;
1067 data_returned = MIN(n, p->out_data.current_pdu_len);
1069 memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1070 p->out_data.current_pdu_sent += (uint32)data_returned;
1072 out:
1074 (*is_data_outstanding) = p->out_data.current_pdu_len > n;
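/* Report whether the current PDU holds more data than this read could
   return, so the caller knows another read is needed to drain it. */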
1075 return data_returned;
1078 /****************************************************************************
1079 Wait device state on a pipe. Exactly what this is for is unknown...
1080 ****************************************************************************/
1082 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1084 if (p == NULL) {
1085 return False;
1088 if (p->open) {
1089 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1090 priority, p->name));
1092 p->priority = priority;
1094 return True;
1097 DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1098 priority, p->name));
1099 return False;
1103 /****************************************************************************
1104 Set device state on a pipe. Exactly what this is for is unknown...
1105 ****************************************************************************/
1107 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1109 if (p == NULL) {
1110 return False;
1113 if (p->open) {
1114 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1115 device_state, p->name));
1117 p->device_state = device_state;
1119 return True;
1122 DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1123 device_state, p->name));
1124 return False;
1128 /****************************************************************************
1129 Close an rpc pipe.
1130 ****************************************************************************/
1132 bool close_rpc_pipe_hnd(smb_np_struct *p)
1134 if (!p) {
1135 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1136 return False;
1139 p->namedpipe_close(p->np_state);
1141 bitmap_clear(bmap, p->pnum - pipe_handle_offset);
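/* p->pnum had pipe_handle_offset added in open_rpc_pipe_p(); subtract it
   again to recover the bitmap slot being freed. */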
1143 pipes_open--;
1145 DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n",
1146 p->name, p->pnum, pipes_open));
1148 DLIST_REMOVE(Pipes, p);
1150 /* TODO: Remove from pipe open db */
1152 if ( !delete_pipe_opendb( p ) ) {
1153 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1154 "pipe from open db.\n", p->name));
1157 TALLOC_FREE(p);
1159 return True;
1162 /****************************************************************************
1163 Close all pipes on a connection.
1164 ****************************************************************************/
1166 void pipe_close_conn(connection_struct *conn)
1168 smb_np_struct *p, *next;
1170 for (p=Pipes;p;p=next) {
1171 next = p->next;
1172 if (p->conn == conn) {
1173 close_rpc_pipe_hnd(p);
1178 /****************************************************************************
1179 Close an rpc pipe.
1180 ****************************************************************************/
1182 static bool close_internal_rpc_pipe_hnd(void *np_conn)
1184 pipes_struct *p = (pipes_struct *)np_conn;
1185 if (!p) {
1186 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1187 return False;
1190 prs_mem_free(&p->out_data.rdata);
1191 prs_mem_free(&p->in_data.data);
1193 if (p->auth.auth_data_free_func) {
1194 (*p->auth.auth_data_free_func)(&p->auth);
1197 if (p->mem_ctx) {
1198 talloc_destroy(p->mem_ctx);
1201 free_pipe_rpc_context( p->contexts );
1203 /* Free the handles database. */
1204 close_policy_by_pipe(p);
1206 TALLOC_FREE(p->pipe_user.nt_user_token);
1207 SAFE_FREE(p->pipe_user.ut.groups);
1209 DLIST_REMOVE(InternalPipes, p);
1211 ZERO_STRUCTP(p);
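/* Zeroing the structure before freeing it is presumably defensive, making
   any stale pointer to this pipes_struct easier to spot. */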
1213 TALLOC_FREE(p);
1215 return True;
1218 /****************************************************************************
1219 Find an rpc pipe given a pipe handle in a buffer and an offset.
1220 ****************************************************************************/
1222 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1224 if (chain_p) {
1225 return chain_p;
1228 return get_rpc_pipe(pnum);
1231 /****************************************************************************
1232 Find an rpc pipe given a pipe handle.
1233 ****************************************************************************/
1235 smb_np_struct *get_rpc_pipe(int pnum)
1237 smb_np_struct *p;
1239 DEBUG(4,("search for pipe pnum=%x\n", pnum));
1241 for (p=Pipes;p;p=p->next) {
1242 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n",
1243 p->name, p->pnum, pipes_open));
1246 for (p=Pipes;p;p=p->next) {
1247 if (p->pnum == pnum) {
1248 chain_p = p;
1249 return p;
1253 return NULL;