/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell 1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison 1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

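/* chain_p caches the pipe addressed by the current chained SMB request
 * (see get_rpc_pipe_p() and reset_chain_p() below); pipes_open counts the
 * SMB-level named pipes currently open in this process. */
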
static smb_np_struct *chain_p;
static int pipes_open;

/*
 * Sometimes I can't decide if I hate Windows printer driver
 * writers more than I hate the Windows spooler service driver
 * writers. This gets around a combination of bugs in the spooler
 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
 *
 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
 * 2002 running on NT 4.0 SP6
 * bumped up from 64 -> 256 after viewing traffic from con2prt
 * for lots of printers on a WinNT 4.x SP6 box.
 */

#ifndef MAX_OPEN_SPOOLSS_PIPES
#define MAX_OPEN_SPOOLSS_PIPES 256
#endif

static int current_spoolss_pipes_open;

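/* Pipes is the list of open SMB-level named pipes (smb_np_struct), while
 * InternalPipes tracks the internal pipes_struct instances behind them.
 * bmap records which pipe numbers are currently allocated. */
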
static smb_np_struct *Pipes;
static pipes_struct *InternalPipes;
static struct bitmap *bmap;

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_. so that's the next step...
 */

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);

/****************************************************************************
 Internal Pipe iterator functions.
****************************************************************************/

pipes_struct *get_first_internal_pipe(void)
{
        return InternalPipes;
}

pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
        return p->next;
}

/* this must be larger than the sum of the open files and directories */
static int pipe_handle_offset;

/****************************************************************************
 Set the pipe_handle_offset. Called from smbd/files.c
****************************************************************************/

void set_pipe_handle_offset(int max_open_files)
{
        if(max_open_files < 0x7000) {
                pipe_handle_offset = 0x7000;
        } else {
                pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
        }
}

/****************************************************************************
 Reset pipe chain handle number.
****************************************************************************/

void reset_chain_p(void)
{
        chain_p = NULL;
}

/****************************************************************************
 Initialise pipe handle states.
****************************************************************************/

void init_rpc_pipe_hnd(void)
{
        bmap = bitmap_allocate(MAX_OPEN_PIPES);
        if (!bmap) {
                exit_server("out of memory in init_rpc_pipe_hnd");
        }
}

/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
        output_data *o_data = &p->out_data;

        /* Reset the offset counters. */
        o_data->data_sent_length = 0;
        o_data->current_pdu_len = 0;
        o_data->current_pdu_sent = 0;

        memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));

        /* Free any memory in the current return data buffer. */
        prs_mem_free(&o_data->rdata);

        /*
         * Initialize the outgoing RPC data buffer.
         * we will use this as the raw data area for replying to rpc requests.
         */
        if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
                DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
                return False;
        }

        return True;
}

/****************************************************************************
 Find first available pipe slot.
****************************************************************************/

smb_np_struct *open_rpc_pipe_p(const char *pipe_name,
                               connection_struct *conn, uint16 vuid)
{
        int i;
        smb_np_struct *p, *p_it;
        static int next_pipe;
        bool is_spoolss_pipe = False;

        DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
                 pipe_name, pipes_open));

        if (strstr(pipe_name, "spoolss")) {
                is_spoolss_pipe = True;
        }

        if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
                DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
                        pipe_name ));
                return NULL;
        }

        /* not repeating pipe numbers makes it easier to track things in
           log files and prevents client bugs where pipe numbers are reused
           over connection restarts */

        if (next_pipe == 0) {
                next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
        }

        i = bitmap_find(bmap, next_pipe);

        if (i == -1) {
                DEBUG(0,("ERROR! Out of pipe structures\n"));
                return NULL;
        }

        next_pipe = (i+1) % MAX_OPEN_PIPES;

        for (p = Pipes; p; p = p->next) {
                DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));
        }

        p = talloc(NULL, smb_np_struct);
        if (!p) {
                DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
                return NULL;
        }

        ZERO_STRUCTP(p);

        p->name = talloc_strdup(p, pipe_name);
        if (p->name == NULL) {
                TALLOC_FREE(p);
                DEBUG(0,("ERROR! no memory for pipe name!\n"));
                return NULL;
        }

        /* add a dso mechanism instead of this, here */

        p->namedpipe_create = make_internal_rpc_pipe_p;
        p->namedpipe_read = read_from_internal_pipe;
        p->namedpipe_write = write_to_internal_pipe;

        p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
                                          conn->server_info, vuid);

        if (p->np_state == NULL) {
                DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
                TALLOC_FREE(p);
                return NULL;
        }

        DLIST_ADD(Pipes, p);

        /*
         * Initialize the incoming RPC data buffer with one PDU worth of memory.
         * We cheat here and say we're marshalling, as we intend to add incoming
         * data directly into the prs_struct and we want it to auto grow. We will
         * change the type to UNMARSHALLING before processing the stream.
         */

        bitmap_set(bmap, i);
        i += pipe_handle_offset;

        pipes_open++;

        p->pnum = i;

        p->open = True;
        p->device_state = 0;
        p->priority = 0;
        p->conn = conn;
        p->vuid = vuid;

        p->max_trans_reply = 0;

        DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
                 pipe_name, i, pipes_open));

        chain_p = p;

        /* Iterate over p_it as a temp variable, to display all open pipes */
        for (p_it = Pipes; p_it; p_it = p_it->next) {
                DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));
        }

        return chain_p;
}

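/* Note on the handle numbering above: the value stored in p->pnum is the
 * bitmap slot plus pipe_handle_offset, so pipe handles never collide with
 * ordinary file handles; close_rpc_pipe_hnd() subtracts the offset again
 * before clearing the bitmap slot. */
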
/****************************************************************************
 Make an internal named pipes structure.
****************************************************************************/

struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
                                              const char *client_address,
                                              struct auth_serversupplied_info *server_info,
                                              uint16_t vuid)
{
        pipes_struct *p;

        DEBUG(4,("Create pipe requested %s\n", pipe_name));

        p = TALLOC_ZERO_P(NULL, pipes_struct);

        if (!p) {
                DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
                return NULL;
        }

        if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
                DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
                TALLOC_FREE(p);
                return NULL;
        }

        if (!init_pipe_handle_list(p, pipe_name)) {
                DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
                talloc_destroy(p->mem_ctx);
                TALLOC_FREE(p);
                return NULL;
        }

        /*
         * Initialize the incoming RPC data buffer with one PDU worth of memory.
         * We cheat here and say we're marshalling, as we intend to add incoming
         * data directly into the prs_struct and we want it to auto grow. We will
         * change the type to UNMARSHALLING before processing the stream.
         */

        if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
                DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
                talloc_destroy(p->mem_ctx);
                close_policy_by_pipe(p);
                TALLOC_FREE(p);
                return NULL;
        }

        p->server_info = copy_serverinfo(p, server_info);
        if (p->server_info == NULL) {
                DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
                talloc_destroy(p->mem_ctx);
                close_policy_by_pipe(p);
                TALLOC_FREE(p);
                return NULL;
        }

        DLIST_ADD(InternalPipes, p);

        memcpy(p->client_address, client_address, sizeof(p->client_address));

        p->endian = RPC_LITTLE_ENDIAN;

        ZERO_STRUCT(p->pipe_user);

        p->pipe_user.vuid = vuid;
        p->pipe_user.ut.uid = (uid_t)-1;
        p->pipe_user.ut.gid = (gid_t)-1;
        p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);

        /*
         * Initialize the outgoing RPC data buffer with no memory.
         */
        prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

        fstrcpy(p->name, pipe_name);

        DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
                 pipe_name, pipes_open));

        talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

        return p;
}

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
        prs_mem_free(&p->in_data.data);
        p->in_data.pdu_needed_len = 0;
        p->in_data.pdu_received_len = 0;
        p->fault_state = True;
        DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
                   p->name));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

        DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
                  (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
                  (unsigned int)p->in_data.pdu_received_len ));

        memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
        p->in_data.pdu_received_len += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
        /*
         * Unmarshall the header to determine the needed length.
         */

        prs_struct rpc_in;

        if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
                DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
                set_incoming_fault(p);
                return -1;
        }

        prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
        prs_set_endian_data( &rpc_in, p->endian);

        prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
                         p->in_data.pdu_received_len, False);

        /*
         * Unmarshall the header as this will tell us how much
         * data we need to read to get the complete pdu.
         * This also sets the endian flag in rpc_in.
         */

        if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
                DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * Validate the RPC header.
         */

        if(p->hdr.major != 5 && p->hdr.minor != 0) {
                DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * If there's no data in the incoming buffer this should be the start of a new RPC.
         */

        if(prs_offset(&p->in_data.data) == 0) {

                /*
                 * AS/U doesn't set FIRST flag in a BIND packet it seems.
                 */

                if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
                        /*
                         * Ensure that the FIRST flag is set. If not then we have
                         * a stream mismatch.
                         */

                        DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }

                /*
                 * If this is the first PDU then set the endianness
                 * flag in the pipe. We will need this when parsing all
                 * data in this RPC.
                 */

                p->endian = rpc_in.bigendian_data;

                DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
                         p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

        } else {

                /*
                 * If this is *NOT* the first PDU then check the endianness
                 * flag in the pipe is the same as that in the PDU.
                 */

                if (p->endian != rpc_in.bigendian_data) {
                        DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }
        }

        /*
         * Ensure that the pdu length is sane.
         */

        if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
                  (unsigned int)p->hdr.flags ));

        p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

        prs_mem_free(&rpc_in);

        return 0; /* No extra data processed. */
}

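/* For reference: the fixed-size header unmarshalled above is the standard
 * 16-byte DCE/RPC connection-oriented common header (rpc_vers, rpc_vers_minor,
 * packet type, packet flags, data representation, frag_len, auth_len and
 * call_id). frag_len is what drives pdu_needed_len above. */
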
/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
        if (p->mem_ctx) {
                DEBUG(3,("free_pipe_context: destroying talloc pool of size "
                         "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
                talloc_free_children(p->mem_ctx);
        } else {
                p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
                if (p->mem_ctx == NULL) {
                        p->fault_state = True;
                }
        }
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
        uint32 ss_padding_len = 0;
        size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
                          (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

        if(!p->pipe_bound) {
                DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Check if we need to do authentication processing.
         * This is only done on requests, not binds.
         */

        /*
         * Read the RPC request header.
         */

        if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
                DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
                set_incoming_fault(p);
                return False;
        }

        switch(p->auth.auth_type) {
                case PIPE_AUTH_TYPE_NONE:
                        break;

                case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
                case PIPE_AUTH_TYPE_NTLMSSP:
                {
                        NTSTATUS status;
                        if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
                                DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
                                DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;
                }

                case PIPE_AUTH_TYPE_SCHANNEL:
                        if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
                                DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;

                default:
                        DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
                        set_incoming_fault(p);
                        return False;
        }

        /* Now we've done the sign/seal we can remove any padding data. */
        if (data_len > ss_padding_len) {
                data_len -= ss_padding_len;
        }

        /*
         * Check the data length doesn't go over the 15Mb limit.
         * increased after observing a bug in the Windows NT 4.0 SP6a
         * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
         * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
         */

        if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
                DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
                         (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Append the data portion into the buffer and return.
         */

        if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
                DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
                         (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
                set_incoming_fault(p);
                return False;
        }

        if(p->hdr.flags & RPC_FLG_LAST) {
                bool ret = False;
                /*
                 * Ok - we finally have a complete RPC stream.
                 * Call the rpc command to process it.
                 */

                /*
                 * Ensure the internal prs buffer size is *exactly* the same
                 * size as the current offset.
                 */

                if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
                        DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
                        set_incoming_fault(p);
                        return False;
                }

                /*
                 * Set the parse offset to the start of the data and set the
                 * prs_struct to UNMARSHALL.
                 */

                prs_set_offset(&p->in_data.data, 0);
                prs_switch_type(&p->in_data.data, UNMARSHALL);

                /*
                 * Process the complete data stream here.
                 */

                free_pipe_context(p);

                if(pipe_init_outgoing_data(p)) {
                        ret = api_pipe_request(p);
                }

                free_pipe_context(p);

                /*
                 * We have consumed the whole data stream. Set back to
                 * marshalling and set the offset back to the start of
                 * the buffer to re-use it (we could also do a prs_mem_free()
                 * and then re_init on the next start of PDU. Not sure which
                 * is best here.... JRA.)
                 */

                prs_switch_type(&p->in_data.data, MARSHALL);
                prs_set_offset(&p->in_data.data, 0);
                return ret;
        }

        return True;
}

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
        prs_struct rpc_in;
        size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
        char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
        bool reply = False;

        if(p->fault_state) {
                DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
                          p->name ));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
                return;
        }

        prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

        /*
         * Ensure we're using the correct endianness for both the
         * RPC header flags and the raw data we will be reading from.
         */

        prs_set_endian_data( &rpc_in, p->endian);
        prs_set_endian_data( &p->in_data.data, p->endian);

        prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

        DEBUG(10,("process_complete_pdu: processing packet type %u\n",
                  (unsigned int)p->hdr.pkt_type ));

        switch (p->hdr.pkt_type) {
                case RPC_REQUEST:
                        reply = process_request_pdu(p, &rpc_in);
                        break;

                case RPC_PING: /* CL request - ignore... */
                        DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
                                 (unsigned int)p->hdr.pkt_type, p->name));
                        break;

                case RPC_RESPONSE: /* No responses here. */
                        DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
                                 p->name ));
                        break;

                case RPC_FAULT:
                case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
                case RPC_NOCALL: /* CL - server reply to a ping call. */
                case RPC_REJECT:
                case RPC_ACK:
                case RPC_CL_CANCEL:
                case RPC_FACK:
                case RPC_CANCEL_ACK:
                        DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
                                 (unsigned int)p->hdr.pkt_type, p->name));
                        break;

                case RPC_BIND:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_req(p, &rpc_in);
                        }
                        break;

                case RPC_BINDACK:
                case RPC_BINDNACK:
                        DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
                                 (unsigned int)p->hdr.pkt_type, p->name));
                        break;

                case RPC_ALTCONT:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_alter_context(p, &rpc_in);
                        }
                        break;

                case RPC_ALTCONTRESP:
                        DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
                                 p->name));
                        break;

                case RPC_AUTH3:
                        /*
                         * The third packet in an NTLMSSP auth exchange.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_auth3(p, &rpc_in);
                        }
                        break;

                case RPC_SHUTDOWN:
                        DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
                                 p->name));
                        break;

                case RPC_CO_CANCEL:
                        /* For now just free all client data and continue processing. */
                        DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
                        /* As we never do asynchronous RPC serving, we can never cancel a
                           call (as far as I know). If we ever did we'd have to send a cancel_ack
                           reply. For now, just free all client data and continue processing. */
                        reply = True;
                        break;
#if 0
                        /* Enable this if we're doing async rpc. */
                        /* We must check the call-id matches the outstanding callid. */
                        if(pipe_init_outgoing_data(p)) {
                                /* Send a cancel_ack PDU reply. */
                                /* We should probably check the auth-verifier here. */
                                reply = setup_cancel_ack_reply(p, &rpc_in);
                        }
                        break;
#endif

                case RPC_ORPHANED:
                        /* We should probably check the auth-verifier here.
                           For now just free all client data and continue processing. */
                        DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
                        reply = True;
                        break;

                default:
                        DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
                        break;
        }

        /* Reset to little endian. Probably don't need this but it won't hurt. */
        prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

        if (!reply) {
                DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
                prs_mem_free(&rpc_in);
        } else {
                /*
                 * Reset the lengths. We're ready for a new pdu.
                 */
                p->in_data.pdu_needed_len = 0;
                p->in_data.pdu_received_len = 0;
        }

        prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

        DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
                  (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
                  (unsigned int)n ));

        if(data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the fault state.
                 */
                DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we get at least a RPC_HEADER_LEN
         * number of bytes before we can do anything.
         */

        if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least an RPC_HEADER_LEN amount of data
         * stored in current_in_pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Unmarshall the header so we know how much more
         * data we need, then loop again.
         */

        if(p->in_data.pdu_needed_len == 0) {
                ssize_t rret = unmarshall_rpc_header(p);
                if (rret == -1 || p->in_data.pdu_needed_len > 0) {
                        return rret;
                }
                /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
                   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
                   pdu type. Deal with this in process_complete_pdu(). */
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER in p->hdr.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the current_in_pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
        p->in_data.pdu_received_len += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if(p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
                  (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

        return (ssize_t)data_to_copy;
}

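/* In short, PDU reassembly above is driven by two counters in p->in_data:
 * pdu_received_len (bytes of the current fragment collected so far) and
 * pdu_needed_len (bytes still missing). Bytes are accumulated until the
 * 16-byte header arrives, the header supplies frag_len, and once
 * pdu_needed_len drops to zero the complete fragment is handed to
 * process_complete_pdu(). */
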
/****************************************************************************
 Accepts incoming data on an rpc pipe.
****************************************************************************/

ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
{
        DEBUG(6,("write_to_pipe: %x", p->pnum));

        DEBUG(6,(" name: %s open: %s len: %d\n",
                 p->name, BOOLSTR(p->open), (int)n));

        dump_data(50, (uint8 *)data, n);

        return p->namedpipe_write(p->np_state, data, n);
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_left = n;

        while(data_left) {
                ssize_t data_used;

                DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));

                if(data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
                       bool *is_data_outstanding)
{
        if (!p || !p->open) {
                DEBUG(0,("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,("read_from_pipe: %x", p->pnum));

        return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
                                bool *is_data_outstanding)
{
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0,("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing. Just ignore it so things work.
         */

        if(n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5,("read_from_pipe: too large read (%u) requested on \
pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
                n = RPC_MAX_PDU_FRAG_LEN;
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
                          (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

                memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point p->current_pdu_len == p->current_pdu_sent (which
         * may of course be zero if this is the first return fragment).
         */

        DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
= %u, prs_offset(&p->out_data.rdata) = %u.\n",
                  p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));

        if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in p->rdata.
         * Create the header/data/footers. This also sets up the fields
         * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
         * and stores the outgoing PDU in p->current_pdu.
         */

        if(!create_next_pdu(p)) {
                DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
                return -1;
        }

        data_returned = MIN(n, p->out_data.current_pdu_len);

        memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

        (*is_data_outstanding) = p->out_data.current_pdu_len > n;

        return data_returned;
}

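/* Illustrative sketch (not taken from this file): a caller holding a
 * pipes_struct, e.g. one obtained from make_internal_rpc_pipe_p(), would
 * typically push a complete marshalled request in with
 * write_to_internal_pipe() and then pull the response back one PDU-sized
 * chunk at a time until is_data_outstanding comes back false:
 *
 *      bool more;
 *      ssize_t got;
 *
 *      if (write_to_internal_pipe(p, req_buf, req_len) != (ssize_t)req_len) {
 *              return False;
 *      }
 *      do {
 *              got = read_from_internal_pipe(p, reply_buf, reply_size, &more);
 *              if (got < 0) {
 *                      return False;
 *              }
 *              // append 'got' bytes from reply_buf to the caller's reply
 *      } while (more);
 *
 * req_buf, req_len, reply_buf and reply_size are placeholders for the
 * caller's own buffers; the SMB trans/read paths in smbd normally go through
 * the smb_np_struct wrappers write_to_pipe()/read_from_pipe() instead. */
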
/****************************************************************************
 Wait device state on a pipe. Exactly what this is for is unknown...
****************************************************************************/

bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
{
        if (p == NULL) {
                return False;
        }

        if (p->open) {
                DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
                         priority, p->name));

                p->priority = priority;

                return True;
        }

        DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
                 priority, p->name));
        return False;
}

/****************************************************************************
 Set device state on a pipe. Exactly what this is for is unknown...
****************************************************************************/

bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
{
        if (p == NULL) {
                return False;
        }

        if (p->open) {
                DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
                         device_state, p->name));

                p->device_state = device_state;

                return True;
        }

        DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
                 device_state, p->name));
        return False;
}

/****************************************************************************
 Close an rpc pipe.
****************************************************************************/

bool close_rpc_pipe_hnd(smb_np_struct *p)
{
        if (!p) {
                DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
                return False;
        }

        TALLOC_FREE(p->np_state);

        bitmap_clear(bmap, p->pnum - pipe_handle_offset);

        pipes_open--;

        DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n",
                 p->name, p->pnum, pipes_open));

        DLIST_REMOVE(Pipes, p);

        /* TODO: Remove from pipe open db */

        if ( !delete_pipe_opendb( p ) ) {
                DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
                         "pipe from open db.\n", p->name));
        }

        TALLOC_FREE(p);

        return True;
}

/****************************************************************************
 Close all pipes on a connection.
****************************************************************************/

void pipe_close_conn(connection_struct *conn)
{
        smb_np_struct *p, *next;

        for (p=Pipes;p;p=next) {
                next = p->next;
                if (p->conn == conn) {
                        close_rpc_pipe_hnd(p);
                }
        }
}

/****************************************************************************
 Close an internal rpc pipe.
****************************************************************************/

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
        if (!p) {
                DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
                return False;
        }

        prs_mem_free(&p->out_data.rdata);
        prs_mem_free(&p->in_data.data);

        if (p->auth.auth_data_free_func) {
                (*p->auth.auth_data_free_func)(&p->auth);
        }

        if (p->mem_ctx) {
                talloc_destroy(p->mem_ctx);
        }

        free_pipe_rpc_context( p->contexts );

        /* Free the handles database. */
        close_policy_by_pipe(p);

        TALLOC_FREE(p->pipe_user.nt_user_token);
        SAFE_FREE(p->pipe_user.ut.groups);

        DLIST_REMOVE(InternalPipes, p);

        ZERO_STRUCTP(p);

        TALLOC_FREE(p);

        return True;
}

/****************************************************************************
 Find an rpc pipe given a pipe handle in a buffer and an offset.
****************************************************************************/

smb_np_struct *get_rpc_pipe_p(uint16 pnum)
{
        if (chain_p) {
                return chain_p;
        }

        return get_rpc_pipe(pnum);
}

/****************************************************************************
 Find an rpc pipe given a pipe handle.
****************************************************************************/

smb_np_struct *get_rpc_pipe(int pnum)
{
        smb_np_struct *p;

        DEBUG(4,("search for pipe pnum=%x\n", pnum));

        for (p=Pipes;p;p=p->next) {
                DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n",
                         p->name, p->pnum, pipes_open));
        }

        for (p=Pipes;p;p=p->next) {
                if (p->pnum == pnum) {
                        chain_p = p;
                        return p;
                }
        }

        return NULL;
}