/*
 * source3/rpc_server/srv_pipe_hnd.c
 * (from commit "Remove smb_np_struct",
 *  blob 1711565c0dca543a221a26796b0371bd619f597d)
 */
1 /*
2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "includes.h"
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
27 static int pipes_open;
30 * Sometimes I can't decide if I hate Windows printer driver
31 * writers more than I hate the Windows spooler service driver
32 * writers. This gets around a combination of bugs in the spooler
33 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
35 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
36 * 2002 running on NT 4.- SP6
37 * bumped up from 64 -> 256 after viewing traffic from con2prt
38 * for lots of printers on a WinNT 4.x SP6 box.
41 #ifndef MAX_OPEN_SPOOLSS_PIPES
42 #define MAX_OPEN_SPOOLSS_PIPES 256
43 #endif
44 static int current_spoolss_pipes_open;
46 static pipes_struct *InternalPipes;
47 static struct bitmap *bmap;
49 /* TODO
50 * the following prototypes are declared here to avoid
51 * code being moved about too much for a patch to be
52 * disrupted / less obvious.
54 * these functions, and associated functions that they
55 * call, should be moved behind a .so module-loading
56 * system _anyway_. so that's the next step...
59 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
61 /****************************************************************************
62 Internal Pipe iterator functions.
63 ****************************************************************************/
65 pipes_struct *get_first_internal_pipe(void)
67 return InternalPipes;
70 pipes_struct *get_next_internal_pipe(pipes_struct *p)
72 return p->next;
/* This must be larger than the sum of the open files and directories. */
static int pipe_handle_offset;

/****************************************************************************
 Set the pipe_handle_offset. Called from smbd/files.c
****************************************************************************/

void set_pipe_handle_offset(int max_open_files)
{
	/*
	 * Pipe handles live above the regular file-handle range: enforce a
	 * floor of 0x7000, otherwise sit just above the configured maximum.
	 */
	if (max_open_files < 0x7000) {
		pipe_handle_offset = 0x7000;
		return;
	}
	pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
}
91 /****************************************************************************
92 Initialise pipe handle states.
93 ****************************************************************************/
95 void init_rpc_pipe_hnd(void)
97 bmap = bitmap_allocate(MAX_OPEN_PIPES);
98 if (!bmap) {
99 exit_server("out of memory in init_rpc_pipe_hnd");
103 /****************************************************************************
104 Initialise an outgoing packet.
105 ****************************************************************************/
107 static bool pipe_init_outgoing_data(pipes_struct *p)
109 output_data *o_data = &p->out_data;
111 /* Reset the offset counters. */
112 o_data->data_sent_length = 0;
113 o_data->current_pdu_len = 0;
114 o_data->current_pdu_sent = 0;
116 memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
118 /* Free any memory in the current return data buffer. */
119 prs_mem_free(&o_data->rdata);
122 * Initialize the outgoing RPC data buffer.
123 * we will use this as the raw data area for replying to rpc requests.
125 if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
126 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
127 return False;
130 return True;
133 /****************************************************************************
134 Make an internal namedpipes structure
135 ****************************************************************************/
137 struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
138 const char *pipe_name,
139 const char *client_address,
140 struct auth_serversupplied_info *server_info,
141 uint16_t vuid)
143 pipes_struct *p;
145 DEBUG(4,("Create pipe requested %s\n", pipe_name));
147 p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
149 if (!p) {
150 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
151 return NULL;
154 if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
155 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
156 TALLOC_FREE(p);
157 return NULL;
160 if (!init_pipe_handle_list(p, pipe_name)) {
161 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
162 talloc_destroy(p->mem_ctx);
163 TALLOC_FREE(p);
164 return NULL;
168 * Initialize the incoming RPC data buffer with one PDU worth of memory.
169 * We cheat here and say we're marshalling, as we intend to add incoming
170 * data directly into the prs_struct and we want it to auto grow. We will
171 * change the type to UNMARSALLING before processing the stream.
174 if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
175 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
176 talloc_destroy(p->mem_ctx);
177 close_policy_by_pipe(p);
178 TALLOC_FREE(p);
179 return NULL;
182 p->server_info = copy_serverinfo(p, server_info);
183 if (p->server_info == NULL) {
184 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
185 talloc_destroy(p->mem_ctx);
186 close_policy_by_pipe(p);
187 TALLOC_FREE(p);
188 return NULL;
191 DLIST_ADD(InternalPipes, p);
193 memcpy(p->client_address, client_address, sizeof(p->client_address));
195 p->endian = RPC_LITTLE_ENDIAN;
197 ZERO_STRUCT(p->pipe_user);
199 p->pipe_user.vuid = vuid;
200 p->pipe_user.ut.uid = (uid_t)-1;
201 p->pipe_user.ut.gid = (gid_t)-1;
202 p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
205 * Initialize the outgoing RPC data buffer with no memory.
207 prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
209 fstrcpy(p->name, pipe_name);
211 DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
212 pipe_name, pipes_open));
214 talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
216 return p;
219 /****************************************************************************
220 Sets the fault state on incoming packets.
221 ****************************************************************************/
223 static void set_incoming_fault(pipes_struct *p)
225 prs_mem_free(&p->in_data.data);
226 p->in_data.pdu_needed_len = 0;
227 p->in_data.pdu_received_len = 0;
228 p->fault_state = True;
229 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
230 p->name));
233 /****************************************************************************
234 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
235 ****************************************************************************/
237 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
239 size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
241 DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
242 (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
243 (unsigned int)p->in_data.pdu_received_len ));
245 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
246 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
248 return (ssize_t)len_needed_to_complete_hdr;
251 /****************************************************************************
252 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
253 ****************************************************************************/
255 static ssize_t unmarshall_rpc_header(pipes_struct *p)
258 * Unmarshall the header to determine the needed length.
261 prs_struct rpc_in;
263 if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
264 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
265 set_incoming_fault(p);
266 return -1;
269 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
270 prs_set_endian_data( &rpc_in, p->endian);
272 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
273 p->in_data.pdu_received_len, False);
276 * Unmarshall the header as this will tell us how much
277 * data we need to read to get the complete pdu.
278 * This also sets the endian flag in rpc_in.
281 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
282 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
283 set_incoming_fault(p);
284 prs_mem_free(&rpc_in);
285 return -1;
289 * Validate the RPC header.
292 if(p->hdr.major != 5 && p->hdr.minor != 0) {
293 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
294 set_incoming_fault(p);
295 prs_mem_free(&rpc_in);
296 return -1;
300 * If there's not data in the incoming buffer this should be the start of a new RPC.
303 if(prs_offset(&p->in_data.data) == 0) {
306 * AS/U doesn't set FIRST flag in a BIND packet it seems.
309 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
311 * Ensure that the FIRST flag is set. If not then we have
312 * a stream missmatch.
315 DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
316 set_incoming_fault(p);
317 prs_mem_free(&rpc_in);
318 return -1;
322 * If this is the first PDU then set the endianness
323 * flag in the pipe. We will need this when parsing all
324 * data in this RPC.
327 p->endian = rpc_in.bigendian_data;
329 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
330 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
332 } else {
335 * If this is *NOT* the first PDU then check the endianness
336 * flag in the pipe is the same as that in the PDU.
339 if (p->endian != rpc_in.bigendian_data) {
340 DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
341 set_incoming_fault(p);
342 prs_mem_free(&rpc_in);
343 return -1;
348 * Ensure that the pdu length is sane.
351 if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
352 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
353 set_incoming_fault(p);
354 prs_mem_free(&rpc_in);
355 return -1;
358 DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
359 (unsigned int)p->hdr.flags ));
361 p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
363 prs_mem_free(&rpc_in);
365 return 0; /* No extra data processed. */
368 /****************************************************************************
369 Call this to free any talloc'ed memory. Do this before and after processing
370 a complete PDU.
371 ****************************************************************************/
373 static void free_pipe_context(pipes_struct *p)
375 if (p->mem_ctx) {
376 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
377 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
378 talloc_free_children(p->mem_ctx);
379 } else {
380 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
381 if (p->mem_ctx == NULL) {
382 p->fault_state = True;
387 /****************************************************************************
388 Processes a request pdu. This will do auth processing if needed, and
389 appends the data into the complete stream if the LAST flag is not set.
390 ****************************************************************************/
392 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
394 uint32 ss_padding_len = 0;
395 size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
396 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
398 if(!p->pipe_bound) {
399 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
400 set_incoming_fault(p);
401 return False;
405 * Check if we need to do authentication processing.
406 * This is only done on requests, not binds.
410 * Read the RPC request header.
413 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
414 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
415 set_incoming_fault(p);
416 return False;
419 switch(p->auth.auth_type) {
420 case PIPE_AUTH_TYPE_NONE:
421 break;
423 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
424 case PIPE_AUTH_TYPE_NTLMSSP:
426 NTSTATUS status;
427 if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
428 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
429 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
430 set_incoming_fault(p);
431 return False;
433 break;
436 case PIPE_AUTH_TYPE_SCHANNEL:
437 if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
438 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
439 set_incoming_fault(p);
440 return False;
442 break;
444 default:
445 DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
446 set_incoming_fault(p);
447 return False;
450 /* Now we've done the sign/seal we can remove any padding data. */
451 if (data_len > ss_padding_len) {
452 data_len -= ss_padding_len;
456 * Check the data length doesn't go over the 15Mb limit.
457 * increased after observing a bug in the Windows NT 4.0 SP6a
458 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
459 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
462 if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
463 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
464 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
465 set_incoming_fault(p);
466 return False;
470 * Append the data portion into the buffer and return.
473 if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
474 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
475 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
476 set_incoming_fault(p);
477 return False;
480 if(p->hdr.flags & RPC_FLG_LAST) {
481 bool ret = False;
483 * Ok - we finally have a complete RPC stream.
484 * Call the rpc command to process it.
488 * Ensure the internal prs buffer size is *exactly* the same
489 * size as the current offset.
492 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
493 DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
494 set_incoming_fault(p);
495 return False;
499 * Set the parse offset to the start of the data and set the
500 * prs_struct to UNMARSHALL.
503 prs_set_offset(&p->in_data.data, 0);
504 prs_switch_type(&p->in_data.data, UNMARSHALL);
507 * Process the complete data stream here.
510 free_pipe_context(p);
512 if(pipe_init_outgoing_data(p)) {
513 ret = api_pipe_request(p);
516 free_pipe_context(p);
519 * We have consumed the whole data stream. Set back to
520 * marshalling and set the offset back to the start of
521 * the buffer to re-use it (we could also do a prs_mem_free()
522 * and then re_init on the next start of PDU. Not sure which
523 * is best here.... JRA.
526 prs_switch_type(&p->in_data.data, MARSHALL);
527 prs_set_offset(&p->in_data.data, 0);
528 return ret;
531 return True;
534 /****************************************************************************
535 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
536 already been parsed and stored in p->hdr.
537 ****************************************************************************/
539 static void process_complete_pdu(pipes_struct *p)
541 prs_struct rpc_in;
542 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
543 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
544 bool reply = False;
546 if(p->fault_state) {
547 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
548 p->name ));
549 set_incoming_fault(p);
550 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
551 return;
554 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
557 * Ensure we're using the corrent endianness for both the
558 * RPC header flags and the raw data we will be reading from.
561 prs_set_endian_data( &rpc_in, p->endian);
562 prs_set_endian_data( &p->in_data.data, p->endian);
564 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
566 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
567 (unsigned int)p->hdr.pkt_type ));
569 switch (p->hdr.pkt_type) {
570 case RPC_REQUEST:
571 reply = process_request_pdu(p, &rpc_in);
572 break;
574 case RPC_PING: /* CL request - ignore... */
575 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
576 (unsigned int)p->hdr.pkt_type, p->name));
577 break;
579 case RPC_RESPONSE: /* No responses here. */
580 DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
581 p->name ));
582 break;
584 case RPC_FAULT:
585 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
586 case RPC_NOCALL: /* CL - server reply to a ping call. */
587 case RPC_REJECT:
588 case RPC_ACK:
589 case RPC_CL_CANCEL:
590 case RPC_FACK:
591 case RPC_CANCEL_ACK:
592 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
593 (unsigned int)p->hdr.pkt_type, p->name));
594 break;
596 case RPC_BIND:
598 * We assume that a pipe bind is only in one pdu.
600 if(pipe_init_outgoing_data(p)) {
601 reply = api_pipe_bind_req(p, &rpc_in);
603 break;
605 case RPC_BINDACK:
606 case RPC_BINDNACK:
607 DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
608 (unsigned int)p->hdr.pkt_type, p->name));
609 break;
612 case RPC_ALTCONT:
614 * We assume that a pipe bind is only in one pdu.
616 if(pipe_init_outgoing_data(p)) {
617 reply = api_pipe_alter_context(p, &rpc_in);
619 break;
621 case RPC_ALTCONTRESP:
622 DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
623 p->name));
624 break;
626 case RPC_AUTH3:
628 * The third packet in an NTLMSSP auth exchange.
630 if(pipe_init_outgoing_data(p)) {
631 reply = api_pipe_bind_auth3(p, &rpc_in);
633 break;
635 case RPC_SHUTDOWN:
636 DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
637 p->name));
638 break;
640 case RPC_CO_CANCEL:
641 /* For now just free all client data and continue processing. */
642 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
643 /* As we never do asynchronous RPC serving, we can never cancel a
644 call (as far as I know). If we ever did we'd have to send a cancel_ack
645 reply. For now, just free all client data and continue processing. */
646 reply = True;
647 break;
648 #if 0
649 /* Enable this if we're doing async rpc. */
650 /* We must check the call-id matches the outstanding callid. */
651 if(pipe_init_outgoing_data(p)) {
652 /* Send a cancel_ack PDU reply. */
653 /* We should probably check the auth-verifier here. */
654 reply = setup_cancel_ack_reply(p, &rpc_in);
656 break;
657 #endif
659 case RPC_ORPHANED:
660 /* We should probably check the auth-verifier here.
661 For now just free all client data and continue processing. */
662 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
663 reply = True;
664 break;
666 default:
667 DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
668 break;
671 /* Reset to little endian. Probably don't need this but it won't hurt. */
672 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
674 if (!reply) {
675 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
676 set_incoming_fault(p);
677 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
678 prs_mem_free(&rpc_in);
679 } else {
681 * Reset the lengths. We're ready for a new pdu.
683 p->in_data.pdu_needed_len = 0;
684 p->in_data.pdu_received_len = 0;
687 prs_mem_free(&rpc_in);
690 /****************************************************************************
691 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
692 ****************************************************************************/
694 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
696 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
698 DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
699 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
700 (unsigned int)n ));
702 if(data_to_copy == 0) {
704 * This is an error - data is being received and there is no
705 * space in the PDU. Free the received data and go into the fault state.
707 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
708 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
709 set_incoming_fault(p);
710 return -1;
714 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
715 * number of bytes before we can do anything.
718 if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
720 * Always return here. If we have more data then the RPC_HEADER
721 * will be processed the next time around the loop.
723 return fill_rpc_header(p, data, data_to_copy);
727 * At this point we know we have at least an RPC_HEADER_LEN amount of data
728 * stored in current_in_pdu.
732 * If pdu_needed_len is zero this is a new pdu.
733 * Unmarshall the header so we know how much more
734 * data we need, then loop again.
737 if(p->in_data.pdu_needed_len == 0) {
738 ssize_t rret = unmarshall_rpc_header(p);
739 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
740 return rret;
742 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
743 of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
744 pdu type. Deal with this in process_complete_pdu(). */
748 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
749 * Keep reading until we have a full pdu.
752 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
755 * Copy as much of the data as we need into the current_in_pdu buffer.
756 * pdu_needed_len becomes zero when we have a complete pdu.
759 memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
760 p->in_data.pdu_received_len += data_to_copy;
761 p->in_data.pdu_needed_len -= data_to_copy;
764 * Do we have a complete PDU ?
765 * (return the number of bytes handled in the call)
768 if(p->in_data.pdu_needed_len == 0) {
769 process_complete_pdu(p);
770 return data_to_copy;
773 DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
774 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
776 return (ssize_t)data_to_copy;
779 /****************************************************************************
780 Accepts incoming data on an internal rpc pipe.
781 ****************************************************************************/
783 ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
785 size_t data_left = n;
787 while(data_left) {
788 ssize_t data_used;
790 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
792 data_used = process_incoming_data(p, data, data_left);
794 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
796 if(data_used < 0) {
797 return -1;
800 data_left -= data_used;
801 data += data_used;
804 return n;
807 /****************************************************************************
808 Replies to a request to read data from a pipe.
810 Headers are interspersed with the data at PDU intervals. By the time
811 this function is called, the start of the data could possibly have been
812 read by an SMBtrans (file_offset != 0).
814 Calling create_rpc_reply() here is a hack. The data should already
815 have been prepared into arrays of headers + data stream sections.
816 ****************************************************************************/
818 ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
819 bool *is_data_outstanding)
821 uint32 pdu_remaining = 0;
822 ssize_t data_returned = 0;
824 if (!p) {
825 DEBUG(0,("read_from_pipe: pipe not open\n"));
826 return -1;
829 DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
832 * We cannot return more than one PDU length per
833 * read request.
837 * This condition should result in the connection being closed.
838 * Netapp filers seem to set it to 0xffff which results in domain
839 * authentications failing. Just ignore it so things work.
842 if(n > RPC_MAX_PDU_FRAG_LEN) {
843 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
844 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
845 n = RPC_MAX_PDU_FRAG_LEN;
849 * Determine if there is still data to send in the
850 * pipe PDU buffer. Always send this first. Never
851 * send more than is left in the current PDU. The
852 * client should send a new read request for a new
853 * PDU.
856 if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
857 data_returned = (ssize_t)MIN(n, pdu_remaining);
859 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
860 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
861 (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
863 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
864 p->out_data.current_pdu_sent += (uint32)data_returned;
865 goto out;
869 * At this point p->current_pdu_len == p->current_pdu_sent (which
870 * may of course be zero if this is the first return fragment.
873 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
874 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
875 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
877 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
879 * We have sent all possible data, return 0.
881 data_returned = 0;
882 goto out;
886 * We need to create a new PDU from the data left in p->rdata.
887 * Create the header/data/footers. This also sets up the fields
888 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
889 * and stores the outgoing PDU in p->current_pdu.
892 if(!create_next_pdu(p)) {
893 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
894 return -1;
897 data_returned = MIN(n, p->out_data.current_pdu_len);
899 memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
900 p->out_data.current_pdu_sent += (uint32)data_returned;
902 out:
904 (*is_data_outstanding) = p->out_data.current_pdu_len > n;
905 return data_returned;
908 /****************************************************************************
909 Close an rpc pipe.
910 ****************************************************************************/
912 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
914 if (!p) {
915 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
916 return False;
919 prs_mem_free(&p->out_data.rdata);
920 prs_mem_free(&p->in_data.data);
922 if (p->auth.auth_data_free_func) {
923 (*p->auth.auth_data_free_func)(&p->auth);
926 if (p->mem_ctx) {
927 talloc_destroy(p->mem_ctx);
930 free_pipe_rpc_context( p->contexts );
932 /* Free the handles database. */
933 close_policy_by_pipe(p);
935 TALLOC_FREE(p->pipe_user.nt_user_token);
936 SAFE_FREE(p->pipe_user.ut.groups);
938 DLIST_REMOVE(InternalPipes, p);
940 ZERO_STRUCTP(p);
942 TALLOC_FREE(p);
944 return True;
947 bool fsp_is_np(struct files_struct *fsp)
949 return ((fsp != NULL)
950 && (fsp->fake_file_handle != NULL)
951 && (fsp->fake_file_handle->type == FAKE_FILE_TYPE_NAMED_PIPE));
954 NTSTATUS np_open(struct smb_request *smb_req, struct connection_struct *conn,
955 const char *name, struct files_struct **pfsp)
957 NTSTATUS status;
958 struct files_struct *fsp;
959 struct pipes_struct *p;
961 status = file_new(smb_req, conn, &fsp);
962 if (!NT_STATUS_IS_OK(status)) {
963 DEBUG(0, ("file_new failed: %s\n", nt_errstr(status)));
964 return status;
967 fsp->conn = conn;
968 fsp->fh->fd = -1;
969 fsp->vuid = smb_req->vuid;
970 fsp->can_lock = false;
971 fsp->access_mask = FILE_READ_DATA | FILE_WRITE_DATA;
972 string_set(&fsp->fsp_name, name);
974 fsp->fake_file_handle = talloc(NULL, struct fake_file_handle);
975 if (fsp->fake_file_handle == NULL) {
976 file_free(smb_req, fsp);
977 return NT_STATUS_NO_MEMORY;
979 fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
981 p = make_internal_rpc_pipe_p(fsp->fake_file_handle, name,
982 conn->client_address, conn->server_info,
983 smb_req->vuid);
984 if (p == NULL) {
985 file_free(smb_req, fsp);
986 return NT_STATUS_PIPE_NOT_AVAILABLE;
988 fsp->fake_file_handle->private_data = p;
990 *pfsp = fsp;
992 return NT_STATUS_OK;
995 NTSTATUS np_write(struct files_struct *fsp, uint8_t *data, size_t len,
996 ssize_t *nwritten)
998 struct pipes_struct *p;
1000 if (!fsp_is_np(fsp)) {
1001 return NT_STATUS_INVALID_HANDLE;
1004 p = talloc_get_type_abort(
1005 fsp->fake_file_handle->private_data, struct pipes_struct);
1007 DEBUG(6, ("np_write: %x name: %s len: %d\n", (int)fsp->fnum,
1008 fsp->fsp_name, (int)len));
1009 dump_data(50, data, len);
1011 *nwritten = write_to_internal_pipe(p, (char *)data, len);
1013 return ((*nwritten) >= 0)
1014 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1017 NTSTATUS np_read(struct files_struct *fsp, uint8_t *data, size_t len,
1018 ssize_t *nread, bool *is_data_outstanding)
1020 struct pipes_struct *p;
1022 if (!fsp_is_np(fsp)) {
1023 return NT_STATUS_INVALID_HANDLE;
1026 p = talloc_get_type_abort(
1027 fsp->fake_file_handle->private_data, struct pipes_struct);
1029 *nread = read_from_internal_pipe(p, (char *)data, len,
1030 is_data_outstanding);
1032 return ((*nread) >= 0)
1033 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;