/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell 1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison 1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

static int pipes_open;

static pipes_struct *InternalPipes;
static struct bitmap *bmap;

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_. so that's the next step...
 */

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);

/****************************************************************************
 Internal Pipe iterator functions.
****************************************************************************/

pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}

pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
	return p->next;
}

/****************************************************************************
 Initialise pipe handle states.
****************************************************************************/

void init_rpc_pipe_hnd(void)
{
	bmap = bitmap_allocate(MAX_OPEN_PIPES);
	if (!bmap) {
		exit_server("out of memory in init_rpc_pipe_hnd");
	}
}

/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
	output_data *o_data = &p->out_data;

	/* Reset the offset counters. */
	o_data->data_sent_length = 0;
	o_data->current_pdu_len = 0;
	o_data->current_pdu_sent = 0;

	memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));

	/* Free any memory in the current return data buffer. */
	prs_mem_free(&o_data->rdata);

	/*
	 * Initialize the outgoing RPC data buffer.
	 * we will use this as the raw data area for replying to rpc requests.
	 */
	if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
		return False;
	}

	return True;
}

/****************************************************************************
 Make an internal named pipes structure.
****************************************************************************/

static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						      const char *pipe_name,
						      const char *client_address,
						      struct auth_serversupplied_info *server_info,
						      uint16_t vuid)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSHALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	memcpy(p->client_address, client_address, sizeof(p->client_address));

	p->endian = RPC_LITTLE_ENDIAN;

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

	fstrcpy(p->name, pipe_name);

	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
	prs_mem_free(&p->in_data.data);
	p->in_data.pdu_needed_len = 0;
	p->in_data.pdu_received_len = 0;
	p->fault_state = True;
	DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
		   p->name));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
		  (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
		  (unsigned int)p->in_data.pdu_received_len ));

	memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
	p->in_data.pdu_received_len += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	prs_struct rpc_in;

	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
			 p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header.
	 */

	if(p->hdr.major != 5 && p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's no data in the incoming buffer this should be the start of a new RPC.
	 */

	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream mismatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
			 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane.
	 */

	if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
		  (unsigned int)p->hdr.flags ));

	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	return 0; /* No extra data processed. */
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
	if (p->mem_ctx) {
		DEBUG(3,("free_pipe_context: destroying talloc pool of size "
			 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
		talloc_free_children(p->mem_ctx);
	} else {
		p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
		if (p->mem_ctx == NULL) {
			p->fault_state = True;
		}
	}
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
	 */

	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
			 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
			 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
	bool reply = False;

	if(p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			  p->name ));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the correct endianness for both the
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
		  (unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;

		case RPC_PING: /* CL request - ignore... */
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				 (unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_RESPONSE: /* No responses here. */
			DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
				 p->name ));
			break;

		case RPC_FAULT:
		case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
		case RPC_NOCALL: /* CL - server reply to a ping call. */
		case RPC_REJECT:
		case RPC_ACK:
		case RPC_CL_CANCEL:
		case RPC_FACK:
		case RPC_CANCEL_ACK:
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				 (unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);
			}
			break;

		case RPC_BINDACK:
		case RPC_BINDNACK:
			DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
				 (unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_ALTCONT:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);
			}
			break;

		case RPC_ALTCONTRESP:
			DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
				 p->name));
			break;

		case RPC_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);
			}
			break;

		case RPC_SHUTDOWN:
			DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
				 p->name));
			break;

		case RPC_CO_CANCEL:
			/* For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can never cancel a
			   call (as far as I know). If we ever did we'd have to send a cancel_ack
			   reply. For now, just free all client data and continue processing. */
			reply = True;
			break;
#if 0
			/* Enable this if we're doing async rpc. */
			/* We must check the call-id matches the outstanding callid. */
			if(pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);
			}
			break;
#endif

		case RPC_ORPHANED:
			/* We should probably check the auth-verifier here.
			   For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			reply = True;
			break;

		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		prs_mem_free(&rpc_in);
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		  (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		  (unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		  (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_left = n;

	while(data_left) {
		ssize_t data_used;

		DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));

		if(data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on \
pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
			  (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
		return -1;
	}

	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	(*is_data_outstanding) = p->out_data.current_pdu_len > n;

	return data_returned;
}

/****************************************************************************
 Close an rpc pipe.
****************************************************************************/

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	if (p->mem_ctx) {
		talloc_destroy(p->mem_ctx);
	}

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	DLIST_REMOVE(InternalPipes, p);

	ZERO_STRUCTP(p);

	TALLOC_FREE(p);

	return True;
}

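/****************************************************************************
 Check if an open file handle is a named pipe, either served internally or
 proxied to an external RPC server.
****************************************************************************/
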
bool fsp_is_np(struct files_struct *fsp)
{
	enum FAKE_FILE_TYPE type;

	if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
		return false;
	}

	type = fsp->fake_file_handle->type;

	return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
		|| (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}

struct np_proxy_state {
	int fd;
};

static int np_proxy_state_destructor(struct np_proxy_state *state)
{
	if (state->fd != -1) {
		close(state->fd);
	}
	return 0;
}

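/****************************************************************************
 Make an external (proxied) named pipe. Connects to the external RPC
 server's unix domain socket at <socket_dir>/np/<pipe_name>, sends the
 client's netr_SamInfo3 as a named_pipe_auth_req and validates the
 named_pipe_auth_rep it gets back. Returns NULL on any failure.
****************************************************************************/
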
static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						       const char *pipe_name,
						       struct auth_serversupplied_info *server_info)
{
	struct np_proxy_state *result;
	struct sockaddr_un addr;
	char *socket_path;
	const char *socket_dir;

	DATA_BLOB req_blob;
	struct netr_SamInfo3 *info3;
	struct named_pipe_auth_req req;
	DATA_BLOB rep_blob;
	uint8 rep_buf[20];
	struct named_pipe_auth_rep rep;
	enum ndr_err_code ndr_err;
	NTSTATUS status;
	ssize_t written;

	result = talloc(mem_ctx, struct np_proxy_state);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return NULL;
	}

	result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (result->fd == -1) {
		DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
		goto fail;
	}
	talloc_set_destructor(result, np_proxy_state_destructor);

	ZERO_STRUCT(addr);
	addr.sun_family = AF_UNIX;

	socket_dir = lp_parm_const_string(
		GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
		get_dyn_NCALRPCDIR());
	if (socket_dir == NULL) {
		DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
		goto fail;
	}

	socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
				      socket_dir, pipe_name);
	if (socket_path == NULL) {
		DEBUG(0, ("talloc_asprintf failed\n"));
		goto fail;
	}
	strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
	TALLOC_FREE(socket_path);

	become_root();
	if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
		unbecome_root();
		DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
			  strerror(errno)));
		goto fail;
	}
	unbecome_root();

	info3 = talloc(talloc_tos(), struct netr_SamInfo3);
	if (info3 == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(info3);
		DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
			  nt_errstr(status)));
		goto fail;
	}

	req.level = 1;
	req.info.info1 = *info3;

	ndr_err = ndr_push_struct_blob(
		&req_blob, talloc_tos(), NULL, &req,
		(ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);

	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
			   ndr_errstr(ndr_err)));
		goto fail;
	}

	DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
	dump_data(10, req_blob.data, req_blob.length);

	written = write_data(result->fd, (char *)req_blob.data,
			     req_blob.length);
	if (written == -1) {
		DEBUG(3, ("Could not write auth req data to RPC server\n"));
		goto fail;
	}

	status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not read auth result\n"));
		goto fail;
	}

	rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));

	DEBUG(10,("named_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
	dump_data(10, rep_blob.data, rep_blob.length);

	ndr_err = ndr_pull_struct_blob(
		&rep_blob, talloc_tos(), NULL, &rep,
		(ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);

	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
			  ndr_errstr(ndr_err)));
		goto fail;
	}

	if (rep.length != 16) {
		DEBUG(0, ("rep invalid length: %u != 16\n",
			  rep.length));
		goto fail;
	}

	if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
		DEBUG(0, ("rep invalid magic: %s != %s\n",
			  rep.magic, NAMED_PIPE_AUTH_MAGIC));
		goto fail;
	}

	if (!NT_STATUS_IS_OK(rep.status)) {
		DEBUG(0, ("rep failed: %s\n",
			  nt_errstr(rep.status)));
		goto fail;
	}

	if (rep.level != 1) {
		DEBUG(0, ("rep invalid level: %u != 1\n",
			  rep.level));
		goto fail;
	}

	return result;

 fail:
	TALLOC_FREE(result);
	return NULL;
}

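/****************************************************************************
 Open a named pipe. Pipe names listed in the "np:proxy" parametric option
 are handed to the external RPC server via make_external_rpc_pipe_p(); all
 other known pipe names are served by the internal in-process RPC code.
****************************************************************************/
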
NTSTATUS np_open(struct smb_request *smb_req, const char *name,
		 struct files_struct **pfsp)
{
	struct connection_struct *conn = smb_req->conn;
	NTSTATUS status;
	struct files_struct *fsp;
	const char **proxy_list;

	proxy_list = lp_parm_string_list(SNUM(conn), "np", "proxy", NULL);

	status = file_new(smb_req, conn, &fsp);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("file_new failed: %s\n", nt_errstr(status)));
		return status;
	}

	fsp->conn = conn;
	fsp->fh->fd = -1;
	fsp->vuid = smb_req->vuid;
	fsp->can_lock = false;
	fsp->access_mask = FILE_READ_DATA | FILE_WRITE_DATA;
	string_set(&fsp->fsp_name, name);

	fsp->fake_file_handle = talloc(NULL, struct fake_file_handle);
	if (fsp->fake_file_handle == NULL) {
		file_free(smb_req, fsp);
		return NT_STATUS_NO_MEMORY;
	}

	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(fsp->fake_file_handle, name,
					     conn->server_info);

		fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		fsp->fake_file_handle->private_data = p;
	} else {
		struct pipes_struct *p;

		if (!is_known_pipename(name)) {
			file_free(smb_req, fsp);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		p = make_internal_rpc_pipe_p(fsp->fake_file_handle, name,
					     conn->client_address,
					     conn->server_info,
					     smb_req->vuid);

		fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		fsp->fake_file_handle->private_data = p;
	}

	if (fsp->fake_file_handle->private_data == NULL) {
		file_free(smb_req, fsp);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*pfsp = fsp;

	return NT_STATUS_OK;
}

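/****************************************************************************
 Write data to a named pipe, either into the internal RPC engine or onto
 the proxy socket.
****************************************************************************/
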
NTSTATUS np_write(struct files_struct *fsp, const uint8_t *data, size_t len,
		  ssize_t *nwritten)
{
	if (!fsp_is_np(fsp)) {
		return NT_STATUS_INVALID_HANDLE;
	}

	DEBUG(6, ("np_write: %x name: %s len: %d\n", (int)fsp->fnum,
		  fsp->fsp_name, (int)len));
	dump_data(50, data, len);

	switch (fsp->fake_file_handle->type) {
	case FAKE_FILE_TYPE_NAMED_PIPE: {
		struct pipes_struct *p = talloc_get_type_abort(
			fsp->fake_file_handle->private_data,
			struct pipes_struct);
		*nwritten = write_to_internal_pipe(p, (char *)data, len);
		break;
	}
	case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
		struct np_proxy_state *p = talloc_get_type_abort(
			fsp->fake_file_handle->private_data,
			struct np_proxy_state);
		*nwritten = write_data(p->fd, (char *)data, len);
		break;
	}
	default:
		return NT_STATUS_INVALID_HANDLE;
		break;
	}

	return ((*nwritten) >= 0)
		? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
}

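/****************************************************************************
 Read data from a named pipe. For the proxy case a FIONREAD ioctl is used
 as a (racy) hint whether more data is outstanding.
****************************************************************************/
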
NTSTATUS np_read(struct files_struct *fsp, uint8_t *data, size_t len,
		 ssize_t *nread, bool *is_data_outstanding)
{
	if (!fsp_is_np(fsp)) {
		return NT_STATUS_INVALID_HANDLE;
	}

	switch (fsp->fake_file_handle->type) {
	case FAKE_FILE_TYPE_NAMED_PIPE: {
		struct pipes_struct *p = talloc_get_type_abort(
			fsp->fake_file_handle->private_data,
			struct pipes_struct);
		*nread = read_from_internal_pipe(p, (char *)data, len,
						 is_data_outstanding);
		break;
	}
	case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
		struct np_proxy_state *p = talloc_get_type_abort(
			fsp->fake_file_handle->private_data,
			struct np_proxy_state);
		int available = 0;

		*nread = sys_read(p->fd, (char *)data, len);

		/*
		 * We don't look at the ioctl result. We don't really care
		 * if there is data available, because this is racy anyway.
		 */
		ioctl(p->fd, FIONREAD, &available);
		*is_data_outstanding = (available > 0);

		break;
	}
	default:
		return NT_STATUS_INVALID_HANDLE;
		break;
	}

	return ((*nread) >= 0)
		? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
}