/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell 1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison 1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "includes.h"
#include "fake_file.h"
#include "rpc_dce.h"
#include "ntdomain.h"
#include "rpc_server/rpc_ncacn_np.h"
#include "rpc_server/srv_pipe_hnd.h"
#include "rpc_server/srv_pipe.h"
#include "rpc_server/rpc_server.h"
#include "rpc_server/rpc_config.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/util/tevent_ntstatus.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV
/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, const char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr =
		MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

	DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
		   "len_needed_to_complete_hdr = %u, "
		   "receive_len = %u\n",
		   (unsigned int)data_to_copy,
		   (unsigned int)len_needed_to_complete_hdr,
		   (unsigned int)p->in_data.pdu.length));

	if (p->in_data.pdu.data == NULL) {
		p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
	}
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return -1;
	}

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, len_needed_to_complete_hdr);
	p->in_data.pdu.length += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}
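
/****************************************************************************
 Reads the fragment length out of a complete RPC header and (re)sizes the
 incoming PDU buffer to hold the full fragment. Sets the fault state and
 returns false if the header is incomplete or the fragment length is out of
 range.
****************************************************************************/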
static bool get_pdu_size(struct pipes_struct *p)
{
	uint16_t frag_len;
	/* the fill_rpc_header() call ensures we copy only
	 * RPC_HEADER_LEN bytes. If this doesn't match then
	 * something is very wrong and we can only abort */
	if (p->in_data.pdu.length != RPC_HEADER_LEN) {
		DEBUG(0, ("Unexpected RPC Header size! "
			  "(got %d, expected %d)\n",
			  (int)p->in_data.pdu.length,
			  RPC_HEADER_LEN));
		set_incoming_fault(p);
		return false;
	}

	frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

	/* verify it is a reasonable value */
	if ((frag_len < RPC_HEADER_LEN) ||
	    (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
			  frag_len));
		set_incoming_fault(p);
		return false;
	}

	p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

	/* allocate the space needed to fill the pdu */
	p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
					     uint8_t, frag_len);
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc_realloc failed\n"));
		set_incoming_fault(p);
		return false;
	}

	return true;
}
/****************************************************************************
 Call this to free any talloc'ed memory. Do this after processing
 a complete incoming and outgoing request (multiple incoming/outgoing
 PDU's).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
	data_blob_free(&p->out_data.frag);
	data_blob_free(&p->out_data.rdata);
	data_blob_free(&p->in_data.data);

	DEBUG(3, ("free_pipe_context: "
		  "destroying talloc pool of size %lu\n",
		  (unsigned long)talloc_total_size(p->mem_ctx)));
	talloc_free_children(p->mem_ctx);
}
/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

ssize_t process_incoming_data(struct pipes_struct *p, const char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
					- p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n));

	if (data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * RPC_HEADER_LEN bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			return 0;
		}

		/* If pdu_needed_len is still zero here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if (p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, const char *data, size_t n)
{
	size_t data_left = n;

	while (data_left) {
		ssize_t data_used;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			   (unsigned int)data_left));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)data_used));

		if (data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}
/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_next_pdu() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */

	if (n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
			 RPC_MAX_PDU_FRAG_LEN));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
			  (unsigned int)p->out_data.frag.length,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, p->out_data.rdata.length = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if (!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);

		if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
			/*
			 * We're completely finished with both outgoing and
			 * incoming data streams. It's safe to free all
			 * temporary data from this request.
			 */
			free_pipe_context(p);
		}
	}

	return data_returned;
}
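
/****************************************************************************
 Returns true if the files_struct refers to a fake named pipe handle, i.e.
 either an embedded rpc pipe or a proxied external one.
****************************************************************************/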
bool fsp_is_np(struct files_struct *fsp)
{
	enum FAKE_FILE_TYPE type;

	if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
		return false;
	}

	type = fsp->fake_file_handle->type;

	return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
		|| (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}
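
/****************************************************************************
 Opens a named pipe as a fake file handle. Depending on the configured
 rpc_server mode the pipe is served by the embedded rpc server, proxied to
 an external rpc daemon, or refused if the service is disabled.
****************************************************************************/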
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct auth_session_info *session_info,
		 struct messaging_context *msg_ctx,
		 struct fake_file_handle **phandle)
{
	enum rpc_service_mode_e pipe_mode;
	const char **proxy_list;
	struct fake_file_handle *handle;
	struct ndr_syntax_id syntax;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Check what the server mode is for this pipe.
	   Defaults to "embedded". */
	pipe_mode = rpc_service_mode(name);

	/* Still support the old method for defining external servers */
	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		pipe_mode = RPC_SERVICE_MODE_EXTERNAL;
	}

	switch (pipe_mode) {
	case RPC_SERVICE_MODE_EXTERNAL:

		handle->private_data = (void *)make_external_rpc_pipe_p(
			handle, name,
			local_address,
			remote_address,
			session_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		break;

	case RPC_SERVICE_MODE_EMBEDDED:

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		handle->private_data = (void *)make_internal_rpc_pipe_p(
			handle, &syntax, remote_address,
			session_info, msg_ctx);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		break;

	case RPC_SERVICE_MODE_DISABLED:
		handle->private_data = NULL;
		break;
	}

	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_OBJECT_NAME_NOT_FOUND;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}
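
/****************************************************************************
 Reports whether a read is already queued on a proxied named pipe. Embedded
 pipes are answered synchronously, so they never have a read in progress.
****************************************************************************/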
bool np_read_in_progress(struct fake_file_handle *handle)
{
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		return false;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		size_t read_count;

		read_count = tevent_queue_length(p->read_queue);
		if (read_count > 0) {
			return true;
		}

		return false;
	}

	return false;
}
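
/****************************************************************************
 Asynchronous write to a named pipe handle, following the usual tevent_req
 send/recv pattern. For an embedded pipe the data is pushed synchronously
 into the rpc server via write_to_internal_pipe() and the request is posted
 as already completed; for a proxied pipe the data is queued on the tstream
 write queue and completes in np_write_done(). A caller pairs np_write_send()
 with np_write_recv() from its completion callback, roughly (a sketch, not
 code from this file; my_write_done and my_state are placeholder names):

	subreq = np_write_send(mem_ctx, ev, handle, data, len);
	tevent_req_set_callback(subreq, my_write_done, my_state);
	...
	status = np_write_recv(subreq, &nwritten);
****************************************************************************/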
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
	struct iovec iov;
	ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nwritten = write_to_internal_pipe(p, (const char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		state->iov.iov_base = discard_const_p(void, data);
		state->iov.iov_len = len;

		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
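
/****************************************************************************
 Completion callback for the proxied write: collects the writev result and
 maps a unix error to an NTSTATUS on the parent request.
****************************************************************************/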
static void np_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	ssize_t received;
	int err;

	received = tstream_writev_queue_recv(subreq, &err);
	if (received < 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}
	state->nwritten = received;
	tevent_req_done(req);
}
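
/****************************************************************************
 Receives the result of np_write_send(): the overall NTSTATUS and the number
 of bytes that were written.
****************************************************************************/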
NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pnwritten = state->nwritten;
	return NT_STATUS_OK;
}
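
/****************************************************************************
 Helper state and callback used with tstream_readv_pdu_queue_send() on the
 proxied read path. The callback hands the tstream one iovec at a time: it
 asks for a single byte while nothing is pending, then for as much of the
 caller's buffer as the stream can satisfy, and signals a short read once
 some data has arrived and nothing more is pending. Any bytes still pending
 beyond the buffer are remembered in "remaining" so the caller can report
 outstanding data.
****************************************************************************/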
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;
	size_t len;
	off_t ofs;
	size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
					   uint8_t *buf, size_t len)
{
	ZERO_STRUCTP(s);

	s->buf = buf;
	s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
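
/****************************************************************************
 Asynchronous read from a named pipe handle, the counterpart to
 np_write_send(). Embedded pipes are answered immediately from
 read_from_internal_pipe(); proxied pipes queue a PDU read on the tstream
 and complete in np_read_done(). Collect the result with np_read_recv().
****************************************************************************/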
struct np_read_state {
	struct np_proxy_state *p;
	struct np_ipc_readv_next_vector_state next_vector;

	ssize_t nread;
	bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_read_state *state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct np_read_state);
	if (req == NULL) {
		return NULL;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		np_ipc_readv_next_vector_init(&state->next_vector,
					      data, len);

		subreq = tstream_readv_pdu_queue_send(state,
						      ev,
						      p->npipe,
						      p->read_queue,
						      np_ipc_readv_next_vector,
						      &state->next_vector);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, np_read_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
}
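
/****************************************************************************
 Completion callback for the proxied read: records how many bytes arrived
 and whether the tstream still has data pending beyond this buffer.
****************************************************************************/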
static void np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	ssize_t ret;
	int err;

	ret = tstream_readv_pdu_queue_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}

	state->nread = ret;
	state->is_data_outstanding = (state->next_vector.remaining > 0);

	tevent_req_done(req);
	return;
}
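
/****************************************************************************
 Receives the result of np_read_send(): the NTSTATUS, the number of bytes
 read and whether more data is still outstanding on the pipe.
****************************************************************************/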
NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
		      bool *is_data_outstanding)
{
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
		   (int)state->nread, state->is_data_outstanding?"":"no "));

	*nread = state->nread;
	*is_data_outstanding = state->is_data_outstanding;
	return NT_STATUS_OK;
}