source3/rpc_server/srv_pipe_hnd.c
/*
 * Unix SMB/CIFS implementation.
 * RPC Pipe client / server routines
 * Copyright (C) Andrew Tridgell 1992-1998,
 * Largely re-written : 2005
 * Copyright (C) Jeremy Allison 1998 - 2005
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "fake_file.h"
#include "rpc_dce.h"
#include "ntdomain.h"
#include "rpc_server/rpc_ncacn_np.h"
#include "rpc_server/srv_pipe_hnd.h"
#include "rpc_server/srv_pipe.h"
#include "rpc_server/rpc_server.h"
#include "rpc_server/rpc_config.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/util/tevent_ntstatus.h"
#include "librpc/ndr/ndr_table.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV
/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, const char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr =
                MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

        DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
                   "len_needed_to_complete_hdr = %u, "
                   "receive_len = %u\n",
                   (unsigned int)data_to_copy,
                   (unsigned int)len_needed_to_complete_hdr,
                   (unsigned int)p->in_data.pdu.length));

        if (p->in_data.pdu.data == NULL) {
                p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
        }
        if (p->in_data.pdu.data == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return -1;
        }

        memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
               data, len_needed_to_complete_hdr);
        p->in_data.pdu.length += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}
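
/****************************************************************************
 Reads the fragment length from a completed RPC header and (re)allocates the
 incoming PDU buffer so it can hold the full fragment. Faults the pipe on
 any inconsistency.
****************************************************************************/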
static bool get_pdu_size(struct pipes_struct *p)
{
        uint16_t frag_len;
        /* the fill_rpc_header() call ensures we copy only
         * RPC_HEADER_LEN bytes. If this doesn't match then
         * something is very wrong and we can only abort */
        if (p->in_data.pdu.length != RPC_HEADER_LEN) {
                DEBUG(0, ("Unexpected RPC Header size! "
                          "(got %d, expected %d)\n",
                          (int)p->in_data.pdu.length,
                          RPC_HEADER_LEN));
                set_incoming_fault(p);
                return false;
        }

        frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

        /* verify it is a reasonable value */
        if ((frag_len < RPC_HEADER_LEN) ||
            (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
                          frag_len));
                set_incoming_fault(p);
                return false;
        }

        p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

        /* allocate the space needed to fill the pdu */
        p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
                                             uint8_t, frag_len);
        if (p->in_data.pdu.data == NULL) {
                DEBUG(0, ("talloc_realloc failed\n"));
                set_incoming_fault(p);
                return false;
        }

        return true;
}
/****************************************************************************
 Call this to free any talloc'ed memory. Do this after processing
 a complete incoming and outgoing request (multiple incoming/outgoing
 PDUs).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
        data_blob_free(&p->out_data.frag);
        data_blob_free(&p->out_data.rdata);
        data_blob_free(&p->in_data.data);

        DEBUG(3, ("free_pipe_context: "
                  "destroying talloc pool of size %lu\n",
                  (unsigned long)talloc_total_size(p->mem_ctx)));
        talloc_free_children(p->mem_ctx);
}
/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

ssize_t process_incoming_data(struct pipes_struct *p, const char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
                                     - p->in_data.pdu.length);

        DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
                   "pdu_needed_len = %u, incoming data = %u\n",
                   (unsigned int)p->in_data.pdu.length,
                   (unsigned int)p->in_data.pdu_needed_len,
                   (unsigned int)n));

        if (data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the
                 * fault state.
                 */
                DEBUG(0, ("process_incoming_data: "
                          "No space in incoming pdu buffer. "
                          "Current size = %u incoming data size = %u\n",
                          (unsigned int)p->in_data.pdu.length,
                          (unsigned int)n));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we get at least
         * RPC_HEADER_LEN bytes before we can do anything.
         */

        if ((p->in_data.pdu_needed_len == 0) &&
            (p->in_data.pdu.length < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least an RPC_HEADER_LEN amount of
         * data stored in p->in_data.pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Check how much more data we need, then loop again.
         */
        if (p->in_data.pdu_needed_len == 0) {

                bool ok = get_pdu_size(p);
                if (!ok) {
                        return -1;
                }
                if (p->in_data.pdu_needed_len > 0) {
                        return 0;
                }

                /* If get_pdu_size() succeeded and pdu_needed_len is still
                 * zero here we have a PDU that consists of an RPC_HEADER
                 * only. This is a DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL
                 * or DCERPC_PKT_ORPHANED pdu type.
                 * Deal with this in process_complete_pdu(). */
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the p->in_data.pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
               data, data_to_copy);
        p->in_data.pdu.length += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if (p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
                   "pdu.length = %u, pdu_needed_len = %u\n",
                   (unsigned int)p->in_data.pdu.length,
                   (unsigned int)p->in_data.pdu_needed_len));

        return (ssize_t)data_to_copy;
}
/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, const char *data, size_t n)
{
        size_t data_left = n;

        while (data_left) {
                ssize_t data_used;

                DEBUG(10, ("write_to_pipe: data_left = %u\n",
                           (unsigned int)data_left));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10, ("write_to_pipe: data_used = %d\n",
                           (int)data_used));

                if (data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}
/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
                                       size_t n, bool *is_data_outstanding)
{
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0, ("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6, (" name: %s len: %u\n",
                  ndr_interface_name(&p->contexts->syntax.uuid,
                                     p->contexts->syntax.if_version),
                  (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing. Just ignore it so things work.
         */

        if (n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5, ("read_from_pipe: too large read (%u) requested on "
                          "pipe %s. We can only service %d sized reads.\n",
                          (unsigned int)n,
                          ndr_interface_name(&p->contexts->syntax.uuid,
                                             p->contexts->syntax.if_version),
                          RPC_MAX_PDU_FRAG_LEN));
                n = RPC_MAX_PDU_FRAG_LEN;
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        pdu_remaining = p->out_data.frag.length
                - p->out_data.current_pdu_sent;

        if (pdu_remaining > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10, ("read_from_pipe: %s: current_pdu_len = %u, "
                           "current_pdu_sent = %u returning %d bytes.\n",
                           ndr_interface_name(&p->contexts->syntax.uuid,
                                              p->contexts->syntax.if_version),
                           (unsigned int)p->out_data.frag.length,
                           (unsigned int)p->out_data.current_pdu_sent,
                           (int)data_returned));

                memcpy(data,
                       p->out_data.frag.data
                       + p->out_data.current_pdu_sent,
                       data_returned);

                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point p->current_pdu_len == p->current_pdu_sent (which
         * may of course be zero if this is the first return fragment).
         */

        DEBUG(10, ("read_from_pipe: %s: fault_state = %d : data_sent_length "
                   "= %u, p->out_data.rdata.length = %u.\n",
                   ndr_interface_name(&p->contexts->syntax.uuid,
                                      p->contexts->syntax.if_version),
                   (int)p->fault_state,
                   (unsigned int)p->out_data.data_sent_length,
                   (unsigned int)p->out_data.rdata.length));

        if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in p->rdata.
         * Create the header/data/footers. This also sets up the fields
         * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
         * and stores the outgoing PDU in p->current_pdu.
         */

        if (!create_next_pdu(p)) {
                DEBUG(0, ("read_from_pipe: %s: create_next_pdu failed.\n",
                          ndr_interface_name(&p->contexts->syntax.uuid,
                                             p->contexts->syntax.if_version)));
                return -1;
        }

        data_returned = MIN(n, p->out_data.frag.length);

        memcpy(data, p->out_data.frag.data, (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
        (*is_data_outstanding) = p->out_data.frag.length > n;

        if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
                /* We've returned everything in the out_data.frag
                 * so we're done with this pdu. Free it and reset
                 * current_pdu_sent. */
                p->out_data.current_pdu_sent = 0;
                data_blob_free(&p->out_data.frag);

                if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
                        /*
                         * We're completely finished with both outgoing and
                         * incoming data streams. It's safe to free all
                         * temporary data from this request.
                         */
                        free_pipe_context(p);
                }
        }

        return data_returned;
}
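
/****************************************************************************
 Returns true if the fsp refers to a named pipe proxy fake file handle.
****************************************************************************/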
bool fsp_is_np(struct files_struct *fsp)
{
        enum FAKE_FILE_TYPE type;

        if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
                return false;
        }

        type = fsp->fake_file_handle->type;

        return (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY);
}
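
/****************************************************************************
 Opens a named pipe. Depending on the configured RPC service mode this
 either proxies to an external RPC server or sets up a socketpair to the
 embedded RPC server. Unknown or disabled pipes are rejected with
 NT_STATUS_OBJECT_NAME_NOT_FOUND.
****************************************************************************/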
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
                 const struct tsocket_address *local_address,
                 const struct tsocket_address *remote_address,
                 struct auth_session_info *session_info,
                 struct tevent_context *ev_ctx,
                 struct messaging_context *msg_ctx,
                 struct fake_file_handle **phandle)
{
        enum rpc_service_mode_e pipe_mode;
        const char **proxy_list;
        struct fake_file_handle *handle;
        struct ndr_syntax_id syntax;
        struct npa_state *npa = NULL;
        NTSTATUS status;
        bool ok;

        proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

        handle = talloc(mem_ctx, struct fake_file_handle);
        if (handle == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        /* Check what the server mode is for this pipe.
           Defaults to "embedded". */
        pipe_mode = rpc_service_mode(name);

        /* Still support the old method for defining external servers. */
        if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
                pipe_mode = RPC_SERVICE_MODE_EXTERNAL;
        }

        switch (pipe_mode) {
        case RPC_SERVICE_MODE_EXTERNAL:
                status = make_external_rpc_pipe(handle,
                                                name,
                                                local_address,
                                                remote_address,
                                                session_info,
                                                &npa);
                if (!NT_STATUS_IS_OK(status)) {
                        talloc_free(handle);
                        return status;
                }

                handle->private_data = (void *)npa;
                handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;

                break;
        case RPC_SERVICE_MODE_EMBEDDED:
                /* Check whether this daemon handles this pipe. */
                ok = is_known_pipename(name, &syntax);
                if (!ok) {
                        DEBUG(0, ("ERROR! '%s' is not a registered pipe!\n",
                                  name));
                        talloc_free(handle);
                        return NT_STATUS_OBJECT_NAME_NOT_FOUND;
                }

                status = make_internal_rpc_pipe_socketpair(handle,
                                                           ev_ctx,
                                                           msg_ctx,
                                                           name,
                                                           &syntax,
                                                           remote_address,
                                                           session_info,
                                                           &npa);
                if (!NT_STATUS_IS_OK(status)) {
                        talloc_free(handle);
                        return status;
                }

                handle->private_data = (void *)npa;
                handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;

                break;
        case RPC_SERVICE_MODE_DISABLED:
                talloc_free(handle);
                return NT_STATUS_OBJECT_NAME_NOT_FOUND;
        }

        *phandle = handle;

        return NT_STATUS_OK;
}
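
/****************************************************************************
 Returns true if a read request is already queued on the proxy handle.
****************************************************************************/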
bool np_read_in_progress(struct fake_file_handle *handle)
{
        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct npa_state *p =
                        talloc_get_type_abort(handle->private_data,
                                              struct npa_state);
                size_t read_count;

                read_count = tevent_queue_length(p->read_queue);
                if (read_count > 0) {
                        return true;
                }

                return false;
        }

        return false;
}
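
/****************************************************************************
 Asynchronous write to a named pipe handle (tevent_req based).
 np_write_send() queues a writev on the proxy's write queue;
 np_write_recv() reports the number of bytes written.
****************************************************************************/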
struct np_write_state {
        struct tevent_context *ev;
        struct npa_state *p;
        struct iovec iov;
        ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
                                 struct fake_file_handle *handle,
                                 const uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_write_state *state;
        NTSTATUS status;

        DEBUG(6, ("np_write_send: len: %d\n", (int)len));
        dump_data(50, data, len);

        req = tevent_req_create(mem_ctx, &state, struct np_write_state);
        if (req == NULL) {
                return NULL;
        }

        if (len == 0) {
                state->nwritten = 0;
                status = NT_STATUS_OK;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct npa_state *p = talloc_get_type_abort(
                        handle->private_data, struct npa_state);
                struct tevent_req *subreq;

                state->ev = ev;
                state->p = p;
                state->iov.iov_base = discard_const_p(void, data);
                state->iov.iov_len = len;

                subreq = tstream_writev_queue_send(state, ev,
                                                   p->stream,
                                                   p->write_queue,
                                                   &state->iov, 1);
                if (subreq == NULL) {
                        goto fail;
                }
                tevent_req_set_callback(subreq, np_write_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
 fail:
        TALLOC_FREE(req);
        return NULL;
}
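
/* Completion callback for the queued writev: record the number of bytes
 * written and signal the parent request. */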
static void np_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        ssize_t received;
        int err;

        received = tstream_writev_queue_recv(subreq, &err);
        if (received < 0) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }
        state->nwritten = received;
        tevent_req_done(req);
}

NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pnwritten = state->nwritten;
        return NT_STATUS_OK;
}
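
/****************************************************************************
 Vector callback state for tstream_readv_pdu_queue_send(). The callback
 below hands out read vectors so that the read completes as soon as some
 data has arrived (a short read) instead of waiting for the full buffer,
 and records whether the stream still has bytes pending.
****************************************************************************/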
struct np_ipc_readv_next_vector_state {
        uint8_t *buf;
        size_t len;
        off_t ofs;
        size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
                                          uint8_t *buf, size_t len)
{
        ZERO_STRUCTP(s);

        s->buf = buf;
        s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
                                    void *private_data,
                                    TALLOC_CTX *mem_ctx,
                                    struct iovec **_vector,
                                    size_t *count)
{
        struct np_ipc_readv_next_vector_state *state =
                (struct np_ipc_readv_next_vector_state *)private_data;
        struct iovec *vector;
        ssize_t pending;
        size_t wanted;

        if (state->ofs == state->len) {
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        pending = tstream_pending_bytes(stream);
        if (pending == -1) {
                return -1;
        }

        if (pending == 0 && state->ofs != 0) {
                /* return a short read */
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        if (pending == 0) {
                /* we want at least one byte and recheck again */
                wanted = 1;
        } else {
                size_t missing = state->len - state->ofs;
                if (pending > missing) {
                        /* there's more available */
                        state->remaining = pending - missing;
                        wanted = missing;
                } else {
                        /* read what we can get and recheck in the next cycle */
                        wanted = pending;
                }
        }

        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (!vector) {
                return -1;
        }

        vector[0].iov_base = state->buf + state->ofs;
        vector[0].iov_len = wanted;

        state->ofs += wanted;

        *_vector = vector;
        *count = 1;
        return 0;
}
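
/****************************************************************************
 Asynchronous read from a named pipe handle (tevent_req based).
 np_read_send() queues a readv on the proxy's read queue; np_read_recv()
 reports the number of bytes read and whether more data is outstanding.
****************************************************************************/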
struct np_read_state {
        struct npa_state *p;
        struct np_ipc_readv_next_vector_state next_vector;

        ssize_t nread;
        bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
                                struct fake_file_handle *handle,
                                uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_read_state *state;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct np_read_state);
        if (req == NULL) {
                return NULL;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct npa_state *p = talloc_get_type_abort(
                        handle->private_data, struct npa_state);
                struct tevent_req *subreq;

                np_ipc_readv_next_vector_init(&state->next_vector,
                                              data, len);

                subreq = tstream_readv_pdu_queue_send(state,
                                                      ev,
                                                      p->stream,
                                                      p->read_queue,
                                                      np_ipc_readv_next_vector,
                                                      &state->next_vector);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, np_read_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
}
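
/* Completion callback for the queued readv: record the bytes read and
 * whether more data is pending on the stream, then signal the parent
 * request. */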
static void np_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        ssize_t ret;
        int err;

        ret = tstream_readv_pdu_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }

        state->nread = ret;
        state->is_data_outstanding = (state->next_vector.remaining > 0);

        tevent_req_done(req);
        return;
}

NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
                      bool *is_data_outstanding)
{
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }

        DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
                   (int)state->nread, state->is_data_outstanding ? "" : "no "));

        *nread = state->nread;
        *is_data_outstanding = state->is_data_outstanding;
        return NT_STATUS_OK;
}