/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "includes.h"
#include "rpc_server.h"
#include "fake_file.h"
#include "rpc_dce.h"
#include "ntdomain.h"
#include "rpc_server/rpc_ncacn_np.h"
#include "rpc_server/srv_pipe_hnd.h"
#include "rpc_server/srv_pipe.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/util/tevent_ntstatus.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV
/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, const char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr =
		MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

	DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
		   "len_needed_to_complete_hdr = %u, "
		   "receive_len = %u\n",
		   (unsigned int)data_to_copy,
		   (unsigned int)len_needed_to_complete_hdr,
		   (unsigned int)p->in_data.pdu.length ));

	if (p->in_data.pdu.data == NULL) {
		p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
	}
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return -1;
	}

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, len_needed_to_complete_hdr);
	p->in_data.pdu.length += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}
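
/*
 * dcerpc_get_frag_length() below reads the 16-bit frag_length field of the
 * DCE/RPC common header; this is why fill_rpc_header() must buffer a full
 * RPC_HEADER_LEN bytes before the total size of the incoming PDU is known.
 */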
static bool get_pdu_size(struct pipes_struct *p)
{
	uint16_t frag_len;
	/* the fill_rpc_header() call ensures we copy only
	 * RPC_HEADER_LEN bytes. If this doesn't match then
	 * something is very wrong and we can only abort */
	if (p->in_data.pdu.length != RPC_HEADER_LEN) {
		DEBUG(0, ("Unexpected RPC Header size! "
			  "got %d, expected %d\n",
			  (int)p->in_data.pdu.length,
			  RPC_HEADER_LEN));
		set_incoming_fault(p);
		return false;
	}

	frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

	/* verify it is a reasonable value */
	if ((frag_len < RPC_HEADER_LEN) ||
	    (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
			  frag_len));
		set_incoming_fault(p);
		return false;
	}

	p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

	/* allocate the space needed to fill the pdu */
	p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
					     uint8_t, frag_len);
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc_realloc failed\n"));
		set_incoming_fault(p);
		return false;
	}

	return true;
}
/****************************************************************************
 Call this to free any talloc'ed memory. Do this after processing
 a complete incoming and outgoing request (multiple incoming/outgoing
 PDU's).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
	data_blob_free(&p->out_data.frag);
	data_blob_free(&p->out_data.rdata);
	data_blob_free(&p->in_data.data);

	DEBUG(3, ("free_pipe_context: "
		  "destroying talloc pool of size %lu\n",
		  (unsigned long)talloc_total_size(p->mem_ctx)));
	talloc_free_children(p->mem_ctx);
}
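
/*
 * Incoming PDU assembly state: p->in_data.pdu.length counts the bytes
 * buffered so far for the current fragment, while p->in_data.pdu_needed_len
 * counts how many more bytes are needed to complete it (zero means either
 * "no fragment in progress yet" or "fragment complete").
 */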
/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

ssize_t process_incoming_data(struct pipes_struct *p, const char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
					- p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n ));

	if (data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * an RPC_HEADER_LEN number of bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			return 0;
		}

		/* If pdu_needed_len is still zero here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if (p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
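
/*
 * write_to_internal_pipe() below just loops over the written buffer, feeding
 * it to process_incoming_data() until everything has been consumed or a
 * fault has been raised.
 */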
/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, const char *data, size_t n)
{
	size_t data_left = n;

	while (data_left) {
		ssize_t data_used;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			   (unsigned int)data_left));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)data_used));

		if (data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}
/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if (n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
			  (unsigned int)p->out_data.frag.length,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}
	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, p->out_data.rdata.length = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if (!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);
	}

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We're completely finished with both outgoing and
		 * incoming data streams. It's safe to free all
		 * temporary data from this request.
		 */
		free_pipe_context(p);
	}

	return data_returned;
}
bool fsp_is_np(struct files_struct *fsp)
{
	enum FAKE_FILE_TYPE type;

	if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
		return false;
	}

	type = fsp->fake_file_handle->type;

	return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
		|| (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}
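
/*
 * np_open() decides, per pipe name, whether the pipe is served in-process by
 * a pipes_struct (the "embedded" rpc_server backend) or proxied to an
 * external RPC server via make_external_rpc_pipe_p(). Both the
 * "rpc_server:<pipename>" parametric option and the legacy "np:proxy" list
 * select the external path.
 */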
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct auth_session_info *session_info,
		 struct messaging_context *msg_ctx,
		 struct fake_file_handle **phandle)
{
	const char *rpcsrv_type;
	const char **proxy_list;
	struct fake_file_handle *handle;
	bool external = false;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Check what is the server type for this pipe.
	   Defaults to "embedded" */
	rpcsrv_type = lp_parm_const_string(GLOBAL_SECTION_SNUM,
					   "rpc_server", name,
					   "embedded");
	if (strcasecmp_m(rpcsrv_type, "embedded") != 0) {
		external = true;
	}

	/* Still support the old method for defining external servers */
	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		external = true;
	}

	if (external) {
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(handle, name,
					     local_address,
					     remote_address,
					     session_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;
	} else {
		struct pipes_struct *p;
		struct ndr_syntax_id syntax;

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		p = make_internal_rpc_pipe_p(handle, &syntax, remote_address,
					     session_info, msg_ctx);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;
	}

	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}
bool np_read_in_progress(struct fake_file_handle *handle)
{
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		return false;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		size_t read_count;

		read_count = tevent_queue_length(p->read_queue);
		if (read_count > 0) {
			return true;
		}

		return false;
	}

	return false;
}
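
/*
 * The np_write_* and np_read_* functions follow the usual tevent_req
 * send/recv pattern: the *_send() call returns a tevent_req that either
 * completes immediately (internal pipes, or trivial requests) or is finished
 * later by a tstream callback (proxied pipes), and the *_recv() call returns
 * the result as an NTSTATUS plus the transferred byte count.
 */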
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
	struct iovec iov;
	ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nwritten = write_to_internal_pipe(p, (const char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		state->iov.iov_base = discard_const_p(void, data);
		state->iov.iov_len = len;

		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
static void np_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	ssize_t received;
	int err;

	received = tstream_writev_queue_recv(subreq, &err);
	if (received < 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}
	state->nwritten = received;
	tevent_req_done(req);
}

NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pnwritten = state->nwritten;
	return NT_STATUS_OK;
}
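
/*
 * Helper state for reading from a proxied pipe: np_ipc_readv_next_vector()
 * is the next-vector callback passed to tstream_readv_pdu_queue_send(). It
 * is called repeatedly; returning a single iovec asks for more bytes, while
 * returning *count == 0 terminates the read, either because the caller's
 * buffer is full or because a short read is being returned.
 */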
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;
	size_t len;
	off_t ofs;
	size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
					  uint8_t *buf, size_t len)
{
	ZERO_STRUCTP(s);

	s->buf = buf;
	s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
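
/*
 * For proxied pipes, any bytes left on the stream beyond the caller's buffer
 * are recorded in next_vector.remaining; np_read_done() turns that into
 * is_data_outstanding, which the caller can use to tell the SMB client that
 * more pipe data is available.
 */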
struct np_read_state {
	struct np_proxy_state *p;
	struct np_ipc_readv_next_vector_state next_vector;

	ssize_t nread;
	bool is_data_outstanding;
};
static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_read_state *state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct np_read_state);
	if (req == NULL) {
		return NULL;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		np_ipc_readv_next_vector_init(&state->next_vector,
					      data, len);

		subreq = tstream_readv_pdu_queue_send(state,
						      ev,
						      p->npipe,
						      p->read_queue,
						      np_ipc_readv_next_vector,
						      &state->next_vector);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, np_read_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
}
static void np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	ssize_t ret;
	int err;

	ret = tstream_readv_pdu_queue_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}

	state->nread = ret;
	state->is_data_outstanding = (state->next_vector.remaining > 0);

	tevent_req_done(req);
	return;
}

NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
		      bool *is_data_outstanding)
{
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
		   (int)state->nread, state->is_data_outstanding?"":"no "));

	*nread = state->nread;
	*is_data_outstanding = state->is_data_outstanding;
	return NT_STATUS_OK;
}
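
/*
 * Usage sketch (illustrative only, not how smbd itself drives these
 * requests): a synchronous caller could pair the send/recv functions with
 * tevent_req_poll(), e.g.
 *
 *	struct tevent_req *req;
 *	ssize_t nwritten;
 *	NTSTATUS status;
 *
 *	req = np_write_send(mem_ctx, ev, handle, buf, buflen);
 *	if (req == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	if (!tevent_req_poll(req, ev)) {
 *		return map_nt_error_from_unix(errno);
 *	}
 *	status = np_write_recv(req, &nwritten);
 *	TALLOC_FREE(req);
 *
 * smbd instead completes these requests asynchronously via
 * tevent_req_set_callback().
 */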