/* source3/rpc_server/srv_pipe_hnd.c */
/*
 * Unix SMB/CIFS implementation.
 * RPC Pipe client / server routines
 * Copyright (C) Andrew Tridgell 1992-1998,
 * Largely re-written : 2005
 * Copyright (C) Jeremy Allison 1998 - 2005
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "rpc_server.h"
#include "fake_file.h"
#include "rpc_dce.h"
#include "ntdomain.h"
#include "rpc_server/rpc_ncacn_np.h"
#include "rpc_server/srv_pipe_hnd.h"
#include "rpc_server/srv_pipe.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/util/tevent_ntstatus.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr =
		MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

	DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
		   "len_needed_to_complete_hdr = %u, "
		   "receive_len = %u\n",
		   (unsigned int)data_to_copy,
		   (unsigned int)len_needed_to_complete_hdr,
		   (unsigned int)p->in_data.pdu.length));

	if (p->in_data.pdu.data == NULL) {
		p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
	}
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return -1;
	}

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, len_needed_to_complete_hdr);
	p->in_data.pdu.length += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}
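
/****************************************************************************
 Parse the fragment length out of the DCE/RPC common header collected in
 p->in_data.pdu, validate it and grow the pdu buffer to hold the complete
 fragment. On any inconsistency the pipe is put into the fault state.
****************************************************************************/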
static bool get_pdu_size(struct pipes_struct *p)
{
	uint16_t frag_len;
	/* the fill_rpc_header() call ensures we copy only
	 * RPC_HEADER_LEN bytes. If this doesn't match then
	 * something is very wrong and we can only abort */
	if (p->in_data.pdu.length != RPC_HEADER_LEN) {
		DEBUG(0, ("Unexpected RPC Header size! "
			  "(got %d, expected %d)\n",
			  (int)p->in_data.pdu.length,
			  RPC_HEADER_LEN));
		set_incoming_fault(p);
		return false;
	}

	frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

	/* verify it is a reasonable value */
	if ((frag_len < RPC_HEADER_LEN) ||
	    (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
			  frag_len));
		set_incoming_fault(p);
		return false;
	}

	p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

	/* allocate the space needed to fill the pdu */
	p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
					     uint8_t, frag_len);
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc_realloc failed\n"));
		set_incoming_fault(p);
		return false;
	}

	return true;
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this after processing
 a complete incoming and outgoing request (multiple incoming/outgoing
 PDU's).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
	data_blob_free(&p->out_data.frag);
	data_blob_free(&p->out_data.rdata);
	data_blob_free(&p->in_data.data);

	DEBUG(3, ("free_pipe_context: "
		  "destroying talloc pool of size %lu\n",
		  (unsigned long)talloc_total_size(p->mem_ctx)));
	talloc_free_children(p->mem_ctx);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
				  - p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n));

	if (data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * RPC_HEADER_LEN bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			return 0;
		}

		/* If pdu_needed_len is still zero here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU?
	 * (return the number of bytes handled in the call)
	 */

	if (p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_left = n;

	while (data_left) {
		ssize_t data_used;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			   (unsigned int)data_left));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)data_used));

		if (data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_next_pdu() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */

	if (n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)p->out_data.frag.length,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, p->out_data.rdata.length = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if (!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

 out:
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);

		if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
			/*
			 * We're completely finished with both outgoing and
			 * incoming data streams. It's safe to free all
			 * temporary data from this request.
			 */
			free_pipe_context(p);
		}
	}

	return data_returned;
}
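
/****************************************************************************
 Returns true if the files_struct refers to a named pipe fake file handle,
 either an embedded pipe or a proxied one.
****************************************************************************/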
bool fsp_is_np(struct files_struct *fsp)
{
	enum FAKE_FILE_TYPE type;

	if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
		return false;
	}

	type = fsp->fake_file_handle->type;

	return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
		|| (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}
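
/****************************************************************************
 Open the named pipe "name" and return a fake_file_handle for it. Depending
 on the "rpc_server:<pipename>" setting (or the legacy "np:proxy" list) the
 pipe is either served in-process via a pipes_struct or handed to an
 external RPC server through make_external_rpc_pipe_p().
****************************************************************************/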
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct client_address *client_id,
		 struct auth_serversupplied_info *session_info,
		 struct messaging_context *msg_ctx,
		 struct fake_file_handle **phandle)
{
	const char *rpcsrv_type;
	const char **proxy_list;
	struct fake_file_handle *handle;
	bool external = false;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Check what the server type for this pipe is.
	   Defaults to "embedded". */
	rpcsrv_type = lp_parm_const_string(GLOBAL_SECTION_SNUM,
					   "rpc_server", name,
					   "embedded");
	if (StrCaseCmp(rpcsrv_type, "embedded") != 0) {
		external = true;
	}

	/* Still support the old method for defining external servers */
	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		external = true;
	}

	if (external) {
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(handle, name,
					     local_address,
					     remote_address,
					     session_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;
	} else {
		struct pipes_struct *p;
		struct ndr_syntax_id syntax;

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		p = make_internal_rpc_pipe_p(handle, &syntax, client_id,
					     session_info, msg_ctx);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;
	}

	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}
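
/****************************************************************************
 Returns true if a proxied pipe still has read requests queued on its
 tstream. Embedded pipes are answered synchronously, so they never have a
 read in progress.
****************************************************************************/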
bool np_read_in_progress(struct fake_file_handle *handle)
{
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		return false;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		size_t read_count;

		read_count = tevent_queue_length(p->read_queue);
		if (read_count > 0) {
			return true;
		}

		return false;
	}

	return false;
}
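
/****************************************************************************
 Asynchronous write to a named pipe handle (np_write_send/np_write_recv).
 Writes to embedded pipes complete immediately via write_to_internal_pipe();
 writes to proxied pipes are queued on the proxy's tstream write queue.
****************************************************************************/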
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
	struct iovec iov;
	ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
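
/* Completion callback for the proxied write: collect the tstream result and
 * map any unix error to an NTSTATUS on the parent request. */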
static void np_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	ssize_t received;
	int err;

	received = tstream_writev_queue_recv(subreq, &err);
	if (received < 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}
	state->nwritten = received;
	tevent_req_done(req);
}
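
/* Standard tevent receive function: returns the status of the write and the
 * number of bytes written. */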
NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pnwritten = state->nwritten;
	return NT_STATUS_OK;
}

struct np_ipc_readv_next_vector_state {
	uint8_t *buf;
	size_t len;
	off_t ofs;
	size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
					  uint8_t *buf, size_t len)
{
	ZERO_STRUCTP(s);

	s->buf = buf;
	s->len = MIN(len, UINT16_MAX);
}
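
/****************************************************************************
 "next vector" callback used with tstream_readv_pdu_queue_send() below.
 It hands out one iovec at a time pointing into the caller's buffer and
 signals completion by returning a zero-length vector. tstream_pending_bytes()
 is used so the read completes as soon as the stream would block, giving
 short-read semantics; any bytes still pending beyond the buffer are
 recorded in state->remaining.
****************************************************************************/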
static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
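
/****************************************************************************
 Asynchronous read from a named pipe handle (np_read_send/np_read_recv).
 Embedded pipes are answered immediately from the marshalled reply data via
 read_from_internal_pipe(); proxied pipes queue a readv on the proxy's
 tstream using np_ipc_readv_next_vector() above.
****************************************************************************/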
struct np_read_state {
	struct np_proxy_state *p;
	struct np_ipc_readv_next_vector_state next_vector;

	ssize_t nread;	/* signed, so the -1 error return from
			 * read_from_internal_pipe() survives the
			 * ">= 0" check in np_read_send() */
	bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_read_state *state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct np_read_state);
	if (req == NULL) {
		return NULL;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		np_ipc_readv_next_vector_init(&state->next_vector,
					      data, len);

		subreq = tstream_readv_pdu_queue_send(state,
						      ev,
						      p->npipe,
						      p->read_queue,
						      np_ipc_readv_next_vector,
						      &state->next_vector);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, np_read_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
}
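
/* Completion callback for the proxied read: collect the result, remember
 * whether more data is already pending and complete the parent request. */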
static void np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	ssize_t ret;
	int err;

	ret = tstream_readv_pdu_queue_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}

	state->nread = ret;
	state->is_data_outstanding = (state->next_vector.remaining > 0);

	tevent_req_done(req);
	return;
}
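
/* Standard tevent receive function: returns the number of bytes read and
 * whether the other end has more data outstanding. */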
NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
		      bool *is_data_outstanding)
{
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
		   (int)state->nread, state->is_data_outstanding?"":"no "));

	*nread = state->nread;
	*is_data_outstanding = state->is_data_outstanding;
	return NT_STATUS_OK;
}