source3/rpc_server/srv_pipe_hnd.c

/*
 * Unix SMB/CIFS implementation.
 * RPC Pipe client / server routines
 * Copyright (C) Andrew Tridgell 1992-1998,
 * Largely re-written : 2005
 * Copyright (C) Jeremy Allison 1998 - 2005
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "../librpc/gen_ndr/srv_spoolss.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"
#include "../libcli/named_pipe_auth/npa_tstream.h"
#include "rpc_server.h"
#include "smbd/globals.h"
#include "fake_file.h"
#include "rpc_dce.h"
#include "rpc_server/rpc_ncacn_np.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr =
                MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

        DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
                   "len_needed_to_complete_hdr = %u, "
                   "receive_len = %u\n",
                   (unsigned int)data_to_copy,
                   (unsigned int)len_needed_to_complete_hdr,
                   (unsigned int)p->in_data.pdu.length ));

        if (p->in_data.pdu.data == NULL) {
                p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
        }
        if (p->in_data.pdu.data == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return -1;
        }

        memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
               data, len_needed_to_complete_hdr);
        p->in_data.pdu.length += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}

static bool get_pdu_size(struct pipes_struct *p)
{
        uint16_t frag_len;
        /* the fill_rpc_header() call ensures we copy only
         * RPC_HEADER_LEN bytes. If this doesn't match then
         * something is very wrong and we can only abort */
        if (p->in_data.pdu.length != RPC_HEADER_LEN) {
                DEBUG(0, ("Unexpected RPC Header size! "
                          "got %d, expected %d\n",
                          (int)p->in_data.pdu.length,
                          RPC_HEADER_LEN));
                set_incoming_fault(p);
                return false;
        }

        frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

        /* verify it is a reasonable value */
        if ((frag_len < RPC_HEADER_LEN) ||
            (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
                          frag_len));
                set_incoming_fault(p);
                return false;
        }

        p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

        /* allocate the space needed to fill the pdu */
        p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
                                             uint8_t, frag_len);
        if (p->in_data.pdu.data == NULL) {
                DEBUG(0, ("talloc_realloc failed\n"));
                set_incoming_fault(p);
                return false;
        }

        return true;
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this after processing
 a complete incoming and outgoing request (multiple incoming/outgoing
 PDUs).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
        data_blob_free(&p->out_data.frag);
        data_blob_free(&p->out_data.rdata);
        data_blob_free(&p->in_data.data);

        DEBUG(3, ("free_pipe_context: "
                  "destroying talloc pool of size %lu\n",
                  (unsigned long)talloc_total_size(p->mem_ctx)));
        talloc_free_children(p->mem_ctx);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
                                        - p->in_data.pdu.length);

        DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
                   "pdu_needed_len = %u, incoming data = %u\n",
                   (unsigned int)p->in_data.pdu.length,
                   (unsigned int)p->in_data.pdu_needed_len,
                   (unsigned int)n ));

        if (data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the
                 * fault state.
                 */
                DEBUG(0, ("process_incoming_data: "
                          "No space in incoming pdu buffer. "
                          "Current size = %u incoming data size = %u\n",
                          (unsigned int)p->in_data.pdu.length,
                          (unsigned int)n));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we get at least
         * an RPC_HEADER_LEN number of bytes before we can do anything.
         */

        if ((p->in_data.pdu_needed_len == 0) &&
            (p->in_data.pdu.length < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least an RPC_HEADER_LEN amount of
         * data stored in p->in_data.pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Check how much more data we need, then loop again.
         */
        if (p->in_data.pdu_needed_len == 0) {

                bool ok = get_pdu_size(p);
                if (!ok) {
                        return -1;
                }
                if (p->in_data.pdu_needed_len > 0) {
                        return 0;
                }

                /* If get_pdu_size() succeeded and pdu_needed_len == 0 here
                 * we have a PDU that consists of an RPC_HEADER only. This
                 * is a DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
                 * DCERPC_PKT_ORPHANED pdu type.
                 * Deal with this in process_complete_pdu(). */
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the p->in_data.pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
               data, data_to_copy);
        p->in_data.pdu.length += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if (p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
                   "pdu.length = %u, pdu_needed_len = %u\n",
                   (unsigned int)p->in_data.pdu.length,
                   (unsigned int)p->in_data.pdu_needed_len));

        return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_left = n;

        while (data_left) {
                ssize_t data_used;

                DEBUG(10, ("write_to_pipe: data_left = %u\n",
                           (unsigned int)data_left));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10, ("write_to_pipe: data_used = %d\n",
                           (int)data_used));

                if (data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
                                       size_t n, bool *is_data_outstanding)
{
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0,("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,(" name: %s len: %u\n",
                 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                 (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing. Just ignore it so things work.
         */

        if (n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5,("read_from_pipe: too large read (%u) requested on "
                         "pipe %s. We can only service %d sized reads.\n",
                         (unsigned int)n,
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                         RPC_MAX_PDU_FRAG_LEN ));
                n = RPC_MAX_PDU_FRAG_LEN;
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        pdu_remaining = p->out_data.frag.length
                - p->out_data.current_pdu_sent;

        if (pdu_remaining > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
                          "current_pdu_sent = %u returning %d bytes.\n",
                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                          (unsigned int)p->out_data.frag.length,
                          (unsigned int)p->out_data.current_pdu_sent,
                          (int)data_returned));

                memcpy(data,
                       p->out_data.frag.data
                       + p->out_data.current_pdu_sent,
                       data_returned);

                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point p->current_pdu_len == p->current_pdu_sent (which
         * may of course be zero if this is the first return fragment).
         */

        DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
                  "= %u, p->out_data.rdata.length = %u.\n",
                  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                  (int)p->fault_state,
                  (unsigned int)p->out_data.data_sent_length,
                  (unsigned int)p->out_data.rdata.length));

        if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in p->rdata.
         * Create the header/data/footers. This also sets up the fields
         * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
         * and stores the outgoing PDU in p->current_pdu.
         */

        if (!create_next_pdu(p)) {
                DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
                return -1;
        }

        data_returned = MIN(n, p->out_data.frag.length);

        memcpy(data, p->out_data.frag.data, (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

 out:
        (*is_data_outstanding) = p->out_data.frag.length > n;

        if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
                /* We've returned everything in the out_data.frag
                 * so we're done with this pdu. Free it and reset
                 * current_pdu_sent. */
                p->out_data.current_pdu_sent = 0;
                data_blob_free(&p->out_data.frag);
        }

        if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
                /*
                 * We're completely finished with both outgoing and
                 * incoming data streams. It's safe to free all
                 * temporary data from this request.
                 */
                free_pipe_context(p);
        }

        return data_returned;
}
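
/****************************************************************************
 Returns true if the file handle refers to a named pipe, either one handled
 in-process or one proxied to an external RPC server.
****************************************************************************/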

bool fsp_is_np(struct files_struct *fsp)
{
        enum FAKE_FILE_TYPE type;

        if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
                return false;
        }

        type = fsp->fake_file_handle->type;

        return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
                || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}
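
/****************************************************************************
 Opens a named pipe. Depending on the "rpc_server" setting for the pipe name
 (and the legacy "np:proxy" list), the pipe is either served by an internal
 pipes_struct or proxied to an external RPC server.
****************************************************************************/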

NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
                 const struct tsocket_address *local_address,
                 const struct tsocket_address *remote_address,
                 struct client_address *client_id,
                 struct auth_serversupplied_info *server_info,
                 struct messaging_context *msg_ctx,
                 struct fake_file_handle **phandle)
{
        const char *rpcsrv_type;
        const char **proxy_list;
        struct fake_file_handle *handle;
        bool external = false;

        proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

        handle = talloc(mem_ctx, struct fake_file_handle);
        if (handle == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        /* Check what the server type is for this pipe.
           Defaults to "embedded". */
        rpcsrv_type = lp_parm_const_string(GLOBAL_SECTION_SNUM,
                                           "rpc_server", name,
                                           "embedded");
        if (StrCaseCmp(rpcsrv_type, "embedded") != 0) {
                external = true;
        }

        /* Still support the old method for defining external servers */
        if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
                external = true;
        }

        if (external) {
                struct np_proxy_state *p;

                p = make_external_rpc_pipe_p(handle, name,
                                             local_address,
                                             remote_address,
                                             server_info);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
                handle->private_data = p;
        } else {
                struct pipes_struct *p;
                struct ndr_syntax_id syntax;

                if (!is_known_pipename(name, &syntax)) {
                        TALLOC_FREE(handle);
                        return NT_STATUS_OBJECT_NAME_NOT_FOUND;
                }

                p = make_internal_rpc_pipe_p(handle, &syntax, client_id,
                                             server_info, msg_ctx);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
                handle->private_data = p;
        }

        if (handle->private_data == NULL) {
                TALLOC_FREE(handle);
                return NT_STATUS_PIPE_NOT_AVAILABLE;
        }

        *phandle = handle;

        return NT_STATUS_OK;
}
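
/****************************************************************************
 Reports whether a read is already queued on a proxied named pipe.
 Internal pipes never have an outstanding read.
****************************************************************************/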

bool np_read_in_progress(struct fake_file_handle *handle)
{
        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                return false;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                size_t read_count;

                read_count = tevent_queue_length(p->read_queue);
                if (read_count > 0) {
                        return true;
                }

                return false;
        }

        return false;
}
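
/****************************************************************************
 Async write to a named pipe. Internal pipes are written synchronously via
 write_to_internal_pipe(); proxied pipes queue a tstream writev request on
 the pipe's write queue.
****************************************************************************/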

struct np_write_state {
        struct event_context *ev;
        struct np_proxy_state *p;
        struct iovec iov;
        ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                 struct fake_file_handle *handle,
                                 const uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_write_state *state;
        NTSTATUS status;

        DEBUG(6, ("np_write_send: len: %d\n", (int)len));
        dump_data(50, data, len);

        req = tevent_req_create(mem_ctx, &state, struct np_write_state);
        if (req == NULL) {
                return NULL;
        }

        if (len == 0) {
                state->nwritten = 0;
                status = NT_STATUS_OK;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nwritten = write_to_internal_pipe(p, (char *)data, len);

                status = (state->nwritten >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                state->ev = ev;
                state->p = p;
                state->iov.iov_base = CONST_DISCARD(void *, data);
                state->iov.iov_len = len;

                subreq = tstream_writev_queue_send(state, ev,
                                                   p->npipe,
                                                   p->write_queue,
                                                   &state->iov, 1);
                if (subreq == NULL) {
                        goto fail;
                }
                tevent_req_set_callback(subreq, np_write_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
 fail:
        TALLOC_FREE(req);
        return NULL;
}

static void np_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        ssize_t received;
        int err;

        received = tstream_writev_queue_recv(subreq, &err);
        if (received < 0) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }
        state->nwritten = received;
        tevent_req_done(req);
}
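
/****************************************************************************
 Collects the result of np_write_send(): the number of bytes written.
****************************************************************************/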

NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pnwritten = state->nwritten;
        return NT_STATUS_OK;
}
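
/****************************************************************************
 Helper state and "next vector" callback used by
 tstream_readv_pdu_queue_send() to read as much of the currently pending
 data as fits into the caller's buffer.
****************************************************************************/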

struct np_ipc_readv_next_vector_state {
        uint8_t *buf;
        size_t len;
        off_t ofs;
        size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
                                          uint8_t *buf, size_t len)
{
        ZERO_STRUCTP(s);

        s->buf = buf;
        s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
                                    void *private_data,
                                    TALLOC_CTX *mem_ctx,
                                    struct iovec **_vector,
                                    size_t *count)
{
        struct np_ipc_readv_next_vector_state *state =
                (struct np_ipc_readv_next_vector_state *)private_data;
        struct iovec *vector;
        ssize_t pending;
        size_t wanted;

        if (state->ofs == state->len) {
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        pending = tstream_pending_bytes(stream);
        if (pending == -1) {
                return -1;
        }

        if (pending == 0 && state->ofs != 0) {
                /* return a short read */
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        if (pending == 0) {
                /* we want at least one byte and recheck again */
                wanted = 1;
        } else {
                size_t missing = state->len - state->ofs;
                if (pending > missing) {
                        /* there's more available */
                        state->remaining = pending - missing;
                        wanted = missing;
                } else {
                        /* read what we can get and recheck in the next cycle */
                        wanted = pending;
                }
        }

        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (!vector) {
                return -1;
        }

        vector[0].iov_base = state->buf + state->ofs;
        vector[0].iov_len = wanted;

        state->ofs += wanted;

        *_vector = vector;
        *count = 1;
        return 0;
}
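
/****************************************************************************
 Async read from a named pipe. Internal pipes are read synchronously via
 read_from_internal_pipe(); proxied pipes queue a tstream readv request on
 the pipe's read queue.
****************************************************************************/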

struct np_read_state {
        struct np_proxy_state *p;
        struct np_ipc_readv_next_vector_state next_vector;

        ssize_t nread;  /* signed so a failed read_from_internal_pipe() can be detected */
        bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                struct fake_file_handle *handle,
                                uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_read_state *state;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct np_read_state);
        if (req == NULL) {
                return NULL;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nread = read_from_internal_pipe(
                        p, (char *)data, len, &state->is_data_outstanding);

                status = (state->nread >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                np_ipc_readv_next_vector_init(&state->next_vector,
                                              data, len);

                subreq = tstream_readv_pdu_queue_send(state,
                                                      ev,
                                                      p->npipe,
                                                      p->read_queue,
                                                      np_ipc_readv_next_vector,
                                                      &state->next_vector);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, np_read_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
}

static void np_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        ssize_t ret;
        int err;

        ret = tstream_readv_pdu_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }

        state->nread = ret;
        state->is_data_outstanding = (state->next_vector.remaining > 0);

        tevent_req_done(req);
        return;
}
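
/****************************************************************************
 Collects the result of np_read_send(): the number of bytes read and whether
 more data is still outstanding on the pipe.
****************************************************************************/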

NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
                      bool *is_data_outstanding)
{
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }

        DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
                   (int)state->nread, state->is_data_outstanding?"":"no "));

        *nread = state->nread;
        *is_data_outstanding = state->is_data_outstanding;
        return NT_STATUS_OK;
}