s3: Lift the server_messaging_context from send_spoolss_notify2_msg
[Samba/gbeck.git] / source3 / rpc_server / srv_pipe_hnd.c
blob37efe061c3f03bc515af307fd46a899e966f4394
1 /*
2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "rpc_server.h"
27 #include "smbd/globals.h"
29 #undef DBGC_CLASS
30 #define DBGC_CLASS DBGC_RPC_SRV
32 /****************************************************************************
33 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
34 ****************************************************************************/
36 static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
38 size_t len_needed_to_complete_hdr =
39 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
41 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
42 "len_needed_to_complete_hdr = %u, "
43 "receive_len = %u\n",
44 (unsigned int)data_to_copy,
45 (unsigned int)len_needed_to_complete_hdr,
46 (unsigned int)p->in_data.pdu.length ));
48 if (p->in_data.pdu.data == NULL) {
49 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
51 if (p->in_data.pdu.data == NULL) {
52 DEBUG(0, ("talloc failed\n"));
53 return -1;
56 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
57 data, len_needed_to_complete_hdr);
58 p->in_data.pdu.length += len_needed_to_complete_hdr;
60 return (ssize_t)len_needed_to_complete_hdr;
63 static bool get_pdu_size(struct pipes_struct *p)
65 uint16_t frag_len;
66 /* the fill_rpc_header() call insures we copy only
67 * RPC_HEADER_LEN bytes. If this doesn't match then
68 * somethign is very wrong and we can only abort */
69 if (p->in_data.pdu.length != RPC_HEADER_LEN) {
70 DEBUG(0, ("Unexpected RPC Header size! "
71 "got %d, expected %d)\n",
72 (int)p->in_data.pdu.length,
73 RPC_HEADER_LEN));
74 set_incoming_fault(p);
75 return false;
78 frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
80 /* verify it is a reasonable value */
81 if ((frag_len < RPC_HEADER_LEN) ||
82 (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
83 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
84 frag_len));
85 set_incoming_fault(p);
86 return false;
89 p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
91 /* allocate the space needed to fill the pdu */
92 p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
93 uint8_t, frag_len);
94 if (p->in_data.pdu.data == NULL) {
95 DEBUG(0, ("talloc_realloc failed\n"));
96 set_incoming_fault(p);
97 return false;
100 return true;
103 /****************************************************************************
104 Call this to free any talloc'ed memory. Do this after processing
105 a complete incoming and outgoing request (multiple incoming/outgoing
106 PDU's).
107 ****************************************************************************/
109 static void free_pipe_context(struct pipes_struct *p)
111 data_blob_free(&p->out_data.frag);
112 data_blob_free(&p->out_data.rdata);
113 data_blob_free(&p->in_data.data);
115 DEBUG(3, ("free_pipe_context: "
116 "destroying talloc pool of size %lu\n",
117 (unsigned long)talloc_total_size(p->mem_ctx)));
118 talloc_free_children(p->mem_ctx);
/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
 Returns the number of bytes consumed from 'data', or -1 on fault.
****************************************************************************/

static ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
{
	/* Cap the copy at the space left in a maximum-sized PDU buffer. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
					- p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * a RPC_HEADER_LEN * number of bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			return 0;
		}

		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
 Feeds the bytes to process_incoming_data() in PDU-sized chunks until all
 of them are consumed. Returns n on success, -1 on fault.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t remaining = n;

	while (remaining > 0) {
		ssize_t consumed;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			   (unsigned int)remaining));

		consumed = process_incoming_data(p, data, remaining);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)consumed));

		if (consumed < 0) {
			return -1;
		}

		remaining -= consumed;
		data += consumed;
	}

	return n;
}
/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.

 Returns the number of bytes placed into 'data', or -1 on error;
 *is_data_outstanding is set when more fragment data remains than fits
 into this read.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)p->out_data.frag.length,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, p->out_data.rdata.length = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);
	}

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We're completely finished with both outgoing and
		 * incoming data streams. It's safe to free all
		 * temporary data from this request.
		 */
		free_pipe_context(p);
	}

	return data_returned;
}
393 bool fsp_is_np(struct files_struct *fsp)
395 enum FAKE_FILE_TYPE type;
397 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
398 return false;
401 type = fsp->fake_file_handle->type;
403 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
404 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/*
 * Per-handle state for a pipe proxied to an external RPC server over
 * a named-pipe-auth (npa) tstream. file_type, device_state and
 * allocation_size are the values reported by tstream_npa_connect_recv().
 */
struct np_proxy_state {
	uint16_t file_type;
	uint16_t device_state;
	uint64_t allocation_size;
	struct tstream_context *npipe;	/* stream to the external server */
	struct tevent_queue *read_queue;	/* serializes reads on npipe */
	struct tevent_queue *write_queue;	/* serializes writes on npipe */
};
416 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
417 const char *pipe_name,
418 const struct tsocket_address *local_address,
419 const struct tsocket_address *remote_address,
420 struct auth_serversupplied_info *server_info)
422 struct np_proxy_state *result;
423 char *socket_np_dir;
424 const char *socket_dir;
425 struct tevent_context *ev;
426 struct tevent_req *subreq;
427 struct netr_SamInfo3 *info3;
428 NTSTATUS status;
429 bool ok;
430 int ret;
431 int sys_errno;
433 result = talloc(mem_ctx, struct np_proxy_state);
434 if (result == NULL) {
435 DEBUG(0, ("talloc failed\n"));
436 return NULL;
439 result->read_queue = tevent_queue_create(result, "np_read");
440 if (result->read_queue == NULL) {
441 DEBUG(0, ("tevent_queue_create failed\n"));
442 goto fail;
445 result->write_queue = tevent_queue_create(result, "np_write");
446 if (result->write_queue == NULL) {
447 DEBUG(0, ("tevent_queue_create failed\n"));
448 goto fail;
451 ev = s3_tevent_context_init(talloc_tos());
452 if (ev == NULL) {
453 DEBUG(0, ("s3_tevent_context_init failed\n"));
454 goto fail;
457 socket_dir = lp_parm_const_string(
458 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
459 get_dyn_NCALRPCDIR());
460 if (socket_dir == NULL) {
461 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
462 goto fail;
464 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
465 if (socket_np_dir == NULL) {
466 DEBUG(0, ("talloc_asprintf failed\n"));
467 goto fail;
470 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
471 if (info3 == NULL) {
472 DEBUG(0, ("talloc failed\n"));
473 goto fail;
476 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
477 if (!NT_STATUS_IS_OK(status)) {
478 TALLOC_FREE(info3);
479 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
480 nt_errstr(status)));
481 goto fail;
484 become_root();
485 subreq = tstream_npa_connect_send(talloc_tos(), ev,
486 socket_np_dir,
487 pipe_name,
488 remote_address, /* client_addr */
489 NULL, /* client_name */
490 local_address, /* server_addr */
491 NULL, /* server_name */
492 info3,
493 server_info->user_session_key,
494 data_blob_null /* delegated_creds */);
495 if (subreq == NULL) {
496 unbecome_root();
497 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
498 "user %s\\%s failed\n",
499 socket_np_dir, pipe_name, info3->base.domain.string,
500 info3->base.account_name.string));
501 goto fail;
503 ok = tevent_req_poll(subreq, ev);
504 unbecome_root();
505 if (!ok) {
506 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
507 "failed for tstream_npa_connect: %s\n",
508 socket_np_dir, pipe_name, info3->base.domain.string,
509 info3->base.account_name.string,
510 strerror(errno)));
511 goto fail;
514 ret = tstream_npa_connect_recv(subreq, &sys_errno,
515 result,
516 &result->npipe,
517 &result->file_type,
518 &result->device_state,
519 &result->allocation_size);
520 TALLOC_FREE(subreq);
521 if (ret != 0) {
522 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
523 "user %s\\%s failed: %s\n",
524 socket_np_dir, pipe_name, info3->base.domain.string,
525 info3->base.account_name.string,
526 strerror(sys_errno)));
527 goto fail;
530 return result;
532 fail:
533 TALLOC_FREE(result);
534 return NULL;
/*
 * Open a named pipe by name, producing a fake_file_handle that is either
 * a proxy to an external RPC server (if the pipe is listed in the
 * "np:proxy" parameter) or an internal rpc pipe. The handle owns the
 * per-pipe state via talloc.
 */
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct auth_serversupplied_info *server_info,
		 struct messaging_context *msg_ctx,
		 struct fake_file_handle **phandle)
{
	const char **proxy_list;
	struct fake_file_handle *handle;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		/* Pipe is configured to be served by an external process. */
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(handle, name,
					     local_address,
					     remote_address,
					     server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;
	} else {
		struct pipes_struct *p;
		struct ndr_syntax_id syntax;
		const char *client_address;

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		/* Non-inet transports (e.g. local) get an empty address. */
		if (tsocket_address_is_inet(remote_address, "ip")) {
			client_address = tsocket_address_inet_addr_string(
						remote_address,
						talloc_tos());
			if (client_address == NULL) {
				TALLOC_FREE(handle);
				return NT_STATUS_NO_MEMORY;
			}
		} else {
			client_address = "";
		}

		p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
					     server_info, msg_ctx);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;
	}

	/* Either branch leaves NULL here on failure to create the pipe. */
	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}
603 bool np_read_in_progress(struct fake_file_handle *handle)
605 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
606 return false;
609 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
610 struct np_proxy_state *p = talloc_get_type_abort(
611 handle->private_data, struct np_proxy_state);
612 size_t read_count;
614 read_count = tevent_queue_length(p->read_queue);
615 if (read_count > 0) {
616 return true;
619 return false;
622 return false;
/* Async state for np_write_send()/np_write_recv(). */
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
	struct iovec iov;	/* points at the caller's data for the writev */
	ssize_t nwritten;	/* bytes written; result for np_write_recv() */
};

static void np_write_done(struct tevent_req *subreq);
/*
 * Write 'len' bytes to a named pipe handle. Internal pipes are written
 * synchronously and the result posted immediately; proxy pipes queue an
 * async writev on the proxy stream. Collect the result with
 * np_write_recv().
 */
struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	if (len == 0) {
		/* Zero-length writes succeed trivially. */
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Internal pipe: synchronous, completes right away. */
		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		/* The write queue keeps concurrent writes ordered. */
		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
701 static void np_write_done(struct tevent_req *subreq)
703 struct tevent_req *req = tevent_req_callback_data(
704 subreq, struct tevent_req);
705 struct np_write_state *state = tevent_req_data(
706 req, struct np_write_state);
707 ssize_t received;
708 int err;
710 received = tstream_writev_queue_recv(subreq, &err);
711 if (received < 0) {
712 tevent_req_nterror(req, map_nt_error_from_unix(err));
713 return;
715 state->nwritten = received;
716 tevent_req_done(req);
719 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
721 struct np_write_state *state = tevent_req_data(
722 req, struct np_write_state);
723 NTSTATUS status;
725 if (tevent_req_is_nterror(req, &status)) {
726 return status;
728 *pnwritten = state->nwritten;
729 return NT_STATUS_OK;
/* Cursor state for the incremental read into a caller buffer,
 * driven by np_ipc_readv_next_vector(). */
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;		/* caller's destination buffer */
	size_t len;		/* capacity we may fill (capped at UINT16_MAX) */
	off_t ofs;		/* bytes placed into buf so far */
	size_t remaining;	/* bytes still pending on the stream beyond len */
};
739 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
740 uint8_t *buf, size_t len)
742 ZERO_STRUCTP(s);
744 s->buf = buf;
745 s->len = MIN(len, UINT16_MAX);
/*
 * next-vector callback for tstream_readv_pdu_queue_send(): hand back the
 * next iovec to fill, or an empty vector (*count == 0) to finish the
 * read. Implements "read at least one byte, then take whatever is
 * pending up to the buffer size", allowing a short read once some data
 * has arrived. Returns 0 on success, -1 on error.
 */
static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	/* Buffer full: terminate the read. */
	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
/* Async state for np_read_send()/np_read_recv(). */
struct np_read_state {
	struct np_proxy_state *p;
	struct np_ipc_readv_next_vector_state next_vector;

	size_t nread;			/* bytes read; result for np_read_recv() */
	bool is_data_outstanding;	/* more data available beyond this read */
};

static void np_read_done(struct tevent_req *subreq);
818 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
819 struct fake_file_handle *handle,
820 uint8_t *data, size_t len)
822 struct tevent_req *req;
823 struct np_read_state *state;
824 NTSTATUS status;
826 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
827 if (req == NULL) {
828 return NULL;
831 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
832 struct pipes_struct *p = talloc_get_type_abort(
833 handle->private_data, struct pipes_struct);
835 state->nread = read_from_internal_pipe(
836 p, (char *)data, len, &state->is_data_outstanding);
838 status = (state->nread >= 0)
839 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
840 goto post_status;
843 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
844 struct np_proxy_state *p = talloc_get_type_abort(
845 handle->private_data, struct np_proxy_state);
846 struct tevent_req *subreq;
848 np_ipc_readv_next_vector_init(&state->next_vector,
849 data, len);
851 subreq = tstream_readv_pdu_queue_send(state,
853 p->npipe,
854 p->read_queue,
855 np_ipc_readv_next_vector,
856 &state->next_vector);
857 if (subreq == NULL) {
860 tevent_req_set_callback(subreq, np_read_done, req);
861 return req;
864 status = NT_STATUS_INVALID_HANDLE;
865 post_status:
866 if (NT_STATUS_IS_OK(status)) {
867 tevent_req_done(req);
868 } else {
869 tevent_req_nterror(req, status);
871 return tevent_req_post(req, ev);
874 static void np_read_done(struct tevent_req *subreq)
876 struct tevent_req *req = tevent_req_callback_data(
877 subreq, struct tevent_req);
878 struct np_read_state *state = tevent_req_data(
879 req, struct np_read_state);
880 ssize_t ret;
881 int err;
883 ret = tstream_readv_pdu_queue_recv(subreq, &err);
884 TALLOC_FREE(subreq);
885 if (ret == -1) {
886 tevent_req_nterror(req, map_nt_error_from_unix(err));
887 return;
890 state->nread = ret;
891 state->is_data_outstanding = (state->next_vector.remaining > 0);
893 tevent_req_done(req);
894 return;
897 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
898 bool *is_data_outstanding)
900 struct np_read_state *state = tevent_req_data(
901 req, struct np_read_state);
902 NTSTATUS status;
904 if (tevent_req_is_nterror(req, &status)) {
905 return status;
907 *nread = state->nread;
908 *is_data_outstanding = state->is_data_outstanding;
909 return NT_STATUS_OK;
913 * @brief Create a new RPC client context which uses a local dispatch function.
915 * @param[in] conn The connection struct that will hold the pipe
917 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
919 * @return NT_STATUS_OK on success, a corresponding NT status if an
920 * error occured.
922 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
923 struct rpc_pipe_client **spoolss_pipe)
925 NTSTATUS status;
927 /* TODO: check and handle disconnections */
929 if (!conn->spoolss_pipe) {
930 status = rpc_pipe_open_internal(conn,
931 &ndr_table_spoolss.syntax_id,
932 conn->server_info,
933 conn->sconn->msg_ctx,
934 &conn->spoolss_pipe);
935 if (!NT_STATUS_IS_OK(status)) {
936 return status;
940 *spoolss_pipe = conn->spoolss_pipe;
941 return NT_STATUS_OK;