/*
 * Unix SMB/CIFS implementation.
 * RPC Pipe client / server routines
 * Copyright (C) Andrew Tridgell 1992-1998,
 * Largely re-written : 2005
 * Copyright (C) Jeremy Allison 1998 - 2005
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "../librpc/gen_ndr/srv_spoolss.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"
#include "../libcli/named_pipe_auth/npa_tstream.h"
#include "rpc_server.h"
#include "smbd/globals.h"
#include "fake_file.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV
34 /****************************************************************************
35 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
36 ****************************************************************************/
38 static ssize_t
fill_rpc_header(struct pipes_struct
*p
, char *data
, size_t data_to_copy
)
40 size_t len_needed_to_complete_hdr
=
41 MIN(data_to_copy
, RPC_HEADER_LEN
- p
->in_data
.pdu
.length
);
43 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
44 "len_needed_to_complete_hdr = %u, "
46 (unsigned int)data_to_copy
,
47 (unsigned int)len_needed_to_complete_hdr
,
48 (unsigned int)p
->in_data
.pdu
.length
));
50 if (p
->in_data
.pdu
.data
== NULL
) {
51 p
->in_data
.pdu
.data
= talloc_array(p
, uint8_t, RPC_HEADER_LEN
);
53 if (p
->in_data
.pdu
.data
== NULL
) {
54 DEBUG(0, ("talloc failed\n"));
58 memcpy((char *)&p
->in_data
.pdu
.data
[p
->in_data
.pdu
.length
],
59 data
, len_needed_to_complete_hdr
);
60 p
->in_data
.pdu
.length
+= len_needed_to_complete_hdr
;
62 return (ssize_t
)len_needed_to_complete_hdr
;
65 static bool get_pdu_size(struct pipes_struct
*p
)
68 /* the fill_rpc_header() call insures we copy only
69 * RPC_HEADER_LEN bytes. If this doesn't match then
70 * somethign is very wrong and we can only abort */
71 if (p
->in_data
.pdu
.length
!= RPC_HEADER_LEN
) {
72 DEBUG(0, ("Unexpected RPC Header size! "
73 "got %d, expected %d)\n",
74 (int)p
->in_data
.pdu
.length
,
76 set_incoming_fault(p
);
80 frag_len
= dcerpc_get_frag_length(&p
->in_data
.pdu
);
82 /* verify it is a reasonable value */
83 if ((frag_len
< RPC_HEADER_LEN
) ||
84 (frag_len
> RPC_MAX_PDU_FRAG_LEN
)) {
85 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
87 set_incoming_fault(p
);
91 p
->in_data
.pdu_needed_len
= frag_len
- RPC_HEADER_LEN
;
93 /* allocate the space needed to fill the pdu */
94 p
->in_data
.pdu
.data
= talloc_realloc(p
, p
->in_data
.pdu
.data
,
96 if (p
->in_data
.pdu
.data
== NULL
) {
97 DEBUG(0, ("talloc_realloc failed\n"));
98 set_incoming_fault(p
);
105 /****************************************************************************
106 Call this to free any talloc'ed memory. Do this after processing
107 a complete incoming and outgoing request (multiple incoming/outgoing
109 ****************************************************************************/
111 static void free_pipe_context(struct pipes_struct
*p
)
113 data_blob_free(&p
->out_data
.frag
);
114 data_blob_free(&p
->out_data
.rdata
);
115 data_blob_free(&p
->in_data
.data
);
117 DEBUG(3, ("free_pipe_context: "
118 "destroying talloc pool of size %lu\n",
119 (unsigned long)talloc_total_size(p
->mem_ctx
)));
120 talloc_free_children(p
->mem_ctx
);
123 /****************************************************************************
124 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
125 ****************************************************************************/
127 static ssize_t
process_incoming_data(struct pipes_struct
*p
, char *data
, size_t n
)
129 size_t data_to_copy
= MIN(n
, RPC_MAX_PDU_FRAG_LEN
130 - p
->in_data
.pdu
.length
);
132 DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
133 "pdu_needed_len = %u, incoming data = %u\n",
134 (unsigned int)p
->in_data
.pdu
.length
,
135 (unsigned int)p
->in_data
.pdu_needed_len
,
138 if(data_to_copy
== 0) {
140 * This is an error - data is being received and there is no
141 * space in the PDU. Free the received data and go into the
144 DEBUG(0, ("process_incoming_data: "
145 "No space in incoming pdu buffer. "
146 "Current size = %u incoming data size = %u\n",
147 (unsigned int)p
->in_data
.pdu
.length
,
149 set_incoming_fault(p
);
154 * If we have no data already, wait until we get at least
155 * a RPC_HEADER_LEN * number of bytes before we can do anything.
158 if ((p
->in_data
.pdu_needed_len
== 0) &&
159 (p
->in_data
.pdu
.length
< RPC_HEADER_LEN
)) {
161 * Always return here. If we have more data then the RPC_HEADER
162 * will be processed the next time around the loop.
164 return fill_rpc_header(p
, data
, data_to_copy
);
168 * At this point we know we have at least an RPC_HEADER_LEN amount of
169 * data stored in p->in_data.pdu.
173 * If pdu_needed_len is zero this is a new pdu.
174 * Check how much more data we need, then loop again.
176 if (p
->in_data
.pdu_needed_len
== 0) {
178 bool ok
= get_pdu_size(p
);
182 if (p
->in_data
.pdu_needed_len
> 0) {
186 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
187 * that consists of an RPC_HEADER only. This is a
188 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
189 * DCERPC_PKT_ORPHANED pdu type.
190 * Deal with this in process_complete_pdu(). */
194 * Ok - at this point we have a valid RPC_HEADER.
195 * Keep reading until we have a full pdu.
198 data_to_copy
= MIN(data_to_copy
, p
->in_data
.pdu_needed_len
);
201 * Copy as much of the data as we need into the p->in_data.pdu buffer.
202 * pdu_needed_len becomes zero when we have a complete pdu.
205 memcpy((char *)&p
->in_data
.pdu
.data
[p
->in_data
.pdu
.length
],
207 p
->in_data
.pdu
.length
+= data_to_copy
;
208 p
->in_data
.pdu_needed_len
-= data_to_copy
;
211 * Do we have a complete PDU ?
212 * (return the number of bytes handled in the call)
215 if(p
->in_data
.pdu_needed_len
== 0) {
216 process_complete_pdu(p
);
220 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
221 "pdu.length = %u, pdu_needed_len = %u\n",
222 (unsigned int)p
->in_data
.pdu
.length
,
223 (unsigned int)p
->in_data
.pdu_needed_len
));
225 return (ssize_t
)data_to_copy
;
/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
 Feeds the buffer to process_incoming_data() until it is fully consumed.
 Returns n on success or -1 if the pipe entered the fault state.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data,
				      size_t n)
{
	size_t data_left = n;

	while (data_left) {
		ssize_t data_used;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			   (unsigned int)data_left));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)data_used));

		if (data_used < 0) {
			return -1;
		}

		/* Advance past the bytes that were consumed. */
		data_left -= data_used;
		data += data_used;
	}

	return n;
}
258 /****************************************************************************
259 Replies to a request to read data from a pipe.
261 Headers are interspersed with the data at PDU intervals. By the time
262 this function is called, the start of the data could possibly have been
263 read by an SMBtrans (file_offset != 0).
265 Calling create_rpc_reply() here is a hack. The data should already
266 have been prepared into arrays of headers + data stream sections.
267 ****************************************************************************/
269 static ssize_t
read_from_internal_pipe(struct pipes_struct
*p
, char *data
,
270 size_t n
, bool *is_data_outstanding
)
272 uint32 pdu_remaining
= 0;
273 ssize_t data_returned
= 0;
276 DEBUG(0,("read_from_pipe: pipe not open\n"));
280 DEBUG(6,(" name: %s len: %u\n",
281 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
285 * We cannot return more than one PDU length per
290 * This condition should result in the connection being closed.
291 * Netapp filers seem to set it to 0xffff which results in domain
292 * authentications failing. Just ignore it so things work.
295 if(n
> RPC_MAX_PDU_FRAG_LEN
) {
296 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
297 "pipe %s. We can only service %d sized reads.\n",
299 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
300 RPC_MAX_PDU_FRAG_LEN
));
301 n
= RPC_MAX_PDU_FRAG_LEN
;
305 * Determine if there is still data to send in the
306 * pipe PDU buffer. Always send this first. Never
307 * send more than is left in the current PDU. The
308 * client should send a new read request for a new
312 pdu_remaining
= p
->out_data
.frag
.length
313 - p
->out_data
.current_pdu_sent
;
315 if (pdu_remaining
> 0) {
316 data_returned
= (ssize_t
)MIN(n
, pdu_remaining
);
318 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
319 "current_pdu_sent = %u returning %d bytes.\n",
320 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
321 (unsigned int)p
->out_data
.frag
.length
,
322 (unsigned int)p
->out_data
.current_pdu_sent
,
323 (int)data_returned
));
326 p
->out_data
.frag
.data
327 + p
->out_data
.current_pdu_sent
,
330 p
->out_data
.current_pdu_sent
+= (uint32
)data_returned
;
335 * At this point p->current_pdu_len == p->current_pdu_sent (which
336 * may of course be zero if this is the first return fragment.
339 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
340 "= %u, p->out_data.rdata.length = %u.\n",
341 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
),
343 (unsigned int)p
->out_data
.data_sent_length
,
344 (unsigned int)p
->out_data
.rdata
.length
));
346 if (p
->out_data
.data_sent_length
>= p
->out_data
.rdata
.length
) {
348 * We have sent all possible data, return 0.
355 * We need to create a new PDU from the data left in p->rdata.
356 * Create the header/data/footers. This also sets up the fields
357 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
358 * and stores the outgoing PDU in p->current_pdu.
361 if(!create_next_pdu(p
)) {
362 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
363 get_pipe_name_from_syntax(talloc_tos(), &p
->syntax
)));
367 data_returned
= MIN(n
, p
->out_data
.frag
.length
);
369 memcpy(data
, p
->out_data
.frag
.data
, (size_t)data_returned
);
370 p
->out_data
.current_pdu_sent
+= (uint32
)data_returned
;
373 (*is_data_outstanding
) = p
->out_data
.frag
.length
> n
;
375 if (p
->out_data
.current_pdu_sent
== p
->out_data
.frag
.length
) {
376 /* We've returned everything in the out_data.frag
377 * so we're done with this pdu. Free it and reset
378 * current_pdu_sent. */
379 p
->out_data
.current_pdu_sent
= 0;
380 data_blob_free(&p
->out_data
.frag
);
382 if (p
->out_data
.data_sent_length
>= p
->out_data
.rdata
.length
) {
384 * We're completely finished with both outgoing and
385 * incoming data streams. It's safe to free all
386 * temporary data from this request.
388 free_pipe_context(p
);
392 return data_returned
;
395 bool fsp_is_np(struct files_struct
*fsp
)
397 enum FAKE_FILE_TYPE type
;
399 if ((fsp
== NULL
) || (fsp
->fake_file_handle
== NULL
)) {
403 type
= fsp
->fake_file_handle
->type
;
405 return ((type
== FAKE_FILE_TYPE_NAMED_PIPE
)
406 || (type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
));
/* Per-connection state for a named pipe proxied to an external RPC
 * server over a unix-domain socket. */
struct np_proxy_state {
	/* NOTE(review): file_type reconstructed — it is the companion
	 * out-value of tstream_npa_connect_recv() next to device_state;
	 * confirm against the tstream_npa API. */
	uint16_t file_type;
	uint16_t device_state;
	uint64_t allocation_size;
	struct tstream_context *npipe;	/* stream to the external pipe server */
	struct tevent_queue *read_queue;	/* serializes reads on npipe */
	struct tevent_queue *write_queue;	/* serializes writes on npipe */
};
418 static struct np_proxy_state
*make_external_rpc_pipe_p(TALLOC_CTX
*mem_ctx
,
419 const char *pipe_name
,
420 const struct tsocket_address
*local_address
,
421 const struct tsocket_address
*remote_address
,
422 struct auth_serversupplied_info
*server_info
)
424 struct np_proxy_state
*result
;
426 const char *socket_dir
;
427 struct tevent_context
*ev
;
428 struct tevent_req
*subreq
;
429 struct netr_SamInfo3
*info3
;
435 result
= talloc(mem_ctx
, struct np_proxy_state
);
436 if (result
== NULL
) {
437 DEBUG(0, ("talloc failed\n"));
441 result
->read_queue
= tevent_queue_create(result
, "np_read");
442 if (result
->read_queue
== NULL
) {
443 DEBUG(0, ("tevent_queue_create failed\n"));
447 result
->write_queue
= tevent_queue_create(result
, "np_write");
448 if (result
->write_queue
== NULL
) {
449 DEBUG(0, ("tevent_queue_create failed\n"));
453 ev
= s3_tevent_context_init(talloc_tos());
455 DEBUG(0, ("s3_tevent_context_init failed\n"));
459 socket_dir
= lp_parm_const_string(
460 GLOBAL_SECTION_SNUM
, "external_rpc_pipe", "socket_dir",
461 get_dyn_NCALRPCDIR());
462 if (socket_dir
== NULL
) {
463 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
466 socket_np_dir
= talloc_asprintf(talloc_tos(), "%s/np", socket_dir
);
467 if (socket_np_dir
== NULL
) {
468 DEBUG(0, ("talloc_asprintf failed\n"));
472 info3
= talloc_zero(talloc_tos(), struct netr_SamInfo3
);
474 DEBUG(0, ("talloc failed\n"));
478 status
= serverinfo_to_SamInfo3(server_info
, NULL
, 0, info3
);
479 if (!NT_STATUS_IS_OK(status
)) {
481 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
487 subreq
= tstream_npa_connect_send(talloc_tos(), ev
,
490 remote_address
, /* client_addr */
491 NULL
, /* client_name */
492 local_address
, /* server_addr */
493 NULL
, /* server_name */
495 server_info
->user_session_key
,
496 data_blob_null
/* delegated_creds */);
497 if (subreq
== NULL
) {
499 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
500 "user %s\\%s failed\n",
501 socket_np_dir
, pipe_name
, info3
->base
.domain
.string
,
502 info3
->base
.account_name
.string
));
505 ok
= tevent_req_poll(subreq
, ev
);
508 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
509 "failed for tstream_npa_connect: %s\n",
510 socket_np_dir
, pipe_name
, info3
->base
.domain
.string
,
511 info3
->base
.account_name
.string
,
516 ret
= tstream_npa_connect_recv(subreq
, &sys_errno
,
520 &result
->device_state
,
521 &result
->allocation_size
);
524 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
525 "user %s\\%s failed: %s\n",
526 socket_np_dir
, pipe_name
, info3
->base
.domain
.string
,
527 info3
->base
.account_name
.string
,
528 strerror(sys_errno
)));
539 NTSTATUS
np_open(TALLOC_CTX
*mem_ctx
, const char *name
,
540 const struct tsocket_address
*local_address
,
541 const struct tsocket_address
*remote_address
,
542 struct client_address
*client_id
,
543 struct auth_serversupplied_info
*server_info
,
544 struct messaging_context
*msg_ctx
,
545 struct fake_file_handle
**phandle
)
547 const char **proxy_list
;
548 struct fake_file_handle
*handle
;
550 proxy_list
= lp_parm_string_list(-1, "np", "proxy", NULL
);
552 handle
= talloc(mem_ctx
, struct fake_file_handle
);
553 if (handle
== NULL
) {
554 return NT_STATUS_NO_MEMORY
;
557 if ((proxy_list
!= NULL
) && str_list_check_ci(proxy_list
, name
)) {
558 struct np_proxy_state
*p
;
560 p
= make_external_rpc_pipe_p(handle
, name
,
565 handle
->type
= FAKE_FILE_TYPE_NAMED_PIPE_PROXY
;
566 handle
->private_data
= p
;
568 struct pipes_struct
*p
;
569 struct ndr_syntax_id syntax
;
571 if (!is_known_pipename(name
, &syntax
)) {
573 return NT_STATUS_OBJECT_NAME_NOT_FOUND
;
576 p
= make_internal_rpc_pipe_p(handle
, &syntax
, client_id
,
577 server_info
, msg_ctx
);
579 handle
->type
= FAKE_FILE_TYPE_NAMED_PIPE
;
580 handle
->private_data
= p
;
583 if (handle
->private_data
== NULL
) {
585 return NT_STATUS_PIPE_NOT_AVAILABLE
;
593 bool np_read_in_progress(struct fake_file_handle
*handle
)
595 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE
) {
599 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
) {
600 struct np_proxy_state
*p
= talloc_get_type_abort(
601 handle
->private_data
, struct np_proxy_state
);
604 read_count
= tevent_queue_length(p
->read_queue
);
605 if (read_count
> 0) {
/* Async write request state for np_write_send/np_write_recv. */
struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
	struct iovec iov;	/* buffer handed to tstream_writev_queue_send() */
	ssize_t nwritten;	/* bytes written, reported by np_write_recv() */
};

static void np_write_done(struct tevent_req *subreq);
624 struct tevent_req
*np_write_send(TALLOC_CTX
*mem_ctx
, struct event_context
*ev
,
625 struct fake_file_handle
*handle
,
626 const uint8_t *data
, size_t len
)
628 struct tevent_req
*req
;
629 struct np_write_state
*state
;
632 DEBUG(6, ("np_write_send: len: %d\n", (int)len
));
633 dump_data(50, data
, len
);
635 req
= tevent_req_create(mem_ctx
, &state
, struct np_write_state
);
642 status
= NT_STATUS_OK
;
646 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE
) {
647 struct pipes_struct
*p
= talloc_get_type_abort(
648 handle
->private_data
, struct pipes_struct
);
650 state
->nwritten
= write_to_internal_pipe(p
, (char *)data
, len
);
652 status
= (state
->nwritten
>= 0)
653 ? NT_STATUS_OK
: NT_STATUS_UNEXPECTED_IO_ERROR
;
657 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
) {
658 struct np_proxy_state
*p
= talloc_get_type_abort(
659 handle
->private_data
, struct np_proxy_state
);
660 struct tevent_req
*subreq
;
664 state
->iov
.iov_base
= CONST_DISCARD(void *, data
);
665 state
->iov
.iov_len
= len
;
667 subreq
= tstream_writev_queue_send(state
, ev
,
671 if (subreq
== NULL
) {
674 tevent_req_set_callback(subreq
, np_write_done
, req
);
678 status
= NT_STATUS_INVALID_HANDLE
;
680 if (NT_STATUS_IS_OK(status
)) {
681 tevent_req_done(req
);
683 tevent_req_nterror(req
, status
);
685 return tevent_req_post(req
, ev
);
691 static void np_write_done(struct tevent_req
*subreq
)
693 struct tevent_req
*req
= tevent_req_callback_data(
694 subreq
, struct tevent_req
);
695 struct np_write_state
*state
= tevent_req_data(
696 req
, struct np_write_state
);
700 received
= tstream_writev_queue_recv(subreq
, &err
);
702 tevent_req_nterror(req
, map_nt_error_from_unix(err
));
705 state
->nwritten
= received
;
706 tevent_req_done(req
);
709 NTSTATUS
np_write_recv(struct tevent_req
*req
, ssize_t
*pnwritten
)
711 struct np_write_state
*state
= tevent_req_data(
712 req
, struct np_write_state
);
715 if (tevent_req_is_nterror(req
, &status
)) {
718 *pnwritten
= state
->nwritten
;
/* Cursor state for np_ipc_readv_next_vector(): tracks progress filling
 * a caller buffer from the proxy stream. */
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;		/* destination buffer */
	size_t len;		/* total bytes wanted (capped at UINT16_MAX) */
	off_t ofs;		/* bytes filled so far */
	size_t remaining;	/* bytes still pending in the stream beyond len */
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
					  uint8_t *buf, size_t len)
{
	memset(s, 0, sizeof(*s));

	s->buf = buf;
	/* A named-pipe read can never exceed a 16-bit length. */
	s->len = MIN(len, UINT16_MAX);
}
738 static int np_ipc_readv_next_vector(struct tstream_context
*stream
,
741 struct iovec
**_vector
,
744 struct np_ipc_readv_next_vector_state
*state
=
745 (struct np_ipc_readv_next_vector_state
*)private_data
;
746 struct iovec
*vector
;
750 if (state
->ofs
== state
->len
) {
756 pending
= tstream_pending_bytes(stream
);
761 if (pending
== 0 && state
->ofs
!= 0) {
762 /* return a short read */
769 /* we want at least one byte and recheck again */
772 size_t missing
= state
->len
- state
->ofs
;
773 if (pending
> missing
) {
774 /* there's more available */
775 state
->remaining
= pending
- missing
;
778 /* read what we can get and recheck in the next cycle */
783 vector
= talloc_array(mem_ctx
, struct iovec
, 1);
788 vector
[0].iov_base
= state
->buf
+ state
->ofs
;
789 vector
[0].iov_len
= wanted
;
791 state
->ofs
+= wanted
;
798 struct np_read_state
{
799 struct np_proxy_state
*p
;
800 struct np_ipc_readv_next_vector_state next_vector
;
803 bool is_data_outstanding
;
806 static void np_read_done(struct tevent_req
*subreq
);
808 struct tevent_req
*np_read_send(TALLOC_CTX
*mem_ctx
, struct event_context
*ev
,
809 struct fake_file_handle
*handle
,
810 uint8_t *data
, size_t len
)
812 struct tevent_req
*req
;
813 struct np_read_state
*state
;
816 req
= tevent_req_create(mem_ctx
, &state
, struct np_read_state
);
821 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE
) {
822 struct pipes_struct
*p
= talloc_get_type_abort(
823 handle
->private_data
, struct pipes_struct
);
825 state
->nread
= read_from_internal_pipe(
826 p
, (char *)data
, len
, &state
->is_data_outstanding
);
828 status
= (state
->nread
>= 0)
829 ? NT_STATUS_OK
: NT_STATUS_UNEXPECTED_IO_ERROR
;
833 if (handle
->type
== FAKE_FILE_TYPE_NAMED_PIPE_PROXY
) {
834 struct np_proxy_state
*p
= talloc_get_type_abort(
835 handle
->private_data
, struct np_proxy_state
);
836 struct tevent_req
*subreq
;
838 np_ipc_readv_next_vector_init(&state
->next_vector
,
841 subreq
= tstream_readv_pdu_queue_send(state
,
845 np_ipc_readv_next_vector
,
846 &state
->next_vector
);
847 if (subreq
== NULL
) {
850 tevent_req_set_callback(subreq
, np_read_done
, req
);
854 status
= NT_STATUS_INVALID_HANDLE
;
856 if (NT_STATUS_IS_OK(status
)) {
857 tevent_req_done(req
);
859 tevent_req_nterror(req
, status
);
861 return tevent_req_post(req
, ev
);
864 static void np_read_done(struct tevent_req
*subreq
)
866 struct tevent_req
*req
= tevent_req_callback_data(
867 subreq
, struct tevent_req
);
868 struct np_read_state
*state
= tevent_req_data(
869 req
, struct np_read_state
);
873 ret
= tstream_readv_pdu_queue_recv(subreq
, &err
);
876 tevent_req_nterror(req
, map_nt_error_from_unix(err
));
881 state
->is_data_outstanding
= (state
->next_vector
.remaining
> 0);
883 tevent_req_done(req
);
887 NTSTATUS
np_read_recv(struct tevent_req
*req
, ssize_t
*nread
,
888 bool *is_data_outstanding
)
890 struct np_read_state
*state
= tevent_req_data(
891 req
, struct np_read_state
);
894 if (tevent_req_is_nterror(req
, &status
)) {
897 *nread
= state
->nread
;
898 *is_data_outstanding
= state
->is_data_outstanding
;
903 * @brief Create a new RPC client context which uses a local dispatch function.
905 * @param[in] conn The connection struct that will hold the pipe
907 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
909 * @return NT_STATUS_OK on success, a corresponding NT status if an
912 NTSTATUS
rpc_connect_spoolss_pipe(connection_struct
*conn
,
913 struct rpc_pipe_client
**spoolss_pipe
)
917 /* TODO: check and handle disconnections */
919 if (!conn
->spoolss_pipe
) {
920 status
= rpc_pipe_open_internal(conn
,
921 &ndr_table_spoolss
.syntax_id
,
923 &conn
->sconn
->client_id
,
924 conn
->sconn
->msg_ctx
,
925 &conn
->spoolss_pipe
);
926 if (!NT_STATUS_IS_OK(status
)) {
931 *spoolss_pipe
= conn
->spoolss_pipe
;