/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell 1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison 1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "../librpc/gen_ndr/srv_spoolss.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"
#include "../libcli/named_pipe_auth/npa_tstream.h"
#include "rpc_server.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, char *data,
			       size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr =
		MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

	DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
		   "len_needed_to_complete_hdr = %u, "
		   "pdu.length = %u\n",
		   (unsigned int)data_to_copy,
		   (unsigned int)len_needed_to_complete_hdr,
		   (unsigned int)p->in_data.pdu.length));

	if (p->in_data.pdu.data == NULL) {
		p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
	}
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return -1;
	}

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, len_needed_to_complete_hdr);
	p->in_data.pdu.length += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}
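
/****************************************************************************
 Parse the RPC header and work out how many more bytes are needed for the
 rest of this PDU. Note: dcerpc_get_frag_length() reads the frag_length field
 of the connection-oriented DCE/RPC common header (per C706 the header is
 16 bytes, with the 16-bit frag_length at offset 8), which is why having
 RPC_HEADER_LEN bytes is enough to size the whole fragment.
****************************************************************************/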

static bool get_pdu_size(struct pipes_struct *p)
{
	uint16_t frag_len;

	/* the fill_rpc_header() call ensures we copy only
	 * RPC_HEADER_LEN bytes. If this doesn't match then
	 * something is very wrong and we can only abort */
	if (p->in_data.pdu.length != RPC_HEADER_LEN) {
		DEBUG(0, ("Unexpected RPC Header size! "
			  "got %d, expected %d\n",
			  (int)p->in_data.pdu.length,
			  RPC_HEADER_LEN));
		set_incoming_fault(p);
		return false;
	}

	frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

	/* verify it is a reasonable value */
	if ((frag_len < RPC_HEADER_LEN) ||
	    (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
			  frag_len));
		set_incoming_fault(p);
		return false;
	}

	p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

	/* allocate the space needed to fill the pdu */
	p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
					     uint8_t, frag_len);
	if (p->in_data.pdu.data == NULL) {
		DEBUG(0, ("talloc_realloc failed\n"));
		set_incoming_fault(p);
		return false;
	}

	return true;
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this after processing
 a complete incoming and outgoing request (multiple incoming/outgoing
 PDUs).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
	data_blob_free(&p->out_data.frag);
	data_blob_free(&p->out_data.rdata);
	data_blob_free(&p->in_data.data);

	DEBUG(3, ("free_pipe_context: "
		  "destroying talloc pool of size %lu\n",
		  (unsigned long)talloc_total_size(p->mem_ctx)));
	talloc_free_children(p->mem_ctx);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(struct pipes_struct *p,
				     char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
				     - p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n));

	if (data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * an RPC_HEADER_LEN number of bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			return 0;
		}

		/* If we get here and pdu_needed_len is still zero, we have
		 * a PDU that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
	       data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if (p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
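
/*
 * Note: fill_rpc_header(), get_pdu_size() and process_incoming_data() above
 * form a small reassembly state machine: the header bytes are gathered
 * first, the fragment is then sized from frag_length, and once
 * pdu_needed_len reaches zero the complete fragment is handed to
 * process_complete_pdu().
 */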

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p,
				      char *data, size_t n)
{
	size_t data_left = n;

	while (data_left) {
		ssize_t data_used;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			   (unsigned int)data_left));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)data_used));

		if (data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0, ("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6, (" name: %s len: %u\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing. Just ignore it so things work.
	 */

	if (n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5, ("read_from_pipe: too large read (%u) requested on "
			  "pipe %s. We can only service %d sized reads.\n",
			  (unsigned int)n,
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  RPC_MAX_PDU_FRAG_LEN));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10, ("read_from_pipe: %s: current_pdu_len = %u, "
			   "current_pdu_sent = %u returning %d bytes.\n",
			   get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			   (unsigned int)p->out_data.frag.length,
			   (unsigned int)p->out_data.current_pdu_sent,
			   (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10, ("read_from_pipe: %s: fault_state = %d : data_sent_length "
		   "= %u, p->out_data.rdata.length = %u.\n",
		   get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		   (int)p->fault_state,
		   (unsigned int)p->out_data.data_sent_length,
		   (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if (!create_next_pdu(p)) {
		DEBUG(0, ("read_from_pipe: %s: create_next_pdu failed.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

 out:
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);

		if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
			/*
			 * We're completely finished with both outgoing and
			 * incoming data streams. It's safe to free all
			 * temporary data from this request.
			 */
			free_pipe_context(p);
		}
	}

	return data_returned;
}
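
/****************************************************************************
 Returns true if this file handle is a fake file backing a named pipe,
 either served internally or proxied to an external RPC server.
****************************************************************************/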

bool fsp_is_np(struct files_struct *fsp)
{
	enum FAKE_FILE_TYPE type;

	if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
		return false;
	}

	type = fsp->fake_file_handle->type;

	return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
		|| (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}
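
/*
 * Per-open state for a named pipe that is proxied to an external RPC
 * server over a named-pipe-auth (npa) tstream. The read and write tevent
 * queues serialise overlapping requests on the single stream.
 */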
struct np_proxy_state {
	uint16_t file_type;
	uint16_t device_state;
	uint64_t allocation_size;
	struct tstream_context *npipe;
	struct tevent_queue *read_queue;
	struct tevent_queue *write_queue;
};
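
/****************************************************************************
 Connect to the external RPC server that serves this pipe over the socket
 directory "<socket_dir>/np" (socket_dir comes from the
 "external_rpc_pipe:socket_dir" parameter), passing the client's identity
 as a netr_SamInfo3. Returns NULL on any failure.
****************************************************************************/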

static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
				const char *pipe_name,
				const struct tsocket_address *local_address,
				const struct tsocket_address *remote_address,
				struct auth_serversupplied_info *server_info)
{
	struct np_proxy_state *result;
	char *socket_np_dir;
	const char *socket_dir;
	struct tevent_context *ev;
	struct tevent_req *subreq;
	struct netr_SamInfo3 *info3;
	NTSTATUS status;
	bool ok;
	int ret;
	int sys_errno;

	result = talloc(mem_ctx, struct np_proxy_state);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return NULL;
	}

	result->read_queue = tevent_queue_create(result, "np_read");
	if (result->read_queue == NULL) {
		DEBUG(0, ("tevent_queue_create failed\n"));
		goto fail;
	}

	result->write_queue = tevent_queue_create(result, "np_write");
	if (result->write_queue == NULL) {
		DEBUG(0, ("tevent_queue_create failed\n"));
		goto fail;
	}

	ev = s3_tevent_context_init(talloc_tos());
	if (ev == NULL) {
		DEBUG(0, ("s3_tevent_context_init failed\n"));
		goto fail;
	}

	socket_dir = lp_parm_const_string(
		GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
		get_dyn_NCALRPCDIR());
	if (socket_dir == NULL) {
		DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
		goto fail;
	}
	socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
	if (socket_np_dir == NULL) {
		DEBUG(0, ("talloc_asprintf failed\n"));
		goto fail;
	}

	info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
	if (info3 == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(info3);
		DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
			  nt_errstr(status)));
		goto fail;
	}

	subreq = tstream_npa_connect_send(talloc_tos(), ev,
					  socket_np_dir,
					  pipe_name,
					  remote_address, /* client_addr */
					  NULL, /* client_name */
					  local_address, /* server_addr */
					  NULL, /* server_name */
					  info3,
					  server_info->user_session_key,
					  data_blob_null /* delegated_creds */);
	if (subreq == NULL) {
		DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
			  "user %s\\%s failed\n",
			  socket_np_dir, pipe_name, info3->base.domain.string,
			  info3->base.account_name.string));
		goto fail;
	}

	ok = tevent_req_poll(subreq, ev);
	if (!ok) {
		DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
			  "failed for tstream_npa_connect: %s\n",
			  socket_np_dir, pipe_name, info3->base.domain.string,
			  info3->base.account_name.string,
			  strerror(errno)));
		goto fail;
	}

	ret = tstream_npa_connect_recv(subreq, &sys_errno,
				       result,
				       &result->npipe,
				       &result->file_type,
				       &result->device_state,
				       &result->allocation_size);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
			  "user %s\\%s failed: %s\n",
			  socket_np_dir, pipe_name, info3->base.domain.string,
			  info3->base.account_name.string,
			  strerror(sys_errno)));
		goto fail;
	}

	return result;

 fail:
	TALLOC_FREE(result);
	return NULL;
}
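
/****************************************************************************
 Open a named pipe as a fake file handle. Pipes listed in the "np:proxy"
 parameter are handed to an external RPC server via
 make_external_rpc_pipe_p(); everything else is served by an in-process
 pipes_struct. For example (hypothetical smb.conf snippet, not taken from
 this file):

	[global]
		np:proxy = spoolss

 would proxy the spoolss pipe while keeping all other pipes internal.
****************************************************************************/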

NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct auth_serversupplied_info *server_info,
		 struct fake_file_handle **phandle)
{
	const char **proxy_list;
	struct fake_file_handle *handle;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(handle, name,
					     local_address,
					     remote_address,
					     server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;
	} else {
		struct pipes_struct *p;
		struct ndr_syntax_id syntax;
		const char *client_address;

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		if (tsocket_address_is_inet(remote_address, "ip")) {
			client_address = tsocket_address_inet_addr_string(
						remote_address,
						talloc_tos());
			if (client_address == NULL) {
				TALLOC_FREE(handle);
				return NT_STATUS_NO_MEMORY;
			}
		} else {
			client_address = "";
		}

		p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
					     server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;
	}

	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}
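
/****************************************************************************
 Returns true if a proxied pipe still has a read request queued; internal
 pipes answer requests synchronously, so they never have a read in progress.
****************************************************************************/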

bool np_read_in_progress(struct fake_file_handle *handle)
{
	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		return false;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		size_t read_count;

		read_count = tevent_queue_length(p->read_queue);
		if (read_count > 0) {
			return true;
		}

		return false;
	}

	return false;
}
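
/*
 * Asynchronous write path. np_write_send()/np_write_recv() follow the
 * usual tevent_req pattern: internal pipes are written synchronously and
 * the result is posted immediately, while proxied pipes queue a tstream
 * writev on the write queue.
 *
 * A minimal caller sketch (illustrative only; "ev", "handle", "buf",
 * "buflen" and the blocking tevent_req_poll() loop are assumptions, not
 * code from this file):
 *
 *	struct tevent_req *req;
 *	ssize_t nwritten;
 *	NTSTATUS status;
 *
 *	req = np_write_send(talloc_tos(), ev, handle, buf, buflen);
 *	if (req == NULL || !tevent_req_poll(req, ev)) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	status = np_write_recv(req, &nwritten);
 *	TALLOC_FREE(req);
 */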

struct np_write_state {
	struct event_context *ev;
	struct np_proxy_state *p;
	struct iovec iov;
	ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}

static void np_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	ssize_t received;
	int err;

	received = tstream_writev_queue_recv(subreq, &err);
	if (received < 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}
	state->nwritten = received;
	tevent_req_done(req);
}

NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
	struct np_write_state *state = tevent_req_data(
		req, struct np_write_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pnwritten = state->nwritten;
	return NT_STATUS_OK;
}

struct np_ipc_readv_next_vector_state {
	uint8_t *buf;
	size_t len;
	size_t ofs;
	size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
					  uint8_t *buf, size_t len)
{
	ZERO_STRUCTP(s);

	s->buf = buf;
	s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (vector == NULL) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
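
/*
 * Asynchronous read path. For internal pipes np_read_send() answers from
 * the already-prepared reply PDUs; for proxied pipes it queues a PDU read
 * using np_ipc_readv_next_vector() above, and a non-zero
 * next_vector.remaining is what signals "more data outstanding" to the
 * caller.
 *
 * A minimal caller sketch (illustrative only; "ev", "handle", "buf" and
 * "buflen" are assumptions, not code from this file):
 *
 *	struct tevent_req *req = np_read_send(talloc_tos(), ev, handle,
 *					      buf, buflen);
 *	ssize_t nread;
 *	bool more;
 *	NTSTATUS status;
 *
 *	if (req != NULL && tevent_req_poll(req, ev)) {
 *		status = np_read_recv(req, &nread, &more);
 *	}
 */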

struct np_read_state {
	struct np_proxy_state *p;
	struct np_ipc_readv_next_vector_state next_vector;

	ssize_t nread;
	bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_read_state *state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct np_read_state);
	if (req == NULL) {
		return NULL;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		np_ipc_readv_next_vector_init(&state->next_vector,
					      data, len);

		subreq = tstream_readv_pdu_queue_send(state,
						      ev,
						      p->npipe,
						      p->read_queue,
						      np_ipc_readv_next_vector,
						      &state->next_vector);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		tevent_req_set_callback(subreq, np_read_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
}

static void np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	ssize_t ret;
	int err;

	ret = tstream_readv_pdu_queue_recv(subreq, &err);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}

	state->nread = ret;
	state->is_data_outstanding = (state->next_vector.remaining > 0);

	tevent_req_done(req);
}

NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
		      bool *is_data_outstanding)
{
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	*nread = state->nread;
	*is_data_outstanding = state->is_data_outstanding;
	return NT_STATUS_OK;
}

/**
 * @brief Create a new RPC client context which uses a local dispatch function.
 *
 * @param[in]  conn          The connection struct that will hold the pipe.
 *
 * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
 *
 * @return                   NT_STATUS_OK on success, a corresponding NT
 *                           status if an error occurred.
 */
NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
				  struct rpc_pipe_client **spoolss_pipe)
{
	NTSTATUS status;

	/* TODO: check and handle disconnections */

	if (!conn->spoolss_pipe) {
		status = rpc_pipe_open_internal(conn,
						&ndr_table_spoolss.syntax_id,
						conn->server_info,
						&conn->spoolss_pipe);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	}

	*spoolss_pipe = conn->spoolss_pipe;

	return NT_STATUS_OK;
}
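
/*
 * Note: the returned client pipe is cached on the connection_struct, so
 * repeated calls for the same tree connect reuse one spoolss context. A
 * caller sketch (illustrative only; "conn" is an assumption):
 *
 *	struct rpc_pipe_client *cli;
 *	NTSTATUS status = rpc_connect_spoolss_pipe(conn, &cli);
 */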