2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "system/network.h"
22 #include "libsmb/libsmb.h"
23 #include "../lib/util/tevent_ntstatus.h"
24 #include "../lib/tsocket/tsocket.h"
25 #include "../lib/tsocket/tsocket_internal.h"
26 #include "cli_np_tstream.h"
/* Forward declaration of the tstream ops table defined at the bottom of the file. */
28 static const struct tstream_context_ops tstream_cli_np_ops
;
/*
 * Rationale for the fixed 4280-byte fragment buffer used below.
 * NOTE(review): the extraction dropped the comment delimiters here —
 * these '*' lines are the interior of one block comment.
 */
31 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
32 * This fits into the max_xmit negotiated at the SMB layer.
34 * On the sending side they may use SMBtranss if the request does not
35 * fit into a single SMBtrans call.
37 * Windows uses 1024 as max data size of a SMBtrans request and then
38 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
41 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
42 * request to get the whole fragment at once (like samba 3.5.x and below did.
44 * It is important that we use SMBwriteX with the size of a full fragment,
45 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
46 * from NT4 servers. (See bug #8195)
/* Size of one full DCERPC fragment buffer (both read and write side). */
48 #define TSTREAM_CLI_NP_BUF_SIZE 4280
/*
 * Per-stream private data for the named-pipe tstream wrapper.
 * NOTE(review): the extraction dropped fields here — later code also uses
 * cli_nps->npipe, ->fnum, ->trans.{active,read_req,write_req,setup} and
 * ->read/->write.{ofs,left,buf}; restore from the upstream source.
 */
50 struct tstream_cli_np
{
51 struct cli_state
*cli
;
/* Timeout restored when the cli connection is gone (see _set_timeout). */
54 unsigned int default_timeout
;
58 struct tevent_req
*read_req
;
59 struct tevent_req
*write_req
;
/* Staging buffer for one full DCERPC fragment. */
66 uint8_t buf
[TSTREAM_CLI_NP_BUF_SIZE
];
/*
 * talloc destructor: synchronously close the pipe fnum if the SMB
 * connection is still alive, logging (but otherwise ignoring) failure.
 * NOTE(review): extraction dropped lines — the "NTSTATUS status;"
 * declaration, the early "return 0;" for the disconnected case and the
 * closing braces are missing; verify against the upstream file.
 */
70 static int tstream_cli_np_destructor(struct tstream_cli_np
*cli_nps
)
74 if (!cli_state_is_connected(cli_nps
->cli
)) {
79 * TODO: do not use a sync call with a destructor!!!
81 * This only happens, if a caller does talloc_free(),
82 * while the everything was still ok.
84 * If we get an unexpected failure within a normal
85 * operation, we already do an async cli_close_send()/_recv().
87 * Once we've fixed all callers to call
88 * tstream_disconnect_send()/_recv(), this will
91 status
= cli_close(cli_nps
->cli
, cli_nps
->fnum
);
92 if (!NT_STATUS_IS_OK(status
)) {
93 DEBUG(1, ("tstream_cli_np_destructor: cli_close "
94 "failed on pipe %s. Error was %s\n",
95 cli_nps
->npipe
, nt_errstr(status
)));
98 * We can't do much on failure
/*
 * State for the async open (create) of the named pipe.
 * NOTE(review): extraction dropped fields — _open_done/_open_recv also use
 * state->npipe and state->fnum; restore from upstream.
 */
103 struct tstream_cli_np_open_state
{
104 struct cli_state
*cli
;
109 static void tstream_cli_np_open_done(struct tevent_req
*subreq
);
/*
 * Start an async NTCreate on pipe "npipe" over the given cli connection.
 * Completion continues in tstream_cli_np_open_done().
 * NOTE(review): extraction dropped lines — the "npipe" parameter, the
 * NULL-check after tevent_req_create(), several cli_ntcreate_send()
 * arguments (desired access, create disposition, ...) and the final
 * "return req;" are missing; verify against upstream.
 */
111 struct tevent_req
*tstream_cli_np_open_send(TALLOC_CTX
*mem_ctx
,
112 struct tevent_context
*ev
,
113 struct cli_state
*cli
,
116 struct tevent_req
*req
;
117 struct tstream_cli_np_open_state
*state
;
118 struct tevent_req
*subreq
;
120 req
= tevent_req_create(mem_ctx
, &state
,
121 struct tstream_cli_np_open_state
);
/* Keep a private copy of the pipe name for error messages later. */
127 state
->npipe
= talloc_strdup(state
, npipe
);
128 if (tevent_req_nomem(state
->npipe
, req
)) {
129 return tevent_req_post(req
, ev
);
132 subreq
= cli_ntcreate_send(state
, ev
, cli
,
137 FILE_SHARE_READ
|FILE_SHARE_WRITE
,
141 if (tevent_req_nomem(subreq
, req
)) {
142 return tevent_req_post(req
, ev
);
144 tevent_req_set_callback(subreq
, tstream_cli_np_open_done
, req
);
/*
 * Completion of the NTCreate: stash the returned fnum and finish the
 * open request (as NT error on failure).
 * NOTE(review): "NTSTATUS status;", TALLOC_FREE(subreq) and a "return;"
 * after tevent_req_nterror() appear to have been dropped by the
 * extraction; verify against upstream.
 */
149 static void tstream_cli_np_open_done(struct tevent_req
*subreq
)
151 struct tevent_req
*req
=
152 tevent_req_callback_data(subreq
, struct tevent_req
);
153 struct tstream_cli_np_open_state
*state
=
154 tevent_req_data(req
, struct tstream_cli_np_open_state
);
157 status
= cli_ntcreate_recv(subreq
, &state
->fnum
);
159 if (!NT_STATUS_IS_OK(status
)) {
160 tevent_req_nterror(req
, status
);
164 tevent_req_done(req
);
/*
 * Finish the async open: build the tstream_context, move ownership of
 * cli/npipe/fnum into the per-stream tstream_cli_np, install the
 * destructor and prime the SMBtrans setup words.
 * NOTE(review): extraction dropped lines — "NTSTATUS status;", the
 * mem_ctx parameter, the tstream_context_create() NULL check, the
 * "*_stream = stream;" assignment and "return NT_STATUS_OK;" are
 * missing; verify against upstream.
 */
167 NTSTATUS
_tstream_cli_np_open_recv(struct tevent_req
*req
,
169 struct tstream_context
**_stream
,
170 const char *location
)
172 struct tstream_cli_np_open_state
*state
=
173 tevent_req_data(req
, struct tstream_cli_np_open_state
);
174 struct tstream_context
*stream
;
175 struct tstream_cli_np
*cli_nps
;
178 if (tevent_req_is_nterror(req
, &status
)) {
179 tevent_req_received(req
);
183 stream
= tstream_context_create(mem_ctx
,
186 struct tstream_cli_np
,
189 tevent_req_received(req
);
190 return NT_STATUS_NO_MEMORY
;
192 ZERO_STRUCTP(cli_nps
);
/* Transfer connection state from the open-state into the stream. */
194 cli_nps
->cli
= state
->cli
;
195 cli_nps
->npipe
= talloc_move(cli_nps
, &state
->npipe
);
196 cli_nps
->fnum
= state
->fnum
;
197 cli_nps
->default_timeout
= state
->cli
->timeout
;
199 talloc_set_destructor(cli_nps
, tstream_cli_np_destructor
);
201 cli_nps
->trans
.active
= false;
202 cli_nps
->trans
.read_req
= NULL
;
203 cli_nps
->trans
.write_req
= NULL
;
/* SMBtrans setup words: TransactNmPipe subcommand + target fnum. */
204 SSVAL(cli_nps
->trans
.setup
+0, 0, TRANSACT_DCERPCCMD
);
205 SSVAL(cli_nps
->trans
.setup
+1, 0, cli_nps
->fnum
);
208 tevent_req_received(req
);
/*
 * tstream op: number of already-buffered read bytes, or an error when the
 * underlying SMB connection is gone.
 * NOTE(review): the disconnected branch (presumably "errno = ENOTCONN;
 * return -1;") was dropped by the extraction; verify against upstream.
 */
212 static ssize_t
tstream_cli_np_pending_bytes(struct tstream_context
*stream
)
214 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
215 struct tstream_cli_np
);
217 if (!cli_state_is_connected(cli_nps
->cli
)) {
222 return cli_nps
->read
.left
;
/*
 * Check whether a generic tstream_context is backed by this named-pipe
 * implementation (talloc_get_type returns NULL for other backends).
 * NOTE(review): the final "return cli_nps ? true : false;" style line was
 * dropped by the extraction; verify against upstream.
 */
225 bool tstream_is_cli_np(struct tstream_context
*stream
)
227 struct tstream_cli_np
*cli_nps
=
228 talloc_get_type(_tstream_context_data(stream
),
229 struct tstream_cli_np
);
/*
 * Request that the next matching writev+readv pair be coalesced into a
 * single SMBtrans round trip. Refuses with NT_STATUS_PIPE_BUSY while a
 * previous trans cycle is pending or already armed.
 * NOTE(review): the trailing "return NT_STATUS_OK;" was dropped by the
 * extraction; verify against upstream.
 */
238 NTSTATUS
tstream_cli_np_use_trans(struct tstream_context
*stream
)
240 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
241 struct tstream_cli_np
);
243 if (cli_nps
->trans
.read_req
) {
244 return NT_STATUS_PIPE_BUSY
;
247 if (cli_nps
->trans
.write_req
) {
248 return NT_STATUS_PIPE_BUSY
;
251 if (cli_nps
->trans
.active
) {
252 return NT_STATUS_PIPE_BUSY
;
/* Arm trans mode: consumed by the next writev/readv pair. */
255 cli_nps
->trans
.active
= true;
/*
 * Set the SMB request timeout for this stream, returning the previous
 * value. When the connection is already gone, just report the stored
 * default without touching the cli_state.
 */
260 unsigned int tstream_cli_np_set_timeout(struct tstream_context
*stream
,
261 unsigned int timeout
)
263 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
264 struct tstream_cli_np
);
266 if (!cli_state_is_connected(cli_nps
->cli
)) {
267 return cli_nps
->default_timeout
;
270 return cli_set_timeout(cli_nps
->cli
, timeout
);
/*
 * Accessor: expose the underlying cli_state of this named-pipe stream.
 * NOTE(review): the "return cli_nps->cli;" line was dropped by the
 * extraction; verify against upstream.
 */
273 struct cli_state
*tstream_cli_np_get_cli_state(struct tstream_context
*stream
)
275 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
276 struct tstream_cli_np
);
/*
 * State for an async writev on the pipe.
 * NOTE(review): extraction dropped fields — the code below also uses
 * state->count, state->ret and state->error.val; restore from upstream.
 */
281 struct tstream_cli_np_writev_state
{
282 struct tstream_context
*stream
;
283 struct tevent_context
*ev
;
/* Private, mutable copy of the caller's iovec array. */
285 struct iovec
*vector
;
/* Saved error location (__location__) used when deferring an error. */
292 const char *location
;
/*
 * Destructor: drop the back-pointer from the stream's trans bookkeeping
 * so a freed writev request is never completed twice.
 * NOTE(review): the "return 0;" and closing brace were dropped by the
 * extraction; verify against upstream.
 */
296 static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state
*state
)
298 struct tstream_cli_np
*cli_nps
=
299 tstream_context_data(state
->stream
,
300 struct tstream_cli_np
);
302 cli_nps
->trans
.write_req
= NULL
;
307 static void tstream_cli_np_writev_write_next(struct tevent_req
*req
);
/*
 * tstream op: start an async gathered write. Copies the caller's iovec
 * so it can be consumed destructively, then kicks the first fragment via
 * tstream_cli_np_writev_write_next().
 * NOTE(review): extraction dropped lines — the "count" parameter, the
 * tevent_req_create() NULL check and the final "return req;" are
 * missing; verify against upstream.
 */
309 static struct tevent_req
*tstream_cli_np_writev_send(TALLOC_CTX
*mem_ctx
,
310 struct tevent_context
*ev
,
311 struct tstream_context
*stream
,
312 const struct iovec
*vector
,
315 struct tevent_req
*req
;
316 struct tstream_cli_np_writev_state
*state
;
317 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
318 struct tstream_cli_np
);
320 req
= tevent_req_create(mem_ctx
, &state
,
321 struct tstream_cli_np_writev_state
);
325 state
->stream
= stream
;
329 talloc_set_destructor(state
, tstream_cli_np_writev_state_destructor
);
331 if (!cli_state_is_connected(cli_nps
->cli
)) {
332 tevent_req_error(req
, ENOTCONN
);
333 return tevent_req_post(req
, ev
);
337 * we make a copy of the vector so we can change the structure
339 state
->vector
= talloc_array(state
, struct iovec
, count
);
340 if (tevent_req_nomem(state
->vector
, req
)) {
341 return tevent_req_post(req
, ev
);
343 memcpy(state
->vector
, vector
, sizeof(struct iovec
) * count
);
344 state
->count
= count
;
346 tstream_cli_np_writev_write_next(req
);
347 if (!tevent_req_is_in_progress(req
)) {
348 return tevent_req_post(req
, ev
);
/*
 * Pack up to one full fragment (TSTREAM_CLI_NP_BUF_SIZE) from the iovec
 * copy into write.buf, then either: finish (nothing left), defer to a
 * pending SMBtrans (trans mode), or issue a message-mode SMBwriteX.
 * NOTE(review): extraction dropped lines — iovec advance/shift inside the
 * loop, several "return;" statements after the trans branches, and
 * trailing cli_write_andx_send() arguments are missing; verify against
 * upstream.
 */
354 static void tstream_cli_np_readv_trans_start(struct tevent_req
*req
);
355 static void tstream_cli_np_writev_write_done(struct tevent_req
*subreq
);
357 static void tstream_cli_np_writev_write_next(struct tevent_req
*req
)
359 struct tstream_cli_np_writev_state
*state
=
361 struct tstream_cli_np_writev_state
);
362 struct tstream_cli_np
*cli_nps
=
363 tstream_context_data(state
->stream
,
364 struct tstream_cli_np
);
365 struct tevent_req
*subreq
;
367 cli_nps
->write
.ofs
= 0;
368 cli_nps
->write
.left
= TSTREAM_CLI_NP_BUF_SIZE
;
371 * copy the pending buffer first
373 while (cli_nps
->write
.left
> 0 && state
->count
> 0) {
374 uint8_t *base
= (uint8_t *)state
->vector
[0].iov_base
;
375 size_t len
= MIN(cli_nps
->write
.left
, state
->vector
[0].iov_len
);
377 memcpy(cli_nps
->write
.buf
+ cli_nps
->write
.ofs
, base
, len
);
380 state
->vector
[0].iov_base
= base
;
381 state
->vector
[0].iov_len
-= len
;
383 cli_nps
->write
.ofs
+= len
;
384 cli_nps
->write
.left
-= len
;
386 if (state
->vector
[0].iov_len
== 0) {
394 if (cli_nps
->write
.ofs
== 0) {
395 tevent_req_done(req
);
/* Trans mode: the write is deferred until the matching readv arrives. */
399 if (cli_nps
->trans
.active
&& state
->count
== 0) {
400 cli_nps
->trans
.active
= false;
401 cli_nps
->trans
.write_req
= req
;
/* A readv is already waiting: start the combined SMBtrans now. */
405 if (cli_nps
->trans
.read_req
&& state
->count
== 0) {
406 cli_nps
->trans
.write_req
= req
;
407 tstream_cli_np_readv_trans_start(cli_nps
->trans
.read_req
);
411 subreq
= cli_write_andx_send(state
, state
->ev
, cli_nps
->cli
,
413 8, /* 8 means message mode. */
414 cli_nps
->write
.buf
, 0,
416 if (tevent_req_nomem(subreq
, req
)) {
419 tevent_req_set_callback(subreq
,
420 tstream_cli_np_writev_write_done
,
/*
 * Completion of one SMBwriteX: any failure or short write forces a
 * disconnect (EIO); otherwise continue with the next fragment.
 * NOTE(review): extraction dropped lines — "NTSTATUS status;", the
 * "size_t written;" declaration, state->ret accumulation,
 * TALLOC_FREE(subreq) and "return;" statements are missing; verify
 * against upstream.
 */
424 static void tstream_cli_np_writev_disconnect_now(struct tevent_req
*req
,
426 const char *location
);
428 static void tstream_cli_np_writev_write_done(struct tevent_req
*subreq
)
430 struct tevent_req
*req
=
431 tevent_req_callback_data(subreq
, struct tevent_req
);
432 struct tstream_cli_np_writev_state
*state
=
433 tevent_req_data(req
, struct tstream_cli_np_writev_state
);
434 struct tstream_cli_np
*cli_nps
=
435 tstream_context_data(state
->stream
,
436 struct tstream_cli_np
);
440 status
= cli_write_andx_recv(subreq
, &written
);
442 if (!NT_STATUS_IS_OK(status
)) {
443 tstream_cli_np_writev_disconnect_now(req
, EIO
, __location__
);
/* Message-mode pipe: a short write would corrupt fragment framing. */
447 if (written
!= cli_nps
->write
.ofs
) {
448 tstream_cli_np_writev_disconnect_now(req
, EIO
, __location__
);
452 tstream_cli_np_writev_write_next(req
);
/*
 * Error path: remember the original errno+location, then try an async
 * close of the pipe before reporting the saved error to the caller.
 * NOTE(review): extraction dropped lines — the "int error" parameter and
 * the "return;" statements after each _tevent_req_error() are missing;
 * verify against upstream.
 */
455 static void tstream_cli_np_writev_disconnect_done(struct tevent_req
*subreq
);
457 static void tstream_cli_np_writev_disconnect_now(struct tevent_req
*req
,
459 const char *location
)
461 struct tstream_cli_np_writev_state
*state
=
463 struct tstream_cli_np_writev_state
);
464 struct tstream_cli_np
*cli_nps
=
465 tstream_context_data(state
->stream
,
466 struct tstream_cli_np
);
467 struct tevent_req
*subreq
;
/* Save the triggering error so the disconnect can't mask it. */
469 state
->error
.val
= error
;
470 state
->error
.location
= location
;
472 if (!cli_state_is_connected(cli_nps
->cli
)) {
473 /* return the original error */
474 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
478 subreq
= cli_close_send(state
, state
->ev
, cli_nps
->cli
, cli_nps
->fnum
);
479 if (subreq
== NULL
) {
480 /* return the original error */
481 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
484 tevent_req_set_callback(subreq
,
485 tstream_cli_np_writev_disconnect_done
,
/*
 * The best-effort close finished; its own status is ignored — the saved
 * original error is what the caller sees.
 * NOTE(review): "NTSTATUS status;", TALLOC_FREE(subreq) and the
 * "cli_nps->cli = NULL;" style teardown appear to have been dropped by
 * the extraction; verify against upstream.
 */
489 static void tstream_cli_np_writev_disconnect_done(struct tevent_req
*subreq
)
491 struct tevent_req
*req
=
492 tevent_req_callback_data(subreq
, struct tevent_req
);
493 struct tstream_cli_np_writev_state
*state
=
494 tevent_req_data(req
, struct tstream_cli_np_writev_state
);
495 struct tstream_cli_np
*cli_nps
=
496 tstream_context_data(state
->stream
, struct tstream_cli_np
);
499 status
= cli_close_recv(subreq
);
504 /* return the original error */
505 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
/*
 * tstream op: collect the writev result (bytes written, or -1 with
 * *perrno set) and release the request.
 * NOTE(review): the "int ret;" declaration, the "ret = state->ret;"
 * success path and the final "return ret;" were dropped by the
 * extraction; verify against upstream.
 */
508 static int tstream_cli_np_writev_recv(struct tevent_req
*req
,
511 struct tstream_cli_np_writev_state
*state
=
513 struct tstream_cli_np_writev_state
);
516 ret
= tsocket_simple_int_recv(req
, perrno
);
521 tevent_req_received(req
);
/*
 * State for an async readv on the pipe.
 * NOTE(review): extraction dropped fields — the code also uses
 * state->count, state->ret, state->trans.im and state->error.val;
 * restore from upstream.
 */
525 struct tstream_cli_np_readv_state
{
526 struct tstream_context
*stream
;
527 struct tevent_context
*ev
;
/* Private, mutable copy of the caller's iovec array. */
529 struct iovec
*vector
;
/* Immediate event used to continue the readv after a trans completes. */
535 struct tevent_immediate
*im
;
/* Saved error location (__location__) used when deferring an error. */
540 const char *location
;
/*
 * Destructor: drop the trans back-pointer so a freed readv request is
 * never completed twice.
 * NOTE(review): the "return 0;" and closing brace were dropped by the
 * extraction; verify against upstream.
 */
544 static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state
*state
)
546 struct tstream_cli_np
*cli_nps
=
547 tstream_context_data(state
->stream
,
548 struct tstream_cli_np
);
550 cli_nps
->trans
.read_req
= NULL
;
555 static void tstream_cli_np_readv_read_next(struct tevent_req
*req
);
/*
 * tstream op: start an async scattered read. Mirrors writev_send: copy
 * the iovec, then drain buffered data / issue reads via
 * tstream_cli_np_readv_read_next().
 * NOTE(review): extraction dropped lines — the "count" parameter, the
 * tevent_req_create() NULL check and the final "return req;" are
 * missing; verify against upstream.
 */
557 static struct tevent_req
*tstream_cli_np_readv_send(TALLOC_CTX
*mem_ctx
,
558 struct tevent_context
*ev
,
559 struct tstream_context
*stream
,
560 struct iovec
*vector
,
563 struct tevent_req
*req
;
564 struct tstream_cli_np_readv_state
*state
;
565 struct tstream_cli_np
*cli_nps
=
566 tstream_context_data(stream
, struct tstream_cli_np
);
568 req
= tevent_req_create(mem_ctx
, &state
,
569 struct tstream_cli_np_readv_state
);
573 state
->stream
= stream
;
577 talloc_set_destructor(state
, tstream_cli_np_readv_state_destructor
);
579 if (!cli_state_is_connected(cli_nps
->cli
)) {
580 tevent_req_error(req
, ENOTCONN
);
581 return tevent_req_post(req
, ev
);
585 * we make a copy of the vector so we can change the structure
587 state
->vector
= talloc_array(state
, struct iovec
, count
);
588 if (tevent_req_nomem(state
->vector
, req
)) {
589 return tevent_req_post(req
, ev
);
591 memcpy(state
->vector
, vector
, sizeof(struct iovec
) * count
);
592 state
->count
= count
;
594 tstream_cli_np_readv_read_next(req
);
595 if (!tevent_req_is_in_progress(req
)) {
596 return tevent_req_post(req
, ev
);
/*
 * Satisfy the caller's iovec from the buffered fragment first; when it is
 * exhausted either finish, arm/start the combined SMBtrans, or issue a
 * plain SMBreadX for the next fragment.
 * NOTE(review): extraction dropped lines — iovec advance/shift inside the
 * loop, "return;" statements after the done/trans branches, and the
 * final callback-argument line are missing; verify against upstream.
 */
602 static void tstream_cli_np_readv_read_done(struct tevent_req
*subreq
);
604 static void tstream_cli_np_readv_read_next(struct tevent_req
*req
)
606 struct tstream_cli_np_readv_state
*state
=
608 struct tstream_cli_np_readv_state
);
609 struct tstream_cli_np
*cli_nps
=
610 tstream_context_data(state
->stream
,
611 struct tstream_cli_np
);
612 struct tevent_req
*subreq
;
615 * copy the pending buffer first
617 while (cli_nps
->read
.left
> 0 && state
->count
> 0) {
618 uint8_t *base
= (uint8_t *)state
->vector
[0].iov_base
;
619 size_t len
= MIN(cli_nps
->read
.left
, state
->vector
[0].iov_len
);
621 memcpy(base
, cli_nps
->read
.buf
+ cli_nps
->read
.ofs
, len
);
624 state
->vector
[0].iov_base
= base
;
625 state
->vector
[0].iov_len
-= len
;
627 cli_nps
->read
.ofs
+= len
;
628 cli_nps
->read
.left
-= len
;
630 if (state
->vector
[0].iov_len
== 0) {
638 if (state
->count
== 0) {
639 tevent_req_done(req
);
/* Trans mode armed: wait for the matching writev before transacting. */
643 if (cli_nps
->trans
.active
) {
644 cli_nps
->trans
.active
= false;
645 cli_nps
->trans
.read_req
= req
;
/* The writev is already deferred: run the combined SMBtrans now. */
649 if (cli_nps
->trans
.write_req
) {
650 cli_nps
->trans
.read_req
= req
;
651 tstream_cli_np_readv_trans_start(req
);
655 subreq
= cli_read_andx_send(state
, state
->ev
, cli_nps
->cli
,
656 cli_nps
->fnum
, 0, TSTREAM_CLI_NP_BUF_SIZE
);
657 if (tevent_req_nomem(subreq
, req
)) {
660 tevent_req_set_callback(subreq
,
661 tstream_cli_np_readv_read_done
,
/*
 * Issue the combined SMBtrans (TransactNmPipe): sends the deferred write
 * fragment and asks for up to one full read fragment in the same round
 * trip. Uses the setup words primed at open time.
 * NOTE(review): extraction dropped lines — several cli_trans_send()
 * arguments (pipe name, flags, the write buffer/length, ...), "return;"
 * statements and the final callback-argument line are missing; verify
 * against upstream.
 */
665 static void tstream_cli_np_readv_trans_done(struct tevent_req
*subreq
);
667 static void tstream_cli_np_readv_trans_start(struct tevent_req
*req
)
669 struct tstream_cli_np_readv_state
*state
=
671 struct tstream_cli_np_readv_state
);
672 struct tstream_cli_np
*cli_nps
=
673 tstream_context_data(state
->stream
,
674 struct tstream_cli_np
);
675 struct tevent_req
*subreq
;
/* Pre-create the immediate used to resume the readv after the trans. */
677 state
->trans
.im
= tevent_create_immediate(state
);
678 if (tevent_req_nomem(state
->trans
.im
, req
)) {
682 subreq
= cli_trans_send(state
, state
->ev
,
687 cli_nps
->trans
.setup
, 2,
692 TSTREAM_CLI_NP_BUF_SIZE
);
693 if (tevent_req_nomem(subreq
, req
)) {
696 tevent_req_set_callback(subreq
,
697 tstream_cli_np_readv_trans_done
,
/*
 * Completion of the combined SMBtrans: stash the received fragment into
 * read.buf, then complete the deferred writev and resume the readv via
 * an immediate (so both requests don't complete in one callback).
 * NOTE(review): extraction dropped lines — "NTSTATUS status;",
 * "uint8_t *rcvbuf;", "uint32_t received;", TALLOC_FREE(subreq),
 * "return;" statements and the "received == 0" guard before the EPIPE
 * branch are missing; verify against upstream.
 */
701 static void tstream_cli_np_readv_disconnect_now(struct tevent_req
*req
,
703 const char *location
);
704 static void tstream_cli_np_readv_trans_next(struct tevent_context
*ctx
,
705 struct tevent_immediate
*im
,
708 static void tstream_cli_np_readv_trans_done(struct tevent_req
*subreq
)
710 struct tevent_req
*req
=
711 tevent_req_callback_data(subreq
, struct tevent_req
);
712 struct tstream_cli_np_readv_state
*state
=
713 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
714 struct tstream_cli_np
*cli_nps
=
715 tstream_context_data(state
->stream
, struct tstream_cli_np
);
720 status
= cli_trans_recv(subreq
, state
, NULL
, NULL
, 0, NULL
,
722 &rcvbuf
, 0, &received
);
/* More fragment data pending is normal in message mode — not an error. */
724 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
725 status
= NT_STATUS_OK
;
727 if (!NT_STATUS_IS_OK(status
)) {
728 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
732 if (received
> TSTREAM_CLI_NP_BUF_SIZE
) {
733 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
738 tstream_cli_np_readv_disconnect_now(req
, EPIPE
, __location__
);
742 cli_nps
->read
.ofs
= 0;
743 cli_nps
->read
.left
= received
;
744 memcpy(cli_nps
->read
.buf
, rcvbuf
, received
);
747 if (cli_nps
->trans
.write_req
== NULL
) {
748 tstream_cli_np_readv_read_next(req
);
/* Resume the readv from the mainloop, then finish the deferred writev. */
752 tevent_schedule_immediate(state
->trans
.im
, state
->ev
,
753 tstream_cli_np_readv_trans_next
, req
);
755 tevent_req_done(cli_nps
->trans
.write_req
);
/*
 * Immediate handler scheduled by trans_done: continue filling the
 * caller's iovec from the freshly received fragment.
 * NOTE(review): the "void *private_data" parameter and the
 * talloc_get_type_abort() type argument were dropped by the extraction;
 * verify against upstream.
 */
758 static void tstream_cli_np_readv_trans_next(struct tevent_context
*ctx
,
759 struct tevent_immediate
*im
,
762 struct tevent_req
*req
=
763 talloc_get_type_abort(private_data
,
766 tstream_cli_np_readv_read_next(req
);
/*
 * Completion of a plain SMBreadX: copy the received fragment into
 * read.buf and continue the readv. NT_STATUS_BUFFER_TOO_SMALL is the
 * normal message-mode "more data follows" indication and is hidden.
 * NOTE(review): extraction dropped lines — "NTSTATUS status;",
 * "uint8_t *rcvbuf;", "ssize_t received;", the TALLOC_FREE(subreq)
 * placement the comments refer to, "return;" statements and the
 * "received == 0" guard are missing; verify against upstream.
 */
769 static void tstream_cli_np_readv_read_done(struct tevent_req
*subreq
)
771 struct tevent_req
*req
=
772 tevent_req_callback_data(subreq
, struct tevent_req
);
773 struct tstream_cli_np_readv_state
*state
=
774 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
775 struct tstream_cli_np
*cli_nps
=
776 tstream_context_data(state
->stream
, struct tstream_cli_np
);
782 * We must free subreq in this function as there is
783 * a timer event attached to it.
786 status
= cli_read_andx_recv(subreq
, &received
, &rcvbuf
);
788 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
791 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
793 * NT_STATUS_BUFFER_TOO_SMALL means that there's
794 * more data to read when the named pipe is used
795 * in message mode (which is the case here).
797 * But we hide this from the caller.
799 status
= NT_STATUS_OK
;
801 if (!NT_STATUS_IS_OK(status
)) {
803 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
807 if (received
> TSTREAM_CLI_NP_BUF_SIZE
) {
809 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
815 tstream_cli_np_readv_disconnect_now(req
, EPIPE
, __location__
);
819 cli_nps
->read
.ofs
= 0;
820 cli_nps
->read
.left
= received
;
821 memcpy(cli_nps
->read
.buf
, rcvbuf
, received
);
824 tstream_cli_np_readv_read_next(req
);
/*
 * Readv error path: save the original errno+location, attempt an async
 * close of the pipe, then report via tstream_cli_np_readv_error() (which
 * also handles a deferred writev sharing the same trans cycle).
 * NOTE(review): extraction dropped lines — the "int error" parameter and
 * the "return;" statements after each error call are missing; verify
 * against upstream.
 */
827 static void tstream_cli_np_readv_disconnect_done(struct tevent_req
*subreq
);
829 static void tstream_cli_np_readv_error(struct tevent_req
*req
);
831 static void tstream_cli_np_readv_disconnect_now(struct tevent_req
*req
,
833 const char *location
)
835 struct tstream_cli_np_readv_state
*state
=
837 struct tstream_cli_np_readv_state
);
838 struct tstream_cli_np
*cli_nps
=
839 tstream_context_data(state
->stream
,
840 struct tstream_cli_np
);
841 struct tevent_req
*subreq
;
843 state
->error
.val
= error
;
844 state
->error
.location
= location
;
846 if (!cli_state_is_connected(cli_nps
->cli
)) {
847 /* return the original error */
848 tstream_cli_np_readv_error(req
);
852 subreq
= cli_close_send(state
, state
->ev
, cli_nps
->cli
, cli_nps
->fnum
);
853 if (subreq
== NULL
) {
854 /* return the original error */
855 tstream_cli_np_readv_error(req
);
858 tevent_req_set_callback(subreq
,
859 tstream_cli_np_readv_disconnect_done
,
/*
 * The best-effort close finished; its status is ignored and the saved
 * original error is delivered via tstream_cli_np_readv_error().
 * NOTE(review): "NTSTATUS status;", TALLOC_FREE(subreq) and the
 * connection teardown lines appear to have been dropped by the
 * extraction; verify against upstream.
 */
863 static void tstream_cli_np_readv_disconnect_done(struct tevent_req
*subreq
)
865 struct tevent_req
*req
=
866 tevent_req_callback_data(subreq
, struct tevent_req
);
867 struct tstream_cli_np_readv_state
*state
=
868 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
869 struct tstream_cli_np
*cli_nps
=
870 tstream_context_data(state
->stream
, struct tstream_cli_np
);
873 status
= cli_close_recv(subreq
);
878 tstream_cli_np_readv_error(req
);
/*
 * Report the saved error. When a deferred writev shares this trans
 * cycle, fail the writev immediately and fail the readv from an
 * immediate handler, so the two requests complete in separate callbacks.
 * NOTE(review): "return;" statements after the early _tevent_req_error()
 * calls were dropped by the extraction; verify against upstream.
 */
881 static void tstream_cli_np_readv_error_trigger(struct tevent_context
*ctx
,
882 struct tevent_immediate
*im
,
885 static void tstream_cli_np_readv_error(struct tevent_req
*req
)
887 struct tstream_cli_np_readv_state
*state
=
889 struct tstream_cli_np_readv_state
);
890 struct tstream_cli_np
*cli_nps
=
891 tstream_context_data(state
->stream
,
892 struct tstream_cli_np
);
894 if (cli_nps
->trans
.write_req
== NULL
) {
895 /* return the original error */
896 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
900 if (state
->trans
.im
== NULL
) {
901 /* return the original error */
902 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
906 tevent_schedule_immediate(state
->trans
.im
, state
->ev
,
907 tstream_cli_np_readv_error_trigger
, req
);
909 /* return the original error for writev */
910 _tevent_req_error(cli_nps
->trans
.write_req
,
911 state
->error
.val
, state
->error
.location
);
/*
 * Immediate handler: deliver the saved error to the readv request after
 * the paired writev has already been failed.
 * NOTE(review): the "void *private_data" parameter, the
 * talloc_get_type_abort() type argument and the tevent_req_data() first
 * argument were dropped by the extraction; verify against upstream.
 */
914 static void tstream_cli_np_readv_error_trigger(struct tevent_context
*ctx
,
915 struct tevent_immediate
*im
,
918 struct tevent_req
*req
=
919 talloc_get_type_abort(private_data
,
921 struct tstream_cli_np_readv_state
*state
=
923 struct tstream_cli_np_readv_state
);
925 /* return the original error */
926 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
/*
 * tstream op: collect the readv result (bytes read, or -1 with *perrno
 * set) and release the request.
 * NOTE(review): the "int ret;" declaration, the "ret = state->ret;"
 * success path and the final "return ret;" were dropped by the
 * extraction; verify against upstream.
 */
929 static int tstream_cli_np_readv_recv(struct tevent_req
*req
,
932 struct tstream_cli_np_readv_state
*state
=
933 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
936 ret
= tsocket_simple_int_recv(req
, perrno
);
941 tevent_req_received(req
);
/* State for the async disconnect (just needs the stream back-pointer). */
945 struct tstream_cli_np_disconnect_state
{
946 struct tstream_context
*stream
;
949 static void tstream_cli_np_disconnect_done(struct tevent_req
*subreq
);
/*
 * tstream op: async disconnect — close the pipe fnum over the still-open
 * SMB connection; fails with ENOTCONN when the connection is gone.
 * NOTE(review): the tevent_req_create() NULL check and the final
 * "return req;" were dropped by the extraction; verify against upstream.
 */
951 static struct tevent_req
*tstream_cli_np_disconnect_send(TALLOC_CTX
*mem_ctx
,
952 struct tevent_context
*ev
,
953 struct tstream_context
*stream
)
955 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
956 struct tstream_cli_np
);
957 struct tevent_req
*req
;
958 struct tstream_cli_np_disconnect_state
*state
;
959 struct tevent_req
*subreq
;
961 req
= tevent_req_create(mem_ctx
, &state
,
962 struct tstream_cli_np_disconnect_state
);
967 state
->stream
= stream
;
969 if (!cli_state_is_connected(cli_nps
->cli
)) {
970 tevent_req_error(req
, ENOTCONN
);
971 return tevent_req_post(req
, ev
);
974 subreq
= cli_close_send(state
, ev
, cli_nps
->cli
, cli_nps
->fnum
);
975 if (tevent_req_nomem(subreq
, req
)) {
976 return tevent_req_post(req
, ev
);
978 tevent_req_set_callback(subreq
, tstream_cli_np_disconnect_done
, req
);
/*
 * Completion of the close: a close failure becomes EIO; on success the
 * cli pointer is dropped so the destructor won't close again.
 * NOTE(review): "NTSTATUS status;", TALLOC_FREE(subreq), the
 * tevent_req_callback_data() type argument and a "return;" after
 * tevent_req_error() were dropped by the extraction; verify against
 * upstream.
 */
983 static void tstream_cli_np_disconnect_done(struct tevent_req
*subreq
)
985 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
987 struct tstream_cli_np_disconnect_state
*state
=
988 tevent_req_data(req
, struct tstream_cli_np_disconnect_state
);
989 struct tstream_cli_np
*cli_nps
=
990 tstream_context_data(state
->stream
, struct tstream_cli_np
);
993 status
= cli_close_recv(subreq
);
995 if (!NT_STATUS_IS_OK(status
)) {
996 tevent_req_error(req
, EIO
);
/* Mark the stream dead so later ops see a disconnected state. */
1000 cli_nps
->cli
= NULL
;
1002 tevent_req_done(req
);
/*
 * tstream op: collect the disconnect result (0, or -1 with *perrno set)
 * and release the request.
 * NOTE(review): the "int ret;" declaration and final "return ret;" were
 * dropped by the extraction; verify against upstream.
 */
1005 static int tstream_cli_np_disconnect_recv(struct tevent_req
*req
,
1010 ret
= tsocket_simple_int_recv(req
, perrno
);
1012 tevent_req_received(req
);
/*
 * Ops table wiring this backend into the generic tstream API.
 * NOTE(review): the ".name = ..." member and closing "};" appear to have
 * been dropped by the extraction; verify against upstream.
 */
1016 static const struct tstream_context_ops tstream_cli_np_ops
= {
1019 .pending_bytes
= tstream_cli_np_pending_bytes
,
1021 .readv_send
= tstream_cli_np_readv_send
,
1022 .readv_recv
= tstream_cli_np_readv_recv
,
1024 .writev_send
= tstream_cli_np_writev_send
,
1025 .writev_recv
= tstream_cli_np_writev_recv
,
1027 .disconnect_send
= tstream_cli_np_disconnect_send
,
1028 .disconnect_recv
= tstream_cli_np_disconnect_recv
,
/*
 * Wrap an already-opened pipe fnum (and cli connection) in a new tstream
 * without performing the NTCreate — synchronous counterpart of
 * _tstream_cli_np_open_recv().
 * NOTE(review): extraction dropped lines — the "fnum" parameter, the
 * stream NULL check, the "cli_nps->cli = cli;" assignment, trans/setup
 * initialization and the "*_stream = stream;" line are missing; verify
 * against upstream.
 */
1031 NTSTATUS
_tstream_cli_np_existing(TALLOC_CTX
*mem_ctx
,
1032 struct cli_state
*cli
,
1034 struct tstream_context
**_stream
,
1035 const char *location
)
1037 struct tstream_context
*stream
;
1038 struct tstream_cli_np
*cli_nps
;
1040 stream
= tstream_context_create(mem_ctx
,
1041 &tstream_cli_np_ops
,
1043 struct tstream_cli_np
,
1046 return NT_STATUS_NO_MEMORY
;
1048 ZERO_STRUCTP(cli_nps
);
1051 cli_nps
->fnum
= fnum
;
1054 return NT_STATUS_OK
;