2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "system/network.h"
22 #include "libsmb/libsmb.h"
23 #include "../lib/util/tevent_ntstatus.h"
24 #include "../lib/tsocket/tsocket.h"
25 #include "../lib/tsocket/tsocket_internal.h"
26 #include "cli_np_tstream.h"
28 static const struct tstream_context_ops tstream_cli_np_ops
;
31 * Window uses 1024 hardcoded for read size and trans max data
33 #define TSTREAM_CLI_NP_BUF_SIZE 1024
35 struct tstream_cli_np
{
36 struct cli_state
*cli
;
39 unsigned int default_timeout
;
43 struct tevent_req
*read_req
;
44 struct tevent_req
*write_req
;
51 uint8_t buf
[TSTREAM_CLI_NP_BUF_SIZE
];
55 static int tstream_cli_np_destructor(struct tstream_cli_np
*cli_nps
)
59 if (!cli_state_is_connected(cli_nps
->cli
)) {
64 * TODO: do not use a sync call with a destructor!!!
66 * This only happens, if a caller does talloc_free(),
67 * while the everything was still ok.
69 * If we get an unexpected failure within a normal
70 * operation, we already do an async cli_close_send()/_recv().
72 * Once we've fixed all callers to call
73 * tstream_disconnect_send()/_recv(), this will
76 status
= cli_close(cli_nps
->cli
, cli_nps
->fnum
);
77 if (!NT_STATUS_IS_OK(status
)) {
78 DEBUG(1, ("tstream_cli_np_destructor: cli_close "
79 "failed on pipe %s. Error was %s\n",
80 cli_nps
->npipe
, nt_errstr(status
)));
83 * We can't do much on failure
/* async state for tstream_cli_np_open_send()/_recv() */
struct tstream_cli_np_open_state {
	struct cli_state *cli;
	uint16_t fnum;		/* filled in by cli_ntcreate_recv() */
	const char *npipe;	/* copy of the requested pipe name */
};
94 static void tstream_cli_np_open_done(struct tevent_req
*subreq
);
96 struct tevent_req
*tstream_cli_np_open_send(TALLOC_CTX
*mem_ctx
,
97 struct tevent_context
*ev
,
98 struct cli_state
*cli
,
101 struct tevent_req
*req
;
102 struct tstream_cli_np_open_state
*state
;
103 struct tevent_req
*subreq
;
105 req
= tevent_req_create(mem_ctx
, &state
,
106 struct tstream_cli_np_open_state
);
112 state
->npipe
= talloc_strdup(state
, npipe
);
113 if (tevent_req_nomem(state
->npipe
, req
)) {
114 return tevent_req_post(req
, ev
);
117 subreq
= cli_ntcreate_send(state
, ev
, cli
,
122 FILE_SHARE_READ
|FILE_SHARE_WRITE
,
126 if (tevent_req_nomem(subreq
, req
)) {
127 return tevent_req_post(req
, ev
);
129 tevent_req_set_callback(subreq
, tstream_cli_np_open_done
, req
);
134 static void tstream_cli_np_open_done(struct tevent_req
*subreq
)
136 struct tevent_req
*req
=
137 tevent_req_callback_data(subreq
, struct tevent_req
);
138 struct tstream_cli_np_open_state
*state
=
139 tevent_req_data(req
, struct tstream_cli_np_open_state
);
142 status
= cli_ntcreate_recv(subreq
, &state
->fnum
);
144 if (!NT_STATUS_IS_OK(status
)) {
145 tevent_req_nterror(req
, status
);
149 tevent_req_done(req
);
152 NTSTATUS
_tstream_cli_np_open_recv(struct tevent_req
*req
,
154 struct tstream_context
**_stream
,
155 const char *location
)
157 struct tstream_cli_np_open_state
*state
=
158 tevent_req_data(req
, struct tstream_cli_np_open_state
);
159 struct tstream_context
*stream
;
160 struct tstream_cli_np
*cli_nps
;
163 if (tevent_req_is_nterror(req
, &status
)) {
164 tevent_req_received(req
);
168 stream
= tstream_context_create(mem_ctx
,
171 struct tstream_cli_np
,
174 tevent_req_received(req
);
175 return NT_STATUS_NO_MEMORY
;
177 ZERO_STRUCTP(cli_nps
);
179 cli_nps
->cli
= state
->cli
;
180 cli_nps
->npipe
= talloc_move(cli_nps
, &state
->npipe
);
181 cli_nps
->fnum
= state
->fnum
;
182 cli_nps
->default_timeout
= state
->cli
->timeout
;
184 talloc_set_destructor(cli_nps
, tstream_cli_np_destructor
);
186 cli_nps
->trans
.active
= false;
187 cli_nps
->trans
.read_req
= NULL
;
188 cli_nps
->trans
.write_req
= NULL
;
189 SSVAL(cli_nps
->trans
.setup
+0, 0, TRANSACT_DCERPCCMD
);
190 SSVAL(cli_nps
->trans
.setup
+1, 0, cli_nps
->fnum
);
193 tevent_req_received(req
);
197 static ssize_t
tstream_cli_np_pending_bytes(struct tstream_context
*stream
)
199 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
200 struct tstream_cli_np
);
202 if (!cli_state_is_connected(cli_nps
->cli
)) {
207 return cli_nps
->read
.left
;
210 bool tstream_is_cli_np(struct tstream_context
*stream
)
212 struct tstream_cli_np
*cli_nps
=
213 talloc_get_type(_tstream_context_data(stream
),
214 struct tstream_cli_np
);
223 NTSTATUS
tstream_cli_np_use_trans(struct tstream_context
*stream
)
225 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
226 struct tstream_cli_np
);
228 if (cli_nps
->trans
.read_req
) {
229 return NT_STATUS_PIPE_BUSY
;
232 if (cli_nps
->trans
.write_req
) {
233 return NT_STATUS_PIPE_BUSY
;
236 if (cli_nps
->trans
.active
) {
237 return NT_STATUS_PIPE_BUSY
;
240 cli_nps
->trans
.active
= true;
245 unsigned int tstream_cli_np_set_timeout(struct tstream_context
*stream
,
246 unsigned int timeout
)
248 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
249 struct tstream_cli_np
);
251 if (!cli_state_is_connected(cli_nps
->cli
)) {
252 return cli_nps
->default_timeout
;
255 return cli_set_timeout(cli_nps
->cli
, timeout
);
258 struct cli_state
*tstream_cli_np_get_cli_state(struct tstream_context
*stream
)
260 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
261 struct tstream_cli_np
);
/* async state for tstream_cli_np_writev_send()/_recv() */
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;	/* private copy, consumed as we go */
	size_t count;

	int ret;		/* total bytes accepted so far */

	struct {
		int val;		/* errno to report */
		const char *location;	/* where the error was raised */
	} error;
};
281 static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state
*state
)
283 struct tstream_cli_np
*cli_nps
=
284 tstream_context_data(state
->stream
,
285 struct tstream_cli_np
);
287 cli_nps
->trans
.write_req
= NULL
;
292 static void tstream_cli_np_writev_write_next(struct tevent_req
*req
);
294 static struct tevent_req
*tstream_cli_np_writev_send(TALLOC_CTX
*mem_ctx
,
295 struct tevent_context
*ev
,
296 struct tstream_context
*stream
,
297 const struct iovec
*vector
,
300 struct tevent_req
*req
;
301 struct tstream_cli_np_writev_state
*state
;
302 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
303 struct tstream_cli_np
);
305 req
= tevent_req_create(mem_ctx
, &state
,
306 struct tstream_cli_np_writev_state
);
310 state
->stream
= stream
;
314 talloc_set_destructor(state
, tstream_cli_np_writev_state_destructor
);
316 if (!cli_state_is_connected(cli_nps
->cli
)) {
317 tevent_req_error(req
, ENOTCONN
);
318 return tevent_req_post(req
, ev
);
322 * we make a copy of the vector so we can change the structure
324 state
->vector
= talloc_array(state
, struct iovec
, count
);
325 if (tevent_req_nomem(state
->vector
, req
)) {
326 return tevent_req_post(req
, ev
);
328 memcpy(state
->vector
, vector
, sizeof(struct iovec
) * count
);
329 state
->count
= count
;
331 tstream_cli_np_writev_write_next(req
);
332 if (!tevent_req_is_in_progress(req
)) {
333 return tevent_req_post(req
, ev
);
339 static void tstream_cli_np_readv_trans_start(struct tevent_req
*req
);
340 static void tstream_cli_np_writev_write_done(struct tevent_req
*subreq
);
342 static void tstream_cli_np_writev_write_next(struct tevent_req
*req
)
344 struct tstream_cli_np_writev_state
*state
=
346 struct tstream_cli_np_writev_state
);
347 struct tstream_cli_np
*cli_nps
=
348 tstream_context_data(state
->stream
,
349 struct tstream_cli_np
);
350 struct tevent_req
*subreq
;
352 cli_nps
->write
.ofs
= 0;
353 cli_nps
->write
.left
= TSTREAM_CLI_NP_BUF_SIZE
;
356 * copy the pending buffer first
358 while (cli_nps
->write
.left
> 0 && state
->count
> 0) {
359 uint8_t *base
= (uint8_t *)state
->vector
[0].iov_base
;
360 size_t len
= MIN(cli_nps
->write
.left
, state
->vector
[0].iov_len
);
362 memcpy(cli_nps
->write
.buf
+ cli_nps
->write
.ofs
, base
, len
);
365 state
->vector
[0].iov_base
= base
;
366 state
->vector
[0].iov_len
-= len
;
368 cli_nps
->write
.ofs
+= len
;
369 cli_nps
->write
.left
-= len
;
371 if (state
->vector
[0].iov_len
== 0) {
379 if (cli_nps
->write
.ofs
== 0) {
380 tevent_req_done(req
);
384 if (cli_nps
->trans
.active
&& state
->count
== 0) {
385 cli_nps
->trans
.active
= false;
386 cli_nps
->trans
.write_req
= req
;
390 if (cli_nps
->trans
.read_req
&& state
->count
== 0) {
391 cli_nps
->trans
.write_req
= req
;
392 tstream_cli_np_readv_trans_start(cli_nps
->trans
.read_req
);
396 subreq
= cli_write_andx_send(state
, state
->ev
, cli_nps
->cli
,
398 8, /* 8 means message mode. */
399 cli_nps
->write
.buf
, 0,
401 if (tevent_req_nomem(subreq
, req
)) {
404 tevent_req_set_callback(subreq
,
405 tstream_cli_np_writev_write_done
,
409 static void tstream_cli_np_writev_disconnect_now(struct tevent_req
*req
,
411 const char *location
);
413 static void tstream_cli_np_writev_write_done(struct tevent_req
*subreq
)
415 struct tevent_req
*req
=
416 tevent_req_callback_data(subreq
, struct tevent_req
);
417 struct tstream_cli_np_writev_state
*state
=
418 tevent_req_data(req
, struct tstream_cli_np_writev_state
);
419 struct tstream_cli_np
*cli_nps
=
420 tstream_context_data(state
->stream
,
421 struct tstream_cli_np
);
425 status
= cli_write_andx_recv(subreq
, &written
);
427 if (!NT_STATUS_IS_OK(status
)) {
428 tstream_cli_np_writev_disconnect_now(req
, EIO
, __location__
);
432 if (written
!= cli_nps
->write
.ofs
) {
433 tstream_cli_np_writev_disconnect_now(req
, EIO
, __location__
);
437 tstream_cli_np_writev_write_next(req
);
440 static void tstream_cli_np_writev_disconnect_done(struct tevent_req
*subreq
);
442 static void tstream_cli_np_writev_disconnect_now(struct tevent_req
*req
,
444 const char *location
)
446 struct tstream_cli_np_writev_state
*state
=
448 struct tstream_cli_np_writev_state
);
449 struct tstream_cli_np
*cli_nps
=
450 tstream_context_data(state
->stream
,
451 struct tstream_cli_np
);
452 struct tevent_req
*subreq
;
454 state
->error
.val
= error
;
455 state
->error
.location
= location
;
457 if (!cli_state_is_connected(cli_nps
->cli
)) {
458 /* return the original error */
459 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
463 subreq
= cli_close_send(state
, state
->ev
, cli_nps
->cli
, cli_nps
->fnum
);
464 if (subreq
== NULL
) {
465 /* return the original error */
466 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
469 tevent_req_set_callback(subreq
,
470 tstream_cli_np_writev_disconnect_done
,
474 static void tstream_cli_np_writev_disconnect_done(struct tevent_req
*subreq
)
476 struct tevent_req
*req
=
477 tevent_req_callback_data(subreq
, struct tevent_req
);
478 struct tstream_cli_np_writev_state
*state
=
479 tevent_req_data(req
, struct tstream_cli_np_writev_state
);
480 struct tstream_cli_np
*cli_nps
=
481 tstream_context_data(state
->stream
, struct tstream_cli_np
);
484 status
= cli_close_recv(subreq
);
489 /* return the original error */
490 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
493 static int tstream_cli_np_writev_recv(struct tevent_req
*req
,
496 struct tstream_cli_np_writev_state
*state
=
498 struct tstream_cli_np_writev_state
);
501 ret
= tsocket_simple_int_recv(req
, perrno
);
506 tevent_req_received(req
);
/* async state for tstream_cli_np_readv_send()/_recv() */
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;	/* private copy, consumed as we go */
	size_t count;

	int ret;		/* total bytes delivered so far */

	struct {
		struct tevent_immediate *im;	/* defers wakeups out of callbacks */
	} trans;

	struct {
		int val;		/* errno to report */
		const char *location;	/* where the error was raised */
	} error;
};
529 static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state
*state
)
531 struct tstream_cli_np
*cli_nps
=
532 tstream_context_data(state
->stream
,
533 struct tstream_cli_np
);
535 cli_nps
->trans
.read_req
= NULL
;
540 static void tstream_cli_np_readv_read_next(struct tevent_req
*req
);
542 static struct tevent_req
*tstream_cli_np_readv_send(TALLOC_CTX
*mem_ctx
,
543 struct tevent_context
*ev
,
544 struct tstream_context
*stream
,
545 struct iovec
*vector
,
548 struct tevent_req
*req
;
549 struct tstream_cli_np_readv_state
*state
;
550 struct tstream_cli_np
*cli_nps
=
551 tstream_context_data(stream
, struct tstream_cli_np
);
553 req
= tevent_req_create(mem_ctx
, &state
,
554 struct tstream_cli_np_readv_state
);
558 state
->stream
= stream
;
562 talloc_set_destructor(state
, tstream_cli_np_readv_state_destructor
);
564 if (!cli_state_is_connected(cli_nps
->cli
)) {
565 tevent_req_error(req
, ENOTCONN
);
566 return tevent_req_post(req
, ev
);
570 * we make a copy of the vector so we can change the structure
572 state
->vector
= talloc_array(state
, struct iovec
, count
);
573 if (tevent_req_nomem(state
->vector
, req
)) {
574 return tevent_req_post(req
, ev
);
576 memcpy(state
->vector
, vector
, sizeof(struct iovec
) * count
);
577 state
->count
= count
;
579 tstream_cli_np_readv_read_next(req
);
580 if (!tevent_req_is_in_progress(req
)) {
581 return tevent_req_post(req
, ev
);
587 static void tstream_cli_np_readv_read_done(struct tevent_req
*subreq
);
589 static void tstream_cli_np_readv_read_next(struct tevent_req
*req
)
591 struct tstream_cli_np_readv_state
*state
=
593 struct tstream_cli_np_readv_state
);
594 struct tstream_cli_np
*cli_nps
=
595 tstream_context_data(state
->stream
,
596 struct tstream_cli_np
);
597 struct tevent_req
*subreq
;
600 * copy the pending buffer first
602 while (cli_nps
->read
.left
> 0 && state
->count
> 0) {
603 uint8_t *base
= (uint8_t *)state
->vector
[0].iov_base
;
604 size_t len
= MIN(cli_nps
->read
.left
, state
->vector
[0].iov_len
);
606 memcpy(base
, cli_nps
->read
.buf
+ cli_nps
->read
.ofs
, len
);
609 state
->vector
[0].iov_base
= base
;
610 state
->vector
[0].iov_len
-= len
;
612 cli_nps
->read
.ofs
+= len
;
613 cli_nps
->read
.left
-= len
;
615 if (state
->vector
[0].iov_len
== 0) {
623 if (state
->count
== 0) {
624 tevent_req_done(req
);
628 if (cli_nps
->trans
.active
) {
629 cli_nps
->trans
.active
= false;
630 cli_nps
->trans
.read_req
= req
;
634 if (cli_nps
->trans
.write_req
) {
635 cli_nps
->trans
.read_req
= req
;
636 tstream_cli_np_readv_trans_start(req
);
640 subreq
= cli_read_andx_send(state
, state
->ev
, cli_nps
->cli
,
641 cli_nps
->fnum
, 0, TSTREAM_CLI_NP_BUF_SIZE
);
642 if (tevent_req_nomem(subreq
, req
)) {
645 tevent_req_set_callback(subreq
,
646 tstream_cli_np_readv_read_done
,
650 static void tstream_cli_np_readv_trans_done(struct tevent_req
*subreq
);
652 static void tstream_cli_np_readv_trans_start(struct tevent_req
*req
)
654 struct tstream_cli_np_readv_state
*state
=
656 struct tstream_cli_np_readv_state
);
657 struct tstream_cli_np
*cli_nps
=
658 tstream_context_data(state
->stream
,
659 struct tstream_cli_np
);
660 struct tevent_req
*subreq
;
662 state
->trans
.im
= tevent_create_immediate(state
);
663 if (tevent_req_nomem(state
->trans
.im
, req
)) {
667 subreq
= cli_trans_send(state
, state
->ev
,
672 cli_nps
->trans
.setup
, 2,
677 TSTREAM_CLI_NP_BUF_SIZE
);
678 if (tevent_req_nomem(subreq
, req
)) {
681 tevent_req_set_callback(subreq
,
682 tstream_cli_np_readv_trans_done
,
686 static void tstream_cli_np_readv_disconnect_now(struct tevent_req
*req
,
688 const char *location
);
689 static void tstream_cli_np_readv_trans_next(struct tevent_context
*ctx
,
690 struct tevent_immediate
*im
,
693 static void tstream_cli_np_readv_trans_done(struct tevent_req
*subreq
)
695 struct tevent_req
*req
=
696 tevent_req_callback_data(subreq
, struct tevent_req
);
697 struct tstream_cli_np_readv_state
*state
=
698 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
699 struct tstream_cli_np
*cli_nps
=
700 tstream_context_data(state
->stream
, struct tstream_cli_np
);
705 status
= cli_trans_recv(subreq
, state
, NULL
, NULL
, 0, NULL
,
707 &rcvbuf
, 0, &received
);
709 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
710 status
= NT_STATUS_OK
;
712 if (!NT_STATUS_IS_OK(status
)) {
713 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
717 if (received
> TSTREAM_CLI_NP_BUF_SIZE
) {
718 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
723 tstream_cli_np_readv_disconnect_now(req
, EPIPE
, __location__
);
727 cli_nps
->read
.ofs
= 0;
728 cli_nps
->read
.left
= received
;
729 memcpy(cli_nps
->read
.buf
, rcvbuf
, received
);
732 if (cli_nps
->trans
.write_req
== NULL
) {
733 tstream_cli_np_readv_read_next(req
);
737 tevent_schedule_immediate(state
->trans
.im
, state
->ev
,
738 tstream_cli_np_readv_trans_next
, req
);
740 tevent_req_done(cli_nps
->trans
.write_req
);
743 static void tstream_cli_np_readv_trans_next(struct tevent_context
*ctx
,
744 struct tevent_immediate
*im
,
747 struct tevent_req
*req
=
748 talloc_get_type_abort(private_data
,
751 tstream_cli_np_readv_read_next(req
);
754 static void tstream_cli_np_readv_read_done(struct tevent_req
*subreq
)
756 struct tevent_req
*req
=
757 tevent_req_callback_data(subreq
, struct tevent_req
);
758 struct tstream_cli_np_readv_state
*state
=
759 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
760 struct tstream_cli_np
*cli_nps
=
761 tstream_context_data(state
->stream
, struct tstream_cli_np
);
767 * We must free subreq in this function as there is
768 * a timer event attached to it.
771 status
= cli_read_andx_recv(subreq
, &received
, &rcvbuf
);
773 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
776 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
778 * NT_STATUS_BUFFER_TOO_SMALL means that there's
779 * more data to read when the named pipe is used
780 * in message mode (which is the case here).
782 * But we hide this from the caller.
784 status
= NT_STATUS_OK
;
786 if (!NT_STATUS_IS_OK(status
)) {
788 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
792 if (received
> TSTREAM_CLI_NP_BUF_SIZE
) {
794 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
800 tstream_cli_np_readv_disconnect_now(req
, EPIPE
, __location__
);
804 cli_nps
->read
.ofs
= 0;
805 cli_nps
->read
.left
= received
;
806 memcpy(cli_nps
->read
.buf
, rcvbuf
, received
);
809 tstream_cli_np_readv_read_next(req
);
812 static void tstream_cli_np_readv_disconnect_done(struct tevent_req
*subreq
);
814 static void tstream_cli_np_readv_error(struct tevent_req
*req
);
816 static void tstream_cli_np_readv_disconnect_now(struct tevent_req
*req
,
818 const char *location
)
820 struct tstream_cli_np_readv_state
*state
=
822 struct tstream_cli_np_readv_state
);
823 struct tstream_cli_np
*cli_nps
=
824 tstream_context_data(state
->stream
,
825 struct tstream_cli_np
);
826 struct tevent_req
*subreq
;
828 state
->error
.val
= error
;
829 state
->error
.location
= location
;
831 if (!cli_state_is_connected(cli_nps
->cli
)) {
832 /* return the original error */
833 tstream_cli_np_readv_error(req
);
837 subreq
= cli_close_send(state
, state
->ev
, cli_nps
->cli
, cli_nps
->fnum
);
838 if (subreq
== NULL
) {
839 /* return the original error */
840 tstream_cli_np_readv_error(req
);
843 tevent_req_set_callback(subreq
,
844 tstream_cli_np_readv_disconnect_done
,
848 static void tstream_cli_np_readv_disconnect_done(struct tevent_req
*subreq
)
850 struct tevent_req
*req
=
851 tevent_req_callback_data(subreq
, struct tevent_req
);
852 struct tstream_cli_np_readv_state
*state
=
853 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
854 struct tstream_cli_np
*cli_nps
=
855 tstream_context_data(state
->stream
, struct tstream_cli_np
);
858 status
= cli_close_recv(subreq
);
863 tstream_cli_np_readv_error(req
);
866 static void tstream_cli_np_readv_error_trigger(struct tevent_context
*ctx
,
867 struct tevent_immediate
*im
,
870 static void tstream_cli_np_readv_error(struct tevent_req
*req
)
872 struct tstream_cli_np_readv_state
*state
=
874 struct tstream_cli_np_readv_state
);
875 struct tstream_cli_np
*cli_nps
=
876 tstream_context_data(state
->stream
,
877 struct tstream_cli_np
);
879 if (cli_nps
->trans
.write_req
== NULL
) {
880 /* return the original error */
881 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
885 if (state
->trans
.im
== NULL
) {
886 /* return the original error */
887 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
891 tevent_schedule_immediate(state
->trans
.im
, state
->ev
,
892 tstream_cli_np_readv_error_trigger
, req
);
894 /* return the original error for writev */
895 _tevent_req_error(cli_nps
->trans
.write_req
,
896 state
->error
.val
, state
->error
.location
);
899 static void tstream_cli_np_readv_error_trigger(struct tevent_context
*ctx
,
900 struct tevent_immediate
*im
,
903 struct tevent_req
*req
=
904 talloc_get_type_abort(private_data
,
906 struct tstream_cli_np_readv_state
*state
=
908 struct tstream_cli_np_readv_state
);
910 /* return the original error */
911 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
914 static int tstream_cli_np_readv_recv(struct tevent_req
*req
,
917 struct tstream_cli_np_readv_state
*state
=
918 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
921 ret
= tsocket_simple_int_recv(req
, perrno
);
926 tevent_req_received(req
);
/* async state for tstream_cli_np_disconnect_send()/_recv() */
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
936 static struct tevent_req
*tstream_cli_np_disconnect_send(TALLOC_CTX
*mem_ctx
,
937 struct tevent_context
*ev
,
938 struct tstream_context
*stream
)
940 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
941 struct tstream_cli_np
);
942 struct tevent_req
*req
;
943 struct tstream_cli_np_disconnect_state
*state
;
944 struct tevent_req
*subreq
;
946 req
= tevent_req_create(mem_ctx
, &state
,
947 struct tstream_cli_np_disconnect_state
);
952 state
->stream
= stream
;
954 if (!cli_state_is_connected(cli_nps
->cli
)) {
955 tevent_req_error(req
, ENOTCONN
);
956 return tevent_req_post(req
, ev
);
959 subreq
= cli_close_send(state
, ev
, cli_nps
->cli
, cli_nps
->fnum
);
960 if (tevent_req_nomem(subreq
, req
)) {
961 return tevent_req_post(req
, ev
);
963 tevent_req_set_callback(subreq
, tstream_cli_np_disconnect_done
, req
);
968 static void tstream_cli_np_disconnect_done(struct tevent_req
*subreq
)
970 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
972 struct tstream_cli_np_disconnect_state
*state
=
973 tevent_req_data(req
, struct tstream_cli_np_disconnect_state
);
974 struct tstream_cli_np
*cli_nps
=
975 tstream_context_data(state
->stream
, struct tstream_cli_np
);
978 status
= cli_close_recv(subreq
);
980 if (!NT_STATUS_IS_OK(status
)) {
981 tevent_req_error(req
, EIO
);
987 tevent_req_done(req
);
/* tstream op: 0 on success, -1 with *perrno on failure */
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
1001 static const struct tstream_context_ops tstream_cli_np_ops
= {
1004 .pending_bytes
= tstream_cli_np_pending_bytes
,
1006 .readv_send
= tstream_cli_np_readv_send
,
1007 .readv_recv
= tstream_cli_np_readv_recv
,
1009 .writev_send
= tstream_cli_np_writev_send
,
1010 .writev_recv
= tstream_cli_np_writev_recv
,
1012 .disconnect_send
= tstream_cli_np_disconnect_send
,
1013 .disconnect_recv
= tstream_cli_np_disconnect_recv
,
1016 NTSTATUS
_tstream_cli_np_existing(TALLOC_CTX
*mem_ctx
,
1017 struct cli_state
*cli
,
1019 struct tstream_context
**_stream
,
1020 const char *location
)
1022 struct tstream_context
*stream
;
1023 struct tstream_cli_np
*cli_nps
;
1025 stream
= tstream_context_create(mem_ctx
,
1026 &tstream_cli_np_ops
,
1028 struct tstream_cli_np
,
1031 return NT_STATUS_NO_MEMORY
;
1033 ZERO_STRUCTP(cli_nps
);
1036 cli_nps
->fnum
= fnum
;
1039 return NT_STATUS_OK
;