/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "system/network.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/tsocket/tsocket_internal.h"
#include "cli_np_tstream.h"
static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Windows uses 4280 (the maximum xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as the maximum data size of an SMBtrans request and
 * then possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (maximum data size) in the
 * SMBtrans request to get the whole fragment at once (as samba 3.5.x and
 * below did).
 *
 * It is important that we do SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195.)
 */
#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
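/*
 * Rough sizing sketch (illustrative only, derived from the comment above):
 * a full DCERPC fragment of 4280 bytes can be fetched in one round trip
 * when we advertise 4280 as the SMBtrans max data size.  A Windows-style
 * client that only asks for 1024 bytes in the SMBtrans would have to read
 * the remaining 4280 - 1024 = 3256 bytes with a follow-up SMBreadX.
 */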
struct tstream_cli_np {
	struct cli_state *cli;
	const char *npipe;
	uint16_t fnum;
	unsigned int default_timeout;

	struct {
		bool active;
		struct tevent_req *read_req;
		struct tevent_req *write_req;
		uint16_t setup[2];
	} trans;

	struct {
		off_t ofs;
		size_t left;
		uint8_t *buf;
	} read, write;
};
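/*
 * Note on the state above (descriptive, not from the original source):
 * read/write hold a staging buffer of at most TSTREAM_CLI_NP_MAX_BUF_SIZE
 * bytes plus the current offset and the number of bytes still unconsumed,
 * while trans tracks a pending readv/writev pair that is to be combined
 * into a single SMBtrans round trip (see tstream_cli_np_use_trans()).
 */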
static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens if a caller does talloc_free()
	 * while everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	status = cli_close(cli_nps->cli, cli_nps->fnum);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}
	/*
	 * We can't do much on failure
	 */
	return 0;
}
struct tstream_cli_np_open_state {
	struct cli_state *cli;
	uint16_t fnum;
	const char *npipe;
};

static void tstream_cli_np_open_done(struct tevent_req *subreq);
struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;

	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = cli_ntcreate_send(state, ev, cli,
				   npipe,
				   0,
				   DESIRED_ACCESS_PIPE,
				   0,
				   FILE_SHARE_READ|FILE_SHARE_WRITE,
				   FILE_OPEN,
				   0,
				   0);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}
static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	status = cli_ntcreate_recv(subreq, &state->fnum);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}
NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (stream == NULL) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->fnum = state->fnum;
	/* query the current timeout without changing it permanently */
	cli_nps->default_timeout = cli_set_timeout(state->cli, 0);
	cli_set_timeout(state->cli, cli_nps->default_timeout);

	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
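/*
 * Illustrative usage sketch (not part of the original file): a caller
 * would typically drive the open asynchronously and then use the generic
 * tstream API on the resulting stream.  The tstream_cli_np_open_recv()
 * wrapper around _tstream_cli_np_open_recv() is assumed to be provided
 * by cli_np_tstream.h.
 *
 *	subreq = tstream_cli_np_open_send(mem_ctx, ev, cli, "lsarpc");
 *	... wait for subreq ...
 *	status = tstream_cli_np_open_recv(subreq, mem_ctx, &stream);
 *	... then e.g. tstream_writev_send(mem_ctx, ev, stream, iov, 1); ...
 */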
static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		errno = ENOTCONN;
		return -1;
	}

	return cli_nps->read.left;
}
bool tstream_is_cli_np(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps =
		talloc_get_type(_tstream_context_data(stream),
				struct tstream_cli_np);

	if (cli_nps == NULL) {
		return false;
	}

	return true;
}
NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (cli_nps->trans.read_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.write_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.active) {
		return NT_STATUS_PIPE_BUSY;
	}

	cli_nps->trans.active = true;

	return NT_STATUS_OK;
}
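/*
 * Note (descriptive, not from the original source): after
 * tstream_cli_np_use_trans() succeeds, the next writev on this stream does
 * not issue an SMBwriteX on its own.  Instead the buffered data is kept in
 * write.buf and the following readv sends it as the data portion of a
 * single SMBtrans, whose response then feeds read.buf; see
 * tstream_cli_np_writev_write_next() and tstream_cli_np_readv_trans_start().
 */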
unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
					unsigned int timeout)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		return cli_nps->default_timeout;
	}

	return cli_set_timeout(cli_nps->cli, timeout);
}
struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	return cli_nps->cli;
}
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		int val;
		const char *location;
	} error;
};
static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}
static void tstream_cli_np_writev_write_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (req == NULL) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);
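/*
 * Descriptive note (not from the original source): the helper below drains
 * the copied iovec into write.buf in chunks of at most
 * TSTREAM_CLI_NP_MAX_BUF_SIZE bytes.  Each full chunk is normally pushed
 * with a message-mode SMBwriteX; if a combined SMBtrans was requested via
 * tstream_cli_np_use_trans(), the last chunk is parked for the trans call
 * instead.
 */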
static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;
	size_t i;
	size_t left = 0;

	for (i=0; i < state->count; i++) {
		left += state->vector[i].iov_len;
	}

	if (left == 0) {
		TALLOC_FREE(cli_nps->write.buf);
		tevent_req_done(req);
		return;
	}

	cli_nps->write.ofs = 0;
	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
					    uint8_t, cli_nps->write.left);
	if (tevent_req_nomem(cli_nps->write.buf, req)) {
		return;
	}

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->trans.active && state->count == 0) {
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
				     cli_nps->fnum,
				     8, /* 8 means message mode. */
				     cli_nps->write.buf, 0,
				     cli_nps->write.ofs);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_writev_recv(struct tevent_req *req,
				      int *perrno)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		struct tevent_immediate *im;
	} trans;

	struct {
		int val;
		const char *location;
	} error;
};
static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}

static void tstream_cli_np_readv_read_next(struct tevent_req *req);
static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (req == NULL) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);
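/*
 * Descriptive note (not from the original source): the helper below first
 * satisfies the caller's iovec from any data still left in read.buf.  Only
 * when the staging buffer is empty and more data is wanted does it fetch
 * the next message, either via a plain SMBreadX or, if a combined SMBtrans
 * is pending, by calling tstream_cli_np_readv_trans_start().
 */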
static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->read.left == 0) {
		TALLOC_FREE(cli_nps->read.buf);
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
				    cli_nps->fnum, 0,
				    TSTREAM_CLI_NP_MAX_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);
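/*
 * Descriptive note (not from the original source): this path is taken when
 * a writev parked its buffer for a combined call.  The parked write.buf is
 * sent as the data block of a single SMBtrans (TRANSACT_DCERPCCMD) and the
 * response is delivered straight into read.buf, saving one round trip
 * compared to a separate SMBwriteX followed by an SMBreadX.
 */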
static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	subreq = cli_trans_send(state, state->ev,
				cli_nps->cli, SMBtrans,
				"\\PIPE\\", 0, 0, 0,
				cli_nps->trans.setup, 2,
				0,
				NULL, 0, 0,
				cli_nps->write.buf,
				cli_nps->write.ofs,
				TSTREAM_CLI_NP_MAX_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data);
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
				NULL, 0, NULL,
				&rcvbuf, 0, &received);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_move(cli_nps, &rcvbuf);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
		struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_error(struct tevent_req *req);
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data);
static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	/* fail the readv req via an immediate event after the writev req */
	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
		struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_readv_recv(struct tevent_req *req,
				     int *perrno)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}
static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							   struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
static const struct tstream_context_ops tstream_cli_np_ops = {
	.name			= "cli_np",

	.pending_bytes		= tstream_cli_np_pending_bytes,

	.readv_send		= tstream_cli_np_readv_send,
	.readv_recv		= tstream_cli_np_readv_recv,

	.writev_send		= tstream_cli_np_writev_send,
	.writev_recv		= tstream_cli_np_writev_recv,

	.disconnect_send	= tstream_cli_np_disconnect_send,
	.disconnect_recv	= tstream_cli_np_disconnect_recv,
};
NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
				  struct cli_state *cli,
				  uint16_t fnum,
				  struct tstream_context **_stream,
				  const char *location)
{
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (stream == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = cli;
	cli_nps->fnum = fnum;

	*_stream = stream;
	return NT_STATUS_OK;
}