2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "system/network.h"
22 #include "libsmb/libsmb.h"
23 #include "libsmb/smb2cli.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/tsocket/tsocket.h"
26 #include "../lib/tsocket/tsocket_internal.h"
27 #include "cli_np_tstream.h"
29 static const struct tstream_context_ops tstream_cli_np_ops
;
32 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
33 * This fits into the max_xmit negotiated at the SMB layer.
35 * On the sending side they may use SMBtranss if the request does not
36 * fit into a single SMBtrans call.
38 * Windows uses 1024 as max data size of a SMBtrans request and then
39 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
42 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
43 * request to get the whole fragment at once (like samba 3.5.x and below did).
45 * It is important that we use SMBwriteX with the size of a full fragment,
46 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
47 * from NT4 servers. (See bug #8195)
49 #define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
/*
 * Per-stream private state of a named-pipe tstream that tunnels DCERPC
 * over an SMB connection (SMB1 fnum or SMB2 fid pair).
 * NOTE(review): this extract is missing several members referenced by
 * the functions below (npipe, is_smb1, fnum, and the trans/read/write
 * sub-structs) -- confirm against the full file.
 */
51 struct tstream_cli_np
{
/* SMB connection the pipe lives on; set to NULL after disconnect */
52 struct cli_state
*cli
;
/* SMB2 handle pair identifying the open pipe */
56 uint64_t fid_persistent
;
57 uint64_t fid_volatile
;
/* cli timeout saved via cli_set_timeout() at open time */
58 unsigned int default_timeout
;
/* pending readv/writev requests parked while a trans call is queued */
62 struct tevent_req
*read_req
;
63 struct tevent_req
*write_req
;
/*
 * talloc destructor: best-effort close of the pipe handle when the
 * stream is freed while the connection is still alive.  Uses a
 * synchronous close (see TODO below).  Failure is only logged.
 */
74 static int tstream_cli_np_destructor(struct tstream_cli_np
*cli_nps
)
78 if (!cli_state_is_connected(cli_nps
->cli
)) {
83 * TODO: do not use a sync call with a destructor!!!
85 * This only happens, if a caller does talloc_free(),
86 * while everything was still ok.
88 * If we get an unexpected failure within a normal
89 * operation, we already do an async cli_close_send()/_recv().
91 * Once we've fixed all callers to call
92 * tstream_disconnect_send()/_recv(), this will
/* protocol-dependent close: SMB1 uses the fnum, SMB2 the fid pair */
95 if (cli_nps
->is_smb1
) {
96 status
= cli_close(cli_nps
->cli
, cli_nps
->fnum
);
98 status
= smb2cli_close(cli_nps
->cli
->conn
,
99 cli_nps
->cli
->timeout
,
100 cli_nps
->cli
->smb2
.session
,
101 cli_nps
->cli
->smb2
.tid
, 0,
102 cli_nps
->fid_persistent
,
103 cli_nps
->fid_volatile
);
/* destructors cannot report failure usefully; just log it */
105 if (!NT_STATUS_IS_OK(status
)) {
106 DEBUG(1, ("tstream_cli_np_destructor: cli_close "
107 "failed on pipe %s. Error was %s\n",
108 cli_nps
->npipe
, nt_errstr(status
)));
111 * We can't do much on failure
111 * We can't do much on failure
/*
 * State for the async open of a named pipe (tstream_cli_np_open_send).
 * NOTE(review): extract is missing the npipe/is_smb1/fnum members that
 * _open_done/_open_recv store into -- confirm against the full file.
 */
116 struct tstream_cli_np_open_state
{
117 struct cli_state
*cli
;
/* handles filled in by the SMB1/SMB2 create callbacks */
120 uint64_t fid_persistent
;
121 uint64_t fid_volatile
;
125 static void tstream_cli_np_open_done(struct tevent_req
*subreq
);
/*
 * Start an async open of the named pipe.  Chooses SMB1 NTCreate or
 * SMB2 Create based on the negotiated protocol and continues in
 * tstream_cli_np_open_done().
 * NOTE(review): several argument lines of the create calls are missing
 * from this extract (e.g. pipe name, desired access) -- kept as-is.
 */
127 struct tevent_req
*tstream_cli_np_open_send(TALLOC_CTX
*mem_ctx
,
128 struct tevent_context
*ev
,
129 struct cli_state
*cli
,
132 struct tevent_req
*req
;
133 struct tstream_cli_np_open_state
*state
;
134 struct tevent_req
*subreq
;
136 req
= tevent_req_create(mem_ctx
, &state
,
137 struct tstream_cli_np_open_state
);
/* keep our own copy of the pipe name for the lifetime of the state */
143 state
->npipe
= talloc_strdup(state
, npipe
);
144 if (tevent_req_nomem(state
->npipe
, req
)) {
145 return tevent_req_post(req
, ev
);
/* anything below SMB 2.02 goes through the SMB1 code paths */
148 if (cli_state_protocol(cli
) < PROTOCOL_SMB2_02
) {
149 state
->is_smb1
= true;
152 if (state
->is_smb1
) {
153 subreq
= cli_ntcreate_send(state
, ev
, cli
,
158 FILE_SHARE_READ
|FILE_SHARE_WRITE
,
163 subreq
= smb2cli_create_send(state
, ev
, cli
->conn
,
164 cli
->timeout
, cli
->smb2
.session
,
167 SMB2_OPLOCK_LEVEL_NONE
,
168 SMB2_IMPERSONATION_IMPERSONATION
,
170 0, /* file_attributes */
171 FILE_SHARE_READ
|FILE_SHARE_WRITE
,
173 0, /* create_options */
176 if (tevent_req_nomem(subreq
, req
)) {
177 return tevent_req_post(req
, ev
);
179 tevent_req_set_callback(subreq
, tstream_cli_np_open_done
, req
);
/*
 * Completion callback of the SMB1/SMB2 create: collect the pipe handle
 * (fnum or fid pair) into the open state and finish the open request.
 */
184 static void tstream_cli_np_open_done(struct tevent_req
*subreq
)
186 struct tevent_req
*req
=
187 tevent_req_callback_data(subreq
, struct tevent_req
);
188 struct tstream_cli_np_open_state
*state
=
189 tevent_req_data(req
, struct tstream_cli_np_open_state
);
192 if (state
->is_smb1
) {
193 status
= cli_ntcreate_recv(subreq
, &state
->fnum
);
195 status
= smb2cli_create_recv(subreq
,
196 &state
->fid_persistent
,
197 &state
->fid_volatile
);
/* propagate any NTSTATUS failure to the caller of _open_send() */
200 if (!NT_STATUS_IS_OK(status
)) {
201 tevent_req_nterror(req
, status
);
205 tevent_req_done(req
);
/*
 * Finish an async pipe open: build the tstream context, move the
 * collected handles/name out of the open state, install the destructor
 * and initialise the trans bookkeeping (including the SMB1 SMBtrans
 * setup words).  Returns the new stream via *_stream.
 */
208 NTSTATUS
_tstream_cli_np_open_recv(struct tevent_req
*req
,
210 struct tstream_context
**_stream
,
211 const char *location
)
213 struct tstream_cli_np_open_state
*state
=
214 tevent_req_data(req
, struct tstream_cli_np_open_state
);
215 struct tstream_context
*stream
;
216 struct tstream_cli_np
*cli_nps
;
219 if (tevent_req_is_nterror(req
, &status
)) {
220 tevent_req_received(req
);
224 stream
= tstream_context_create(mem_ctx
,
227 struct tstream_cli_np
,
230 tevent_req_received(req
);
231 return NT_STATUS_NO_MEMORY
;
233 ZERO_STRUCTP(cli_nps
);
/* transfer connection, pipe name and handles into the stream state */
235 cli_nps
->cli
= state
->cli
;
236 cli_nps
->npipe
= talloc_move(cli_nps
, &state
->npipe
);
237 cli_nps
->is_smb1
= state
->is_smb1
;
238 cli_nps
->fnum
= state
->fnum
;
239 cli_nps
->fid_persistent
= state
->fid_persistent
;
240 cli_nps
->fid_volatile
= state
->fid_volatile
;
/* remember the cli timeout so set_timeout() can report/restore it */
241 cli_nps
->default_timeout
= cli_set_timeout(state
->cli
, 0);
242 cli_set_timeout(state
->cli
, cli_nps
->default_timeout
);
244 talloc_set_destructor(cli_nps
, tstream_cli_np_destructor
);
/* no trans call queued yet */
246 cli_nps
->trans
.active
= false;
247 cli_nps
->trans
.read_req
= NULL
;
248 cli_nps
->trans
.write_req
= NULL
;
/* SMB1 SMBtrans setup words: TRANSACT_DCERPCCMD + pipe fnum */
249 SSVAL(cli_nps
->trans
.setup
+0, 0, TRANSACT_DCERPCCMD
);
250 SSVAL(cli_nps
->trans
.setup
+1, 0, cli_nps
->fnum
);
253 tevent_req_received(req
);
257 static ssize_t
tstream_cli_np_pending_bytes(struct tstream_context
*stream
)
259 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
260 struct tstream_cli_np
);
262 if (!cli_state_is_connected(cli_nps
->cli
)) {
267 return cli_nps
->read
.left
;
270 bool tstream_is_cli_np(struct tstream_context
*stream
)
272 struct tstream_cli_np
*cli_nps
=
273 talloc_get_type(_tstream_context_data(stream
),
274 struct tstream_cli_np
);
/*
 * Request that the next matched writev+readv pair be sent as a single
 * SMBtrans/SMB2 ioctl round trip.  Fails with NT_STATUS_PIPE_BUSY if a
 * trans call is already queued or active.
 */
283 NTSTATUS
tstream_cli_np_use_trans(struct tstream_context
*stream
)
285 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
286 struct tstream_cli_np
);
288 if (cli_nps
->trans
.read_req
) {
289 return NT_STATUS_PIPE_BUSY
;
292 if (cli_nps
->trans
.write_req
) {
293 return NT_STATUS_PIPE_BUSY
;
296 if (cli_nps
->trans
.active
) {
297 return NT_STATUS_PIPE_BUSY
;
/* arm trans mode; consumed by the next writev/readv pair */
300 cli_nps
->trans
.active
= true;
/*
 * Set the SMB request timeout (msec) for this stream's connection and
 * return the previous value.  When the connection is already gone just
 * report the default timeout recorded at open time.
 */
305 unsigned int tstream_cli_np_set_timeout(struct tstream_context
*stream
,
306 unsigned int timeout
)
308 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
309 struct tstream_cli_np
);
311 if (!cli_state_is_connected(cli_nps
->cli
)) {
312 return cli_nps
->default_timeout
;
315 return cli_set_timeout(cli_nps
->cli
, timeout
);
318 struct cli_state
*tstream_cli_np_get_cli_state(struct tstream_context
*stream
)
320 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
321 struct tstream_cli_np
);
/*
 * State of one async writev on the pipe stream.
 * NOTE(review): extract is missing the count member and the error
 * {val, location} sub-struct used by the functions below.
 */
326 struct tstream_cli_np_writev_state
{
327 struct tstream_context
*stream
;
328 struct tevent_context
*ev
;
/* private copy of the caller's iovec, consumed as data is sent */
330 struct iovec
*vector
;
/* __location__ of the failure, for _tevent_req_error() */
337 const char *location
;
/*
 * talloc destructor: make sure the stream no longer points at this
 * writev request once its state is freed (e.g. caller cancellation).
 */
341 static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state
*state
)
343 struct tstream_cli_np
*cli_nps
=
344 tstream_context_data(state
->stream
,
345 struct tstream_cli_np
);
347 cli_nps
->trans
.write_req
= NULL
;
352 static void tstream_cli_np_writev_write_next(struct tevent_req
*req
);
/*
 * Start an async writev on the pipe.  Copies the caller's iovec so it
 * can be consumed destructively and kicks off the first write chunk
 * via tstream_cli_np_writev_write_next().
 */
354 static struct tevent_req
*tstream_cli_np_writev_send(TALLOC_CTX
*mem_ctx
,
355 struct tevent_context
*ev
,
356 struct tstream_context
*stream
,
357 const struct iovec
*vector
,
360 struct tevent_req
*req
;
361 struct tstream_cli_np_writev_state
*state
;
362 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
363 struct tstream_cli_np
);
365 req
= tevent_req_create(mem_ctx
, &state
,
366 struct tstream_cli_np_writev_state
);
370 state
->stream
= stream
;
374 talloc_set_destructor(state
, tstream_cli_np_writev_state_destructor
);
376 if (!cli_state_is_connected(cli_nps
->cli
)) {
377 tevent_req_error(req
, ENOTCONN
);
378 return tevent_req_post(req
, ev
);
382 * we make a copy of the vector so we can change the structure
384 state
->vector
= talloc_array(state
, struct iovec
, count
);
385 if (tevent_req_nomem(state
->vector
, req
)) {
386 return tevent_req_post(req
, ev
);
388 memcpy(state
->vector
, vector
, sizeof(struct iovec
) * count
);
389 state
->count
= count
;
/* may complete synchronously (zero bytes / immediate error) */
391 tstream_cli_np_writev_write_next(req
);
392 if (!tevent_req_is_in_progress(req
)) {
393 return tevent_req_post(req
, ev
);
399 static void tstream_cli_np_readv_trans_start(struct tevent_req
*req
);
400 static void tstream_cli_np_writev_write_done(struct tevent_req
*subreq
);
/*
 * Send the next chunk (up to TSTREAM_CLI_NP_MAX_BUF_SIZE) of the
 * pending iovec: coalesce iovec entries into write.buf, then either
 * hand the full fragment to the queued trans read (trans mode) or
 * issue an SMB1 SMBwriteX / SMB2 write.  Completes the request when
 * nothing is left to send.
 */
402 static void tstream_cli_np_writev_write_next(struct tevent_req
*req
)
404 struct tstream_cli_np_writev_state
*state
=
406 struct tstream_cli_np_writev_state
);
407 struct tstream_cli_np
*cli_nps
=
408 tstream_context_data(state
->stream
,
409 struct tstream_cli_np
);
410 struct tevent_req
*subreq
;
/* total bytes still to be written across all remaining iovecs */
414 for (i
=0; i
< state
->count
; i
++) {
415 left
+= state
->vector
[i
].iov_len
;
419 TALLOC_FREE(cli_nps
->write
.buf
);
420 tevent_req_done(req
);
/* build the next fragment buffer, capped at the max fragment size */
424 cli_nps
->write
.ofs
= 0;
425 cli_nps
->write
.left
= MIN(left
, TSTREAM_CLI_NP_MAX_BUF_SIZE
);
426 cli_nps
->write
.buf
= talloc_realloc(cli_nps
, cli_nps
->write
.buf
,
427 uint8_t, cli_nps
->write
.left
);
428 if (tevent_req_nomem(cli_nps
->write
.buf
, req
)) {
433 * copy the pending buffer first
435 while (cli_nps
->write
.left
> 0 && state
->count
> 0) {
436 uint8_t *base
= (uint8_t *)state
->vector
[0].iov_base
;
437 size_t len
= MIN(cli_nps
->write
.left
, state
->vector
[0].iov_len
);
439 memcpy(cli_nps
->write
.buf
+ cli_nps
->write
.ofs
, base
, len
);
442 state
->vector
[0].iov_base
= base
;
443 state
->vector
[0].iov_len
-= len
;
445 cli_nps
->write
.ofs
+= len
;
446 cli_nps
->write
.left
-= len
;
448 if (state
->vector
[0].iov_len
== 0) {
/* trans mode: park the write until the matching readv arrives */
456 if (cli_nps
->trans
.active
&& state
->count
== 0) {
457 cli_nps
->trans
.active
= false;
458 cli_nps
->trans
.write_req
= req
;
/* a readv is already waiting: start the combined trans call now */
462 if (cli_nps
->trans
.read_req
&& state
->count
== 0) {
463 cli_nps
->trans
.write_req
= req
;
464 tstream_cli_np_readv_trans_start(cli_nps
->trans
.read_req
);
468 if (cli_nps
->is_smb1
) {
469 subreq
= cli_write_andx_send(state
, state
->ev
, cli_nps
->cli
,
471 8, /* 8 means message mode. */
474 cli_nps
->write
.ofs
); /* size */
476 subreq
= smb2cli_write_send(state
, state
->ev
, cli_nps
->cli
,
477 cli_nps
->write
.ofs
, /* length */
479 cli_nps
->fid_persistent
,
480 cli_nps
->fid_volatile
,
481 0, /* remaining_bytes */
485 if (tevent_req_nomem(subreq
, req
)) {
488 tevent_req_set_callback(subreq
,
489 tstream_cli_np_writev_write_done
,
493 static void tstream_cli_np_writev_disconnect_now(struct tevent_req
*req
,
495 const char *location
);
/*
 * Completion of one SMB1/SMB2 write: verify the whole fragment was
 * accepted (short writes tear down the connection) and continue with
 * the next chunk.
 */
497 static void tstream_cli_np_writev_write_done(struct tevent_req
*subreq
)
499 struct tevent_req
*req
=
500 tevent_req_callback_data(subreq
, struct tevent_req
);
501 struct tstream_cli_np_writev_state
*state
=
502 tevent_req_data(req
, struct tstream_cli_np_writev_state
);
503 struct tstream_cli_np
*cli_nps
=
504 tstream_context_data(state
->stream
,
505 struct tstream_cli_np
);
509 if (cli_nps
->is_smb1
) {
510 status
= cli_write_andx_recv(subreq
, &written
);
512 status
= smb2cli_write_recv(subreq
);
513 written
= cli_nps
->write
.ofs
; // TODO: get the value from the server
516 if (!NT_STATUS_IS_OK(status
)) {
517 tstream_cli_np_writev_disconnect_now(req
, EIO
, __location__
);
/* a partial write would desynchronise the message stream */
521 if (written
!= cli_nps
->write
.ofs
) {
522 tstream_cli_np_writev_disconnect_now(req
, EIO
, __location__
);
526 tstream_cli_np_writev_write_next(req
);
529 static void tstream_cli_np_writev_disconnect_done(struct tevent_req
*subreq
);
/*
 * Fatal writev error: remember the errno/location, asynchronously
 * close the pipe handle, then report the original error from
 * tstream_cli_np_writev_disconnect_done().  If the connection is
 * already dead (or the close cannot be queued) fail immediately.
 */
531 static void tstream_cli_np_writev_disconnect_now(struct tevent_req
*req
,
533 const char *location
)
535 struct tstream_cli_np_writev_state
*state
=
537 struct tstream_cli_np_writev_state
);
538 struct tstream_cli_np
*cli_nps
=
539 tstream_context_data(state
->stream
,
540 struct tstream_cli_np
);
541 struct tevent_req
*subreq
;
543 state
->error
.val
= error
;
544 state
->error
.location
= location
;
546 if (!cli_state_is_connected(cli_nps
->cli
)) {
547 /* return the original error */
548 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
552 if (cli_nps
->is_smb1
) {
553 subreq
= cli_close_send(state
, state
->ev
, cli_nps
->cli
,
556 subreq
= smb2cli_close_send(state
, state
->ev
,
558 cli_nps
->cli
->timeout
,
559 cli_nps
->cli
->smb2
.session
,
560 cli_nps
->cli
->smb2
.tid
,
562 cli_nps
->fid_persistent
,
563 cli_nps
->fid_volatile
);
565 if (subreq
== NULL
) {
566 /* return the original error */
567 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
570 tevent_req_set_callback(subreq
,
571 tstream_cli_np_writev_disconnect_done
,
/*
 * Close-after-error completed: the close status is intentionally
 * ignored and the writev request fails with the error captured in
 * tstream_cli_np_writev_disconnect_now().
 */
575 static void tstream_cli_np_writev_disconnect_done(struct tevent_req
*subreq
)
577 struct tevent_req
*req
=
578 tevent_req_callback_data(subreq
, struct tevent_req
);
579 struct tstream_cli_np_writev_state
*state
=
580 tevent_req_data(req
, struct tstream_cli_np_writev_state
);
581 struct tstream_cli_np
*cli_nps
=
582 tstream_context_data(state
->stream
, struct tstream_cli_np
);
584 if (cli_nps
->is_smb1
) {
585 cli_close_recv(subreq
);
587 smb2cli_close_recv(subreq
);
593 /* return the original error */
594 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
/*
 * Collect the result of an async writev: 0 on success, -1 with
 * *perrno set on failure (tsocket convention).
 */
597 static int tstream_cli_np_writev_recv(struct tevent_req
*req
,
600 struct tstream_cli_np_writev_state
*state
=
602 struct tstream_cli_np_writev_state
);
605 ret
= tsocket_simple_int_recv(req
, perrno
);
610 tevent_req_received(req
);
/*
 * State of one async readv on the pipe stream.
 * NOTE(review): extract is missing the count member, the trans
 * sub-struct wrapper and the error {val, location} sub-struct.
 */
614 struct tstream_cli_np_readv_state
{
615 struct tstream_context
*stream
;
616 struct tevent_context
*ev
;
/* private copy of the caller's iovec, filled as data arrives */
618 struct iovec
*vector
;
/* immediate event used to defer work out of trans completion */
624 struct tevent_immediate
*im
;
/* __location__ of the failure, for _tevent_req_error() */
629 const char *location
;
/*
 * talloc destructor: make sure the stream no longer points at this
 * readv request once its state is freed (e.g. caller cancellation).
 */
633 static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state
*state
)
635 struct tstream_cli_np
*cli_nps
=
636 tstream_context_data(state
->stream
,
637 struct tstream_cli_np
);
639 cli_nps
->trans
.read_req
= NULL
;
644 static void tstream_cli_np_readv_read_next(struct tevent_req
*req
);
/*
 * Start an async readv on the pipe.  Copies the caller's iovec so it
 * can be filled destructively and kicks off the first read via
 * tstream_cli_np_readv_read_next().
 */
646 static struct tevent_req
*tstream_cli_np_readv_send(TALLOC_CTX
*mem_ctx
,
647 struct tevent_context
*ev
,
648 struct tstream_context
*stream
,
649 struct iovec
*vector
,
652 struct tevent_req
*req
;
653 struct tstream_cli_np_readv_state
*state
;
654 struct tstream_cli_np
*cli_nps
=
655 tstream_context_data(stream
, struct tstream_cli_np
);
657 req
= tevent_req_create(mem_ctx
, &state
,
658 struct tstream_cli_np_readv_state
);
662 state
->stream
= stream
;
666 talloc_set_destructor(state
, tstream_cli_np_readv_state_destructor
);
668 if (!cli_state_is_connected(cli_nps
->cli
)) {
669 tevent_req_error(req
, ENOTCONN
);
670 return tevent_req_post(req
, ev
);
674 * we make a copy of the vector so we can change the structure
676 state
->vector
= talloc_array(state
, struct iovec
, count
);
677 if (tevent_req_nomem(state
->vector
, req
)) {
678 return tevent_req_post(req
, ev
);
680 memcpy(state
->vector
, vector
, sizeof(struct iovec
) * count
);
681 state
->count
= count
;
/* may complete synchronously from the buffered read.buf data */
683 tstream_cli_np_readv_read_next(req
);
684 if (!tevent_req_is_in_progress(req
)) {
685 return tevent_req_post(req
, ev
);
691 static void tstream_cli_np_readv_read_done(struct tevent_req
*subreq
);
/*
 * Satisfy as much of the pending iovec as possible from the buffered
 * read.buf, then either finish the request, start a combined trans
 * call (trans mode), or issue a fresh SMB1 SMBreadX / SMB2 read for
 * the next fragment.
 */
693 static void tstream_cli_np_readv_read_next(struct tevent_req
*req
)
695 struct tstream_cli_np_readv_state
*state
=
697 struct tstream_cli_np_readv_state
);
698 struct tstream_cli_np
*cli_nps
=
699 tstream_context_data(state
->stream
,
700 struct tstream_cli_np
);
701 struct tevent_req
*subreq
;
704 * copy the pending buffer first
706 while (cli_nps
->read
.left
> 0 && state
->count
> 0) {
707 uint8_t *base
= (uint8_t *)state
->vector
[0].iov_base
;
708 size_t len
= MIN(cli_nps
->read
.left
, state
->vector
[0].iov_len
);
710 memcpy(base
, cli_nps
->read
.buf
+ cli_nps
->read
.ofs
, len
);
713 state
->vector
[0].iov_base
= base
;
714 state
->vector
[0].iov_len
-= len
;
716 cli_nps
->read
.ofs
+= len
;
717 cli_nps
->read
.left
-= len
;
719 if (state
->vector
[0].iov_len
== 0) {
/* buffer fully drained: release it */
727 if (cli_nps
->read
.left
== 0) {
728 TALLOC_FREE(cli_nps
->read
.buf
);
731 if (state
->count
== 0) {
732 tevent_req_done(req
);
/* trans mode: park the read until the matching writev arrives */
736 if (cli_nps
->trans
.active
) {
737 cli_nps
->trans
.active
= false;
738 cli_nps
->trans
.read_req
= req
;
/* a writev is already parked: start the combined trans call now */
742 if (cli_nps
->trans
.write_req
) {
743 cli_nps
->trans
.read_req
= req
;
744 tstream_cli_np_readv_trans_start(req
);
748 if (cli_nps
->is_smb1
) {
749 subreq
= cli_read_andx_send(state
, state
->ev
, cli_nps
->cli
,
752 TSTREAM_CLI_NP_MAX_BUF_SIZE
);
754 subreq
= smb2cli_read_send(state
, state
->ev
,
756 cli_nps
->cli
->timeout
,
757 cli_nps
->cli
->smb2
.session
,
758 cli_nps
->cli
->smb2
.tid
,
759 TSTREAM_CLI_NP_MAX_BUF_SIZE
, /* length */
761 cli_nps
->fid_persistent
,
762 cli_nps
->fid_volatile
,
763 0, /* minimum_count */
764 0); /* remaining_bytes */
766 if (tevent_req_nomem(subreq
, req
)) {
769 tevent_req_set_callback(subreq
,
770 tstream_cli_np_readv_read_done
,
774 static void tstream_cli_np_readv_trans_done(struct tevent_req
*subreq
);
/*
 * Issue the combined write+read round trip for trans mode: an SMB1
 * SMBtrans (TRANSACT_DCERPCCMD setup) or an SMB2
 * FSCTL_NAMED_PIPE_READ_WRITE ioctl carrying write.buf as input and
 * asking for up to TSTREAM_CLI_NP_MAX_BUF_SIZE bytes back.
 */
776 static void tstream_cli_np_readv_trans_start(struct tevent_req
*req
)
778 struct tstream_cli_np_readv_state
*state
=
780 struct tstream_cli_np_readv_state
);
781 struct tstream_cli_np
*cli_nps
=
782 tstream_context_data(state
->stream
,
783 struct tstream_cli_np
);
784 struct tevent_req
*subreq
;
/* immediate used later to wake the parked writev out of line */
786 state
->trans
.im
= tevent_create_immediate(state
);
787 if (tevent_req_nomem(state
->trans
.im
, req
)) {
791 if (cli_nps
->is_smb1
) {
792 subreq
= cli_trans_send(state
, state
->ev
,
797 cli_nps
->trans
.setup
, 2,
802 TSTREAM_CLI_NP_MAX_BUF_SIZE
);
804 DATA_BLOB in_input_buffer
= data_blob_null
;
805 DATA_BLOB in_output_buffer
= data_blob_null
;
807 in_input_buffer
= data_blob_const(cli_nps
->write
.buf
,
810 subreq
= smb2cli_ioctl_send(state
, state
->ev
,
812 cli_nps
->fid_persistent
,
813 cli_nps
->fid_volatile
,
814 FSCTL_NAMED_PIPE_READ_WRITE
,
815 0, /* in_max_input_length */
817 /* in_max_output_length */
818 TSTREAM_CLI_NP_MAX_BUF_SIZE
,
820 SMB2_IOCTL_FLAG_IS_FSCTL
);
822 if (tevent_req_nomem(subreq
, req
)) {
825 tevent_req_set_callback(subreq
,
826 tstream_cli_np_readv_trans_done
,
830 static void tstream_cli_np_readv_disconnect_now(struct tevent_req
*req
,
832 const char *location
);
833 static void tstream_cli_np_readv_trans_next(struct tevent_context
*ctx
,
834 struct tevent_immediate
*im
,
/*
 * Completion of the combined trans call: stash the received fragment
 * into read.buf, complete the parked writev, and continue the readv --
 * via an immediate event when a writev has to be finished first.
 * NT_STATUS_BUFFER_TOO_SMALL only signals "more message data pending"
 * and is treated as success.
 */
837 static void tstream_cli_np_readv_trans_done(struct tevent_req
*subreq
)
839 struct tevent_req
*req
=
840 tevent_req_callback_data(subreq
, struct tevent_req
);
841 struct tstream_cli_np_readv_state
*state
=
842 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
843 struct tstream_cli_np
*cli_nps
=
844 tstream_context_data(state
->stream
, struct tstream_cli_np
);
849 if (cli_nps
->is_smb1
) {
850 status
= cli_trans_recv(subreq
, state
, NULL
, NULL
, 0, NULL
,
852 &rcvbuf
, 0, &received
);
854 DATA_BLOB out_input_buffer
= data_blob_null
;
855 DATA_BLOB out_output_buffer
= data_blob_null
;
857 status
= smb2cli_ioctl_recv(subreq
, state
,
861 /* Note that rcvbuf is not a talloc pointer here */
862 rcvbuf
= out_output_buffer
.data
;
863 received
= out_output_buffer
.length
;
866 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
867 status
= NT_STATUS_OK
;
869 if (!NT_STATUS_IS_OK(status
)) {
870 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
/* server must never exceed the fragment size we asked for */
874 if (received
> TSTREAM_CLI_NP_MAX_BUF_SIZE
) {
875 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
880 tstream_cli_np_readv_disconnect_now(req
, EPIPE
, __location__
);
/* buffer the fragment for the copy loop in readv_read_next() */
884 cli_nps
->read
.ofs
= 0;
885 cli_nps
->read
.left
= received
;
886 cli_nps
->read
.buf
= talloc_array(cli_nps
, uint8_t, received
);
887 if (cli_nps
->read
.buf
== NULL
) {
889 tevent_req_nomem(cli_nps
->read
.buf
, req
);
892 memcpy(cli_nps
->read
.buf
, rcvbuf
, received
);
894 if (cli_nps
->trans
.write_req
== NULL
) {
895 tstream_cli_np_readv_read_next(req
);
/* finish the writev now; resume the readv from the immediate */
899 tevent_schedule_immediate(state
->trans
.im
, state
->ev
,
900 tstream_cli_np_readv_trans_next
, req
);
902 tevent_req_done(cli_nps
->trans
.write_req
);
/*
 * Immediate handler scheduled by tstream_cli_np_readv_trans_done():
 * resume the readv after the parked writev has been completed.
 */
905 static void tstream_cli_np_readv_trans_next(struct tevent_context
*ctx
,
906 struct tevent_immediate
*im
,
909 struct tevent_req
*req
=
910 talloc_get_type_abort(private_data
,
913 tstream_cli_np_readv_read_next(req
);
/*
 * Completion of a plain SMB1/SMB2 read: buffer the received fragment
 * into read.buf and continue filling the caller's iovec.
 * NT_STATUS_BUFFER_TOO_SMALL (message-mode pipe with more data
 * pending) is hidden from the caller and treated as success.
 */
916 static void tstream_cli_np_readv_read_done(struct tevent_req
*subreq
)
918 struct tevent_req
*req
=
919 tevent_req_callback_data(subreq
, struct tevent_req
);
920 struct tstream_cli_np_readv_state
*state
=
921 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
922 struct tstream_cli_np
*cli_nps
=
923 tstream_context_data(state
->stream
, struct tstream_cli_np
);
929 * We must free subreq in this function as there is
930 * a timer event attached to it.
933 if (cli_nps
->is_smb1
) {
934 status
= cli_read_andx_recv(subreq
, &received
, &rcvbuf
);
936 uint32_t data_length
= 0;
937 status
= smb2cli_read_recv(subreq
, state
, &rcvbuf
, &data_length
);
938 received
= data_length
;
941 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
944 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
946 * NT_STATUS_BUFFER_TOO_SMALL means that there's
947 * more data to read when the named pipe is used
948 * in message mode (which is the case here).
950 * But we hide this from the caller.
952 status
= NT_STATUS_OK
;
954 if (!NT_STATUS_IS_OK(status
)) {
956 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
/* server must never exceed the fragment size we asked for */
960 if (received
> TSTREAM_CLI_NP_MAX_BUF_SIZE
) {
962 tstream_cli_np_readv_disconnect_now(req
, EIO
, __location__
);
968 tstream_cli_np_readv_disconnect_now(req
, EPIPE
, __location__
);
972 cli_nps
->read
.ofs
= 0;
973 cli_nps
->read
.left
= received
;
974 cli_nps
->read
.buf
= talloc_array(cli_nps
, uint8_t, received
);
975 if (cli_nps
->read
.buf
== NULL
) {
977 tevent_req_nomem(cli_nps
->read
.buf
, req
);
980 memcpy(cli_nps
->read
.buf
, rcvbuf
, received
);
983 tstream_cli_np_readv_read_next(req
);
986 static void tstream_cli_np_readv_disconnect_done(struct tevent_req
*subreq
);
988 static void tstream_cli_np_readv_error(struct tevent_req
*req
);
/*
 * Fatal readv error: remember the errno/location, asynchronously close
 * the pipe handle, then report the original error via
 * tstream_cli_np_readv_error().  If the connection is already dead (or
 * the close cannot be queued) fail immediately.
 */
990 static void tstream_cli_np_readv_disconnect_now(struct tevent_req
*req
,
992 const char *location
)
994 struct tstream_cli_np_readv_state
*state
=
996 struct tstream_cli_np_readv_state
);
997 struct tstream_cli_np
*cli_nps
=
998 tstream_context_data(state
->stream
,
999 struct tstream_cli_np
);
1000 struct tevent_req
*subreq
;
1002 state
->error
.val
= error
;
1003 state
->error
.location
= location
;
1005 if (!cli_state_is_connected(cli_nps
->cli
)) {
1006 /* return the original error */
1007 tstream_cli_np_readv_error(req
);
1011 if (cli_nps
->is_smb1
) {
1012 subreq
= cli_close_send(state
, state
->ev
, cli_nps
->cli
,
1015 subreq
= smb2cli_close_send(state
, state
->ev
,
1017 cli_nps
->cli
->timeout
,
1018 cli_nps
->cli
->smb2
.session
,
1019 cli_nps
->cli
->smb2
.tid
,
1021 cli_nps
->fid_persistent
,
1022 cli_nps
->fid_volatile
);
1024 if (subreq
== NULL
) {
1025 /* return the original error */
1026 tstream_cli_np_readv_error(req
);
1029 tevent_req_set_callback(subreq
,
1030 tstream_cli_np_readv_disconnect_done
,
/*
 * Close-after-error completed: ignore the close status, mark the
 * stream disconnected (cli = NULL) and report the captured error.
 */
1034 static void tstream_cli_np_readv_disconnect_done(struct tevent_req
*subreq
)
1036 struct tevent_req
*req
=
1037 tevent_req_callback_data(subreq
, struct tevent_req
);
1038 struct tstream_cli_np_readv_state
*state
=
1039 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
1040 struct tstream_cli_np
*cli_nps
=
1041 tstream_context_data(state
->stream
, struct tstream_cli_np
);
1043 if (cli_nps
->is_smb1
) {
1044 cli_close_recv(subreq
);
1046 smb2cli_close_recv(subreq
);
1048 TALLOC_FREE(subreq
);
1050 cli_nps
->cli
= NULL
;
1052 tstream_cli_np_readv_error(req
);
1055 static void tstream_cli_np_readv_error_trigger(struct tevent_context
*ctx
,
1056 struct tevent_immediate
*im
,
1057 void *private_data
);
/*
 * Deliver the captured readv error.  When a writev is parked behind
 * this trans read, fail the writev now with the same error and defer
 * failing the readv itself to an immediate event (a tevent_req must
 * not be completed twice from the same stack).
 */
1059 static void tstream_cli_np_readv_error(struct tevent_req
*req
)
1061 struct tstream_cli_np_readv_state
*state
=
1062 tevent_req_data(req
,
1063 struct tstream_cli_np_readv_state
);
1064 struct tstream_cli_np
*cli_nps
=
1065 tstream_context_data(state
->stream
,
1066 struct tstream_cli_np
);
1068 if (cli_nps
->trans
.write_req
== NULL
) {
1069 /* return the original error */
1070 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
/* no immediate available: fail the readv directly */
1074 if (state
->trans
.im
== NULL
) {
1075 /* return the original error */
1076 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
1080 tevent_schedule_immediate(state
->trans
.im
, state
->ev
,
1081 tstream_cli_np_readv_error_trigger
, req
);
1083 /* return the original error for writev */
1084 _tevent_req_error(cli_nps
->trans
.write_req
,
1085 state
->error
.val
, state
->error
.location
);
/*
 * Immediate handler scheduled by tstream_cli_np_readv_error(): fail
 * the readv request with the captured error, one event-loop turn after
 * the associated writev was failed.
 */
1088 static void tstream_cli_np_readv_error_trigger(struct tevent_context
*ctx
,
1089 struct tevent_immediate
*im
,
1092 struct tevent_req
*req
=
1093 talloc_get_type_abort(private_data
,
1095 struct tstream_cli_np_readv_state
*state
=
1096 tevent_req_data(req
,
1097 struct tstream_cli_np_readv_state
);
1099 /* return the original error */
1100 _tevent_req_error(req
, state
->error
.val
, state
->error
.location
);
/*
 * Collect the result of an async readv: bytes transferred on success,
 * -1 with *perrno set on failure (tsocket convention).
 */
1103 static int tstream_cli_np_readv_recv(struct tevent_req
*req
,
1106 struct tstream_cli_np_readv_state
*state
=
1107 tevent_req_data(req
, struct tstream_cli_np_readv_state
);
1110 ret
= tsocket_simple_int_recv(req
, perrno
);
1115 tevent_req_received(req
);
/* State for the async disconnect (close of the pipe handle). */
1119 struct tstream_cli_np_disconnect_state
{
1120 struct tstream_context
*stream
;
1123 static void tstream_cli_np_disconnect_done(struct tevent_req
*subreq
);
/*
 * Start an async disconnect: close the pipe handle via SMB1 close or
 * SMB2 close depending on the protocol; fails with ENOTCONN when the
 * connection is already gone.
 */
1125 static struct tevent_req
*tstream_cli_np_disconnect_send(TALLOC_CTX
*mem_ctx
,
1126 struct tevent_context
*ev
,
1127 struct tstream_context
*stream
)
1129 struct tstream_cli_np
*cli_nps
= tstream_context_data(stream
,
1130 struct tstream_cli_np
);
1131 struct tevent_req
*req
;
1132 struct tstream_cli_np_disconnect_state
*state
;
1133 struct tevent_req
*subreq
;
1135 req
= tevent_req_create(mem_ctx
, &state
,
1136 struct tstream_cli_np_disconnect_state
);
1141 state
->stream
= stream
;
1143 if (!cli_state_is_connected(cli_nps
->cli
)) {
1144 tevent_req_error(req
, ENOTCONN
);
1145 return tevent_req_post(req
, ev
);
1148 if (cli_nps
->is_smb1
) {
1149 subreq
= cli_close_send(state
, ev
, cli_nps
->cli
,
1152 subreq
= smb2cli_close_send(state
, ev
, cli_nps
->cli
->conn
,
1153 cli_nps
->cli
->timeout
,
1154 cli_nps
->cli
->smb2
.session
,
1155 cli_nps
->cli
->smb2
.tid
,
1157 cli_nps
->fid_persistent
,
1158 cli_nps
->fid_volatile
);
1160 if (tevent_req_nomem(subreq
, req
)) {
1161 return tevent_req_post(req
, ev
);
1163 tevent_req_set_callback(subreq
, tstream_cli_np_disconnect_done
, req
);
/*
 * Completion of the close issued by disconnect_send(): map any close
 * failure to EIO, otherwise mark the stream disconnected (cli = NULL)
 * and complete the request.
 */
1168 static void tstream_cli_np_disconnect_done(struct tevent_req
*subreq
)
1170 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
1172 struct tstream_cli_np_disconnect_state
*state
=
1173 tevent_req_data(req
, struct tstream_cli_np_disconnect_state
);
1174 struct tstream_cli_np
*cli_nps
=
1175 tstream_context_data(state
->stream
, struct tstream_cli_np
);
1178 if (cli_nps
->is_smb1
) {
1179 status
= cli_close_recv(subreq
);
1181 status
= smb2cli_close_recv(subreq
);
1183 TALLOC_FREE(subreq
);
1184 if (!NT_STATUS_IS_OK(status
)) {
1185 tevent_req_error(req
, EIO
);
1189 cli_nps
->cli
= NULL
;
1191 tevent_req_done(req
);
/*
 * Collect the result of an async disconnect: 0 on success, -1 with
 * *perrno set on failure (tsocket convention).
 */
1194 static int tstream_cli_np_disconnect_recv(struct tevent_req
*req
,
1199 ret
= tsocket_simple_int_recv(req
, perrno
);
1201 tevent_req_received(req
);
/*
 * tstream backend vtable wiring the cli_np implementations into the
 * generic tstream API (forward-declared at the top of the file).
 */
1205 static const struct tstream_context_ops tstream_cli_np_ops
= {
1208 .pending_bytes
= tstream_cli_np_pending_bytes
,
1210 .readv_send
= tstream_cli_np_readv_send
,
1211 .readv_recv
= tstream_cli_np_readv_recv
,
1213 .writev_send
= tstream_cli_np_writev_send
,
1214 .writev_recv
= tstream_cli_np_writev_recv
,
1216 .disconnect_send
= tstream_cli_np_disconnect_send
,
1217 .disconnect_recv
= tstream_cli_np_disconnect_recv
,