/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "async_smb.h"
#include "../libcli/smb/smbXcli_base.h"
/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/
static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	uint8_t wct = 12;
	uint32_t min_space;
	uint32_t data_offset;
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) {
		useable_space = 0xFFFFFF - data_offset;

		if (smb1cli_conn_signing_is_active(cli->conn)) {
			return min_space;
		}

		if (smb1cli_conn_encryption_on(cli->conn)) {
			return min_space;
		}

		return useable_space;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) {
		/*
		 * Note: CAP_LARGE_READX also works with signing
		 */
		useable_space = 0x1FFFF - data_offset;

		useable_space = MIN(useable_space, UINT16_MAX);

		return useable_space;
	}

	return min_space;
}
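
/*
 * Illustrative sketch (not part of the original code): the data_offset
 * computed above is just the fixed SMB1 overhead that sits in front of
 * the read payload.  The helper below redoes the same arithmetic with
 * plain values so the numbers are easy to check; "hdr_vwv" stands for
 * the offset of the first parameter word (HDR_VWV) and is passed in
 * rather than assumed, and the function name is hypothetical.
 */
static size_t example_read_reply_overhead(size_t hdr_vwv, uint8_t wct)
{
	size_t overhead = hdr_vwv;		/* header up to the VWV area */

	overhead += wct * sizeof(uint16_t);	/* the parameter words */
	overhead += sizeof(uint16_t);		/* the byte count field */
	overhead += 1;				/* the pad byte before the data */

	return overhead;
}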
/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/
static size_t cli_write_max_bufsize(struct cli_state *cli,
				    uint16_t write_mode,
				    uint8_t wct)
{
	uint32_t min_space;
	uint32_t data_offset;
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) {
		useable_space = 0xFFFFFF - data_offset;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) {
		useable_space = 0x1FFFF - data_offset;
	} else {
		return min_space;
	}

	if (write_mode != 0) {
		return min_space;
	}

	if (smb1cli_conn_signing_is_active(cli->conn)) {
		return min_space;
	}

	if (smb1cli_conn_encryption_on(cli->conn)) {
		return min_space;
	}

	if (strequal(cli->dev, "LPT1:")) {
		return min_space;
	}

	return useable_space;
}
struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);
struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	uint8_t wct = 10;

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct = 12;
	} else {
		if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
			DEBUG(10, ("cli_read_andx_send got large offset where "
				   "the server does not support it\n"));
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}
struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct tevent_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	NTSTATUS status;
	struct tevent_req *req, *subreq;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}
static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
				     &num_bytes, &bytes);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned. */

	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	tevent_req_done(req);
}
/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
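
/*
 * Illustrative sketch (not part of the original code): as the comment
 * above warns, the buffer returned by cli_read_andx_recv() lives inside
 * "req", so a caller that wants to keep the data has to copy it out
 * before freeing the request.  "example_copy_read_result" is a
 * hypothetical helper, not an existing API.
 */
static NTSTATUS example_copy_read_result(TALLOC_CTX *mem_ctx,
					 struct tevent_req *req,
					 uint8_t **pbuf, ssize_t *preceived)
{
	uint8_t *rcvbuf = NULL;
	ssize_t received = 0;
	NTSTATUS status;

	status = cli_read_andx_recv(req, &received, &rcvbuf);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Copy before TALLOC_FREE(req): rcvbuf points into req's memory. */
	*pbuf = talloc_memdup(mem_ctx, rcvbuf, received);
	if (*pbuf == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	*preceived = received;

	TALLOC_FREE(req);
	return NT_STATUS_OK;
}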
struct cli_pull_chunk;

struct cli_pull_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	off_t size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;
	off_t next_offset;
	off_t remaining;

	/*
	 * How many bytes did we push into "sink"?
	 */
	off_t pushed;

	/*
	 * Outstanding requests
	 *
	 * The maximum is 256:
	 * - which would be a window of 256 MByte
	 *   for SMB2 with multi-credit
	 *   or smb1 unix extensions.
	 */
	uint16_t max_chunks;
	uint16_t num_chunks;
	uint16_t num_waiting;
	struct cli_pull_chunk *chunks;
};

struct cli_pull_chunk {
	struct cli_pull_chunk *prev, *next;
	struct tevent_req *req;/* This is the main request! Not the subreq */
	struct tevent_req *subreq;
	off_t ofs;
	uint8_t *buf;
	size_t total_size;
	size_t tmp_size;
	bool done;
};

static void cli_pull_setup_chunks(struct tevent_req *req);
static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk);
static void cli_pull_chunk_done(struct tevent_req *subreq);
/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 off_t size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	size_t page_size = 1024;
	uint64_t tmp64;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;
	state->next_offset = start_offset;
	state->remaining = size;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		state->chunk_size = smb2cli_conn_max_read_size(cli->conn);
	} else {
		state->chunk_size = cli_read_max_bufsize(cli);
	}
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);
	}

	if (window_size == 0) {
		/*
		 * We use 16 MByte as default window size.
		 */
		window_size = 16 * 1024 * 1024;
	}

	tmp64 = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		tmp64 += 1;
	}
	tmp64 = MAX(tmp64, 1);
	tmp64 = MIN(tmp64, 256);
	state->max_chunks = tmp64;

	/*
	 * We defer the callback because of the complex
	 * substate/subfunction logic
	 */
	tevent_req_defer_callback(req, ev);

	cli_pull_setup_chunks(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
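
/*
 * Illustrative sketch (not part of the original code): the window
 * handling above just rounds window_size up to a whole number of chunks
 * and clamps the result to [1, 256].  For example, the default 16 MByte
 * window with a 64 KByte chunk size yields 256 chunks in flight.  The
 * hypothetical helper below redoes that arithmetic in isolation; it
 * assumes chunk_size is non-zero, as the caller above does.
 */
static uint16_t example_pull_max_chunks(size_t window_size, size_t chunk_size)
{
	uint64_t chunks = window_size / chunk_size;

	if ((window_size % chunk_size) > 0) {
		chunks += 1;
	}
	chunks = MAX(chunks, 1);
	chunks = MIN(chunks, 256);

	return (uint16_t)chunks;
}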
static void cli_pull_setup_chunks(struct tevent_req *req)
{
	struct cli_pull_state *state =
		tevent_req_data(req,
		struct cli_pull_state);
	struct cli_pull_chunk *chunk, *next = NULL;
	size_t i;

	for (chunk = state->chunks; chunk; chunk = next) {
		/*
		 * Note that chunk might be removed from this call.
		 */
		next = chunk->next;
		cli_pull_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {
			return;
		}
	}

	for (i = state->num_chunks; i < state->max_chunks; i++) {

		if (state->num_waiting > 0) {
			return;
		}

		if (state->remaining == 0) {
			break;
		}

		chunk = talloc_zero(state, struct cli_pull_chunk);
		if (tevent_req_nomem(chunk, req)) {
			return;
		}
		chunk->req = req;
		chunk->ofs = state->next_offset;
		chunk->total_size = MIN(state->remaining, state->chunk_size);
		state->next_offset += chunk->total_size;
		state->remaining -= chunk->total_size;

		DLIST_ADD_END(state->chunks, chunk, NULL);
		state->num_chunks++;
		state->num_waiting++;

		cli_pull_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {
			return;
		}
	}

	if (state->remaining > 0) {
		return;
	}

	if (state->num_chunks > 0) {
		return;
	}

	tevent_req_done(req);
}
static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk)
{
	struct tevent_req *req = chunk->req;
	struct cli_pull_state *state =
		tevent_req_data(req,
		struct cli_pull_state);
	bool ok;
	off_t ofs;
	size_t size;

	if (chunk->done) {
		NTSTATUS status;

		if (chunk != state->chunks) {
			/*
			 * this chunk is not the
			 * first one in the list.
			 *
			 * which means we should not
			 * push it into the sink yet.
			 */
			return;
		}

		if (chunk->tmp_size == 0) {
			/*
			 * we got a short read, we're done
			 */
			tevent_req_done(req);
			return;
		}

		status = state->sink((char *)chunk->buf,
				     chunk->tmp_size,
				     state->priv);
		if (tevent_req_nterror(req, status)) {
			return;
		}
		state->pushed += chunk->tmp_size;

		if (chunk->tmp_size < chunk->total_size) {
			/*
			 * we got a short read, we're done
			 */
			tevent_req_done(req);
			return;
		}

		DLIST_REMOVE(state->chunks, chunk);
		SMB_ASSERT(state->num_chunks > 0);
		state->num_chunks--;
		TALLOC_FREE(chunk);

		return;
	}

	if (chunk->subreq != NULL) {
		return;
	}
	SMB_ASSERT(state->num_waiting > 0);

	ofs = chunk->ofs + chunk->tmp_size;
	size = chunk->total_size - chunk->tmp_size;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		uint32_t max_size;

		ok = smb2cli_conn_req_possible(state->cli->conn, &max_size);
		if (!ok) {
			return;
		}

		/*
		 * downgrade depending on the available credits
		 */
		size = MIN(max_size, size);

		chunk->subreq = cli_smb2_read_send(chunk,
						   state->ev,
						   state->cli,
						   state->fnum,
						   ofs,
						   size);
		if (tevent_req_nomem(chunk->subreq, req)) {
			return;
		}
	} else {
		ok = smb1cli_conn_req_possible(state->cli->conn);
		if (!ok) {
			return;
		}

		chunk->subreq = cli_read_andx_send(chunk,
						   state->ev,
						   state->cli,
						   state->fnum,
						   ofs,
						   size);
		if (tevent_req_nomem(chunk->subreq, req)) {
			return;
		}
	}
	tevent_req_set_callback(chunk->subreq,
				cli_pull_chunk_done,
				req);

	state->num_waiting--;
	return;
}
static void cli_pull_chunk_done(struct tevent_req *subreq)
{
	struct cli_pull_chunk *chunk =
		tevent_req_callback_data(subreq,
		struct cli_pull_chunk);
	struct tevent_req *req = chunk->req;
	struct cli_pull_state *state =
		tevent_req_data(req,
		struct cli_pull_state);
	NTSTATUS status;
	size_t expected = chunk->total_size - chunk->tmp_size;
	ssize_t received = 0;
	uint8_t *buf = NULL;

	chunk->subreq = NULL;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		status = cli_smb2_read_recv(subreq, &received, &buf);
	} else {
		status = cli_read_andx_recv(subreq, &received, &buf);
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_END_OF_FILE)) {
		received = 0;
		status = NT_STATUS_OK;
	}
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (received > expected) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	if (received == 0) {
		/*
		 * We got EOF we're done
		 */
		chunk->done = true;
		cli_pull_setup_chunks(req);
		return;
	}

	if (received == chunk->total_size) {
		/*
		 * We got it in the first run.
		 *
		 * We don't call TALLOC_FREE(subreq)
		 * here and keep the returned buffer.
		 */
		chunk->buf = buf;
	} else if (chunk->buf == NULL) {
		chunk->buf = talloc_array(chunk, uint8_t, chunk->total_size);
		if (tevent_req_nomem(chunk->buf, req)) {
			return;
		}
	}

	if (received != chunk->total_size) {
		uint8_t *p = chunk->buf + chunk->tmp_size;
		memcpy(p, buf, received);
		TALLOC_FREE(subreq);
	}

	chunk->tmp_size += received;

	if (chunk->tmp_size == chunk->total_size) {
		chunk->done = true;
	} else {
		state->num_waiting++;
	}

	cli_pull_setup_chunks(req);
}
NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}
	*received = state->pushed;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, off_t size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, off_t *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	return status;
}
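
/*
 * Illustrative sketch (not part of the original code): a minimal "sink"
 * callback for cli_pull()/cli_pull_send() that appends the incoming
 * chunks to a growing talloc buffer.  The struct and function names are
 * hypothetical; they only show the expected callback contract: consume
 * exactly "n" bytes and return an NTSTATUS.  A caller would pass
 * example_pull_sink as "sink" and a pointer to a zero-initialized
 * example_pull_buffer (with mem_ctx filled in) as "priv".
 */
struct example_pull_buffer {
	TALLOC_CTX *mem_ctx;
	uint8_t *data;
	size_t length;
};

static NTSTATUS example_pull_sink(char *buf, size_t n, void *priv)
{
	struct example_pull_buffer *b = (struct example_pull_buffer *)priv;
	uint8_t *tmp;

	tmp = talloc_realloc(b->mem_ctx, b->data, uint8_t, b->length + n);
	if (tmp == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	memcpy(tmp + b->length, buf, n);
	b->data = tmp;
	b->length += n;

	return NT_STATUS_OK;
}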
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
		  char *buf, off_t offset, size_t size,
		  size_t *nread)
{
	NTSTATUS status;
	off_t ret = 0;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (nread) {
		*nread = ret;
	}

	return NT_STATUS_OK;
}
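
/*
 * Illustrative sketch (not part of the original code): typical use of
 * cli_read() against an already opened fnum.  The buffer size and
 * offset are arbitrary example values and the helper name is
 * hypothetical.
 */
static NTSTATUS example_read_header(struct cli_state *cli, uint16_t fnum)
{
	char buf[512];
	size_t nread = 0;
	NTSTATUS status;

	status = cli_read(cli, fnum, buf, 0, sizeof(buf), &nread);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* nread may be smaller than sizeof(buf) near end of file */
	DEBUG(10, ("read %zu bytes from the start of the file\n", nread));
	return NT_STATUS_OK;
}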
/****************************************************************************
 Write to a file using SMBwrite, without bypassing 0 byte writes.
****************************************************************************/

NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
		      off_t offset, size_t size1, size_t *ptotal)
{
	uint8_t *bytes;
	ssize_t total = 0;

	/*
	 * 3 bytes prefix
	 */

	bytes = talloc_array(talloc_tos(), uint8_t, 3);
	if (bytes == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	bytes[0] = 1;

	do {
		uint32_t usable_space = cli_state_available_size(cli, 48);
		size_t size = MIN(size1, usable_space);
		struct tevent_req *req;
		uint16_t vwv[5];
		uint16_t *ret_vwv;
		NTSTATUS status;

		SSVAL(vwv+0, 0, fnum);
		SSVAL(vwv+1, 0, size);
		SIVAL(vwv+2, 0, offset);
		SSVAL(vwv+4, 0, 0);

		bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
				       size+3);
		if (bytes == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		SSVAL(bytes, 1, size);
		memcpy(bytes + 3, buf + total, size);

		status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
				 size+3, bytes, &req, 1, NULL, &ret_vwv,
				 NULL, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			TALLOC_FREE(bytes);
			return status;
		}

		size = SVAL(ret_vwv+0, 0);
		TALLOC_FREE(req);
		if (size == 0) {
			break;
		}
		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	TALLOC_FREE(bytes);

	if (ptotal != NULL) {
		*ptotal = total;
	}
	return NT_STATUS_OK;
}
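
/*
 * Illustrative sketch (not part of the original code): cli_smbwrite()
 * loops internally until the whole buffer has been sent, so a caller
 * only checks the final status and (optionally) the total byte count.
 * "example_smbwrite_string" is a hypothetical helper.
 */
static NTSTATUS example_smbwrite_string(struct cli_state *cli, uint16_t fnum,
					const char *s)
{
	size_t total = 0;
	NTSTATUS status;

	status = cli_smbwrite(cli, fnum, discard_const_p(char, s), 0,
			      strlen(s), &total);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	DEBUG(10, ("wrote %zu bytes via SMBwrite\n", total));
	return NT_STATUS_OK;
}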
/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);
struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct tevent_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode, wct);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	state->size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (state->size>>16));
	SSVAL(vwv+10, 0, state->size);

	SSVAL(vwv+11, 0,
	      smb1cli_req_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = discard_const_p(void, buf);
	state->iov[1].iov_len = state->size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}
struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}
static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	NTSTATUS status;

	status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv,
			      NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	if (state->size > UINT16_MAX) {
		/*
		 * It is important that we only set the
		 * high bits if we asked for a large write.
		 *
		 * OS/2 print shares get this wrong and may send
		 * garbage in the high 16 bits.
		 */
		state->written |= SVAL(vwv+4, 0)<<16;
	}
	tevent_req_done(req);
}
NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}
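
/*
 * Illustrative sketch (not part of the original code): minimal
 * synchronous use of cli_write_andx_send()/cli_write_andx_recv(),
 * following the same frame/event-context pattern the wrappers in this
 * file use.  "example_write_andx" is a hypothetical helper, not an
 * existing API.
 */
static NTSTATUS example_write_andx(struct cli_state *cli, uint16_t fnum,
				   const uint8_t *buf, off_t offset,
				   size_t size, size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev = NULL;
	struct tevent_req *req = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_write_andx_send(frame, ev, cli, fnum, 0, buf, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	/* Note: the server may accept less than "size"; check *pwritten. */
	status = cli_write_andx_recv(req, pwritten);
fail:
	TALLOC_FREE(frame);
	return status;
}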
struct cli_writeall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);
static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum, uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}
static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}
static NTSTATUS cli_writeall_recv(struct tevent_req *req,
				  size_t *pwritten)
{
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (pwritten != NULL) {
		*pwritten = state->written;
	}
	return NT_STATUS_OK;
}
NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		      const uint8_t *buf, off_t offset, size_t size,
		      size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}
	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	if (smbXcli_conn_protocol(cli->conn) >= PROTOCOL_SMB2_02) {
		req = cli_smb2_writeall_send(frame, ev, cli, fnum, mode,
					     buf, offset, size);
	} else {
		req = cli_writeall_send(frame, ev, cli, fnum, mode,
					buf, offset, size);
	}
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}
	if (smbXcli_conn_protocol(cli->conn) >= PROTOCOL_SMB2_02) {
		status = cli_smb2_writeall_recv(req, pwritten);
	} else {
		status = cli_writeall_recv(req, pwritten);
	}
 fail:
	TALLOC_FREE(frame);
	return status;
}
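
/*
 * Illustrative sketch (not part of the original code): cli_writeall()
 * keeps issuing write&x (or SMB2 write) requests until either the whole
 * buffer has been accepted or an error occurs, so unlike a single
 * cli_write_andx round trip there is no short-write case to handle.
 * "example_writeall" is a hypothetical helper.
 */
static NTSTATUS example_writeall(struct cli_state *cli, uint16_t fnum,
				 const uint8_t *data, size_t length)
{
	size_t written = 0;
	NTSTATUS status;

	status = cli_writeall(cli, fnum, 0, /* write_mode */
			      data, 0 /* offset */, length, &written);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* on success the whole buffer has been written */
	SMB_ASSERT(written == length);
	return NT_STATUS_OK;
}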
struct cli_push_chunk;

struct cli_push_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 *
	 * The maximum is 256:
	 * - which would be a window of 256 MByte
	 *   for SMB2 with multi-credit
	 *   or smb1 unix extensions.
	 */
	uint16_t max_chunks;
	uint16_t num_chunks;
	uint16_t num_waiting;
	struct cli_push_chunk *chunks;
};

struct cli_push_chunk {
	struct cli_push_chunk *prev, *next;
	struct tevent_req *req;/* This is the main request! Not the subreq */
	struct tevent_req *subreq;
	off_t ofs;
	uint8_t *buf;
	size_t total_size;
	size_t tmp_size;
	bool done;
};

static void cli_push_setup_chunks(struct tevent_req *req);
static void cli_push_chunk_ship(struct cli_push_chunk *chunk);
static void cli_push_chunk_done(struct tevent_req *subreq);
struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	size_t page_size = 1024;
	uint64_t tmp64;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->next_offset = start_offset;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		state->chunk_size = smb2cli_conn_max_write_size(cli->conn);
	} else {
		state->chunk_size = cli_write_max_bufsize(cli, mode, 14);
	}
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);
	}

	if (window_size == 0) {
		/*
		 * We use 16 MByte as default window size.
		 */
		window_size = 16 * 1024 * 1024;
	}

	tmp64 = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		tmp64 += 1;
	}
	tmp64 = MAX(tmp64, 1);
	tmp64 = MIN(tmp64, 256);
	state->max_chunks = tmp64;

	/*
	 * We defer the callback because of the complex
	 * substate/subfunction logic
	 */
	tevent_req_defer_callback(req, ev);

	cli_push_setup_chunks(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void cli_push_setup_chunks(struct tevent_req *req)
{
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	struct cli_push_chunk *chunk, *next = NULL;
	size_t i;

	for (chunk = state->chunks; chunk; chunk = next) {
		/*
		 * Note that chunk might be removed from this call.
		 */
		next = chunk->next;
		cli_push_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {
			return;
		}
	}

	for (i = state->num_chunks; i < state->max_chunks; i++) {

		if (state->num_waiting > 0) {
			return;
		}

		if (state->eof) {
			break;
		}

		chunk = talloc_zero(state, struct cli_push_chunk);
		if (tevent_req_nomem(chunk, req)) {
			return;
		}
		chunk->req = req;
		chunk->ofs = state->next_offset;
		chunk->buf = talloc_array(chunk,
					  uint8_t,
					  state->chunk_size);
		if (tevent_req_nomem(chunk->buf, req)) {
			return;
		}
		chunk->total_size = state->source(chunk->buf,
						  state->chunk_size,
						  state->priv);
		if (chunk->total_size == 0) {
			/* nothing to send */
			TALLOC_FREE(chunk);
			state->eof = true;
			break;
		}
		state->next_offset += chunk->total_size;

		DLIST_ADD_END(state->chunks, chunk, NULL);
		state->num_chunks++;
		state->num_waiting++;

		cli_push_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {
			return;
		}
	}

	if (!state->eof) {
		return;
	}

	if (state->num_chunks > 0) {
		return;
	}

	tevent_req_done(req);
}
static void cli_push_chunk_ship(struct cli_push_chunk *chunk)
{
	struct tevent_req *req = chunk->req;
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	bool ok;
	const uint8_t *buf;
	off_t ofs;
	size_t size;

	if (chunk->done) {
		DLIST_REMOVE(state->chunks, chunk);
		SMB_ASSERT(state->num_chunks > 0);
		state->num_chunks--;
		TALLOC_FREE(chunk);

		return;
	}

	if (chunk->subreq != NULL) {
		return;
	}
	SMB_ASSERT(state->num_waiting > 0);

	buf = chunk->buf + chunk->tmp_size;
	ofs = chunk->ofs + chunk->tmp_size;
	size = chunk->total_size - chunk->tmp_size;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		uint32_t max_size;

		ok = smb2cli_conn_req_possible(state->cli->conn, &max_size);
		if (!ok) {
			return;
		}

		/*
		 * downgrade depending on the available credits
		 */
		size = MIN(max_size, size);

		chunk->subreq = cli_smb2_write_send(chunk,
						    state->ev,
						    state->cli,
						    state->fnum,
						    state->mode,
						    buf,
						    ofs,
						    size);
		if (tevent_req_nomem(chunk->subreq, req)) {
			return;
		}
	} else {
		ok = smb1cli_conn_req_possible(state->cli->conn);
		if (!ok) {
			return;
		}

		chunk->subreq = cli_write_andx_send(chunk,
						    state->ev,
						    state->cli,
						    state->fnum,
						    state->mode,
						    buf,
						    ofs,
						    size);
		if (tevent_req_nomem(chunk->subreq, req)) {
			return;
		}
	}
	tevent_req_set_callback(chunk->subreq,
				cli_push_chunk_done,
				req);

	state->num_waiting--;
	return;
}
static void cli_push_chunk_done(struct tevent_req *subreq)
{
	struct cli_push_chunk *chunk =
		tevent_req_callback_data(subreq,
		struct cli_push_chunk);
	struct tevent_req *req = chunk->req;
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	NTSTATUS status;
	size_t expected = chunk->total_size - chunk->tmp_size;
	size_t written;

	chunk->subreq = NULL;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		status = cli_smb2_write_recv(subreq, &written);
	} else {
		status = cli_write_andx_recv(subreq, &written);
	}
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (written > expected) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	if (written == 0) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	chunk->tmp_size += written;

	if (chunk->tmp_size == chunk->total_size) {
		chunk->done = true;
	} else {
		state->num_waiting++;
	}

	cli_push_setup_chunks(req);
}
NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	return status;
}
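
/*
 * Illustrative sketch (not part of the original code): a minimal
 * "source" callback for cli_push()/cli_push_send() that feeds data out
 * of an in-memory buffer.  Returning 0 signals end of data.  The struct
 * and function names are hypothetical.  A caller would stream the
 * buffer with something like
 * cli_push(cli, fnum, 0, 0, 0, example_push_source, &b).
 */
struct example_push_buffer {
	const uint8_t *data;
	size_t length;
	size_t offset;
};

static size_t example_push_source(uint8_t *buf, size_t n, void *priv)
{
	struct example_push_buffer *b = (struct example_push_buffer *)priv;
	size_t thistime = MIN(n, b->length - b->offset);

	memcpy(buf, b->data + b->offset, thistime);
	b->offset += thistime;

	return thistime;
}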
#define SPLICE_BLOCK_SIZE (1024 * 1024)

static NTSTATUS cli_splice_fallback(TALLOC_CTX *frame,
				    struct cli_state *srccli,
				    struct cli_state *dstcli,
				    uint16_t src_fnum, uint16_t dst_fnum,
				    off_t initial_size,
				    off_t src_offset, off_t dst_offset,
				    off_t *written,
				    int (*splice_cb)(off_t n, void *priv),
				    void *priv)
{
	NTSTATUS status;
	uint8_t *buf = talloc_size(frame, SPLICE_BLOCK_SIZE);
	size_t nread;
	off_t remaining = initial_size;

	if (buf == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	*written = 0;

	while (remaining) {
		status = cli_read(srccli, src_fnum,
				  (char *)buf, src_offset, SPLICE_BLOCK_SIZE,
				  &nread);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}

		status = cli_writeall(dstcli, dst_fnum, 0,
				      buf, dst_offset, nread, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}

		if ((src_offset > INT64_MAX - nread) ||
		    (dst_offset > INT64_MAX - nread)) {
			return NT_STATUS_FILE_TOO_LARGE;
		}
		src_offset += nread;
		dst_offset += nread;
		*written += nread;
		if (remaining < nread) {
			return NT_STATUS_INTERNAL_ERROR;
		}
		remaining -= nread;
		if (!splice_cb(initial_size - remaining, priv)) {
			return NT_STATUS_CANCELLED;
		}
	}

	return NT_STATUS_OK;
}
NTSTATUS cli_splice(struct cli_state *srccli, struct cli_state *dstcli,
		    uint16_t src_fnum, uint16_t dst_fnum,
		    off_t size,
		    off_t src_offset, off_t dst_offset,
		    off_t *written,
		    int (*splice_cb)(off_t n, void *priv), void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	bool retry_fallback = false;

	if (smbXcli_conn_has_async_calls(srccli->conn) ||
	    smbXcli_conn_has_async_calls(dstcli->conn))
	{
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto out;
	}

	do {
		ev = samba_tevent_context_init(frame);
		if (ev == NULL) {
			goto out;
		}
		if (srccli == dstcli &&
		    smbXcli_conn_protocol(srccli->conn) >= PROTOCOL_SMB2_02 &&
		    !retry_fallback)
		{
			req = cli_smb2_splice_send(frame, ev,
						   srccli, src_fnum, dst_fnum,
						   size, src_offset, dst_offset,
						   splice_cb, priv);
		} else {
			status = cli_splice_fallback(frame,
						     srccli, dstcli,
						     src_fnum, dst_fnum,
						     size,
						     src_offset, dst_offset,
						     written,
						     splice_cb, priv);
			goto out;
		}
		if (req == NULL) {
			goto out;
		}
		if (!tevent_req_poll(req, ev)) {
			status = map_nt_error_from_unix(errno);
			goto out;
		}
		status = cli_smb2_splice_recv(req, written);

		/*
		 * Older versions of Samba don't support
		 * FSCTL_SRV_COPYCHUNK_WRITE so use the fallback.
		 */
		retry_fallback = NT_STATUS_EQUAL(status, NT_STATUS_INVALID_DEVICE_REQUEST);
	} while (retry_fallback);

 out:
	TALLOC_FREE(frame);
	return status;
}
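
/*
 * Illustrative sketch (not part of the original code): a progress
 * callback for cli_splice().  The callback receives the number of bytes
 * copied so far and returns non-zero to continue or 0 to cancel the
 * copy (the fallback path above maps a 0 return to NT_STATUS_CANCELLED).
 * "example_splice_progress" is a hypothetical helper; "priv" is assumed
 * to point at an off_t byte limit chosen by the caller.
 */
static int example_splice_progress(off_t n, void *priv)
{
	off_t *limit = (off_t *)priv;

	DEBUG(10, ("spliced %ju bytes so far\n", (uintmax_t)n));

	/* keep going as long as we are below the caller's limit */
	return (n <= *limit) ? 1 : 0;
}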
);