/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "async_smb.h"
#include "../libcli/smb/smbXcli_base.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) {
		useable_space = 0xFFFFFF - data_offset;

		if (smb1cli_conn_signing_is_active(cli->conn)) {

		if (smb1cli_conn_encryption_on(cli->conn)) {

	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) {
		/*
		 * Note: CAP_LARGE_READX also works with signing
		 */
		useable_space = 0x1FFFF - data_offset;

		useable_space = MIN(useable_space, UINT16_MAX);

/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli,
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) {
		useable_space = 0xFFFFFF - data_offset;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) {
		useable_space = 0x1FFFF - data_offset;

	if (write_mode != 0) {

	if (smb1cli_conn_signing_is_active(cli->conn)) {

	if (smb1cli_conn_encryption_on(cli->conn)) {

	if (strequal(cli->dev, "LPT1:")) {

	return useable_space;

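
/*
 * The two helpers above are what the transfer code further below uses to
 * pick a per-request chunk size. A minimal sketch of that pattern
 * (illustrative only, not part of the build; "cli" is assumed to be a
 * connected struct cli_state):
 */
#if 0
	size_t page_size = 1024;
	size_t chunk_size = cli_read_max_bufsize(cli);

	/* Round down to a whole number of 1k pages, as
	 * cli_pull_send()/cli_push_send() do below. */
	if (chunk_size > page_size) {
		chunk_size &= ~(page_size - 1);
	}
#endif
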
struct cli_read_andx_state {

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);

		if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
			DEBUG(10, ("cli_read_andx_send got large offset where "
				   "the server does not support it\n"));
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {

	tevent_req_set_callback(subreq, cli_read_andx_done, req);

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct tevent_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);

	/* size is the number of bytes the server returned. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */
	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	tevent_req_done(req);

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);

	if (tevent_req_is_nterror(req, &status)) {

	*received = state->received;
	*rcvbuf = state->buf;

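
/*
 * Example of the recv contract described above (illustrative sketch only,
 * not part of the build). It assumes "req" is a completed request from
 * cli_read_andx_send(), that the elided third parameter of
 * cli_read_andx_recv() is a uint8_t ** for the data pointer, and that
 * "mybuf" is a caller-owned buffer large enough for the read:
 */
#if 0
	ssize_t received = 0;
	uint8_t *rcvbuf = NULL;
	NTSTATUS status;

	status = cli_read_andx_recv(req, &received, &rcvbuf);
	if (NT_STATUS_IS_OK(status)) {
		/* rcvbuf points into req, so copy it out before freeing */
		memcpy(mybuf, rcvbuf, received);
	}
	TALLOC_FREE(req);
#endif
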
struct cli_pull_chunk;

struct cli_pull_state {
	struct tevent_context *ev;
	struct cli_state *cli;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);

	/*
	 * How many bytes did we push into "sink"?
	 */

	/*
	 * Outstanding requests
	 *
	 * The maximum is 256:
	 * - which would be a window of 256 MByte
	 *   for SMB2 with multi-credit
	 *   or smb1 unix extensions.
	 */
	uint16_t num_waiting;
	struct cli_pull_chunk *chunks;

struct cli_pull_chunk {
	struct cli_pull_chunk *prev, *next;
	struct tevent_req *req; /* This is the main request! Not the subreq */
	struct tevent_req *subreq;

static void cli_pull_setup_chunks(struct tevent_req *req);
static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk);
static void cli_pull_chunk_done(struct tevent_req *subreq);

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is passed to the
 * callback function "sink" in the right order.
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 off_t size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
	struct tevent_req *req;
	struct cli_pull_state *state;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);

	state->start_offset = start_offset;
	state->next_offset = start_offset;
	state->remaining = size;

		tevent_req_done(req);
		return tevent_req_post(req, ev);

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		state->chunk_size = smb2cli_conn_max_read_size(cli->conn);
		state->chunk_size = cli_read_max_bufsize(cli);

	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);

	if (window_size == 0) {
		/*
		 * We use 16 MByte as default window size.
		 */
		window_size = 16 * 1024 * 1024;

	tmp64 = window_size / state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {

	tmp64 = MAX(tmp64, 1);
	tmp64 = MIN(tmp64, 256);
	state->max_chunks = tmp64;

	/*
	 * We defer the callback because of the complex
	 * substate/subfunction logic
	 */
	tevent_req_defer_callback(req, ev);

	cli_pull_setup_chunks(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);

static void cli_pull_setup_chunks(struct tevent_req *req)
{
	struct cli_pull_state *state =
		tevent_req_data(req,
		struct cli_pull_state);
	struct cli_pull_chunk *chunk, *next = NULL;

	for (chunk = state->chunks; chunk; chunk = next) {
		/*
		 * Note that chunk might be removed from this call.
		 */
		cli_pull_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	for (i = state->num_chunks; i < state->max_chunks; i++) {

		if (state->num_waiting > 0) {

		if (state->remaining == 0) {

		chunk = talloc_zero(state, struct cli_pull_chunk);
		if (tevent_req_nomem(chunk, req)) {

		chunk->ofs = state->next_offset;
		chunk->total_size = MIN(state->remaining, state->chunk_size);
		state->next_offset += chunk->total_size;
		state->remaining -= chunk->total_size;

		DLIST_ADD_END(state->chunks, chunk, NULL);
		state->num_waiting++;

		cli_pull_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	if (state->remaining > 0) {

	if (state->num_chunks > 0) {

	tevent_req_done(req);

static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk)
{
	struct tevent_req *req = chunk->req;
	struct cli_pull_state *state =
		tevent_req_data(req,
		struct cli_pull_state);

	if (chunk != state->chunks) {
		/*
		 * This chunk is not the first one in the list,
		 * which means we should not push it into the
		 * sink yet.
		 */

	if (chunk->tmp_size == 0) {
		/*
		 * we got a short read, we're done
		 */
		tevent_req_done(req);

	status = state->sink((char *)chunk->buf,
	if (tevent_req_nterror(req, status)) {
	state->pushed += chunk->tmp_size;

	if (chunk->tmp_size < chunk->total_size) {
		/*
		 * we got a short read, we're done
		 */
		tevent_req_done(req);

	DLIST_REMOVE(state->chunks, chunk);
	SMB_ASSERT(state->num_chunks > 0);

	if (chunk->subreq != NULL) {

	SMB_ASSERT(state->num_waiting > 0);

	ofs = chunk->ofs + chunk->tmp_size;
	size = chunk->total_size - chunk->tmp_size;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		ok = smb2cli_conn_req_possible(state->cli->conn, &max_size);

		/*
		 * downgrade depending on the available credits
		 */
		size = MIN(max_size, size);

		chunk->subreq = cli_smb2_read_send(chunk,
		if (tevent_req_nomem(chunk->subreq, req)) {

		ok = smb1cli_conn_req_possible(state->cli->conn);

		chunk->subreq = cli_read_andx_send(chunk,
		if (tevent_req_nomem(chunk->subreq, req)) {

	tevent_req_set_callback(chunk->subreq,
	state->num_waiting--;

static void cli_pull_chunk_done(struct tevent_req *subreq)
{
	struct cli_pull_chunk *chunk =
		tevent_req_callback_data(subreq,
		struct cli_pull_chunk);
	struct tevent_req *req = chunk->req;
	struct cli_pull_state *state =
		tevent_req_data(req,
		struct cli_pull_state);
	size_t expected = chunk->total_size - chunk->tmp_size;

	chunk->subreq = NULL;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		status = cli_smb2_read_recv(subreq, &received, &buf);
		status = cli_read_andx_recv(subreq, &received, &buf);

	if (NT_STATUS_EQUAL(status, NT_STATUS_END_OF_FILE)) {
		status = NT_STATUS_OK;

	if (tevent_req_nterror(req, status)) {

	if (received > expected) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

		/*
		 * We got EOF, we're done
		 */
		cli_pull_setup_chunks(req);

	if (received == chunk->total_size) {
		/*
		 * We got it in the first run.
		 *
		 * We don't call TALLOC_FREE(subreq)
		 * here and keep the returned buffer.
		 */
	} else if (chunk->buf == NULL) {
		chunk->buf = talloc_array(chunk, uint8_t, chunk->total_size);
		if (tevent_req_nomem(chunk->buf, req)) {

	if (received != chunk->total_size) {
		uint8_t *p = chunk->buf + chunk->tmp_size;
		memcpy(p, buf, received);

	chunk->tmp_size += received;

	if (chunk->tmp_size == chunk->total_size) {
		state->num_waiting++;

	cli_pull_setup_chunks(req);

NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);

	*received = state->pushed;
	tevent_req_received(req);

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, off_t size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, off_t *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;

	ev = samba_tevent_context_init(frame);
		status = NT_STATUS_NO_MEMORY;

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
		status = NT_STATUS_NO_MEMORY;

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);

	status = cli_pull_recv(req, received);

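
/*
 * Example use of the synchronous cli_pull() wrapper above (illustrative
 * sketch only, not part of the build; assumes <unistd.h>). The helper
 * names are hypothetical; "cli" and "fnum" are assumed to refer to an
 * open connection and file, and "fd" to a writable local descriptor:
 */
#if 0
/* Sink callback: append each chunk to a local file descriptor. */
static NTSTATUS example_pull_sink(char *buf, size_t n, void *priv)
{
	int *pfd = (int *)priv;

	if (write(*pfd, buf, n) != (ssize_t)n) {
		return map_nt_error_from_unix(errno);
	}
	return NT_STATUS_OK;
}

static NTSTATUS example_pull_to_fd(struct cli_state *cli, uint16_t fnum,
				   off_t size, int fd)
{
	off_t received = 0;

	return cli_pull(cli, fnum, 0 /* start_offset */, size,
			0 /* window_size: 0 selects the 16 MByte default */,
			example_pull_sink, &fd, &received);
}
#endif
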
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);

NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
		  char *buf, off_t offset, size_t size,

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {

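
/*
 * Example use of cli_read() (illustrative sketch only, not part of the
 * build). It assumes the elided trailing parameter above is an
 * out-parameter for the number of bytes actually read, called "nread"
 * here; "cli" and "fnum" are assumed to be valid:
 */
#if 0
	char buf[4096];
	size_t nread = 0;
	NTSTATUS status;

	status = cli_read(cli, fnum, buf, 0 /* offset */, sizeof(buf),
			  &nread);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
#endif
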
/****************************************************************************
 Write to a file using SMBwrite, not bypassing 0-byte writes.
****************************************************************************/

NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
		      off_t offset, size_t size1, size_t *ptotal)
{
	bytes = talloc_array(talloc_tos(), uint8_t, 3);
		return NT_STATUS_NO_MEMORY;

		uint32_t usable_space = cli_state_available_size(cli, 48);
		size_t size = MIN(size1, usable_space);
		struct tevent_req *req;

		SSVAL(vwv+0, 0, fnum);
		SSVAL(vwv+1, 0, size);
		SIVAL(vwv+2, 0, offset);

		bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
			return NT_STATUS_NO_MEMORY;
		SSVAL(bytes, 1, size);
		memcpy(bytes + 3, buf + total, size);

		status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
				 size+3, bytes, &req, 1, NULL, &ret_vwv,
		if (!NT_STATUS_IS_OK(status)) {

		size = SVAL(ret_vwv+0, 0);

	if (ptotal != NULL) {

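
/*
 * Example use of cli_smbwrite() (illustrative sketch only, not part of
 * the build; "cli" and "fnum" are assumed to refer to an open connection
 * and file):
 */
#if 0
	char data[] = "hello world";
	size_t total = 0;
	NTSTATUS status;

	status = cli_smbwrite(cli, fnum, data, 0 /* offset */,
			      sizeof(data) - 1, &total);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
#endif
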
/*
 * Send a write&x request
 */

struct cli_write_andx_state {

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct tevent_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode, wct);

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);

	state->size = MIN(size, max_write);

	SCVAL(vwv+0, 0, 0xFF);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+9, 0, (state->size>>16));
	SSVAL(vwv+10, 0, state->size);
	      smb1cli_req_wct_ofs(reqs_before, num_reqs_before)
	      + 1 /* the wct field */
	      + 2 /* num_bytes field */

		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);

	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = discard_const_p(void, buf);
	state->iov[1].iov_len = state->size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	tevent_req_set_callback(subreq, cli_write_andx_done, req);

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);

	status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv,
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);

	state->written = SVAL(vwv+2, 0);
	if (state->size > UINT16_MAX) {
		/*
		 * It is important that we only set the
		 * high bits if we asked for a large write.
		 *
		 * OS/2 print shares get this wrong and may send
		 */
		state->written |= SVAL(vwv+4, 0)<<16;

	tevent_req_done(req);

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);

	if (tevent_req_is_nterror(req, &status)) {

	*pwritten = state->written;

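
/*
 * Example single write&x round trip using the async pair above
 * (illustrative sketch only, not part of the build). Note that one
 * write&x may write less than requested; cli_writeall() below loops
 * until everything is on the wire. The helper name is hypothetical:
 */
#if 0
static NTSTATUS example_write_andx(struct cli_state *cli, uint16_t fnum,
				   const uint8_t *buf, off_t offset,
				   size_t size, size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_write_andx_send(frame, ev, cli, fnum, 0 /* mode */,
				  buf, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_write_andx_recv(req, pwritten);
fail:
	TALLOC_FREE(frame);
	return status;
}
#endif
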
struct cli_writeall_state {
	struct tevent_context *ev;
	struct cli_state *cli;

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);

	state->offset = offset;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	tevent_req_set_callback(subreq, cli_writeall_written, req);

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {

	tevent_req_set_callback(subreq, cli_writeall_written, req);

static NTSTATUS cli_writeall_recv(struct tevent_req *req,
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);

	if (tevent_req_is_nterror(req, &status)) {

	if (pwritten != NULL) {
		*pwritten = state->written;

	return NT_STATUS_OK;

NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		      const uint8_t *buf, off_t offset, size_t size,
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;

	ev = samba_tevent_context_init(frame);

	if (smbXcli_conn_protocol(cli->conn) >= PROTOCOL_SMB2_02) {
		req = cli_smb2_writeall_send(frame, ev, cli, fnum, mode,
		req = cli_writeall_send(frame, ev, cli, fnum, mode,

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);

	if (smbXcli_conn_protocol(cli->conn) >= PROTOCOL_SMB2_02) {
		status = cli_smb2_writeall_recv(req, pwritten);
		status = cli_writeall_recv(req, pwritten);

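
/*
 * Example use of cli_writeall() (illustrative sketch only, not part of
 * the build). It assumes the elided trailing parameter above is
 * "size_t *pwritten", matching cli_writeall_recv(); "cli" and "fnum"
 * are assumed to be valid:
 */
#if 0
	const char msg[] = "complete write";
	size_t nwritten = 0;
	NTSTATUS status;

	status = cli_writeall(cli, fnum, 0 /* mode */,
			      (const uint8_t *)msg, 0 /* offset */,
			      sizeof(msg) - 1, &nwritten);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
#endif
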
struct cli_push_chunk;

struct cli_push_state {
	struct tevent_context *ev;
	struct cli_state *cli;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);

	/*
	 * Outstanding requests
	 *
	 * The maximum is 256:
	 * - which would be a window of 256 MByte
	 *   for SMB2 with multi-credit
	 *   or smb1 unix extensions.
	 */
	uint16_t max_chunks;
	uint16_t num_chunks;
	uint16_t num_waiting;
	struct cli_push_chunk *chunks;

struct cli_push_chunk {
	struct cli_push_chunk *prev, *next;
	struct tevent_req *req; /* This is the main request! Not the subreq */
	struct tevent_req *subreq;

static void cli_push_setup_chunks(struct tevent_req *req);
static void cli_push_chunk_ship(struct cli_push_chunk *chunk);
static void cli_push_chunk_done(struct tevent_req *subreq);

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
	struct tevent_req *req;
	struct cli_push_state *state;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);

	state->start_offset = start_offset;
	state->source = source;
	state->next_offset = start_offset;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		state->chunk_size = smb2cli_conn_max_write_size(cli->conn);
		state->chunk_size = cli_write_max_bufsize(cli, mode, 14);

	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);

	if (window_size == 0) {
		/*
		 * We use 16 MByte as default window size.
		 */
		window_size = 16 * 1024 * 1024;

	tmp64 = window_size / state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {

	tmp64 = MAX(tmp64, 1);
	tmp64 = MIN(tmp64, 256);
	state->max_chunks = tmp64;

	/*
	 * We defer the callback because of the complex
	 * substate/subfunction logic
	 */
	tevent_req_defer_callback(req, ev);

	cli_push_setup_chunks(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);

static void cli_push_setup_chunks(struct tevent_req *req)
{
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	struct cli_push_chunk *chunk, *next = NULL;

	for (chunk = state->chunks; chunk; chunk = next) {
		/*
		 * Note that chunk might be removed from this call.
		 */
		cli_push_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	for (i = state->num_chunks; i < state->max_chunks; i++) {

		if (state->num_waiting > 0) {

		chunk = talloc_zero(state, struct cli_push_chunk);
		if (tevent_req_nomem(chunk, req)) {

		chunk->ofs = state->next_offset;
		chunk->buf = talloc_array(chunk,
		if (tevent_req_nomem(chunk->buf, req)) {

		chunk->total_size = state->source(chunk->buf,
		if (chunk->total_size == 0) {
			/* nothing to send */

		state->next_offset += chunk->total_size;

		DLIST_ADD_END(state->chunks, chunk, NULL);
		state->num_chunks++;
		state->num_waiting++;

		cli_push_chunk_ship(chunk);
		if (!tevent_req_is_in_progress(req)) {

	if (state->num_chunks > 0) {

	tevent_req_done(req);

static void cli_push_chunk_ship(struct cli_push_chunk *chunk)
{
	struct tevent_req *req = chunk->req;
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);

	DLIST_REMOVE(state->chunks, chunk);
	SMB_ASSERT(state->num_chunks > 0);
	state->num_chunks--;

	if (chunk->subreq != NULL) {

	SMB_ASSERT(state->num_waiting > 0);

	buf = chunk->buf + chunk->tmp_size;
	ofs = chunk->ofs + chunk->tmp_size;
	size = chunk->total_size - chunk->tmp_size;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		ok = smb2cli_conn_req_possible(state->cli->conn, &max_size);

		/*
		 * downgrade depending on the available credits
		 */
		size = MIN(max_size, size);

		chunk->subreq = cli_smb2_write_send(chunk,
		if (tevent_req_nomem(chunk->subreq, req)) {

		ok = smb1cli_conn_req_possible(state->cli->conn);

		chunk->subreq = cli_write_andx_send(chunk,
		if (tevent_req_nomem(chunk->subreq, req)) {

	tevent_req_set_callback(chunk->subreq,
				cli_push_chunk_done,
	state->num_waiting--;

static void cli_push_chunk_done(struct tevent_req *subreq)
{
	struct cli_push_chunk *chunk =
		tevent_req_callback_data(subreq,
		struct cli_push_chunk);
	struct tevent_req *req = chunk->req;
	struct cli_push_state *state =
		tevent_req_data(req,
		struct cli_push_state);
	size_t expected = chunk->total_size - chunk->tmp_size;

	chunk->subreq = NULL;

	if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) {
		status = cli_smb2_write_recv(subreq, &written);
		status = cli_write_andx_recv(subreq, &written);

	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {

	if (written > expected) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);

	chunk->tmp_size += written;

	if (chunk->tmp_size == chunk->total_size) {
		state->num_waiting++;

	cli_push_setup_chunks(req);

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;

	ev = samba_tevent_context_init(frame);
		status = NT_STATUS_NO_MEMORY;

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
		status = NT_STATUS_NO_MEMORY;

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);

	status = cli_push_recv(req);

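
/*
 * Example use of the synchronous cli_push() wrapper above (illustrative
 * sketch only, not part of the build; assumes <unistd.h>). The helper
 * names are hypothetical, and it assumes the elided trailing argument of
 * cli_push() is the "priv" pointer handed to the source callback; "cli",
 * "fnum" and the readable local file descriptor "fd" are assumed to exist:
 */
#if 0
/* Source callback: read up to n bytes from a local file descriptor,
 * returning 0 at end of file to stop the push. */
static size_t example_push_source(uint8_t *buf, size_t n, void *priv)
{
	int *pfd = (int *)priv;
	ssize_t nread = read(*pfd, buf, n);

	return (nread > 0) ? (size_t)nread : 0;
}

static NTSTATUS example_push_from_fd(struct cli_state *cli, uint16_t fnum,
				     int fd)
{
	return cli_push(cli, fnum, 0 /* mode */, 0 /* start_offset */,
			0 /* window_size: 0 selects the 16 MByte default */,
			example_push_source, &fd);
}
#endif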