/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "async_smb.h"
#include "../libcli/smb/smbXcli_base.h"
/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	uint8_t wct = 12;
	uint32_t min_space;
	uint32_t data_offset;
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) {
		useable_space = 0xFFFFFF - data_offset;

		if (smb1cli_conn_signing_is_active(cli->conn)) {
			return min_space;
		}

		if (smb1cli_conn_encryption_on(cli->conn)) {
			return min_space;
		}

		return useable_space;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) {
		/*
		 * Note: CAP_LARGE_READX also works with signing
		 */
		useable_space = 0x1FFFF - data_offset;

		useable_space = MIN(useable_space, UINT16_MAX);

		return useable_space;
	}

	return min_space;
}
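/*
 * Illustrative note (not part of the original file): with the wct of 12
 * used above, the per-request header overhead works out to
 *
 *	data_offset = HDR_VWV + 12 * sizeof(uint16_t)	(the vwv words)
 *		      + sizeof(uint16_t)		(byte count)
 *		      + 1				(pad)
 *		    = HDR_VWV + 27
 *
 * so a server with CIFS_UNIX_LARGE_READ_CAP can return up to
 * 0xFFFFFF - data_offset bytes per read&x, and a CAP_LARGE_READX server
 * up to 0x1FFFF - data_offset, further capped to UINT16_MAX.
 */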
/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli,
				    uint16_t write_mode,
				    uint8_t wct)
{
	uint32_t min_space;
	uint32_t data_offset;
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) {
		useable_space = 0xFFFFFF - data_offset;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) {
		useable_space = 0x1FFFF - data_offset;
	} else {
		return min_space;
	}

	if (write_mode != 0) {
		return min_space;
	}

	if (smb1cli_conn_signing_is_active(cli->conn)) {
		return min_space;
	}

	if (smb1cli_conn_encryption_on(cli->conn)) {
		return min_space;
	}

	if (strequal(cli->dev, "LPT1:")) {
		return min_space;
	}

	return useable_space;
}
struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);
struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct = 12;
	} else {
		if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
			DEBUG(10, ("cli_read_andx_send got large offset where "
				   "the server does not support it\n"));
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}
struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct tevent_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}
static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
				     &num_bytes, &bytes);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned. */

	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	tevent_req_done(req);
}
/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
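/*
 * Illustrative sketch (not part of the original file): because rcvbuf points
 * into memory owned by the request, a caller has to copy the data out before
 * freeing the request, along the lines of:
 *
 *	ssize_t received;
 *	uint8_t *rcvbuf;
 *	NTSTATUS status = cli_read_andx_recv(subreq, &received, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		memcpy(mybuf, rcvbuf, received);	(copy before freeing)
 *	}
 *	TALLOC_FREE(subreq);				(rcvbuf now invalid)
 *
 * "mybuf" is a hypothetical caller-owned buffer of at least "received" bytes.
 */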
struct cli_readall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);
static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					   struct tevent_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_readall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
	return req;
}
static void cli_readall_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (received == 0) {
		/* EOF */
		tevent_req_done(req);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		tevent_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (tevent_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
}
static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};
/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */
struct cli_pull_state {
	struct tevent_req *req;

	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	off_t size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;
	size_t max_reqs;

	/*
	 * Outstanding requests
	 */
	uint16_t num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	off_t requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	off_t pushed;
};
static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_default_print(req, mem_ctx);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);
/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 off_t size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;
	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);
	}

	state->max_reqs = smbXcli_conn_max_requests(cli->conn);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, state->max_reqs);

	state->reqs = talloc_zero_array(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		off_t size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_readall_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

failed:
	TALLOC_FREE(req);
	return NULL;
}
/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_readall_recv(subreq, &pull_subreq->received,
				  &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (tevent_req_nterror(state->req, status)) {
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			off_t size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						state->req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}
NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}
NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, off_t size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, off_t *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	return status;
}
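/*
 * Illustrative sketch (not part of the original file): a typical sink
 * callback writes each chunk to a local file descriptor, so cli_pull can be
 * used to download a remote file. "write_to_fd" and "fd" are hypothetical
 * caller-supplied names.
 *
 *	static NTSTATUS write_to_fd(char *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		if (write(*fd, buf, n) != n) {
 *			return map_nt_error_from_unix(errno);
 *		}
 *		return NT_STATUS_OK;
 *	}
 *
 *	off_t nread;
 *	status = cli_pull(cli, fnum, 0, file_size, 0x10000,
 *			  write_to_fd, &fd, &nread);
 */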
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}
NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
		  char *buf, off_t offset, size_t size,
		  size_t *nread)
{
	NTSTATUS status;
	off_t ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (nread != NULL) {
		*nread = ret;
	}

	return NT_STATUS_OK;
}
/****************************************************************************
 Write to a file using SMBwrite, not bypassing 0 byte writes
****************************************************************************/

NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
		      off_t offset, size_t size1, size_t *ptotal)
{
	uint8_t *bytes;
	ssize_t total = 0;

	/*
	 * 3 bytes prefix
	 */

	bytes = talloc_array(talloc_tos(), uint8_t, 3);
	if (bytes == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	bytes[0] = 1;

	do {
		uint32_t usable_space = cli_state_available_size(cli, 48);
		size_t size = MIN(size1, usable_space);
		struct tevent_req *req;
		uint16_t vwv[5];
		uint16_t *ret_vwv;
		NTSTATUS status;

		SSVAL(vwv+0, 0, fnum);
		SSVAL(vwv+1, 0, size);
		SIVAL(vwv+2, 0, offset);
		SSVAL(vwv+4, 0, 0);

		bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
				       size+3);
		if (bytes == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		SSVAL(bytes, 1, size);
		memcpy(bytes + 3, buf + total, size);

		status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
				 size+3, bytes, &req, 1, NULL, &ret_vwv,
				 NULL, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			TALLOC_FREE(bytes);
			return status;
		}

		size = SVAL(ret_vwv+0, 0);
		if (size == 0) {
			break;
		}
		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	TALLOC_FREE(bytes);

	if (ptotal != NULL) {
		*ptotal = total;
	}
	return NT_STATUS_OK;
}
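/*
 * Illustrative call (not part of the original file), with "data" and "len"
 * standing in for a caller-supplied buffer and its length:
 *
 *	size_t total;
 *	status = cli_smbwrite(cli, fnum, data, 0, len, &total);
 */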
/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);
struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct tevent_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode, wct);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	state->size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (state->size>>16));
	SSVAL(vwv+10, 0, state->size);

	SSVAL(vwv+11, 0,
	      smb1cli_req_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = discard_const_p(void, buf);
	state->iov[1].iov_len = state->size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}
struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}
static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	NTSTATUS status;

	status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv,
			      NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	if (state->size > UINT16_MAX) {
		/*
		 * It is important that we only set the
		 * high bits only if we asked for a large write.
		 *
		 * OS/2 print shares get this wrong and may send
		 * invalid values.
		 */
		state->written |= SVAL(vwv+4, 0)<<16;
	}
	tevent_req_done(req);
}
NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}
struct cli_writeall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);
static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}
static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}
static NTSTATUS cli_writeall_recv(struct tevent_req *req,
				  size_t *pwritten)
{
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (pwritten != NULL) {
		*pwritten = state->written;
	}
	return NT_STATUS_OK;
}
NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		      const uint8_t *buf, off_t offset, size_t size,
		      size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}
	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_writeall_recv(req, pwritten);
 fail:
	TALLOC_FREE(frame);
	return status;
}
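/*
 * Illustrative call (not part of the original file), with "data" and "len"
 * standing in for a caller-supplied buffer and its length; on success the
 * whole buffer has been written:
 *
 *	size_t written;
 *	status = cli_writeall(cli, fnum, 0, data, 0, len, &written);
 */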
struct cli_push_write_state {
	struct tevent_req *req;/* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};
struct cli_push_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	size_t max_reqs;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);
static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}
struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode, 14);
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);
	}

	state->max_reqs = smbXcli_conn_max_requests(cli->conn);

	if (window_size == 0) {
		window_size = state->max_reqs * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, state->max_reqs);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = talloc_zero_array(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}
static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq, NULL);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}
NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	return status;
}
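/*
 * Illustrative sketch (not part of the original file): a source callback
 * feeds cli_push from a local file descriptor; "read_from_fd" and "fd" are
 * hypothetical caller-supplied names. Returning 0 signals end of data.
 *
 *	static size_t read_from_fd(uint8_t *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		ssize_t nread = read(*fd, buf, n);
 *		return (nread > 0) ? nread : 0;
 *	}
 *
 *	status = cli_push(cli, fnum, 0, 0, 0, read_from_fd, &fd);
 */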