/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "async_smb.h"
/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
        size_t data_offset = smb_size - 4;
        size_t useable_space;
        uint8_t wct = 12;

        if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
            && (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
                return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
        }

        if (cli_state_capabilities(cli) & CAP_LARGE_READX) {
                return cli->is_samba
                        ? CLI_SAMBA_MAX_LARGE_READX_SIZE
                        : CLI_WINDOWS_MAX_LARGE_READX_SIZE;
        }

        data_offset += wct * sizeof(uint16_t);
        data_offset += 1; /* pad */

        useable_space = cli_state_available_size(cli, data_offset);

        return useable_space;
}
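/*
 * Worked example for the plain read&x path above: the fixed reply overhead
 * is smb_size - 4 bytes of SMB header, plus 12 parameter words (24 bytes),
 * plus one pad byte. cli_state_available_size() returns whatever the
 * negotiated buffer size leaves once that overhead is subtracted, and that
 * is the recommended read size.
 */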
/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli,
                                    uint16_t write_mode,
                                    uint8_t wct)
{
        if (write_mode == 0 &&
            !client_is_signing_on(cli) &&
            !cli_encryption_on(cli) &&
            (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
            (cli_state_capabilities(cli) & CAP_LARGE_FILES)) {
                /* Only do massive writes if we can do them direct
                 * with no signing or encrypting - not on a pipe. */
                return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
        }

        if (cli->is_samba) {
                return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
        }

        if (((cli_state_capabilities(cli) & CAP_LARGE_WRITEX) == 0)
            || client_is_signing_on(cli)
            || strequal(cli->dev, "LPT1:")) {
                size_t data_offset = smb_size - 4;
                size_t useable_space;

                data_offset += wct * sizeof(uint16_t);
                data_offset += 1; /* pad */

                useable_space = cli_state_available_size(cli, data_offset);

                return useable_space;
        }

        return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}
struct cli_read_andx_state {
        size_t size;
        uint16_t vwv[12];
        NTSTATUS status;
        size_t received;
        uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
                                        struct event_context *ev,
                                        struct cli_state *cli, uint16_t fnum,
                                        off_t offset, size_t size,
                                        struct tevent_req **psmbreq)
{
        struct tevent_req *req, *subreq;
        struct cli_read_andx_state *state;
        uint8_t wct = 10;

        if (size > cli_read_max_bufsize(cli)) {
                DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
                          "size=%d\n", (int)size,
                          (int)cli_read_max_bufsize(cli)));
                return NULL;
        }

        req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
        if (req == NULL) {
                return NULL;
        }
        state->size = size;

        SCVAL(state->vwv + 0, 0, 0xFF);
        SCVAL(state->vwv + 0, 1, 0);
        SSVAL(state->vwv + 1, 0, 0);
        SSVAL(state->vwv + 2, 0, fnum);
        SIVAL(state->vwv + 3, 0, offset);
        SSVAL(state->vwv + 5, 0, size);
        SSVAL(state->vwv + 6, 0, size);
        SSVAL(state->vwv + 7, 0, (size >> 16));
        SSVAL(state->vwv + 8, 0, 0);
        SSVAL(state->vwv + 9, 0, 0);

        if ((uint64_t)offset >> 32) {
                SIVAL(state->vwv + 10, 0,
                      (((uint64_t)offset)>>32) & 0xffffffff);
                wct += 2;
        }

        subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
                                    state->vwv, 0, NULL);
        if (subreq == NULL) {
                TALLOC_FREE(req);
                return NULL;
        }
        tevent_req_set_callback(subreq, cli_read_andx_done, req);
        *psmbreq = subreq;
        return req;
}
struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
                                      struct event_context *ev,
                                      struct cli_state *cli, uint16_t fnum,
                                      off_t offset, size_t size)
{
        struct tevent_req *req, *subreq;
        NTSTATUS status;

        req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
                                   &subreq);
        if (req == NULL) {
                return NULL;
        }

        status = cli_smb_req_send(subreq);
        if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
        return req;
}
static void cli_read_andx_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct cli_read_andx_state *state = tevent_req_data(
                req, struct cli_read_andx_state);
        uint8_t *inbuf;
        uint8_t wct;
        uint16_t *vwv;
        uint32_t num_bytes;
        uint8_t *bytes;

        state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
                                     &num_bytes, &bytes);
        TALLOC_FREE(subreq);
        if (NT_STATUS_IS_ERR(state->status)) {
                tevent_req_nterror(req, state->status);
                return;
        }

        /* size is the number of bytes the server returned.
         * Might be zero. */
        state->received = SVAL(vwv + 5, 0);
        state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

        if (state->received > state->size) {
                DEBUG(5,("server returned more than we wanted!\n"));
                tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
                return;
        }

        /*
         * bcc field must be valid for small reads, for large reads the 16-bit
         * bcc field can't be correct.
         */

        if ((state->received < 0xffff) && (state->received > num_bytes)) {
                DEBUG(5, ("server announced more bytes than sent\n"));
                tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
                return;
        }

        state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

        if (trans_oob(smb_len(inbuf), SVAL(vwv+6, 0), state->received)
            || ((state->received != 0) && (state->buf < bytes))) {
                DEBUG(5, ("server returned invalid read&x data offset\n"));
                tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
                return;
        }

        tevent_req_done(req);
}
/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
                            uint8_t **rcvbuf)
{
        struct cli_read_andx_state *state = tevent_req_data(
                req, struct cli_read_andx_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *received = state->received;
        *rcvbuf = state->buf;
        return NT_STATUS_OK;
}
struct cli_readall_state {
        struct tevent_context *ev;
        struct cli_state *cli;
        uint16_t fnum;
        off_t start_offset;
        size_t size;
        size_t received;
        uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);

static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
                                           struct event_context *ev,
                                           struct cli_state *cli,
                                           uint16_t fnum,
                                           off_t offset, size_t size)
{
        struct tevent_req *req, *subreq;
        struct cli_readall_state *state;

        req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->fnum = fnum;
        state->start_offset = offset;
        state->size = size;
        state->received = 0;
        state->buf = NULL;

        subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, cli_readall_done, req);
        return req;
}
static void cli_readall_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct cli_readall_state *state = tevent_req_data(
                req, struct cli_readall_state);
        ssize_t received;
        uint8_t *buf;
        NTSTATUS status;

        status = cli_read_andx_recv(subreq, &received, &buf);
        if (tevent_req_nterror(req, status)) {
                return;
        }

        if (received == 0) {
                /* EOF */
                tevent_req_done(req);
                return;
        }

        if ((state->received == 0) && (received == state->size)) {
                /* Ideal case: Got it all in one run */
                state->buf = buf;
                state->received += received;
                tevent_req_done(req);
                return;
        }

        /*
         * We got a short read, issue a read for the
         * rest. Unfortunately we have to allocate the buffer
         * ourselves now, as our caller expects to receive a single
         * buffer. cli_read_andx does it from the buffer received from
         * the net, but with a short read we have to put it together
         * from several reads.
         */

        if (state->buf == NULL) {
                state->buf = talloc_array(state, uint8_t, state->size);
                if (tevent_req_nomem(state->buf, req)) {
                        return;
                }
        }
        memcpy(state->buf + state->received, buf, received);
        state->received += received;

        TALLOC_FREE(subreq);

        if (state->received >= state->size) {
                tevent_req_done(req);
                return;
        }

        subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
                                    state->start_offset + state->received,
                                    state->size - state->received);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, cli_readall_done, req);
}
static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
                                 uint8_t **rcvbuf)
{
        struct cli_readall_state *state = tevent_req_data(
                req, struct cli_readall_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *received = state->received;
        *rcvbuf = state->buf;
        return NT_STATUS_OK;
}
struct cli_pull_subreq {
        struct tevent_req *req;
        ssize_t received;
        uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */
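/*
 * Illustrative use of the sink mechanism (a sketch, not part of the original
 * file; the "fd", "fnum" and "file_size" variables and the callback name are
 * assumptions): cli_pull() drives the request pipeline internally and hands
 * each chunk to the sink callback in file order.
 *
 *      static NTSTATUS write_to_fd_sink(char *buf, size_t n, void *priv)
 *      {
 *              int *fd = (int *)priv;
 *
 *              if (write(*fd, buf, n) != n) {
 *                      return map_nt_error_from_unix(errno);
 *              }
 *              return NT_STATUS_OK;
 *      }
 *
 *      SMB_OFF_T received;
 *      NTSTATUS status = cli_pull(cli, fnum, 0, file_size, 1024*1024,
 *                                 write_to_fd_sink, &fd, &received);
 */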
struct cli_pull_state {
        struct tevent_req *req;

        struct event_context *ev;
        struct cli_state *cli;
        uint16_t fnum;
        off_t start_offset;
        SMB_OFF_T size;

        NTSTATUS (*sink)(char *buf, size_t n, void *priv);
        void *priv;

        size_t chunk_size;

        /*
         * Outstanding requests
         */
        uint16_t max_reqs;
        int num_reqs;
        struct cli_pull_subreq *reqs;

        /*
         * For how many bytes did we send requests already?
         */
        SMB_OFF_T requested;

        /*
         * Next request index to push into "sink". This walks around the "req"
         * array, taking care that the requests are pushed to "sink" in the
         * right order. If necessary (i.e. replies don't come in in the right
         * order), replies are held back in "reqs".
         */
        int top_req;

        /*
         * How many bytes did we push into "sink"?
         */
        SMB_OFF_T pushed;
};
static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
        struct cli_pull_state *state = tevent_req_data(
                req, struct cli_pull_state);
        char *result;

        result = tevent_req_default_print(req, mem_ctx);
        if (result == NULL) {
                return NULL;
        }

        return talloc_asprintf_append_buffer(
                result, "num_reqs=%d, top_req=%d",
                state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);
/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
                                 struct event_context *ev,
                                 struct cli_state *cli,
                                 uint16_t fnum, off_t start_offset,
                                 SMB_OFF_T size, size_t window_size,
                                 NTSTATUS (*sink)(char *buf, size_t n,
                                                  void *priv),
                                 void *priv)
{
        struct tevent_req *req;
        struct cli_pull_state *state;
        int i;

        req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
        if (req == NULL) {
                return NULL;
        }
        tevent_req_set_print_fn(req, cli_pull_print);
        state->req = req;
        state->cli = cli;
        state->ev = ev;
        state->fnum = fnum;
        state->start_offset = start_offset;
        state->size = size;
        state->sink = sink;
        state->priv = priv;

        state->pushed = 0;
        state->top_req = 0;

        if (size == 0) {
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }

        state->chunk_size = cli_read_max_bufsize(cli);

        state->max_reqs = cli_state_max_requests(cli);

        state->num_reqs = MAX(window_size/state->chunk_size, 1);
        state->num_reqs = MIN(state->num_reqs, state->max_reqs);

        state->reqs = talloc_zero_array(state, struct cli_pull_subreq,
                                        state->num_reqs);
        if (state->reqs == NULL) {
                goto failed;
        }

        state->requested = 0;

        for (i=0; i<state->num_reqs; i++) {
                struct cli_pull_subreq *subreq = &state->reqs[i];
                SMB_OFF_T size_left;
                size_t request_thistime;

                if (state->requested >= size) {
                        state->num_reqs = i;
                        break;
                }

                size_left = size - state->requested;
                request_thistime = MIN(size_left, state->chunk_size);

                subreq->req = cli_readall_send(
                        state->reqs, ev, cli, fnum,
                        state->start_offset + state->requested,
                        request_thistime);

                if (subreq->req == NULL) {
                        goto failed;
                }
                tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
                state->requested += request_thistime;
        }
        return req;

failed:
        TALLOC_FREE(req);
        return NULL;
}
/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct cli_pull_state *state = tevent_req_data(
                req, struct cli_pull_state);
        struct cli_pull_subreq *pull_subreq = NULL;
        NTSTATUS status;
        int i;

        for (i = 0; i < state->num_reqs; i++) {
                pull_subreq = &state->reqs[i];
                if (subreq == pull_subreq->req) {
                        break;
                }
        }
        if (i == state->num_reqs) {
                /* Huh -- received something we did not send?? */
                tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
                return;
        }

        status = cli_readall_recv(subreq, &pull_subreq->received,
                                  &pull_subreq->buf);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(state->req, status);
                return;
        }

        /*
         * This loop is the one to take care of out-of-order replies. All
         * pending requests are in state->reqs, state->reqs[top_req] is the
         * one that is to be pushed next. If however a request later than
         * top_req is replied to, then we can't push yet. If top_req is
         * replied to at a later point then, we need to push all the finished
         * requests.
         */

        while (state->reqs[state->top_req].req != NULL) {
                struct cli_pull_subreq *top_subreq;

                DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
                           state->top_req));

                top_subreq = &state->reqs[state->top_req];

                if (tevent_req_is_in_progress(top_subreq->req)) {
                        DEBUG(11, ("cli_pull_read_done: top request not yet "
                                   "done\n"));
                        return;
                }

                DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
                           "pushed\n", (int)top_subreq->received,
                           (int)state->pushed));

                status = state->sink((char *)top_subreq->buf,
                                     top_subreq->received, state->priv);
                if (tevent_req_nterror(state->req, status)) {
                        return;
                }
                state->pushed += top_subreq->received;

                TALLOC_FREE(state->reqs[state->top_req].req);

                if (state->requested < state->size) {
                        struct tevent_req *new_req;
                        SMB_OFF_T size_left;
                        size_t request_thistime;

                        size_left = state->size - state->requested;
                        request_thistime = MIN(size_left, state->chunk_size);

                        DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
                                   "at %d, position %d\n",
                                   (int)request_thistime,
                                   (int)(state->start_offset
                                         + state->requested),
                                   state->top_req));

                        new_req = cli_readall_send(
                                state->reqs, state->ev, state->cli,
                                state->fnum,
                                state->start_offset + state->requested,
                                request_thistime);

                        if (tevent_req_nomem(new_req, state->req)) {
                                return;
                        }
                        tevent_req_set_callback(new_req, cli_pull_read_done,
                                                req);

                        state->reqs[state->top_req].req = new_req;
                        state->requested += request_thistime;
                }

                state->top_req = (state->top_req+1) % state->num_reqs;
        }

        tevent_req_done(req);
}
NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
{
        struct cli_pull_state *state = tevent_req_data(
                req, struct cli_pull_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *received = state->pushed;
        return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
                  off_t start_offset, SMB_OFF_T size, size_t window_size,
                  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
                  void *priv, SMB_OFF_T *received)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_OK;

        if (cli_has_async_calls(cli)) {
                /*
                 * Can't use sync call while an async call is in flight
                 */
                status = NT_STATUS_INVALID_PARAMETER;
                goto fail;
        }

        ev = event_context_init(frame);
        if (ev == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
                            window_size, sink, priv);
        if (req == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }

        status = cli_pull_recv(req, received);
 fail:
        TALLOC_FREE(frame);
        return status;
}
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
        char **pbuf = (char **)priv;
        memcpy(*pbuf, buf, n);
        *pbuf += n;
        return NT_STATUS_OK;
}

NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
                  char *buf, off_t offset, size_t size,
                  size_t *nread)
{
        NTSTATUS status;
        SMB_OFF_T ret;

        status = cli_pull(cli, fnum, offset, size, size,
                          cli_read_sink, &buf, &ret);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        if (nread) {
                *nread = ret;
        }

        return NT_STATUS_OK;
}
/****************************************************************************
  write to a file using a SMBwrite and not bypassing 0 byte writes
****************************************************************************/

NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
                      off_t offset, size_t size1, size_t *ptotal)
{
        uint8_t *bytes;
        ssize_t total = 0;

        /*
         * 3 bytes prefix
         */

        bytes = talloc_array(talloc_tos(), uint8_t, 3);
        if (bytes == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        bytes[0] = 1;

        do {
                uint32_t usable_space = cli_state_available_size(cli, 48);
                size_t size = MIN(size1, usable_space);
                struct tevent_req *req;
                uint16_t vwv[5];
                uint16_t *ret_vwv;
                NTSTATUS status;

                SSVAL(vwv+0, 0, fnum);
                SSVAL(vwv+1, 0, size);
                SIVAL(vwv+2, 0, offset);
                SSVAL(vwv+4, 0, 0);

                bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
                                       size+3);
                if (bytes == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
                SSVAL(bytes, 1, size);
                memcpy(bytes + 3, buf + total, size);

                status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
                                 size+3, bytes, &req, 1, NULL, &ret_vwv,
                                 NULL, NULL);
                if (!NT_STATUS_IS_OK(status)) {
                        TALLOC_FREE(bytes);
                        return status;
                }

                size = SVAL(ret_vwv+0, 0);
                if (size == 0) {
                        break;
                }

                size1 -= size;
                total += size;
                offset += size;

        } while (size1);

        TALLOC_FREE(bytes);

        if (ptotal != NULL) {
                *ptotal = total;
        }
        return NT_STATUS_OK;
}
/*
 * Send a write&x request
 */

struct cli_write_andx_state {
        uint16_t vwv[14];
        size_t written;
        uint8_t pad;
        struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
                                         struct event_context *ev,
                                         struct cli_state *cli, uint16_t fnum,
                                         uint16_t mode, const uint8_t *buf,
                                         off_t offset, size_t size,
                                         struct tevent_req **reqs_before,
                                         int num_reqs_before,
                                         struct tevent_req **psmbreq)
{
        struct tevent_req *req, *subreq;
        struct cli_write_andx_state *state;
        bool bigoffset = ((cli_state_capabilities(cli) & CAP_LARGE_FILES) != 0);
        uint8_t wct = bigoffset ? 14 : 12;
        size_t max_write = cli_write_max_bufsize(cli, mode, wct);
        uint16_t *vwv;

        req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
        if (req == NULL) {
                return NULL;
        }

        size = MIN(size, max_write);

        vwv = state->vwv;

        SCVAL(vwv+0, 0, 0xFF);
        SCVAL(vwv+0, 1, 0);
        SSVAL(vwv+1, 0, 0);
        SSVAL(vwv+2, 0, fnum);
        SIVAL(vwv+3, 0, offset);
        SIVAL(vwv+5, 0, 0);
        SSVAL(vwv+7, 0, mode);
        SSVAL(vwv+8, 0, 0);
        SSVAL(vwv+9, 0, (size>>16));
        SSVAL(vwv+10, 0, size);

        SSVAL(vwv+11, 0,
              cli_smb_wct_ofs(reqs_before, num_reqs_before)
              + 1               /* the wct field */
              + wct * 2         /* vwv */
              + 2               /* num_bytes field */
              + 1               /* pad */);

        if (bigoffset) {
                SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
        }

        state->pad = 0;
        state->iov[0].iov_base = (void *)&state->pad;
        state->iov[0].iov_len = 1;
        state->iov[1].iov_base = discard_const_p(void, buf);
        state->iov[1].iov_len = size;

        subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
                                    2, state->iov);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, cli_write_andx_done, req);
        *psmbreq = subreq;
        return req;
}
struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
                                       struct event_context *ev,
                                       struct cli_state *cli, uint16_t fnum,
                                       uint16_t mode, const uint8_t *buf,
                                       off_t offset, size_t size)
{
        struct tevent_req *req, *subreq;
        NTSTATUS status;

        req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
                                    size, NULL, 0, &subreq);
        if (req == NULL) {
                return NULL;
        }

        status = cli_smb_req_send(subreq);
        if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
        return req;
}
static void cli_write_andx_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct cli_write_andx_state *state = tevent_req_data(
                req, struct cli_write_andx_state);
        uint8_t wct;
        uint16_t *vwv;
        uint8_t *inbuf;
        NTSTATUS status;

        status = cli_smb_recv(subreq, state, &inbuf, 6, &wct, &vwv,
                              NULL, NULL);
        TALLOC_FREE(subreq);
        if (NT_STATUS_IS_ERR(status)) {
                tevent_req_nterror(req, status);
                return;
        }
        state->written = SVAL(vwv+2, 0);
        state->written |= SVAL(vwv+4, 0)<<16;
        tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
        struct cli_write_andx_state *state = tevent_req_data(
                req, struct cli_write_andx_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pwritten = state->written;
        return NT_STATUS_OK;
}
struct cli_writeall_state {
        struct event_context *ev;
        struct cli_state *cli;
        uint16_t fnum;
        uint16_t mode;
        const uint8_t *buf;
        off_t offset;
        size_t size;
        size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
                                            struct event_context *ev,
                                            struct cli_state *cli,
                                            uint16_t fnum,
                                            uint16_t mode,
                                            const uint8_t *buf,
                                            off_t offset, size_t size)
{
        struct tevent_req *req, *subreq;
        struct cli_writeall_state *state;

        req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->fnum = fnum;
        state->mode = mode;
        state->buf = buf;
        state->offset = offset;
        state->size = size;
        state->written = 0;

        subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
                                     state->mode, state->buf, state->offset,
                                     state->size);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, cli_writeall_written, req);
        return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct cli_writeall_state *state = tevent_req_data(
                req, struct cli_writeall_state);
        NTSTATUS status;
        size_t written, to_write;

        status = cli_write_andx_recv(subreq, &written);
        TALLOC_FREE(subreq);
        if (tevent_req_nterror(req, status)) {
                return;
        }

        state->written += written;

        if (state->written > state->size) {
                tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
                return;
        }

        to_write = state->size - state->written;

        if (to_write == 0) {
                tevent_req_done(req);
                return;
        }

        subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
                                     state->mode,
                                     state->buf + state->written,
                                     state->offset + state->written, to_write);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req,
                                  size_t *pwritten)
{
        struct cli_writeall_state *state = tevent_req_data(
                req, struct cli_writeall_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        if (pwritten != NULL) {
                *pwritten = state->written;
        }
        return NT_STATUS_OK;
}
NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
                      const uint8_t *buf, off_t offset, size_t size,
                      size_t *pwritten)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        if (cli_has_async_calls(cli)) {
                /*
                 * Can't use sync call while an async call is in flight
                 */
                status = NT_STATUS_INVALID_PARAMETER;
                goto fail;
        }
        ev = event_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
        if (req == NULL) {
                goto fail;
        }
        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }
        status = cli_writeall_recv(req, pwritten);
 fail:
        TALLOC_FREE(frame);
        return status;
}
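/*
 * Illustrative call (a sketch; the "fnum" and "data" variables are
 * assumptions for the example): cli_writeall() keeps issuing write&x
 * requests until the whole buffer has been accepted by the server or an
 * error occurs.
 *
 *      const char *data = "hello world";
 *      size_t written;
 *      NTSTATUS status = cli_writeall(cli, fnum, 0, (const uint8_t *)data,
 *                                     0, strlen(data), &written);
 */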
struct cli_push_write_state {
        struct tevent_req *req;/* This is the main request! Not the subreq */
        uint32_t idx;
        off_t ofs;
        uint8_t *buf;
        size_t size;
};

struct cli_push_state {
        struct event_context *ev;
        struct cli_state *cli;
        uint16_t fnum;
        uint16_t mode;
        off_t start_offset;

        size_t (*source)(uint8_t *buf, size_t n, void *priv);
        void *priv;

        bool eof;

        size_t chunk_size;
        off_t next_offset;

        /*
         * Outstanding requests
         */
        uint32_t pending;
        uint16_t max_reqs;
        uint32_t num_reqs;
        struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
                                 struct cli_push_state *state,
                                 uint32_t idx)
{
        struct cli_push_write_state *substate;
        struct tevent_req *subreq;

        substate = talloc(state->reqs, struct cli_push_write_state);
        if (!substate) {
                return false;
        }
        substate->req = req;
        substate->idx = idx;
        substate->ofs = state->next_offset;
        substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
        if (!substate->buf) {
                talloc_free(substate);
                return false;
        }
        substate->size = state->source(substate->buf,
                                       state->chunk_size,
                                       state->priv);
        if (substate->size == 0) {
                state->eof = true;
                /* nothing to send */
                talloc_free(substate);
                return true;
        }

        subreq = cli_writeall_send(substate,
                                   state->ev, state->cli,
                                   state->fnum, state->mode,
                                   substate->buf,
                                   substate->ofs,
                                   substate->size);
        if (!subreq) {
                talloc_free(substate);
                return false;
        }
        tevent_req_set_callback(subreq, cli_push_written, substate);

        state->reqs[idx] = substate;
        state->pending += 1;
        state->next_offset += substate->size;

        return true;
}
struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                 struct cli_state *cli,
                                 uint16_t fnum, uint16_t mode,
                                 off_t start_offset, size_t window_size,
                                 size_t (*source)(uint8_t *buf, size_t n,
                                                  void *priv),
                                 void *priv)
{
        struct tevent_req *req;
        struct cli_push_state *state;
        uint32_t i;

        req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
        if (req == NULL) {
                return NULL;
        }
        state->cli = cli;
        state->ev = ev;
        state->fnum = fnum;
        state->start_offset = start_offset;
        state->mode = mode;
        state->source = source;
        state->priv = priv;
        state->eof = false;
        state->pending = 0;
        state->next_offset = start_offset;

        state->chunk_size = cli_write_max_bufsize(cli, mode, 14);

        state->max_reqs = cli_state_max_requests(cli);

        if (window_size == 0) {
                window_size = state->max_reqs * state->chunk_size;
        }
        state->num_reqs = window_size/state->chunk_size;
        if ((window_size % state->chunk_size) > 0) {
                state->num_reqs += 1;
        }
        state->num_reqs = MIN(state->num_reqs, state->max_reqs);
        state->num_reqs = MAX(state->num_reqs, 1);

        state->reqs = talloc_zero_array(state, struct cli_push_write_state *,
                                        state->num_reqs);
        if (state->reqs == NULL) {
                goto failed;
        }

        for (i=0; i<state->num_reqs; i++) {
                if (!cli_push_write_setup(req, state, i)) {
                        goto failed;
                }

                if (state->eof) {
                        break;
                }
        }

        if (state->pending == 0) {
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }

        return req;

 failed:
        tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
        return tevent_req_post(req, ev);
}
static void cli_push_written(struct tevent_req *subreq)
{
        struct cli_push_write_state *substate = tevent_req_callback_data(
                subreq, struct cli_push_write_state);
        struct tevent_req *req = substate->req;
        struct cli_push_state *state = tevent_req_data(
                req, struct cli_push_state);
        NTSTATUS status;
        uint32_t idx = substate->idx;

        state->reqs[idx] = NULL;
        state->pending -= 1;

        status = cli_writeall_recv(subreq, NULL);
        TALLOC_FREE(subreq);
        TALLOC_FREE(substate);
        if (tevent_req_nterror(req, status)) {
                return;
        }

        if (!state->eof) {
                if (!cli_push_write_setup(req, state, idx)) {
                        tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
                        return;
                }
        }

        if (state->pending == 0) {
                tevent_req_done(req);
                return;
        }
}
NTSTATUS cli_push_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
                  off_t start_offset, size_t window_size,
                  size_t (*source)(uint8_t *buf, size_t n, void *priv),
                  void *priv)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_OK;

        if (cli_has_async_calls(cli)) {
                /*
                 * Can't use sync call while an async call is in flight
                 */
                status = NT_STATUS_INVALID_PARAMETER;
                goto fail;
        }

        ev = event_context_init(frame);
        if (ev == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
                            window_size, source, priv);
        if (req == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }

        status = cli_push_recv(req);
 fail:
        TALLOC_FREE(frame);
        return status;
}
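/*
 * Illustrative use of cli_push (a sketch; the read-from-fd source and the
 * "fd" and "fnum" variables are assumptions, not part of this file): the
 * source callback is asked to fill "buf" with at most "n" bytes and signals
 * end of data by returning 0.
 *
 *      static size_t read_from_fd_source(uint8_t *buf, size_t n, void *priv)
 *      {
 *              int *fd = (int *)priv;
 *              ssize_t nread = read(*fd, buf, n);
 *
 *              return (nread > 0) ? (size_t)nread : 0;
 *      }
 *
 *      NTSTATUS status = cli_push(cli, fnum, 0, 0, 0,
 *                                 read_from_fd_source, &fd);
 */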
);