/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "async_smb.h"
#include "trans2.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	size_t data_offset = smb_size - 4;
	size_t wct = 12;

	size_t useable_space;

	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}

	if (cli_state_capabilities(cli) & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}

	data_offset += wct * sizeof(uint16_t);
	data_offset += 1; /* pad */

	useable_space = cli_state_available_size(cli, data_offset);

	return useable_space;
}

/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli,
				    uint16_t write_mode,
				    uint8_t wct)
{
	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli_state_capabilities(cli) & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	}

	if (cli->is_samba) {
		return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
	}

	if (((cli_state_capabilities(cli) & CAP_LARGE_WRITEX) == 0)
	    || client_is_signing_on(cli)
	    || strequal(cli->dev, "LPT1:")) {
		size_t data_offset = smb_size - 4;
		size_t useable_space;

		data_offset += wct * sizeof(uint16_t);
		data_offset += 1; /* pad */

		useable_space = cli_state_available_size(cli, data_offset);

		return useable_space;
	}

	return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}

struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if ((uint64_t)offset >> 32) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct += 2;
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
				     &num_bytes, &bytes);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	tevent_req_done(req);
}

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
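
/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * does not affect the build): how a caller is expected to consume
 * cli_read_andx_recv(). As the comment above warns, "rcvbuf" points into the
 * receive buffer owned by "req", so the data must be copied away before the
 * request is freed. "dest", "wanted" and "nread" are hypothetical caller
 * names; error handling is abbreviated.
 */
#if 0
static NTSTATUS example_read_andx_copy(struct tevent_req *req,
				       uint8_t *dest, size_t wanted,
				       size_t *nread)
{
	ssize_t received;
	uint8_t *rcvbuf;
	NTSTATUS status;

	status = cli_read_andx_recv(req, &received, &rcvbuf);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(req);
		return status;
	}

	/* Copy away before TALLOC_FREE(req) invalidates rcvbuf. */
	*nread = MIN((size_t)received, wanted);
	memcpy(dest, rcvbuf, *nread);

	TALLOC_FREE(req);
	return NT_STATUS_OK;
}
#endif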
struct cli_readall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);

static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_readall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
	return req;
}

static void cli_readall_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (received == 0) {
		/* EOF */
		tevent_req_done(req);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		tevent_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (tevent_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
}

static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

struct cli_pull_state {
	struct tevent_req *req;

	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	uint16_t max_reqs;
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	SMB_OFF_T pushed;
};

static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_default_print(req, mem_ctx);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);

/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 SMB_OFF_T size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->max_reqs = cli_state_max_requests(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, state->max_reqs);

	state->reqs = talloc_zero_array(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_readall_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

 failed:
	TALLOC_FREE(req);
	return NULL;
}

/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_readall_recv(subreq, &pull_subreq->received,
				  &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (tevent_req_nterror(state->req, status)) {
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}

NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	return status;
}
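
/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * does not affect the build): using cli_pull() with a sink callback that
 * appends each chunk to a local stdio FILE*. "fnum" is assumed to be an
 * already opened handle on the server, "fp" a writable local file, and the
 * 0x10000 window size is an arbitrary choice that allows several read&x
 * requests to be in flight; all of these names are hypothetical.
 */
#if 0
static NTSTATUS example_file_sink(char *buf, size_t n, void *priv)
{
	FILE *fp = (FILE *)priv;

	if (fwrite(buf, 1, n, fp) != n) {
		return NT_STATUS_DISK_FULL;
	}
	return NT_STATUS_OK;
}

static NTSTATUS example_download(struct cli_state *cli, uint16_t fnum,
				 SMB_OFF_T size, FILE *fp)
{
	SMB_OFF_T received;

	return cli_pull(cli, fnum, 0, size, 0x10000,
			example_file_sink, fp, &received);
}
#endif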
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
		  char *buf, off_t offset, size_t size,
		  size_t *nread)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (nread) {
		*nread = ret;
	}

	return NT_STATUS_OK;
}

/****************************************************************************
 write to a file using a SMBwrite and not bypassing 0 byte writes
****************************************************************************/

NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
		      off_t offset, size_t size1, size_t *ptotal)
{
	uint8_t *bytes;
	ssize_t total = 0;

	/*
	 * 3 bytes prefix
	 */

	bytes = talloc_array(talloc_tos(), uint8_t, 3);
	if (bytes == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	bytes[0] = 1;

	do {
		uint32_t usable_space = cli_state_available_size(cli, 48);
		size_t size = MIN(size1, usable_space);
		struct tevent_req *req;
		uint16_t vwv[5];
		uint16_t *ret_vwv;
		NTSTATUS status;

		SSVAL(vwv+0, 0, fnum);
		SSVAL(vwv+1, 0, size);
		SIVAL(vwv+2, 0, offset);
		SSVAL(vwv+4, 0, 0);

		bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
				       size+3);
		if (bytes == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		SSVAL(bytes, 1, size);
		memcpy(bytes + 3, buf + total, size);

		status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
				 size+3, bytes, &req, 1, NULL, &ret_vwv,
				 NULL, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			TALLOC_FREE(bytes);
			return status;
		}

		size = SVAL(ret_vwv+0, 0);
		TALLOC_FREE(req);
		if (size == 0) {
			break;
		}
		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	TALLOC_FREE(bytes);

	if (ptotal != NULL) {
		*ptotal = total;
	}
	return NT_STATUS_OK;
}
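
/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * does not affect the build): cli_smbwrite() uses the old-style SMBwrite
 * call and loops until everything was sent or the server acknowledged a
 * zero-byte write. A minimal caller, assuming "fnum" is an already opened,
 * writable handle; most new code uses cli_writeall()/cli_push() below, which
 * go through write&x.
 */
#if 0
static NTSTATUS example_smbwrite(struct cli_state *cli, uint16_t fnum)
{
	char data[] = "hello world";
	size_t written;

	return cli_smbwrite(cli, fnum, data, 0, sizeof(data)-1, &written);
}
#endif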
/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((cli_state_capabilities(cli) & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode, wct);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (size>>16));
	SSVAL(vwv+10, 0, size);

	SSVAL(vwv+11, 0,
	      cli_smb_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = discard_const_p(void, buf);
	state->iov[1].iov_len = size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct event_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	uint8_t *inbuf;
	NTSTATUS status;

	status = cli_smb_recv(subreq, state, &inbuf, 6, &wct, &vwv,
			      NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	state->written |= SVAL(vwv+4, 0)<<16;
	tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (pwritten != 0) {
		*pwritten = state->written;
	}
	return NT_STATUS_OK;
}

struct cli_writeall_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req,
				  size_t *pwritten)
{
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (pwritten != NULL) {
		*pwritten = state->written;
	}
	return NT_STATUS_OK;
}

NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		      const uint8_t *buf, off_t offset, size_t size,
		      size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}
	ev = event_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_writeall_recv(req, pwritten);
 fail:
	TALLOC_FREE(frame);
	return status;
}
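
/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * does not affect the build): synchronous cli_writeall() keeps issuing
 * write&x requests until the whole buffer has been accepted, so a caller
 * only has to check the returned status. "fnum", "data" and "len" are
 * hypothetical caller names; the handle is assumed to be open and writable.
 */
#if 0
static NTSTATUS example_writeall(struct cli_state *cli, uint16_t fnum,
				 const uint8_t *data, size_t len)
{
	size_t written;

	/* mode 0, write at offset 0; written == len on NT_STATUS_OK */
	return cli_writeall(cli, fnum, 0, data, 0, len, &written);
}
#endif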
struct cli_push_write_state {
	struct tevent_req *req; /* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint16_t max_reqs;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode, 14);

	state->max_reqs = cli_state_max_requests(cli);

	if (window_size == 0) {
		window_size = state->max_reqs * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, state->max_reqs);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = talloc_zero_array(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}

static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq, NULL);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	return status;
}
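
/*
 * Illustrative sketch (not part of the original file, kept under #if 0 so it
 * does not affect the build): feeding cli_push() from a local stdio FILE*.
 * The source callback fills at most "n" bytes per call and signals EOF by
 * returning 0. "fp" and "fnum" are hypothetical; a window_size of 0 lets
 * cli_push_send() pick max_reqs * chunk_size itself.
 */
#if 0
static size_t example_file_source(uint8_t *buf, size_t n, void *priv)
{
	FILE *fp = (FILE *)priv;

	return fread(buf, 1, n, fp);
}

static NTSTATUS example_upload(struct cli_state *cli, uint16_t fnum, FILE *fp)
{
	return cli_push(cli, fnum, 0, 0, 0, example_file_source, fp);
}
#endif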