s3:libsmb: Fix bug 6606 -- short reads in smbclient were not handled
source3/libsmb/clireadwrite.c

/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}
	if (cli->capabilities & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}
	return (cli->max_xmit - (smb_size+32)) & ~1023;
}

/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli, uint16_t write_mode)
{
	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli->capabilities & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	}

	if (cli->is_samba) {
		return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
	}

	if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
	    || client_is_signing_on(cli)
	    || strequal(cli->dev, "LPT1:")) {

		/*
		 * Printer devices are restricted to max_xmit writesize in
		 * Vista and XPSP3 as are signing connections.
		 */

		return (cli->max_xmit - (smb_size+32)) & ~1023;
	}

	return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}
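
/*
 * A worked example of the fallback formula used above (assuming the usual
 * smb_size of 39 bytes): with max_xmit = 4356,
 * (4356 - (39+32)) & ~1023 = 4285 & ~1023 = 4096, i.e. the largest multiple
 * of 1024 that still fits into a single SMB alongside the header and the
 * word block.
 */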

struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	bool bigoffset = False;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if ((uint64_t)offset >> 32) {
		bigoffset = true;
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct += 2;
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, 12, &wct, &vwv, &num_bytes,
				     &bytes);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	inbuf = cli_smb_inbuf(subreq);
	state->buf = (uint8_t *)smb_base(inbuf) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}
	tevent_req_done(req);
}

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
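
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * received buffer is still owned by the request, so it has to be copied
 * out before the request is freed. "dest" is a hypothetical caller-owned
 * buffer of at least "nread" bytes.
 *
 *	ssize_t nread;
 *	uint8_t *rcvbuf;
 *	NTSTATUS status;
 *
 *	status = cli_read_andx_recv(req, &nread, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		memcpy(dest, rcvbuf, nread);
 *	}
 *	TALLOC_FREE(req);	rcvbuf is invalid from here on
 */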

struct cli_readall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);

static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_readall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
	return req;
}

static void cli_readall_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		tevent_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (tevent_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
}

static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

struct cli_pull_state {
	struct tevent_req *req;

	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	SMB_OFF_T pushed;
};

static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_print(mem_ctx, req);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);

/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 SMB_OFF_T size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_readall_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

failed:
	TALLOC_FREE(req);
	return NULL;
}

/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_readall_recv(subreq, &pull_subreq->received,
				  &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (!NT_STATUS_IS_OK(status)) {
			tevent_req_nterror(state->req, status);
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}

NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}

static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

ssize_t cli_read(struct cli_state *cli, uint16_t fnum, char *buf,
		 off_t offset, size_t size)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
		return -1;
	}
	return ret;
}
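
/*
 * Usage sketch (illustrative, not part of the original file): cli_pull with
 * a custom sink that writes each in-order chunk to a local file descriptor,
 * which is how a file download can avoid buffering the whole file in memory.
 * The names "write_fd_sink" and "fd" are hypothetical; error mapping is kept
 * minimal.
 *
 *	static NTSTATUS write_fd_sink(char *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		if (write(*fd, buf, n) != (ssize_t)n) {
 *			return map_nt_error_from_unix(errno);
 *		}
 *		return NT_STATUS_OK;
 *	}
 *
 *	SMB_OFF_T nread;
 *	NTSTATUS status = cli_pull(cli, fnum, 0, file_size, 0x10000,
 *				   write_fd_sink, &fd, &nread);
 */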

/****************************************************************************
 Issue a single SMBwrite and don't wait for a reply.
****************************************************************************/

static bool cli_issue_write(struct cli_state *cli,
			    uint16_t fnum,
			    off_t offset,
			    uint16 mode,
			    const char *buf,
			    size_t size,
			    int i)
{
	char *p;
	bool large_writex = false;
	/* We can only do direct writes if not signing and not encrypting. */
	bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);

	if (!direct_writes && size + 1 > cli->bufsize) {
		cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
		if (!cli->outbuf) {
			return False;
		}
		cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
		if (cli->inbuf == NULL) {
			SAFE_FREE(cli->outbuf);
			return False;
		}
		cli->bufsize = size + 1024;
	}

	memset(cli->outbuf,'\0',smb_size);
	memset(cli->inbuf,'\0',smb_size);

	if (cli->capabilities & CAP_LARGE_FILES) {
		large_writex = True;
	}

	if (large_writex) {
		cli_set_message(cli->outbuf,14,0,True);
	} else {
		cli_set_message(cli->outbuf,12,0,True);
	}

	SCVAL(cli->outbuf,smb_com,SMBwriteX);
	SSVAL(cli->outbuf,smb_tid,cli->cnum);
	cli_setup_packet(cli);

	SCVAL(cli->outbuf,smb_vwv0,0xFF);
	SSVAL(cli->outbuf,smb_vwv2,fnum);

	SIVAL(cli->outbuf,smb_vwv3,offset);
	SIVAL(cli->outbuf,smb_vwv5,0);
	SSVAL(cli->outbuf,smb_vwv7,mode);

	SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
	/*
	 * According to CIFS-TR-1p00, this following field should only
	 * be set if CAP_LARGE_WRITEX is set. We should check this
	 * locally. However, this check might already have been
	 * done by our callers.
	 */
	SSVAL(cli->outbuf,smb_vwv9,(size>>16));
	SSVAL(cli->outbuf,smb_vwv10,size);
	/* +1 is pad byte. */
	SSVAL(cli->outbuf,smb_vwv11,
	      smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);

	if (large_writex) {
		SIVAL(cli->outbuf,smb_vwv12,(((uint64_t)offset)>>32) & 0xffffffff);
	}

	p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
	*p++ = '\0'; /* pad byte. */
	if (!direct_writes) {
		memcpy(p, buf, size);
	}
	if (size > 0x1FFFF) {
		/* This is a POSIX 14 word large write. */
		set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
		_smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
	} else {
		cli_setup_bcc(cli, p+size);
	}

	SSVAL(cli->outbuf,smb_mid,cli->mid + i);

	show_msg(cli->outbuf);
	if (direct_writes) {
		/* For direct writes we now need to write the data
		 * directly out of buf. */
		return cli_send_smb_direct_writeX(cli, buf, size);
	} else {
		return cli_send_smb(cli);
	}
}

/****************************************************************************
 Write to a file.
 write_mode: 0x0001 disallow write caching
             0x0002 return bytes remaining
             0x0004 use raw named pipe protocol
             0x0008 start of message mode named pipe protocol
****************************************************************************/

ssize_t cli_write(struct cli_state *cli,
		  uint16_t fnum, uint16 write_mode,
		  const char *buf, off_t offset, size_t size)
{
	ssize_t bwritten = 0;
	unsigned int issued = 0;
	unsigned int received = 0;
	int mpx = 1;
	size_t writesize;
	int blocks;

	if(cli->max_mux > 1) {
		mpx = cli->max_mux-1;
	} else {
		mpx = 1;
	}

	writesize = cli_write_max_bufsize(cli, write_mode);

	blocks = (size + (writesize-1)) / writesize;

	while (received < blocks) {

		while ((issued - received < mpx) && (issued < blocks)) {
			ssize_t bsent = issued * writesize;
			ssize_t size1 = MIN(writesize, size - bsent);

			if (!cli_issue_write(cli, fnum, offset + bsent,
					write_mode,
					buf + bsent,
					size1, issued))
				return -1;
			issued++;
		}

		if (!cli_receive_smb(cli)) {
			return bwritten;
		}

		received++;

		if (cli_is_error(cli))
			break;

		bwritten += SVAL(cli->inbuf, smb_vwv2);
		if (writesize > 0xFFFF) {
			bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
		}
	}

	while (received < issued && cli_receive_smb(cli)) {
		received++;
	}

	return bwritten;
}
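
/*
 * Usage sketch (illustrative, not part of the original file): a plain
 * write-through call with no special write_mode bits set. "data" and "len"
 * are hypothetical caller variables; a short return value indicates the
 * server accepted fewer bytes than requested.
 *
 *	ssize_t n = cli_write(cli, fnum, 0, data, 0, len);
 *	if (n != (ssize_t)len) {
 *		DEBUG(0, ("cli_write failed: %s\n", cli_errstr(cli)));
 *	}
 */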

/****************************************************************************
 Write to a file using an SMBwrite, not bypassing 0-byte writes.
****************************************************************************/

ssize_t cli_smbwrite(struct cli_state *cli,
		     uint16_t fnum, char *buf, off_t offset, size_t size1)
{
	char *p;
	ssize_t total = 0;

	do {
		size_t size = MIN(size1, cli->max_xmit - 48);

		memset(cli->outbuf,'\0',smb_size);
		memset(cli->inbuf,'\0',smb_size);

		cli_set_message(cli->outbuf,5, 0,True);

		SCVAL(cli->outbuf,smb_com,SMBwrite);
		SSVAL(cli->outbuf,smb_tid,cli->cnum);
		cli_setup_packet(cli);

		SSVAL(cli->outbuf,smb_vwv0,fnum);
		SSVAL(cli->outbuf,smb_vwv1,size);
		SIVAL(cli->outbuf,smb_vwv2,offset);
		SSVAL(cli->outbuf,smb_vwv4,0);

		p = smb_buf(cli->outbuf);
		*p++ = 1;
		SSVAL(p, 0, size); p += 2;
		memcpy(p, buf + total, size); p += size;

		cli_setup_bcc(cli, p);

		if (!cli_send_smb(cli))
			return -1;

		if (!cli_receive_smb(cli))
			return -1;

		if (cli_is_error(cli))
			return -1;

		size = SVAL(cli->inbuf,smb_vwv0);
		if (size == 0)
			break;

		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	return total;
}

/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (size>>16));
	SSVAL(vwv+10, 0, size);

	SSVAL(vwv+11, 0,
	      cli_smb_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = CONST_DISCARD(void *, buf);
	state->iov[1].iov_len = size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct event_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	NTSTATUS status;

	status = cli_smb_recv(subreq, 6, &wct, &vwv, NULL, NULL);
	if (NT_STATUS_IS_ERR(status)) {
		TALLOC_FREE(subreq);
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	state->written |= SVAL(vwv+4, 0)<<16;
	tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}
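
/*
 * Usage sketch (illustrative, not part of the original file): a single
 * write&x driven synchronously with tevent_req_poll, mirroring the pattern
 * used by cli_pull/cli_push below. "frame", "ev", "data", "ofs" and "len"
 * are hypothetical caller variables. Note that a single write&x is capped
 * at cli_write_max_bufsize(), so "written" may be less than "len";
 * cli_writeall below is the helper that retries until everything is on the
 * wire.
 *
 *	struct tevent_req *req;
 *	size_t written;
 *	NTSTATUS status = NT_STATUS_NO_MEMORY;
 *
 *	req = cli_write_andx_send(frame, ev, cli, fnum, 0, data, ofs, len);
 *	if ((req != NULL) && tevent_req_poll(req, ev)) {
 *		status = cli_write_andx_recv(req, &written);
 *	}
 */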

struct cli_writeall_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

struct cli_push_write_state {
	struct tevent_req *req;/* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode);

	if (window_size == 0) {
		window_size = cli->max_mux * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}

static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}
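
/*
 * Usage sketch (illustrative, not part of the original file): cli_push with
 * a source callback that feeds data from a local file descriptor. Returning
 * 0 from the source signals end-of-file. The names "read_fd_source" and
 * "fd" are hypothetical; a real caller would also distinguish read errors
 * from EOF.
 *
 *	static size_t read_fd_source(uint8_t *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		ssize_t nread = read(*fd, buf, n);
 *		return (nread > 0) ? (size_t)nread : 0;
 *	}
 *
 *	NTSTATUS status = cli_push(cli, fnum, 0, 0, 0,
 *				   read_fd_source, &fd);
 */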