Fix bug #7791 - gvfsd-smb (Gnome vfs) fails to copy files from a SMB share using...
[Samba/vl.git] / source3 / libsmb / clireadwrite.c
/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "async_smb.h"
/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}

	if (cli->capabilities & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}

	return (cli->max_xmit - (smb_size+32)) & ~1023;
}
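
/*
 * Editor's worked example (not in the original source): for the fallback
 * case above, assuming smb_size is 39 and a typical max_xmit of 4356, the
 * expression evaluates to (4356 - (39 + 32)) & ~1023 = 4285 & ~1023 = 4096,
 * i.e. the read size is rounded down to the largest multiple of 1024 that
 * still fits in one max_xmit-sized buffer.
 */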

/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli, uint16_t write_mode)
{
	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli->capabilities & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	}

	if (cli->is_samba) {
		return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
	}

	if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
	    || client_is_signing_on(cli)
	    || strequal(cli->dev, "LPT1:")) {

		/*
		 * Printer devices are restricted to max_xmit writesize in
		 * Vista and XPSP3 as are signing connections.
		 */

		return (cli->max_xmit - (smb_size+32)) & ~1023;
	}

	return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}

struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if ((uint64_t)offset >> 32) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct += 2;
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
				     &num_bytes, &bytes);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	state->buf = (uint8_t *)smb_base(inbuf) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	tevent_req_done(req);
}

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
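
/*
 * Editor's illustrative sketch (not part of the original file): drive one
 * read&x round trip synchronously and copy the result out before the
 * request is freed, as the comment above cli_read_andx_recv() requires.
 * The event-context handling mirrors cli_pull() below; the caller must keep
 * size <= cli_read_max_bufsize(cli), and "out" is assumed to hold size bytes.
 */

static NTSTATUS cli_read_andx_once_example(struct cli_state *cli,
					   uint16_t fnum, off_t offset,
					   uint8_t *out, size_t size,
					   ssize_t *nread)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	uint8_t *rcvbuf;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = event_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_read_andx_send(frame, ev, cli, fnum, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_read_andx_recv(req, nread, &rcvbuf);
	if (NT_STATUS_IS_OK(status) && (*nread > 0)) {
		/* rcvbuf is talloced off req - copy before it goes away. */
		memcpy(out, rcvbuf, *nread);
	}
 fail:
	TALLOC_FREE(frame);
	return status;
}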

struct cli_readall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);

static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_readall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
	return req;
}

static void cli_readall_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (received == 0) {
		/* EOF */
		tevent_req_done(req);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		tevent_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (tevent_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
}

static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

struct cli_pull_state {
	struct tevent_req *req;

	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	SMB_OFF_T pushed;
};

static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_default_print(req, mem_ctx);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);

/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 SMB_OFF_T size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_readall_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

failed:
	TALLOC_FREE(req);
	return NULL;
}

/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_readall_recv(subreq, &pull_subreq->received,
				  &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (!NT_STATUS_IS_OK(status)) {
			tevent_req_nterror(state->req, status);
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}

NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}
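
/*
 * Editor's illustrative sketch (not part of the original file): a "sink"
 * callback that streams pulled data into a local file descriptor, showing
 * the parallel-read interface described above struct cli_pull_state. It
 * assumes write(2) is available via includes.h. cli_pull() invokes the sink
 * strictly in file order, so no offset bookkeeping is needed here. A caller
 * could use it as:
 *	cli_pull(cli, fnum, 0, filesize, 0x10000,
 *		 cli_pull_fd_sink_example, &fd, &received);
 */

static NTSTATUS cli_pull_fd_sink_example(char *buf, size_t n, void *priv)
{
	int *pfd = (int *)priv;

	while (n > 0) {
		ssize_t written = write(*pfd, buf, n);
		if (written == -1) {
			return map_nt_error_from_unix(errno);
		}
		buf += written;
		n -= written;
	}
	return NT_STATUS_OK;
}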

static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

ssize_t cli_read(struct cli_state *cli, uint16_t fnum, char *buf,
		 off_t offset, size_t size)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
		return -1;
	}
	return ret;
}

/****************************************************************************
 Issue a single SMBwrite and don't wait for a reply.
****************************************************************************/

static bool cli_issue_write(struct cli_state *cli,
			    uint16_t fnum,
			    off_t offset,
			    uint16 mode,
			    const char *buf,
			    size_t size)
{
	char *p;
	bool large_writex = false;
	/* We can only do direct writes if not signing and not encrypting. */
	bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);

	if (!direct_writes && size + 1 > cli->bufsize) {
		cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
		if (!cli->outbuf) {
			return False;
		}
		cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
		if (cli->inbuf == NULL) {
			SAFE_FREE(cli->outbuf);
			return False;
		}
		cli->bufsize = size + 1024;
	}

	memset(cli->outbuf,'\0',smb_size);
	memset(cli->inbuf,'\0',smb_size);

	if (cli->capabilities & CAP_LARGE_FILES) {
		large_writex = True;
	}

	if (large_writex) {
		cli_set_message(cli->outbuf,14,0,True);
	} else {
		cli_set_message(cli->outbuf,12,0,True);
	}

	SCVAL(cli->outbuf,smb_com,SMBwriteX);
	SSVAL(cli->outbuf,smb_tid,cli->cnum);
	cli_setup_packet(cli);

	SCVAL(cli->outbuf,smb_vwv0,0xFF);
	SSVAL(cli->outbuf,smb_vwv2,fnum);

	SIVAL(cli->outbuf,smb_vwv3,offset);
	SIVAL(cli->outbuf,smb_vwv5,0);
	SSVAL(cli->outbuf,smb_vwv7,mode);

	SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
	/*
	 * According to CIFS-TR-1p00, the following field should only
	 * be set if CAP_LARGE_WRITEX is set. We should check this
	 * locally. However, this check might already have been
	 * done by our callers.
	 */
	SSVAL(cli->outbuf,smb_vwv9,(size>>16));
	SSVAL(cli->outbuf,smb_vwv10,size);
	/* +1 is pad byte. */
	SSVAL(cli->outbuf,smb_vwv11,
	      smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);

	if (large_writex) {
		SIVAL(cli->outbuf,smb_vwv12,(((uint64_t)offset)>>32) & 0xffffffff);
	}

	p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
	*p++ = '\0'; /* pad byte. */
	if (!direct_writes) {
		memcpy(p, buf, size);
	}
	if (size > 0x1FFFF) {
		/* This is a POSIX 14 word large write. */
		set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
		_smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
	} else {
		cli_setup_bcc(cli, p+size);
	}

	show_msg(cli->outbuf);
	if (direct_writes) {
		/* For direct writes we now need to write the data
		 * directly out of buf. */
		return cli_send_smb_direct_writeX(cli, buf, size);
	} else {
		return cli_send_smb(cli);
	}
}

/****************************************************************************
  write to a file
  write_mode: 0x0001 disallow write caching
              0x0002 return bytes remaining
              0x0004 use raw named pipe protocol
              0x0008 start of message mode named pipe protocol
****************************************************************************/

ssize_t cli_write(struct cli_state *cli,
		  uint16_t fnum, uint16 write_mode,
		  const char *buf, off_t offset, size_t size)
{
	ssize_t bwritten = 0;
	unsigned int issued = 0;
	unsigned int received = 0;
	int mpx = 1;
	size_t writesize;
	int blocks;

	if(cli->max_mux > 1) {
		mpx = cli->max_mux-1;
	} else {
		mpx = 1;
	}

	writesize = cli_write_max_bufsize(cli, write_mode);

	blocks = (size + (writesize-1)) / writesize;

	while (received < blocks) {

		while ((issued - received < mpx) && (issued < blocks)) {
			ssize_t bsent = issued * writesize;
			ssize_t size1 = MIN(writesize, size - bsent);

			if (!cli_issue_write(cli, fnum, offset + bsent,
					write_mode,
					buf + bsent,
					size1))
				return -1;
			issued++;
		}

		if (!cli_receive_smb(cli)) {
			return bwritten;
		}

		received++;

		if (cli_is_error(cli))
			break;

		bwritten += SVAL(cli->inbuf, smb_vwv2);
		if (writesize > 0xFFFF) {
			bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
		}
	}

	while (received < issued && cli_receive_smb(cli)) {
		received++;
	}

	return bwritten;
}
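
/*
 * Editor's illustrative sketch (not part of the original file): push a
 * whole buffer through cli_write() with write caching disallowed
 * (write_mode bit 0x0001 from the comment above), checking that everything
 * made it to the server.
 */

static bool cli_write_through_example(struct cli_state *cli, uint16_t fnum,
				      const char *data, size_t len)
{
	ssize_t written = cli_write(cli, fnum, 0x0001, data, 0, len);

	return (written == (ssize_t)len);
}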

/****************************************************************************
  write to a file using an SMBwrite and not bypassing 0 byte writes
****************************************************************************/

ssize_t cli_smbwrite(struct cli_state *cli,
		     uint16_t fnum, char *buf, off_t offset, size_t size1)
{
	char *p;
	ssize_t total = 0;

	do {
		size_t size = MIN(size1, cli->max_xmit - 48);

		memset(cli->outbuf,'\0',smb_size);
		memset(cli->inbuf,'\0',smb_size);

		cli_set_message(cli->outbuf,5, 0,True);

		SCVAL(cli->outbuf,smb_com,SMBwrite);
		SSVAL(cli->outbuf,smb_tid,cli->cnum);
		cli_setup_packet(cli);

		SSVAL(cli->outbuf,smb_vwv0,fnum);
		SSVAL(cli->outbuf,smb_vwv1,size);
		SIVAL(cli->outbuf,smb_vwv2,offset);
		SSVAL(cli->outbuf,smb_vwv4,0);

		p = smb_buf(cli->outbuf);
		*p++ = 1;
		SSVAL(p, 0, size); p += 2;
		memcpy(p, buf + total, size); p += size;

		cli_setup_bcc(cli, p);

		if (!cli_send_smb(cli))
			return -1;

		if (!cli_receive_smb(cli))
			return -1;

		if (cli_is_error(cli))
			return -1;

		size = SVAL(cli->inbuf,smb_vwv0);
		if (size == 0)
			break;

		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	return total;
}

/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (size>>16));
	SSVAL(vwv+10, 0, size);

	SSVAL(vwv+11, 0,
	      cli_smb_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = CONST_DISCARD(void *, buf);
	state->iov[1].iov_len = size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct event_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	uint8_t *inbuf;
	NTSTATUS status;

	status = cli_smb_recv(subreq, state, &inbuf, 6, &wct, &vwv,
			      NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	state->written |= SVAL(vwv+4, 0)<<16;
	tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}

struct cli_writeall_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

struct cli_push_write_state {
	struct tevent_req *req; /* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode);

	if (window_size == 0) {
		window_size = cli->max_mux * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}

static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}
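
/*
 * Editor's illustrative sketch (not part of the original file): a "source"
 * callback for cli_push() that feeds data from an in-memory buffer. The
 * callback contract is inferred from cli_push_write_setup() above: copy up
 * to n bytes into buf, return the number copied, and return 0 to signal end
 * of data. A caller would then do something like:
 *	struct cli_push_mem_source_example mem = { data, len };
 *	cli_push(cli, fnum, 0, 0, 0, cli_push_mem_source_example_fn, &mem);
 */

struct cli_push_mem_source_example {
	const uint8_t *data;
	size_t remaining;
};

static size_t cli_push_mem_source_example_fn(uint8_t *buf, size_t n,
					     void *priv)
{
	struct cli_push_mem_source_example *mem =
		(struct cli_push_mem_source_example *)priv;
	size_t thistime = MIN(n, mem->remaining);

	memcpy(buf, mem->data, thistime);
	mem->data += thistime;
	mem->remaining -= thistime;
	return thistime;
}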