s3: Explicitly handle inbuf in cli_write_andx_done
source3/libsmb/clireadwrite.c

/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/
static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}
	if (cli->capabilities & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}
	return (cli->max_xmit - (smb_size+32)) & ~1023;
}

/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/
static size_t cli_write_max_bufsize(struct cli_state *cli, uint16_t write_mode)
{
	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli->capabilities & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	}

	if (cli->is_samba) {
		return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
	}

	if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
	    || client_is_signing_on(cli)
	    || strequal(cli->dev, "LPT1:")) {

		/*
		 * Printer devices are restricted to max_xmit writesize in
		 * Vista and XPSP3 as are signing connections.
		 */

		return (cli->max_xmit - (smb_size+32)) & ~1023;
	}

	return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}

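/*
 * Illustrative arithmetic (not from the original file): assuming a server
 * max_xmit of 16644 (a value commonly offered by Windows) and smb_size of
 * 39, the max_xmit fallback above yields (16644 - 71) & ~1023 = 16384,
 * i.e. the remaining payload space rounded down to a 1024-byte boundary.
 */
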
struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if ((uint64_t)offset >> 32) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct += 2;
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
				     &num_bytes, &bytes);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	state->buf = (uint8_t *)smb_base(inbuf) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}
	tevent_req_done(req);
}

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

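/*
 * Illustrative sketch (not part of the original file): a completion
 * callback that follows the ownership rule above by copying the data out
 * of the request before freeing it. "my_buf" and "my_read_done" are
 * hypothetical caller-side names.
 */
#if 0
static uint8_t my_buf[65536];	/* hypothetical caller-owned destination */

static void my_read_done(struct tevent_req *subreq)
{
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	if (NT_STATUS_IS_OK(status) && (received > 0)) {
		/* rcvbuf points into memory owned by the request, so
		 * copy it away while the request is still alive ... */
		memcpy(my_buf, rcvbuf, MIN((size_t)received, sizeof(my_buf)));
	}
	/* ... and only free the request afterwards. */
	TALLOC_FREE(subreq);
}
#endif
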
struct cli_readall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);

static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_readall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
	return req;
}

static void cli_readall_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (received == 0) {
		/* EOF */
		tevent_req_done(req);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		tevent_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (tevent_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
}

static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

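/*
 * Illustrative sketch (not part of the original file): a "sink" callback
 * that a cli_pull caller might provide to append each chunk to a local
 * file descriptor passed via "priv". The function name is hypothetical.
 */
#if 0
static NTSTATUS pull_to_fd_sink(char *buf, size_t n, void *priv)
{
	int *pfd = (int *)priv;
	ssize_t written;

	while (n > 0) {
		written = write(*pfd, buf, n);
		if (written == -1) {
			return map_nt_error_from_unix(errno);
		}
		buf += written;
		n -= written;
	}
	return NT_STATUS_OK;
}
#endif
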
struct cli_pull_state {
	struct tevent_req *req;

	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */

	SMB_OFF_T pushed;
};

static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_print(mem_ctx, req);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);

/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 SMB_OFF_T size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_readall_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

 failed:
	TALLOC_FREE(req);
	return NULL;
}

/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_readall_recv(subreq, &pull_subreq->received,
				  &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

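	/*
	 * Worked example (illustrative, not from the original code): with
	 * num_reqs == 3 and top_req == 0, a reply for reqs[2] alone cannot
	 * be pushed because reqs[0] is still in progress, so we return from
	 * the loop below. Once the reply for reqs[0] arrives, reqs[0] is
	 * pushed, a new read is issued into its slot if data remains, and
	 * top_req advances to 1, 2, ... pushing every already-finished
	 * request in order.
	 */
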
	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (!NT_STATUS_IS_OK(status)) {
			tevent_req_nterror(state->req, status);
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}

NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}

static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

ssize_t cli_read(struct cli_state *cli, uint16_t fnum, char *buf,
		 off_t offset, size_t size)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
		return -1;
	}
	return ret;
}

/****************************************************************************
 Issue a single SMBwrite and don't wait for a reply.
****************************************************************************/

static bool cli_issue_write(struct cli_state *cli,
			    uint16_t fnum,
			    off_t offset,
			    uint16 mode,
			    const char *buf,
			    size_t size,
			    int i)
{
	char *p;
	bool large_writex = false;
	/* We can only do direct writes if not signing and not encrypting. */
	bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);

	if (!direct_writes && size + 1 > cli->bufsize) {
		cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
		if (!cli->outbuf) {
			return False;
		}
		cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
		if (cli->inbuf == NULL) {
			SAFE_FREE(cli->outbuf);
			return False;
		}
		cli->bufsize = size + 1024;
	}

	memset(cli->outbuf,'\0',smb_size);
	memset(cli->inbuf,'\0',smb_size);

	if (cli->capabilities & CAP_LARGE_FILES) {
		large_writex = True;
	}

	if (large_writex) {
		cli_set_message(cli->outbuf,14,0,True);
	} else {
		cli_set_message(cli->outbuf,12,0,True);
	}

	SCVAL(cli->outbuf,smb_com,SMBwriteX);
	SSVAL(cli->outbuf,smb_tid,cli->cnum);
	cli_setup_packet(cli);

	SCVAL(cli->outbuf,smb_vwv0,0xFF);
	SSVAL(cli->outbuf,smb_vwv2,fnum);

	SIVAL(cli->outbuf,smb_vwv3,offset);
	SIVAL(cli->outbuf,smb_vwv5,0);
	SSVAL(cli->outbuf,smb_vwv7,mode);

	SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
	/*
	 * According to CIFS-TR-1p00, this following field should only
	 * be set if CAP_LARGE_WRITEX is set. We should check this
	 * locally. However, this check might already have been
	 * done by our callers.
	 */
	SSVAL(cli->outbuf,smb_vwv9,(size>>16));
	SSVAL(cli->outbuf,smb_vwv10,size);
	/* +1 is pad byte. */
	SSVAL(cli->outbuf,smb_vwv11,
	      smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);

	if (large_writex) {
		SIVAL(cli->outbuf,smb_vwv12,(((uint64_t)offset)>>32) & 0xffffffff);
	}

	p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
	*p++ = '\0'; /* pad byte. */
	if (!direct_writes) {
		memcpy(p, buf, size);
	}
	if (size > 0x1FFFF) {
		/* This is a POSIX 14 word large write. */
		set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
		_smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
	} else {
		cli_setup_bcc(cli, p+size);
	}

	SSVAL(cli->outbuf,smb_mid,cli->mid + i);

	show_msg(cli->outbuf);
	if (direct_writes) {
		/* For direct writes we now need to write the data
		 * directly out of buf. */
		return cli_send_smb_direct_writeX(cli, buf, size);
	} else {
		return cli_send_smb(cli);
	}
}

/****************************************************************************
  write to a file
  write_mode: 0x0001 disallow write cacheing
              0x0002 return bytes remaining
              0x0004 use raw named pipe protocol
              0x0008 start of message mode named pipe protocol
****************************************************************************/

ssize_t cli_write(struct cli_state *cli,
		  uint16_t fnum, uint16 write_mode,
		  const char *buf, off_t offset, size_t size)
{
	ssize_t bwritten = 0;
	unsigned int issued = 0;
	unsigned int received = 0;
	int mpx = 1;
	size_t writesize;
	int blocks;

	if(cli->max_mux > 1) {
		mpx = cli->max_mux-1;
	} else {
		mpx = 1;
	}

	writesize = cli_write_max_bufsize(cli, write_mode);

	blocks = (size + (writesize-1)) / writesize;

	while (received < blocks) {

		while ((issued - received < mpx) && (issued < blocks)) {
			ssize_t bsent = issued * writesize;
			ssize_t size1 = MIN(writesize, size - bsent);

			if (!cli_issue_write(cli, fnum, offset + bsent,
					write_mode,
					buf + bsent,
					size1, issued))
				return -1;
			issued++;
		}

		if (!cli_receive_smb(cli)) {
			return bwritten;
		}

		received++;

		if (cli_is_error(cli))
			break;

		bwritten += SVAL(cli->inbuf, smb_vwv2);
		if (writesize > 0xFFFF) {
			bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
		}
	}

	while (received < issued && cli_receive_smb(cli)) {
		received++;
	}

	return bwritten;
}

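/*
 * Illustrative call (not from the original file): a plain file write uses
 * write_mode 0; the flag bits documented above cli_write() can be OR-ed in
 * for named pipe writes, e.g. 0x0008 to mark the start of a message on a
 * message-mode pipe.
 *
 *	nwritten = cli_write(cli, fnum, 0, data, offset, len);
 */
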
/****************************************************************************
  write to a file using a SMBwrite and not bypassing 0 byte writes
****************************************************************************/

ssize_t cli_smbwrite(struct cli_state *cli,
		     uint16_t fnum, char *buf, off_t offset, size_t size1)
{
	char *p;
	ssize_t total = 0;

	do {
		size_t size = MIN(size1, cli->max_xmit - 48);

		memset(cli->outbuf,'\0',smb_size);
		memset(cli->inbuf,'\0',smb_size);

		cli_set_message(cli->outbuf,5, 0,True);

		SCVAL(cli->outbuf,smb_com,SMBwrite);
		SSVAL(cli->outbuf,smb_tid,cli->cnum);
		cli_setup_packet(cli);

		SSVAL(cli->outbuf,smb_vwv0,fnum);
		SSVAL(cli->outbuf,smb_vwv1,size);
		SIVAL(cli->outbuf,smb_vwv2,offset);
		SSVAL(cli->outbuf,smb_vwv4,0);

		p = smb_buf(cli->outbuf);
		*p++ = 1;
		SSVAL(p, 0, size); p += 2;
		memcpy(p, buf + total, size); p += size;

		cli_setup_bcc(cli, p);

		if (!cli_send_smb(cli))
			return -1;

		if (!cli_receive_smb(cli))
			return -1;

		if (cli_is_error(cli))
			return -1;

		size = SVAL(cli->inbuf,smb_vwv0);
		if (size == 0)
			break;

		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	return total;
}

/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (size>>16));
	SSVAL(vwv+10, 0, size);

	SSVAL(vwv+11, 0,
	      cli_smb_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = CONST_DISCARD(void *, buf);
	state->iov[1].iov_len = size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct event_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	uint8_t *inbuf;
	NTSTATUS status;

	status = cli_smb_recv(subreq, state, &inbuf, 6, &wct, &vwv,
			      NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	state->written |= SVAL(vwv+4, 0)<<16;
	tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}

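/*
 * Illustrative sketch (not part of the original file): driving a single
 * write&x request synchronously, in the same style as the cli_pull()/
 * cli_push() wrappers elsewhere in this file. The function name is
 * hypothetical; a real caller would also check cli_has_async_calls()
 * first, as cli_pull() does.
 */
#if 0
static NTSTATUS my_write_andx_sync(struct cli_state *cli, uint16_t fnum,
				   const uint8_t *buf, off_t offset,
				   size_t size, size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = event_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_write_andx_send(frame, ev, cli, fnum, 0, buf, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_write_andx_recv(req, pwritten);
 fail:
	TALLOC_FREE(frame);
	return status;
}
#endif
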
struct cli_writeall_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

struct cli_push_write_state {
	struct tevent_req *req;/* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode);

	if (window_size == 0) {
		window_size = cli->max_mux * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}

static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}

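/*
 * Illustrative sketch (not part of the original file): a "source" callback
 * feeding cli_push() from a caller-owned memory buffer passed via "priv".
 * The struct and function names are hypothetical.
 */
#if 0
struct push_mem_state {
	const uint8_t *data;
	size_t remaining;
};

static size_t push_mem_source(uint8_t *buf, size_t n, void *priv)
{
	struct push_mem_state *state = (struct push_mem_state *)priv;
	size_t thistime = MIN(n, state->remaining);

	memcpy(buf, state->data, thistime);
	state->data += thistime;
	state->remaining -= thistime;

	/* A return value of 0 signals end-of-file to the push engine. */
	return thistime;
}
#endif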