Revert "s3:libsmb: add an option to cli_push to let the caller provide the buffers"
[Samba/bb.git] / source3 / libsmb / clireadwrite.c
blobf2f447b4c95661f7d87190473afd6f4b200cc073
/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}
	if (cli->capabilities & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}
	return (cli->max_xmit - (smb_size+32)) & ~1023;
}
/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli, uint16_t write_mode)
{
	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli->capabilities & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	}

	if (cli->is_samba) {
		return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
	}

	if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
	    || client_is_signing_on(cli)
	    || strequal(cli->dev, "LPT1:")) {

		/*
		 * Printer devices are restricted to max_xmit writesize in
		 * Vista and XPSP3 as are signing connections.
		 */

		return (cli->max_xmit - (smb_size+32)) & ~1023;
	}

	return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}
/*
 * Send a read&x request
 */

struct async_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct cli_state *cli, int fnum,
				     off_t offset, size_t size)
{
	struct async_req *result;
	struct cli_request *req;
	bool bigoffset = False;

	uint16_t vwv[12];
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	SCVAL(vwv + 0, 0, 0xFF);
	SCVAL(vwv + 0, 1, 0);
	SSVAL(vwv + 1, 0, 0);
	SSVAL(vwv + 2, 0, fnum);
	SIVAL(vwv + 3, 0, offset);
	SSVAL(vwv + 5, 0, size);
	SSVAL(vwv + 6, 0, size);
	SSVAL(vwv + 7, 0, (size >> 16));
	SSVAL(vwv + 8, 0, 0);
	SSVAL(vwv + 9, 0, 0);

	if ((uint64_t)offset >> 32) {
		bigoffset = True;
		SIVAL(vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct += 2;
	}

	result = cli_request_send(mem_ctx, ev, cli, SMBreadX, 0, wct, vwv, 0,
				  0, NULL);
	if (result == NULL) {
		return NULL;
	}

	req = talloc_get_type_abort(result->private_data, struct cli_request);

	req->data.read.ofs = offset;
	req->data.read.size = size;
	req->data.read.received = 0;
	req->data.read.rcvbuf = NULL;

	return result;
}
/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct async_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_request *cli_req = talloc_get_type_abort(
		req->private_data, struct cli_request);
	uint8_t wct;
	uint16_t *vwv;
	uint16_t num_bytes;
	uint8_t *bytes;
	uint8_t *buf;
	NTSTATUS status;
	size_t size;

	if (async_req_is_nterror(req, &status)) {
		return status;
	}

	status = cli_pull_reply(req, &wct, &vwv, &num_bytes, &bytes);

	if (NT_STATUS_IS_ERR(status)) {
		return status;
	}

	if (wct < 12) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	size = SVAL(vwv + 5, 0);
	size |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (size > cli_req->data.read.size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		return NT_STATUS_UNEXPECTED_IO_ERROR;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((size < 0xffff) && (size > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	buf = (uint8_t *)smb_base(cli_req->inbuf) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len(cli_req->inbuf), SVAL(vwv+6, 0), size)
	    || (buf < bytes)) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	*rcvbuf = (uint8_t *)(smb_base(cli_req->inbuf) + SVAL(vwv + 6, 0));
	*received = size;
	return NT_STATUS_OK;
}
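
/*
 * Illustrative sketch (not part of the original file): one synchronous
 * read&x round trip built from cli_read_andx_send/_recv, following the
 * same event-loop pattern the sync wrappers in this file use. Variable
 * names (frame, ev, req, received, rcvbuf, databuf) are hypothetical and
 * error handling is abbreviated.
 *
 *	TALLOC_CTX *frame = talloc_stackframe();
 *	struct event_context *ev = event_context_init(frame);
 *	struct async_req *req;
 *	ssize_t received;
 *	uint8_t *rcvbuf;
 *	NTSTATUS status;
 *
 *	req = cli_read_andx_send(frame, ev, cli, fnum, 0, 1024);
 *	while (req->state < ASYNC_REQ_DONE) {
 *		event_loop_once(ev);
 *	}
 *	status = cli_read_andx_recv(req, &received, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		(rcvbuf points into req's receive buffer, so the data
 *		must be copied out before the request is freed)
 *		memcpy(databuf, rcvbuf, received);
 *	}
 *	TALLOC_FREE(frame);
 */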
/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is passed to the
 * callback function "sink" in the right order.
 */

struct cli_pull_state {
	struct async_req *req;

	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	int num_reqs;
	struct async_req **reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	SMB_OFF_T pushed;
};
static char *cli_pull_print(TALLOC_CTX *mem_ctx, struct async_req *req)
{
	struct cli_pull_state *state = talloc_get_type_abort(
		req->private_data, struct cli_pull_state);
	char *result;

	result = async_req_print(mem_ctx, req);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct async_req *read_req);
/*
 * Prepare an async pull request
 */

struct async_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				struct event_context *ev,
				struct cli_state *cli,
				uint16_t fnum, off_t start_offset,
				SMB_OFF_T size, size_t window_size,
				NTSTATUS (*sink)(char *buf, size_t n,
						 void *priv),
				void *priv)
{
	struct async_req *result;
	struct cli_pull_state *state;
	int i;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct cli_pull_state)) {
		return NULL;
	}
	result->print = cli_pull_print;
	state->req = result;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		if (!async_post_ntstatus(result, ev, NT_STATUS_OK)) {
			goto failed;
		}
		return result;
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct async_req *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		state->reqs[i] = cli_read_andx_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (state->reqs[i] == NULL) {
			goto failed;
		}

		state->reqs[i]->async.fn = cli_pull_read_done;
		state->reqs[i]->async.priv = result;

		state->requested += request_thistime;
	}
	return result;

 failed:
	TALLOC_FREE(result);
	return NULL;
}
/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct async_req *read_req)
{
	struct async_req *pull_req = talloc_get_type_abort(
		read_req->async.priv, struct async_req);
	struct cli_pull_state *state = talloc_get_type_abort(
		pull_req->private_data, struct cli_pull_state);
	struct cli_request *read_state = talloc_get_type_abort(
		read_req->private_data, struct cli_request);
	NTSTATUS status;

	status = cli_read_andx_recv(read_req, &read_state->data.read.received,
				    &read_state->data.read.rcvbuf);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req] != NULL) {
		struct cli_request *top_read;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		if (state->reqs[state->top_req]->state < ASYNC_REQ_DONE) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		top_read = talloc_get_type_abort(
			state->reqs[state->top_req]->private_data,
			struct cli_request);

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_read->data.read.received,
			   (int)state->pushed));

		status = state->sink((char *)top_read->data.read.rcvbuf,
				     top_read->data.read.received,
				     state->priv);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_nterror(state->req, status);
			return;
		}
		state->pushed += top_read->data.read.received;

		TALLOC_FREE(state->reqs[state->top_req]);

		if (state->requested < state->size) {
			struct async_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_read_andx_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (async_req_nomem(new_req, state->req)) {
				return;
			}

			new_req->async.fn = cli_pull_read_done;
			new_req->async.priv = pull_req;

			state->reqs[state->top_req] = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	async_req_done(pull_req);
}
NTSTATUS cli_pull_recv(struct async_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = talloc_get_type_abort(
		req->private_data, struct cli_pull_state);
	NTSTATUS status;

	if (async_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}
NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct async_req *req;
	NTSTATUS result = NT_STATUS_NO_MEMORY;

	if (cli->fd_event != NULL) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		TALLOC_FREE(frame);
		return NT_STATUS_INVALID_PARAMETER;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		goto nomem;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		goto nomem;
	}

	while (req->state < ASYNC_REQ_DONE) {
		event_loop_once(ev);
	}

	result = cli_pull_recv(req, received);
 nomem:
	TALLOC_FREE(frame);
	return result;
}
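
/*
 * Illustrative sketch (not part of the original file): using cli_pull() to
 * copy a remote file into a local file descriptor. The sink callback sees
 * the data strictly in file order, so it can simply append. The names
 * pull_to_fd and fd are hypothetical.
 *
 *	static NTSTATUS pull_to_fd(char *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		if (write(*fd, buf, n) != (ssize_t)n) {
 *			return map_nt_error_from_unix(errno);
 *		}
 *		return NT_STATUS_OK;
 *	}
 *
 *	SMB_OFF_T received;
 *	NTSTATUS status = cli_pull(cli, fnum, 0, file_size, 0x10000,
 *				   pull_to_fd, &fd, &received);
 */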
static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

ssize_t cli_read(struct cli_state *cli, int fnum, char *buf,
		 off_t offset, size_t size)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
		return -1;
	}
	return ret;
}
/****************************************************************************
 Issue a single SMBwrite and don't wait for a reply.
****************************************************************************/

static bool cli_issue_write(struct cli_state *cli,
			    int fnum,
			    off_t offset,
			    uint16 mode,
			    const char *buf,
			    size_t size,
			    int i)
{
	char *p;
	bool large_writex = false;
	/* We can only do direct writes if not signing and not encrypting. */
	bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);

	if (!direct_writes && size + 1 > cli->bufsize) {
		cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
		if (!cli->outbuf) {
			return False;
		}
		cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
		if (cli->inbuf == NULL) {
			SAFE_FREE(cli->outbuf);
			return False;
		}
		cli->bufsize = size + 1024;
	}

	memset(cli->outbuf,'\0',smb_size);
	memset(cli->inbuf,'\0',smb_size);

	if (cli->capabilities & CAP_LARGE_FILES) {
		large_writex = True;
	}

	if (large_writex) {
		cli_set_message(cli->outbuf,14,0,True);
	} else {
		cli_set_message(cli->outbuf,12,0,True);
	}

	SCVAL(cli->outbuf,smb_com,SMBwriteX);
	SSVAL(cli->outbuf,smb_tid,cli->cnum);
	cli_setup_packet(cli);

	SCVAL(cli->outbuf,smb_vwv0,0xFF);
	SSVAL(cli->outbuf,smb_vwv2,fnum);

	SIVAL(cli->outbuf,smb_vwv3,offset);
	SIVAL(cli->outbuf,smb_vwv5,0);
	SSVAL(cli->outbuf,smb_vwv7,mode);

	SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
	/*
	 * According to CIFS-TR-1p00, the following field should only
	 * be set if CAP_LARGE_WRITEX is set. We should check this
	 * locally. However, this check might already have been
	 * done by our callers.
	 */
	SSVAL(cli->outbuf,smb_vwv9,(size>>16));
	SSVAL(cli->outbuf,smb_vwv10,size);
	/* +1 is pad byte. */
	SSVAL(cli->outbuf,smb_vwv11,
	      smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);

	if (large_writex) {
		SIVAL(cli->outbuf,smb_vwv12,(((uint64_t)offset)>>32) & 0xffffffff);
	}

	p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
	*p++ = '\0'; /* pad byte. */
	if (!direct_writes) {
		memcpy(p, buf, size);
	}
	if (size > 0x1FFFF) {
		/* This is a POSIX 14 word large write. */
		set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
		_smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
	} else {
		cli_setup_bcc(cli, p+size);
	}

	SSVAL(cli->outbuf,smb_mid,cli->mid + i);

	show_msg(cli->outbuf);
	if (direct_writes) {
		/* For direct writes we now need to write the data
		 * directly out of buf. */
		return cli_send_smb_direct_writeX(cli, buf, size);
	} else {
		return cli_send_smb(cli);
	}
}
/****************************************************************************
  write to a file
  write_mode: 0x0001 disallow write caching
              0x0002 return bytes remaining
              0x0004 use raw named pipe protocol
              0x0008 start of message mode named pipe protocol
****************************************************************************/

ssize_t cli_write(struct cli_state *cli,
		  int fnum, uint16 write_mode,
		  const char *buf, off_t offset, size_t size)
{
	ssize_t bwritten = 0;
	unsigned int issued = 0;
	unsigned int received = 0;
	int mpx = 1;
	size_t writesize;
	int blocks;

	if(cli->max_mux > 1) {
		mpx = cli->max_mux-1;
	} else {
		mpx = 1;
	}

	writesize = cli_write_max_bufsize(cli, write_mode);

	blocks = (size + (writesize-1)) / writesize;

	while (received < blocks) {

		while ((issued - received < mpx) && (issued < blocks)) {
			ssize_t bsent = issued * writesize;
			ssize_t size1 = MIN(writesize, size - bsent);

			if (!cli_issue_write(cli, fnum, offset + bsent,
					write_mode,
					buf + bsent,
					size1, issued))
				return -1;
			issued++;
		}

		if (!cli_receive_smb(cli)) {
			return bwritten;
		}

		received++;

		if (cli_is_error(cli))
			break;

		bwritten += SVAL(cli->inbuf, smb_vwv2);
		if (writesize > 0xFFFF) {
			bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
		}
	}

	while (received < issued && cli_receive_smb(cli)) {
		received++;
	}

	return bwritten;
}
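
/*
 * Illustrative sketch (not part of the original file): typical cli_write()
 * calls. A plain file write passes write_mode 0; the flag bits documented
 * above can be OR'd together, e.g. 0x0008 for a start-of-message write on
 * a message-mode named pipe. fnum, pipe_fnum, data and len are
 * hypothetical.
 *
 *	ssize_t n;
 *
 *	n = cli_write(cli, fnum, 0, data, 0, len);
 *
 *	n = cli_write(cli, pipe_fnum, 0x0008, data, 0, len);
 */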
/****************************************************************************
  write to a file using SMBwrite, without skipping 0-byte writes
****************************************************************************/

ssize_t cli_smbwrite(struct cli_state *cli,
		     int fnum, char *buf, off_t offset, size_t size1)
{
	char *p;
	ssize_t total = 0;

	do {
		size_t size = MIN(size1, cli->max_xmit - 48);

		memset(cli->outbuf,'\0',smb_size);
		memset(cli->inbuf,'\0',smb_size);

		cli_set_message(cli->outbuf,5, 0,True);

		SCVAL(cli->outbuf,smb_com,SMBwrite);
		SSVAL(cli->outbuf,smb_tid,cli->cnum);
		cli_setup_packet(cli);

		SSVAL(cli->outbuf,smb_vwv0,fnum);
		SSVAL(cli->outbuf,smb_vwv1,size);
		SIVAL(cli->outbuf,smb_vwv2,offset);
		SSVAL(cli->outbuf,smb_vwv4,0);

		p = smb_buf(cli->outbuf);
		*p++ = 1;
		SSVAL(p, 0, size); p += 2;
		memcpy(p, buf + total, size); p += size;

		cli_setup_bcc(cli, p);

		if (!cli_send_smb(cli))
			return -1;

		if (!cli_receive_smb(cli))
			return -1;

		if (cli_is_error(cli))
			return -1;

		size = SVAL(cli->inbuf,smb_vwv0);
		if (size == 0)
			break;

		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	return total;
}
/*
 * Send a write&x request
 */

struct async_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      uint16_t mode, const uint8_t *buf,
				      off_t offset, size_t size)
{
	bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode);
	uint16_t vwv[14];

	size = MIN(size, max_write);

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (size>>16));
	SSVAL(vwv+10, 0, size);

	SSVAL(vwv+11, 0,
	      cli_wct_ofs(cli)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	return cli_request_send(mem_ctx, ev, cli, SMBwriteX, 0, wct, vwv,
				2, size, buf);
}
NTSTATUS cli_write_andx_recv(struct async_req *req, size_t *pwritten)
{
	uint8_t wct;
	uint16_t *vwv;
	uint16_t num_bytes;
	uint8_t *bytes;
	NTSTATUS status;
	size_t written;

	if (async_req_is_nterror(req, &status)) {
		return status;
	}

	status = cli_pull_reply(req, &wct, &vwv, &num_bytes, &bytes);

	if (NT_STATUS_IS_ERR(status)) {
		return status;
	}

	if (wct < 6) {
		return NT_STATUS_INVALID_NETWORK_RESPONSE;
	}

	written = SVAL(vwv+2, 0);
	written |= SVAL(vwv+4, 0)<<16;
	*pwritten = written;

	return NT_STATUS_OK;
}
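
/*
 * Illustrative sketch (not part of the original file): a single synchronous
 * write&x round trip built from cli_write_andx_send/_recv, following the
 * event-loop pattern of the sync wrappers in this file. Note that the send
 * function caps "size" at cli_write_max_bufsize(), so the number of bytes
 * actually written must be taken from the recv call. Variable names are
 * hypothetical.
 *
 *	TALLOC_CTX *frame = talloc_stackframe();
 *	struct event_context *ev = event_context_init(frame);
 *	struct async_req *req;
 *	size_t written;
 *	NTSTATUS status;
 *
 *	req = cli_write_andx_send(frame, ev, cli, fnum, 0, data, offset, len);
 *	while (req->state < ASYNC_REQ_DONE) {
 *		event_loop_once(ev);
 *	}
 *	status = cli_write_andx_recv(req, &written);
 *	TALLOC_FREE(frame);
 */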
struct cli_writeall_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct async_req *req);

static struct async_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   uint16_t mode,
					   const uint8_t *buf,
					   off_t offset, size_t size)
{
	struct async_req *result;
	struct async_req *subreq;
	struct cli_writeall_state *state;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct cli_writeall_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (subreq == NULL) {
		goto fail;
	}

	subreq->async.fn = cli_writeall_written;
	subreq->async.priv = result;
	return result;

 fail:
	TALLOC_FREE(result);
	return NULL;
}
static void cli_writeall_written(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct cli_writeall_state *state = talloc_get_type_abort(
		req->private_data, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		async_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		async_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (subreq == NULL) {
		async_req_nterror(req, NT_STATUS_NO_MEMORY);
		return;
	}

	subreq->async.fn = cli_writeall_written;
	subreq->async.priv = req;
}

static NTSTATUS cli_writeall_recv(struct async_req *req)
{
	return async_req_simple_recv_ntstatus(req);
}
struct cli_push_write_state {
	struct async_req *req;	/* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;

	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};
static void cli_push_written(struct async_req *req);

static bool cli_push_write_setup(struct async_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct async_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	subreq->async.fn = cli_push_written;
	subreq->async.priv = substate;

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}
struct async_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct cli_state *cli,
				uint16_t fnum, uint16_t mode,
				off_t start_offset, size_t window_size,
				size_t (*source)(uint8_t *buf, size_t n,
						 void *priv),
				void *priv)
{
	struct async_req *req;
	struct cli_push_state *state;
	uint32_t i;

	if (!async_req_setup(mem_ctx, &req, &state,
			     struct cli_push_state)) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode);

	if (window_size == 0) {
		window_size = cli->max_mux * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		if (!async_post_ntstatus(req, ev, NT_STATUS_OK)) {
			goto failed;
		}
		return req;
	}

	return req;

 failed:
	TALLOC_FREE(req);
	return NULL;
}
static void cli_push_written(struct async_req *subreq)
{
	struct cli_push_write_state *substate = talloc_get_type_abort(
		subreq->async.priv, struct cli_push_write_state);
	struct async_req *req = substate->req;
	struct cli_push_state *state = talloc_get_type_abort(
		req->private_data, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_nterror(req, status);
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			async_req_nomem(NULL, req);
			return;
		}
	}

	if (state->pending == 0) {
		async_req_done(req);
		return;
	}
}
NTSTATUS cli_push_recv(struct async_req *req)
{
	return async_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct async_req *req;
	NTSTATUS result = NT_STATUS_NO_MEMORY;

	if (cli->fd_event != NULL) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		TALLOC_FREE(frame);
		return NT_STATUS_INVALID_PARAMETER;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		goto nomem;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		goto nomem;
	}

	while (req->state < ASYNC_REQ_DONE) {
		event_loop_once(ev);
	}

	result = cli_push_recv(req);
 nomem:
	TALLOC_FREE(frame);
	return result;
}
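
/*
 * Illustrative sketch (not part of the original file): using cli_push() to
 * stream a local file descriptor to the server. The source callback fills
 * "buf" with at most "n" bytes and returns how many it provided; returning
 * 0 signals end-of-file. Passing window_size 0 selects the default window
 * of max_mux outstanding writes. The names push_from_fd and fd are
 * hypothetical.
 *
 *	static size_t push_from_fd(uint8_t *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		ssize_t nread = read(*fd, buf, n);
 *		return (nread > 0) ? nread : 0;
 *	}
 *
 *	NTSTATUS status = cli_push(cli, fnum, 0, 0, 0,
 *				   push_from_fd, &fd);
 */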