/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "async_smb.h"
#include "trans2.h"
#include "../libcli/smb/smbXcli_base.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/

static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	uint8_t wct = 12;
	uint32_t min_space;
	uint32_t data_offset;
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) {
		useable_space = 0xFFFFFF - data_offset;

		if (smb1cli_conn_signing_is_active(cli->conn)) {
			return min_space;
		}

		if (smb1cli_conn_encryption_on(cli->conn)) {
			return min_space;
		}

		return useable_space;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) {
		/*
		 * Note: CAP_LARGE_READX also works with signing
		 */
		useable_space = 0x1FFFF - data_offset;

		useable_space = MIN(useable_space, UINT16_MAX);

		return useable_space;
	}

	return min_space;
}

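/*
 * Worked example (illustrative only, not used by the code): with wct == 12
 * the protocol overhead in front of the read data is
 * HDR_VWV + 12*2 + 2 + 1 = HDR_VWV + 27 bytes, so min_space is roughly the
 * negotiated buffer size minus that overhead, while the large-read paths
 * allow up to 0xFFFFFF (POSIX large read) or 0x1FFFF (CAP_LARGE_READX)
 * bytes minus the same overhead.
 */
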
/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/

static size_t cli_write_max_bufsize(struct cli_state *cli,
				    uint16_t write_mode,
				    uint8_t wct)
{
	uint32_t min_space;
	uint32_t data_offset;
	uint32_t useable_space = 0;

	data_offset = HDR_VWV;
	data_offset += wct * sizeof(uint16_t);
	data_offset += sizeof(uint16_t); /* byte count */
	data_offset += 1; /* pad */

	min_space = cli_state_available_size(cli, data_offset);

	if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) {
		useable_space = 0xFFFFFF - data_offset;
	} else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) {
		useable_space = 0x1FFFF - data_offset;
	} else {
		return min_space;
	}

	if (write_mode != 0) {
		return min_space;
	}

	if (smb1cli_conn_signing_is_active(cli->conn)) {
		return min_space;
	}

	if (smb1cli_conn_encryption_on(cli->conn)) {
		return min_space;
	}

	if (strequal(cli->dev, "LPT1:")) {
		return min_space;
	}

	return useable_space;
}

struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) {
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct = 12;
	} else {
		if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
			DEBUG(10, ("cli_read_andx_send got large offset where "
				   "the server does not support it\n"));
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct tevent_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
				     &num_bytes, &bytes);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received)
	    || ((state->received != 0) && (state->buf < bytes))) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	tevent_req_done(req);
}

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

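/*
 * Illustrative usage sketch (not part of the library): since rcvbuf points
 * into the SMB packet still owned by req, copy the data into a caller-owned
 * buffer (here the hypothetical "mybuf") before freeing req.
 *
 *	ssize_t nread;
 *	uint8_t *rcvbuf;
 *	status = cli_read_andx_recv(req, &nread, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		memcpy(mybuf, rcvbuf, nread);
 *	}
 *	TALLOC_FREE(req);
 */
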
struct cli_readall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	size_t size;
	size_t received;
	uint8_t *buf;
};

static void cli_readall_done(struct tevent_req *subreq);

static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
					   struct tevent_context *ev,
					   struct cli_state *cli,
					   uint16_t fnum,
					   off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_readall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->start_offset = offset;
	state->size = size;
	state->received = 0;
	state->buf = NULL;

	subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
	return req;
}

static void cli_readall_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	ssize_t received;
	uint8_t *buf;
	NTSTATUS status;

	status = cli_read_andx_recv(subreq, &received, &buf);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (received == 0) {
		/* EOF */
		tevent_req_done(req);
		return;
	}

	if ((state->received == 0) && (received == state->size)) {
		/* Ideal case: Got it all in one run */
		state->buf = buf;
		state->received += received;
		tevent_req_done(req);
		return;
	}

	/*
	 * We got a short read, issue a read for the
	 * rest. Unfortunately we have to allocate the buffer
	 * ourselves now, as our caller expects to receive a single
	 * buffer. cli_read_andx does it from the buffer received from
	 * the net, but with a short read we have to put it together
	 * from several reads.
	 */

	if (state->buf == NULL) {
		state->buf = talloc_array(state, uint8_t, state->size);
		if (tevent_req_nomem(state->buf, req)) {
			return;
		}
	}
	memcpy(state->buf + state->received, buf, received);
	state->received += received;

	TALLOC_FREE(subreq);

	if (state->received >= state->size) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
				    state->start_offset + state->received,
				    state->size - state->received);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_readall_done, req);
}

static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
				 uint8_t **rcvbuf)
{
	struct cli_readall_state *state = tevent_req_data(
		req, struct cli_readall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}

struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

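/*
 * Illustrative sketch of a "sink" callback (hypothetical, not part of this
 * file): cli_pull hands it consecutive chunks of file data in order, and any
 * non-OK return aborts the transfer. A sink appending to a local file
 * descriptor passed via priv could look like this:
 *
 *	static NTSTATUS fd_sink(char *buf, size_t n, void *priv)
 *	{
 *		int *pfd = (int *)priv;
 *
 *		if (write_data(*pfd, buf, n) != n) {
 *			return map_nt_error_from_unix(errno);
 *		}
 *		return NT_STATUS_OK;
 *	}
 *
 * cli_read_sink() further down is the in-tree example, copying into a
 * memory buffer.
 */
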
struct cli_pull_state {
	struct tevent_req *req;

	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	off_t size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	uint16_t max_reqs;
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	off_t requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	off_t pushed;
};

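/*
 * Illustrative only: with num_reqs == 3 and chunk_size == c, the reads for
 * offsets 0, c and 2*c occupy reqs[0..2]. If the reply for reqs[1] arrives
 * first, it is held back until reqs[0] (the current top_req) completes;
 * then both are pushed into "sink", top_req advances modulo num_reqs, and
 * new reads are issued into the freed slots.
 */
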
static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_default_print(req, mem_ctx);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);

/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 off_t size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);
	}

	state->max_reqs = smbXcli_conn_max_requests(cli->conn);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, state->max_reqs);

	state->reqs = talloc_zero_array(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		off_t size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_readall_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

 failed:
	TALLOC_FREE(req);
	return NULL;
}

/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_readall_recv(subreq, &pull_subreq->received,
				  &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (tevent_req_nterror(state->req, status)) {
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			off_t size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_readall_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}

NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, off_t size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, off_t *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	return status;
}

static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
		  char *buf, off_t offset, size_t size,
		  size_t *nread)
{
	NTSTATUS status;
	off_t ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	if (nread) {
		*nread = ret;
	}

	return NT_STATUS_OK;
}

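/*
 * Illustrative usage (hypothetical buffer): read up to 4096 bytes from the
 * start of an open file into a caller-provided buffer.
 *
 *	char data[4096];
 *	size_t nread;
 *	status = cli_read(cli, fnum, data, 0, sizeof(data), &nread);
 */
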
/****************************************************************************
 Write to a file using SMBwrite, not bypassing 0-byte writes.
****************************************************************************/

NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
		      off_t offset, size_t size1, size_t *ptotal)
{
	uint8_t *bytes;
	ssize_t total = 0;

	/*
	 * 3 bytes prefix
	 */

	bytes = talloc_array(talloc_tos(), uint8_t, 3);
	if (bytes == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	bytes[0] = 1;

	do {
		uint32_t usable_space = cli_state_available_size(cli, 48);
		size_t size = MIN(size1, usable_space);
		struct tevent_req *req;
		uint16_t vwv[5];
		uint16_t *ret_vwv;
		NTSTATUS status;

		SSVAL(vwv+0, 0, fnum);
		SSVAL(vwv+1, 0, size);
		SIVAL(vwv+2, 0, offset);
		SSVAL(vwv+4, 0, 0);

		bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
				       size+3);
		if (bytes == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		SSVAL(bytes, 1, size);
		memcpy(bytes + 3, buf + total, size);

		status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
				 size+3, bytes, &req, 1, NULL, &ret_vwv,
				 NULL, NULL);
		if (!NT_STATUS_IS_OK(status)) {
			TALLOC_FREE(bytes);
			return status;
		}

		size = SVAL(ret_vwv+0, 0);
		TALLOC_FREE(req);
		if (size == 0) {
			break;
		}
		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	TALLOC_FREE(bytes);

	if (ptotal != NULL) {
		*ptotal = total;
	}
	return NT_STATUS_OK;
}

/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct tevent_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode, wct);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	state->size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (state->size>>16));
	SSVAL(vwv+10, 0, state->size);

	SSVAL(vwv+11, 0,
	      smb1cli_req_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = discard_const_p(void, buf);
	state->iov[1].iov_len = state->size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = smb1cli_req_chain_submit(&subreq, 1);
	if (tevent_req_nterror(req, status)) {
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	NTSTATUS status;

	status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv,
			      NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_IS_ERR(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	if (state->size > UINT16_MAX) {
		/*
		 * It is important that we set the high bits
		 * only if we asked for a large write.
		 *
		 * OS/2 print shares get this wrong and may send
		 * invalid values.
		 *
		 * See bug #5326.
		 */
		state->written |= SVAL(vwv+4, 0)<<16;
	}
	tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (pwritten != 0) {
		*pwritten = state->written;
	}
	return NT_STATUS_OK;
}

struct cli_writeall_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req,
				  size_t *pwritten)
{
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	if (pwritten != NULL) {
		*pwritten = state->written;
	}
	return NT_STATUS_OK;
}

NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		      const uint8_t *buf, off_t offset, size_t size,
		      size_t *pwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}
	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_writeall_recv(req, pwritten);
 fail:
	TALLOC_FREE(frame);
	return status;
}

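/*
 * Illustrative usage (hypothetical data): write a complete buffer at
 * offset 0 with mode 0, letting cli_writeall retry internally until
 * everything has been sent.
 *
 *	const uint8_t msg[] = "hello";
 *	size_t written;
 *	status = cli_writeall(cli, fnum, 0, msg, 0, sizeof(msg), &written);
 */
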
struct cli_push_write_state {
	struct tevent_req *req; /* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct tevent_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint16_t max_reqs;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;
	size_t page_size = 1024;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode, 14);
	if (state->chunk_size > page_size) {
		state->chunk_size &= ~(page_size - 1);
	}

	state->max_reqs = smbXcli_conn_max_requests(cli->conn);

	if (window_size == 0) {
		window_size = state->max_reqs * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, state->max_reqs);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = talloc_zero_array(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}

static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq, NULL);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (smbXcli_conn_has_async_calls(cli->conn)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = samba_tevent_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	return status;
}

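/*
 * Illustrative sketch of a "source" callback for cli_push (hypothetical,
 * not part of this file): it is asked to fill "buf" with up to "n" bytes
 * and returns how many bytes it provided; returning 0 signals EOF. A
 * source reading from a local file descriptor passed via priv could look
 * like this:
 *
 *	static size_t fd_source(uint8_t *buf, size_t n, void *priv)
 *	{
 *		int *pfd = (int *)priv;
 *		ssize_t nread = read(*pfd, buf, n);
 *
 *		return (nread > 0) ? nread : 0;
 *	}
 */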