s3:libsmb: rewrite cli_pull* to use smb1cli_conn_req_possible()
[Samba.git] / source3 / libsmb / clireadwrite.c
1 /*
2 Unix SMB/CIFS implementation.
3 client file read/write routines
4 Copyright (C) Andrew Tridgell 1994-1998
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "libsmb/libsmb.h"
22 #include "../lib/util/tevent_ntstatus.h"
23 #include "async_smb.h"
24 #include "trans2.h"
25 #include "../libcli/smb/smbXcli_base.h"
27 /****************************************************************************
28 Calculate the recommended read buffer size
29 ****************************************************************************/
30 static size_t cli_read_max_bufsize(struct cli_state *cli)
32 uint8_t wct = 12;
33 uint32_t min_space;
34 uint32_t data_offset;
35 uint32_t useable_space = 0;
37 data_offset = HDR_VWV;
38 data_offset += wct * sizeof(uint16_t);
39 data_offset += sizeof(uint16_t); /* byte count */
40 data_offset += 1; /* pad */
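/*
 * A sketch of what data_offset accounts for, assuming the standard SMB1
 * reply layout: the fixed header up to the VWV area (HDR_VWV), wct
 * parameter words (12 * 2 = 24 bytes here), the 2-byte byte count field
 * and one pad byte.  The useable_space values below are the read sizes
 * that remain once this overhead is subtracted from the respective
 * protocol limit (0xFFFFFF for unix extension large reads, 0x1FFFF for
 * CAP_LARGE_READX).
 */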
42 min_space = cli_state_available_size(cli, data_offset);
44 if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) {
45 useable_space = 0xFFFFFF - data_offset;
47 if (smb1cli_conn_signing_is_active(cli->conn)) {
48 return min_space;
51 if (smb1cli_conn_encryption_on(cli->conn)) {
52 return min_space;
55 return useable_space;
56 } else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) {
58 * Note: CAP_LARGE_READX also works with signing
60 useable_space = 0x1FFFF - data_offset;
62 useable_space = MIN(useable_space, UINT16_MAX);
64 return useable_space;
67 return min_space;
70 /****************************************************************************
71 Calculate the recommended write buffer size
72 ****************************************************************************/
73 static size_t cli_write_max_bufsize(struct cli_state *cli,
74 uint16_t write_mode,
75 uint8_t wct)
77 uint32_t min_space;
78 uint32_t data_offset;
79 uint32_t useable_space = 0;
81 data_offset = HDR_VWV;
82 data_offset += wct * sizeof(uint16_t);
83 data_offset += sizeof(uint16_t); /* byte count */
84 data_offset += 1; /* pad */
86 min_space = cli_state_available_size(cli, data_offset);
88 if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) {
89 useable_space = 0xFFFFFF - data_offset;
90 } else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) {
91 useable_space = 0x1FFFF - data_offset;
92 } else {
93 return min_space;
96 if (write_mode != 0) {
97 return min_space;
100 if (smb1cli_conn_signing_is_active(cli->conn)) {
101 return min_space;
104 if (smb1cli_conn_encryption_on(cli->conn)) {
105 return min_space;
108 if (strequal(cli->dev, "LPT1:")) {
109 return min_space;
112 return useable_space;
115 struct cli_read_andx_state {
116 size_t size;
117 uint16_t vwv[12];
118 NTSTATUS status;
119 size_t received;
120 uint8_t *buf;
123 static void cli_read_andx_done(struct tevent_req *subreq);
125 struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
126 struct tevent_context *ev,
127 struct cli_state *cli, uint16_t fnum,
128 off_t offset, size_t size,
129 struct tevent_req **psmbreq)
131 struct tevent_req *req, *subreq;
132 struct cli_read_andx_state *state;
133 uint8_t wct = 10;
135 req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
136 if (req == NULL) {
137 return NULL;
139 state->size = size;
141 SCVAL(state->vwv + 0, 0, 0xFF);
142 SCVAL(state->vwv + 0, 1, 0);
143 SSVAL(state->vwv + 1, 0, 0);
144 SSVAL(state->vwv + 2, 0, fnum);
145 SIVAL(state->vwv + 3, 0, offset);
146 SSVAL(state->vwv + 5, 0, size);
147 SSVAL(state->vwv + 6, 0, size);
148 SSVAL(state->vwv + 7, 0, (size >> 16));
149 SSVAL(state->vwv + 8, 0, 0);
150 SSVAL(state->vwv + 9, 0, 0);
152 if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) {
153 SIVAL(state->vwv + 10, 0,
154 (((uint64_t)offset)>>32) & 0xffffffff);
155 wct = 12;
156 } else {
157 if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
158 DEBUG(10, ("cli_read_andx_send got large offset where "
159 "the server does not support it\n"));
160 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
161 return tevent_req_post(req, ev);
165 subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
166 state->vwv, 0, NULL);
167 if (subreq == NULL) {
168 TALLOC_FREE(req);
169 return NULL;
171 tevent_req_set_callback(subreq, cli_read_andx_done, req);
172 *psmbreq = subreq;
173 return req;
176 struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
177 struct tevent_context *ev,
178 struct cli_state *cli, uint16_t fnum,
179 off_t offset, size_t size)
181 struct tevent_req *req, *subreq;
182 NTSTATUS status;
184 req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
185 &subreq);
186 if (req == NULL) {
187 return NULL;
190 status = smb1cli_req_chain_submit(&subreq, 1);
191 if (tevent_req_nterror(req, status)) {
192 return tevent_req_post(req, ev);
194 return req;
197 static void cli_read_andx_done(struct tevent_req *subreq)
199 struct tevent_req *req = tevent_req_callback_data(
200 subreq, struct tevent_req);
201 struct cli_read_andx_state *state = tevent_req_data(
202 req, struct cli_read_andx_state);
203 uint8_t *inbuf;
204 uint8_t wct;
205 uint16_t *vwv;
206 uint32_t num_bytes;
207 uint8_t *bytes;
209 state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
210 &num_bytes, &bytes);
211 TALLOC_FREE(subreq);
212 if (NT_STATUS_IS_ERR(state->status)) {
213 tevent_req_nterror(req, state->status);
214 return;
217 /* size is the number of bytes the server returned.
218 * Might be zero. */
219 state->received = SVAL(vwv + 5, 0);
220 state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);
222 if (state->received > state->size) {
223 DEBUG(5,("server returned more than we wanted!\n"));
224 tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
225 return;
229 * bcc field must be valid for small reads, for large reads the 16-bit
230 * bcc field can't be correct.
233 if ((state->received < 0xffff) && (state->received > num_bytes)) {
234 DEBUG(5, ("server announced more bytes than sent\n"));
235 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
236 return;
239 state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0);
241 if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received)
242 || ((state->received != 0) && (state->buf < bytes))) {
243 DEBUG(5, ("server returned invalid read&x data offset\n"));
244 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
245 return;
247 tevent_req_done(req);
251 * Pull the data out of a finished async read_and_x request. rcvbuf is
252 * talloced from the request, so better make sure that you copy it away before
253 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
254 * talloc_move it!
257 NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
258 uint8_t **rcvbuf)
260 struct cli_read_andx_state *state = tevent_req_data(
261 req, struct cli_read_andx_state);
262 NTSTATUS status;
264 if (tevent_req_is_nterror(req, &status)) {
265 return status;
267 *received = state->received;
268 *rcvbuf = state->buf;
269 return NT_STATUS_OK;
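/*
 * A minimal usage sketch for cli_read_andx_recv() (illustrative only; the
 * destination buffer "dest" is hypothetical):
 *
 *	ssize_t received;
 *	uint8_t *rcvbuf;
 *	NTSTATUS status;
 *
 *	status = cli_read_andx_recv(req, &received, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		memcpy(dest, rcvbuf, received);
 *	}
 *	TALLOC_FREE(req);
 *
 * The memcpy() has to happen before TALLOC_FREE(req), because rcvbuf
 * points into memory owned by the request.
 */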
272 struct cli_readall_state {
273 struct tevent_context *ev;
274 struct cli_state *cli;
275 uint16_t fnum;
276 off_t start_offset;
277 size_t size;
278 size_t received;
279 uint8_t *buf;
282 static void cli_readall_done(struct tevent_req *subreq);
284 static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
285 struct tevent_context *ev,
286 struct cli_state *cli,
287 uint16_t fnum,
288 off_t offset, size_t size)
290 struct tevent_req *req, *subreq;
291 struct cli_readall_state *state;
293 req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
294 if (req == NULL) {
295 return NULL;
297 state->ev = ev;
298 state->cli = cli;
299 state->fnum = fnum;
300 state->start_offset = offset;
301 state->size = size;
302 state->received = 0;
303 state->buf = NULL;
305 subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
306 if (tevent_req_nomem(subreq, req)) {
307 return tevent_req_post(req, ev);
309 tevent_req_set_callback(subreq, cli_readall_done, req);
310 return req;
313 static void cli_readall_done(struct tevent_req *subreq)
315 struct tevent_req *req = tevent_req_callback_data(
316 subreq, struct tevent_req);
317 struct cli_readall_state *state = tevent_req_data(
318 req, struct cli_readall_state);
319 ssize_t received;
320 uint8_t *buf;
321 NTSTATUS status;
323 status = cli_read_andx_recv(subreq, &received, &buf);
324 if (tevent_req_nterror(req, status)) {
325 return;
328 if (received == 0) {
329 /* EOF */
330 tevent_req_done(req);
331 return;
334 if ((state->received == 0) && (received == state->size)) {
335 /* Ideal case: Got it all in one run */
336 state->buf = buf;
337 state->received += received;
338 tevent_req_done(req);
339 return;
343 * We got a short read, issue a read for the
344 * rest. Unfortunately we have to allocate the buffer
345 * ourselves now, as our caller expects to receive a single
346 * buffer. cli_read_andx does it from the buffer received from
347 * the net, but with a short read we have to put it together
348 * from several reads.
351 if (state->buf == NULL) {
352 state->buf = talloc_array(state, uint8_t, state->size);
353 if (tevent_req_nomem(state->buf, req)) {
354 return;
357 memcpy(state->buf + state->received, buf, received);
358 state->received += received;
360 TALLOC_FREE(subreq);
362 if (state->received >= state->size) {
363 tevent_req_done(req);
364 return;
367 subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
368 state->start_offset + state->received,
369 state->size - state->received);
370 if (tevent_req_nomem(subreq, req)) {
371 return;
373 tevent_req_set_callback(subreq, cli_readall_done, req);
376 static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
377 uint8_t **rcvbuf)
379 struct cli_readall_state *state = tevent_req_data(
380 req, struct cli_readall_state);
381 NTSTATUS status;
383 if (tevent_req_is_nterror(req, &status)) {
384 return status;
386 *received = state->received;
387 *rcvbuf = state->buf;
388 return NT_STATUS_OK;
391 struct cli_pull_chunk;
393 struct cli_pull_state {
394 struct tevent_context *ev;
395 struct cli_state *cli;
396 uint16_t fnum;
397 off_t start_offset;
398 off_t size;
400 NTSTATUS (*sink)(char *buf, size_t n, void *priv);
401 void *priv;
403 size_t chunk_size;
404 off_t next_offset;
405 off_t remaining;
408 * How many bytes did we push into "sink"?
410 off_t pushed;
413 * Outstanding requests
415 * The maximum is 256:
416 * - which would be a window of 256 MByte
417 * for SMB2 with multi-credit
418 * or smb1 unix extensions.
420 uint16_t max_chunks;
421 uint16_t num_chunks;
422 uint16_t num_waiting;
423 struct cli_pull_chunk *chunks;
426 struct cli_pull_chunk {
427 struct cli_pull_chunk *prev, *next;
428 struct tevent_req *req;/* This is the main request! Not the subreq */
429 struct tevent_req *subreq;
430 off_t ofs;
431 uint8_t *buf;
432 size_t total_size;
433 size_t tmp_size;
434 bool done;
437 static void cli_pull_setup_chunks(struct tevent_req *req);
438 static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk);
439 static void cli_pull_chunk_done(struct tevent_req *subreq);
442 * Parallel read support.
444 * cli_pull sends as many read&x requests as the server would allow via
445 * max_mux at a time. When replies flow back in, the data is written into
446 * the callback function "sink" in the right order.
449 struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
450 struct tevent_context *ev,
451 struct cli_state *cli,
452 uint16_t fnum, off_t start_offset,
453 off_t size, size_t window_size,
454 NTSTATUS (*sink)(char *buf, size_t n,
455 void *priv),
456 void *priv)
458 struct tevent_req *req;
459 struct cli_pull_state *state;
460 size_t page_size = 1024;
461 uint64_t tmp64;
463 req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
464 if (req == NULL) {
465 return NULL;
467 state->cli = cli;
468 state->ev = ev;
469 state->fnum = fnum;
470 state->start_offset = start_offset;
471 state->size = size;
472 state->sink = sink;
473 state->priv = priv;
474 state->next_offset = start_offset;
475 state->remaining = size;
477 if (size == 0) {
478 tevent_req_done(req);
479 return tevent_req_post(req, ev);
482 state->chunk_size = cli_read_max_bufsize(cli);
483 if (state->chunk_size > page_size) {
484 state->chunk_size &= ~(page_size - 1);
487 if (window_size == 0) {
489 * We use 16 MByte as the default window size.
491 window_size = 16 * 1024 * 1024;
494 tmp64 = window_size/state->chunk_size;
495 if ((window_size % state->chunk_size) > 0) {
496 tmp64 += 1;
498 tmp64 = MAX(tmp64, 1);
499 tmp64 = MIN(tmp64, 256);
500 state->max_chunks = tmp64;
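/*
 * In other words: max_chunks is the ceiling of window_size / chunk_size,
 * clamped to the range [1, 256].  With the default 16 MByte window and
 * chunks of roughly 64 KByte (CAP_LARGE_READX) this ends up clamped at the
 * 256-chunk maximum, while a single huge chunk (unix extension large
 * reads) still leaves at least one chunk in flight.
 */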
503 * We defer the callback because of the complex
504 * substate/subfunction logic
506 tevent_req_defer_callback(req, ev);
508 cli_pull_setup_chunks(req);
509 if (!tevent_req_is_in_progress(req)) {
510 return tevent_req_post(req, ev);
513 return req;
516 static void cli_pull_setup_chunks(struct tevent_req *req)
518 struct cli_pull_state *state =
519 tevent_req_data(req,
520 struct cli_pull_state);
521 struct cli_pull_chunk *chunk, *next = NULL;
522 size_t i;
524 for (chunk = state->chunks; chunk; chunk = next) {
526 * Note that chunk might be removed from this call.
528 next = chunk->next;
529 cli_pull_chunk_ship(chunk);
530 if (!tevent_req_is_in_progress(req)) {
531 return;
535 for (i = state->num_chunks; i < state->max_chunks; i++) {
537 if (state->num_waiting > 0) {
538 return;
541 if (state->remaining == 0) {
542 break;
545 chunk = talloc_zero(state, struct cli_pull_chunk);
546 if (tevent_req_nomem(chunk, req)) {
547 return;
549 chunk->req = req;
550 chunk->ofs = state->next_offset;
551 chunk->total_size = MIN(state->remaining, state->chunk_size);
552 state->next_offset += chunk->total_size;
553 state->remaining -= chunk->total_size;
555 DLIST_ADD_END(state->chunks, chunk, NULL);
556 state->num_chunks++;
557 state->num_waiting++;
559 cli_pull_chunk_ship(chunk);
560 if (!tevent_req_is_in_progress(req)) {
561 return;
565 if (state->remaining > 0) {
566 return;
569 if (state->num_chunks > 0) {
570 return;
573 tevent_req_done(req);
576 static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk)
578 struct tevent_req *req = chunk->req;
579 struct cli_pull_state *state =
580 tevent_req_data(req,
581 struct cli_pull_state);
582 bool ok;
583 off_t ofs;
584 size_t size;
586 if (chunk->done) {
587 NTSTATUS status;
589 if (chunk != state->chunks) {
591 * this chunk is not the
592 * first one in the list,
594 * which means we should not
595 * push it into the sink yet.
597 return;
600 if (chunk->tmp_size == 0) {
602 * we got a short read, we're done
604 tevent_req_done(req);
605 return;
608 status = state->sink((char *)chunk->buf,
609 chunk->tmp_size,
610 state->priv);
611 if (tevent_req_nterror(req, status)) {
612 return;
614 state->pushed += chunk->tmp_size;
616 if (chunk->tmp_size < chunk->total_size) {
618 * we got a short read, we're done
620 tevent_req_done(req);
621 return;
624 DLIST_REMOVE(state->chunks, chunk);
625 SMB_ASSERT(state->num_chunks > 0);
626 state->num_chunks--;
627 TALLOC_FREE(chunk);
629 return;
632 if (chunk->subreq != NULL) {
633 return;
636 SMB_ASSERT(state->num_waiting > 0);
638 ofs = chunk->ofs + chunk->tmp_size;
639 size = chunk->total_size - chunk->tmp_size;
641 ok = smb1cli_conn_req_possible(state->cli->conn);
642 if (!ok) {
643 return;
646 chunk->subreq = cli_read_andx_send(chunk,
647 state->ev,
648 state->cli,
649 state->fnum,
650 ofs,
651 size);
652 if (tevent_req_nomem(chunk->subreq, req)) {
653 return;
655 tevent_req_set_callback(chunk->subreq,
656 cli_pull_chunk_done,
657 chunk);
659 state->num_waiting--;
660 return;
663 static void cli_pull_chunk_done(struct tevent_req *subreq)
665 struct cli_pull_chunk *chunk =
666 tevent_req_callback_data(subreq,
667 struct cli_pull_chunk);
668 struct tevent_req *req = chunk->req;
669 struct cli_pull_state *state =
670 tevent_req_data(req,
671 struct cli_pull_state);
672 NTSTATUS status;
673 size_t expected = chunk->total_size - chunk->tmp_size;
674 ssize_t received;
675 uint8_t *buf = NULL;
677 chunk->subreq = NULL;
679 status = cli_read_andx_recv(subreq, &received, &buf);
680 if (NT_STATUS_EQUAL(status, NT_STATUS_END_OF_FILE)) {
681 received = 0;
682 status = NT_STATUS_OK;
684 if (tevent_req_nterror(req, status)) {
685 return;
688 if (received > expected) {
689 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
690 return;
693 if (received == 0) {
695 * We got EOF, we're done
697 chunk->done = true;
698 cli_pull_setup_chunks(req);
699 return;
702 if (received == chunk->total_size) {
704 * We got it in the first run.
706 * We don't call TALLOC_FREE(subreq)
707 * here and keep the returned buffer.
709 chunk->buf = buf;
710 } else if (chunk->buf == NULL) {
711 chunk->buf = talloc_array(chunk, uint8_t, chunk->total_size);
712 if (tevent_req_nomem(chunk->buf, req)) {
713 return;
717 if (received != chunk->total_size) {
718 uint8_t *p = chunk->buf + chunk->tmp_size;
719 memcpy(p, buf, received);
720 TALLOC_FREE(subreq);
723 chunk->tmp_size += received;
725 if (chunk->tmp_size == chunk->total_size) {
726 chunk->done = true;
727 } else {
728 state->num_waiting++;
731 cli_pull_setup_chunks(req);
734 NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received)
736 struct cli_pull_state *state = tevent_req_data(
737 req, struct cli_pull_state);
738 NTSTATUS status;
740 if (tevent_req_is_nterror(req, &status)) {
741 tevent_req_received(req);
742 return status;
744 *received = state->pushed;
745 tevent_req_received(req);
746 return NT_STATUS_OK;
749 NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
750 off_t start_offset, off_t size, size_t window_size,
751 NTSTATUS (*sink)(char *buf, size_t n, void *priv),
752 void *priv, off_t *received)
754 TALLOC_CTX *frame = talloc_stackframe();
755 struct tevent_context *ev;
756 struct tevent_req *req;
757 NTSTATUS status = NT_STATUS_OK;
759 if (smbXcli_conn_has_async_calls(cli->conn)) {
761 * Can't use sync call while an async call is in flight
763 status = NT_STATUS_INVALID_PARAMETER;
764 goto fail;
767 ev = samba_tevent_context_init(frame);
768 if (ev == NULL) {
769 status = NT_STATUS_NO_MEMORY;
770 goto fail;
773 req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
774 window_size, sink, priv);
775 if (req == NULL) {
776 status = NT_STATUS_NO_MEMORY;
777 goto fail;
780 if (!tevent_req_poll(req, ev)) {
781 status = map_nt_error_from_unix(errno);
782 goto fail;
785 status = cli_pull_recv(req, received);
786 fail:
787 TALLOC_FREE(frame);
788 return status;
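/*
 * cli_pull() hands the data to the "sink" callback in strict file order.
 * A minimal sketch of a sink that writes to a file descriptor (the fd
 * passed via "priv" is hypothetical; cli_read_sink below is the in-tree
 * variant that copies into a memory buffer instead):
 *
 *	static NTSTATUS write_fd_sink(char *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		while (n > 0) {
 *			ssize_t nwritten = write(*fd, buf, n);
 *			if (nwritten <= 0) {
 *				return map_nt_error_from_unix(errno);
 *			}
 *			buf += nwritten;
 *			n -= nwritten;
 *		}
 *		return NT_STATUS_OK;
 *	}
 */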
791 static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
793 char **pbuf = (char **)priv;
794 memcpy(*pbuf, buf, n);
795 *pbuf += n;
796 return NT_STATUS_OK;
799 NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum,
800 char *buf, off_t offset, size_t size,
801 size_t *nread)
803 NTSTATUS status;
804 off_t ret;
806 status = cli_pull(cli, fnum, offset, size, size,
807 cli_read_sink, &buf, &ret);
808 if (!NT_STATUS_IS_OK(status)) {
809 return status;
812 if (nread) {
813 *nread = ret;
816 return NT_STATUS_OK;
819 /****************************************************************************
820 Write to a file using SMBwrite; 0-byte writes are not skipped
821 ****************************************************************************/
823 NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
824 off_t offset, size_t size1, size_t *ptotal)
826 uint8_t *bytes;
827 ssize_t total = 0;
830 * 3 bytes prefix: the 0x01 data block marker followed by the 16-bit length
833 bytes = talloc_array(talloc_tos(), uint8_t, 3);
834 if (bytes == NULL) {
835 return NT_STATUS_NO_MEMORY;
837 bytes[0] = 1;
839 do {
840 uint32_t usable_space = cli_state_available_size(cli, 48);
841 size_t size = MIN(size1, usable_space);
842 struct tevent_req *req;
843 uint16_t vwv[5];
844 uint16_t *ret_vwv;
845 NTSTATUS status;
847 SSVAL(vwv+0, 0, fnum);
848 SSVAL(vwv+1, 0, size);
849 SIVAL(vwv+2, 0, offset);
850 SSVAL(vwv+4, 0, 0);
852 bytes = talloc_realloc(talloc_tos(), bytes, uint8_t,
853 size+3);
854 if (bytes == NULL) {
855 return NT_STATUS_NO_MEMORY;
857 SSVAL(bytes, 1, size);
858 memcpy(bytes + 3, buf + total, size);
860 status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
861 size+3, bytes, &req, 1, NULL, &ret_vwv,
862 NULL, NULL);
863 if (!NT_STATUS_IS_OK(status)) {
864 TALLOC_FREE(bytes);
865 return status;
868 size = SVAL(ret_vwv+0, 0);
869 TALLOC_FREE(req);
870 if (size == 0) {
871 break;
873 size1 -= size;
874 total += size;
875 offset += size;
877 } while (size1);
879 TALLOC_FREE(bytes);
881 if (ptotal != NULL) {
882 *ptotal = total;
884 return NT_STATUS_OK;
888 * Send a write&x request
891 struct cli_write_andx_state {
892 size_t size;
893 uint16_t vwv[14];
894 size_t written;
895 uint8_t pad;
896 struct iovec iov[2];
899 static void cli_write_andx_done(struct tevent_req *subreq);
901 struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
902 struct tevent_context *ev,
903 struct cli_state *cli, uint16_t fnum,
904 uint16_t mode, const uint8_t *buf,
905 off_t offset, size_t size,
906 struct tevent_req **reqs_before,
907 int num_reqs_before,
908 struct tevent_req **psmbreq)
910 struct tevent_req *req, *subreq;
911 struct cli_write_andx_state *state;
912 bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0);
913 uint8_t wct = bigoffset ? 14 : 12;
914 size_t max_write = cli_write_max_bufsize(cli, mode, wct);
915 uint16_t *vwv;
917 req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
918 if (req == NULL) {
919 return NULL;
922 state->size = MIN(size, max_write);
924 vwv = state->vwv;
926 SCVAL(vwv+0, 0, 0xFF);
927 SCVAL(vwv+0, 1, 0);
928 SSVAL(vwv+1, 0, 0);
929 SSVAL(vwv+2, 0, fnum);
930 SIVAL(vwv+3, 0, offset);
931 SIVAL(vwv+5, 0, 0);
932 SSVAL(vwv+7, 0, mode);
933 SSVAL(vwv+8, 0, 0);
934 SSVAL(vwv+9, 0, (state->size>>16));
935 SSVAL(vwv+10, 0, state->size);
937 SSVAL(vwv+11, 0,
938 smb1cli_req_wct_ofs(reqs_before, num_reqs_before)
939 + 1 /* the wct field */
940 + wct * 2 /* vwv */
941 + 2 /* num_bytes field */
942 + 1 /* pad */);
944 if (bigoffset) {
945 SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
948 state->pad = 0;
949 state->iov[0].iov_base = (void *)&state->pad;
950 state->iov[0].iov_len = 1;
951 state->iov[1].iov_base = discard_const_p(void, buf);
952 state->iov[1].iov_len = state->size;
954 subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
955 2, state->iov);
956 if (tevent_req_nomem(subreq, req)) {
957 return tevent_req_post(req, ev);
959 tevent_req_set_callback(subreq, cli_write_andx_done, req);
960 *psmbreq = subreq;
961 return req;
964 struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
965 struct tevent_context *ev,
966 struct cli_state *cli, uint16_t fnum,
967 uint16_t mode, const uint8_t *buf,
968 off_t offset, size_t size)
970 struct tevent_req *req, *subreq;
971 NTSTATUS status;
973 req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
974 size, NULL, 0, &subreq);
975 if (req == NULL) {
976 return NULL;
979 status = smb1cli_req_chain_submit(&subreq, 1);
980 if (tevent_req_nterror(req, status)) {
981 return tevent_req_post(req, ev);
983 return req;
986 static void cli_write_andx_done(struct tevent_req *subreq)
988 struct tevent_req *req = tevent_req_callback_data(
989 subreq, struct tevent_req);
990 struct cli_write_andx_state *state = tevent_req_data(
991 req, struct cli_write_andx_state);
992 uint8_t wct;
993 uint16_t *vwv;
994 NTSTATUS status;
996 status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv,
997 NULL, NULL);
998 TALLOC_FREE(subreq);
999 if (NT_STATUS_IS_ERR(status)) {
1000 tevent_req_nterror(req, status);
1001 return;
1003 state->written = SVAL(vwv+2, 0);
1004 if (state->size > UINT16_MAX) {
1006 * It is important that we set the
1007 * high bits only if we asked for a large write.
1009 * OS/2 print shares get this wrong and may send
1010 * invalid values.
1012 * See bug #5326.
1014 state->written |= SVAL(vwv+4, 0)<<16;
1016 tevent_req_done(req);
1019 NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
1021 struct cli_write_andx_state *state = tevent_req_data(
1022 req, struct cli_write_andx_state);
1023 NTSTATUS status;
1025 if (tevent_req_is_nterror(req, &status)) {
1026 return status;
1028 if (pwritten != NULL) {
1029 *pwritten = state->written;
1031 return NT_STATUS_OK;
1034 struct cli_writeall_state {
1035 struct tevent_context *ev;
1036 struct cli_state *cli;
1037 uint16_t fnum;
1038 uint16_t mode;
1039 const uint8_t *buf;
1040 off_t offset;
1041 size_t size;
1042 size_t written;
1045 static void cli_writeall_written(struct tevent_req *req);
1047 static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
1048 struct tevent_context *ev,
1049 struct cli_state *cli,
1050 uint16_t fnum,
1051 uint16_t mode,
1052 const uint8_t *buf,
1053 off_t offset, size_t size)
1055 struct tevent_req *req, *subreq;
1056 struct cli_writeall_state *state;
1058 req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
1059 if (req == NULL) {
1060 return NULL;
1062 state->ev = ev;
1063 state->cli = cli;
1064 state->fnum = fnum;
1065 state->mode = mode;
1066 state->buf = buf;
1067 state->offset = offset;
1068 state->size = size;
1069 state->written = 0;
1071 subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
1072 state->mode, state->buf, state->offset,
1073 state->size);
1074 if (tevent_req_nomem(subreq, req)) {
1075 return tevent_req_post(req, ev);
1077 tevent_req_set_callback(subreq, cli_writeall_written, req);
1078 return req;
1081 static void cli_writeall_written(struct tevent_req *subreq)
1083 struct tevent_req *req = tevent_req_callback_data(
1084 subreq, struct tevent_req);
1085 struct cli_writeall_state *state = tevent_req_data(
1086 req, struct cli_writeall_state);
1087 NTSTATUS status;
1088 size_t written, to_write;
1090 status = cli_write_andx_recv(subreq, &written);
1091 TALLOC_FREE(subreq);
1092 if (tevent_req_nterror(req, status)) {
1093 return;
1096 state->written += written;
1098 if (state->written > state->size) {
1099 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
1100 return;
1103 to_write = state->size - state->written;
1105 if (to_write == 0) {
1106 tevent_req_done(req);
1107 return;
1110 subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
1111 state->mode,
1112 state->buf + state->written,
1113 state->offset + state->written, to_write);
1114 if (tevent_req_nomem(subreq, req)) {
1115 return;
1117 tevent_req_set_callback(subreq, cli_writeall_written, req);
1120 static NTSTATUS cli_writeall_recv(struct tevent_req *req,
1121 size_t *pwritten)
1123 struct cli_writeall_state *state = tevent_req_data(
1124 req, struct cli_writeall_state);
1125 NTSTATUS status;
1127 if (tevent_req_is_nterror(req, &status)) {
1128 return status;
1130 if (pwritten != NULL) {
1131 *pwritten = state->written;
1133 return NT_STATUS_OK;
1136 NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
1137 const uint8_t *buf, off_t offset, size_t size,
1138 size_t *pwritten)
1140 TALLOC_CTX *frame = talloc_stackframe();
1141 struct tevent_context *ev;
1142 struct tevent_req *req;
1143 NTSTATUS status = NT_STATUS_NO_MEMORY;
1145 if (smbXcli_conn_has_async_calls(cli->conn)) {
1147 * Can't use sync call while an async call is in flight
1149 status = NT_STATUS_INVALID_PARAMETER;
1150 goto fail;
1152 ev = samba_tevent_context_init(frame);
1153 if (ev == NULL) {
1154 goto fail;
1156 req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
1157 if (req == NULL) {
1158 goto fail;
1160 if (!tevent_req_poll(req, ev)) {
1161 status = map_nt_error_from_unix(errno);
1162 goto fail;
1164 status = cli_writeall_recv(req, pwritten);
1165 fail:
1166 TALLOC_FREE(frame);
1167 return status;
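/*
 * A minimal usage sketch for cli_writeall() (fnum and data are
 * hypothetical):
 *
 *	size_t nwritten;
 *	status = cli_writeall(cli, fnum, 0, (const uint8_t *)data, 0,
 *			      strlen(data), &nwritten);
 *
 * cli_writeall() keeps issuing write&x requests until the whole buffer has
 * been accepted or an error occurs, so on success nwritten equals the
 * requested size.
 */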
1170 struct cli_push_chunk;
1172 struct cli_push_state {
1173 struct tevent_context *ev;
1174 struct cli_state *cli;
1175 uint16_t fnum;
1176 uint16_t mode;
1177 off_t start_offset;
1179 size_t (*source)(uint8_t *buf, size_t n, void *priv);
1180 void *priv;
1182 bool eof;
1184 size_t chunk_size;
1185 off_t next_offset;
1188 * Outstanding requests
1190 * The maximum is 256:
1191 * - which would be a window of 256 MByte
1192 * for SMB2 with multi-credit
1193 * or smb1 unix extensions.
1195 uint16_t max_chunks;
1196 uint16_t num_chunks;
1197 uint16_t num_waiting;
1198 struct cli_push_chunk *chunks;
1201 struct cli_push_chunk {
1202 struct cli_push_chunk *prev, *next;
1203 struct tevent_req *req;/* This is the main request! Not the subreq */
1204 struct tevent_req *subreq;
1205 off_t ofs;
1206 uint8_t *buf;
1207 size_t total_size;
1208 size_t tmp_size;
1209 bool done;
1212 static void cli_push_setup_chunks(struct tevent_req *req);
1213 static void cli_push_chunk_ship(struct cli_push_chunk *chunk);
1214 static void cli_push_chunk_done(struct tevent_req *subreq);
1216 struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
1217 struct cli_state *cli,
1218 uint16_t fnum, uint16_t mode,
1219 off_t start_offset, size_t window_size,
1220 size_t (*source)(uint8_t *buf, size_t n,
1221 void *priv),
1222 void *priv)
1224 struct tevent_req *req;
1225 struct cli_push_state *state;
1226 size_t page_size = 1024;
1227 uint64_t tmp64;
1229 req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
1230 if (req == NULL) {
1231 return NULL;
1233 state->cli = cli;
1234 state->ev = ev;
1235 state->fnum = fnum;
1236 state->start_offset = start_offset;
1237 state->mode = mode;
1238 state->source = source;
1239 state->priv = priv;
1240 state->next_offset = start_offset;
1242 state->chunk_size = cli_write_max_bufsize(cli, mode, 14);
1243 if (state->chunk_size > page_size) {
1244 state->chunk_size &= ~(page_size - 1);
1247 if (window_size == 0) {
1249 * We use 16 MByte as the default window size.
1251 window_size = 16 * 1024 * 1024;
1254 tmp64 = window_size/state->chunk_size;
1255 if ((window_size % state->chunk_size) > 0) {
1256 tmp64 += 1;
1258 tmp64 = MAX(tmp64, 1);
1259 tmp64 = MIN(tmp64, 256);
1260 state->max_chunks = tmp64;
1263 * We defer the callback because of the complex
1264 * substate/subfunction logic
1266 tevent_req_defer_callback(req, ev);
1268 cli_push_setup_chunks(req);
1269 if (!tevent_req_is_in_progress(req)) {
1270 return tevent_req_post(req, ev);
1273 return req;
1276 static void cli_push_setup_chunks(struct tevent_req *req)
1278 struct cli_push_state *state =
1279 tevent_req_data(req,
1280 struct cli_push_state);
1281 struct cli_push_chunk *chunk, *next = NULL;
1282 size_t i;
1284 for (chunk = state->chunks; chunk; chunk = next) {
1286 * Note that chunk might be removed from this call.
1288 next = chunk->next;
1289 cli_push_chunk_ship(chunk);
1290 if (!tevent_req_is_in_progress(req)) {
1291 return;
1295 for (i = state->num_chunks; i < state->max_chunks; i++) {
1297 if (state->num_waiting > 0) {
1298 return;
1301 if (state->eof) {
1302 break;
1305 chunk = talloc_zero(state, struct cli_push_chunk);
1306 if (tevent_req_nomem(chunk, req)) {
1307 return;
1309 chunk->req = req;
1310 chunk->ofs = state->next_offset;
1311 chunk->buf = talloc_array(chunk,
1312 uint8_t,
1313 state->chunk_size);
1314 if (tevent_req_nomem(chunk->buf, req)) {
1315 return;
1317 chunk->total_size = state->source(chunk->buf,
1318 state->chunk_size,
1319 state->priv);
1320 if (chunk->total_size == 0) {
1321 /* nothing to send */
1322 talloc_free(chunk);
1323 state->eof = true;
1324 break;
1326 state->next_offset += chunk->total_size;
1328 DLIST_ADD_END(state->chunks, chunk, NULL);
1329 state->num_chunks++;
1330 state->num_waiting++;
1332 cli_push_chunk_ship(chunk);
1333 if (!tevent_req_is_in_progress(req)) {
1334 return;
1338 if (!state->eof) {
1339 return;
1342 if (state->num_chunks > 0) {
1343 return;
1346 tevent_req_done(req);
1349 static void cli_push_chunk_ship(struct cli_push_chunk *chunk)
1351 struct tevent_req *req = chunk->req;
1352 struct cli_push_state *state =
1353 tevent_req_data(req,
1354 struct cli_push_state);
1355 bool ok;
1356 const uint8_t *buf;
1357 off_t ofs;
1358 size_t size;
1360 if (chunk->done) {
1361 DLIST_REMOVE(state->chunks, chunk);
1362 SMB_ASSERT(state->num_chunks > 0);
1363 state->num_chunks--;
1364 TALLOC_FREE(chunk);
1366 return;
1369 if (chunk->subreq != NULL) {
1370 return;
1373 SMB_ASSERT(state->num_waiting > 0);
1375 buf = chunk->buf + chunk->tmp_size;
1376 ofs = chunk->ofs + chunk->tmp_size;
1377 size = chunk->total_size - chunk->tmp_size;
1379 ok = smb1cli_conn_req_possible(state->cli->conn);
1380 if (!ok) {
1381 return;
1384 chunk->subreq = cli_write_andx_send(chunk,
1385 state->ev,
1386 state->cli,
1387 state->fnum,
1388 state->mode,
1389 buf,
1390 ofs,
1391 size);
1392 if (tevent_req_nomem(chunk->subreq, req)) {
1393 return;
1395 tevent_req_set_callback(chunk->subreq,
1396 cli_push_chunk_done,
1397 chunk);
1399 state->num_waiting--;
1400 return;
1403 static void cli_push_chunk_done(struct tevent_req *subreq)
1405 struct cli_push_chunk *chunk =
1406 tevent_req_callback_data(subreq,
1407 struct cli_push_chunk);
1408 struct tevent_req *req = chunk->req;
1409 struct cli_push_state *state =
1410 tevent_req_data(req,
1411 struct cli_push_state);
1412 NTSTATUS status;
1413 size_t expected = chunk->total_size - chunk->tmp_size;
1414 size_t written;
1416 chunk->subreq = NULL;
1418 status = cli_write_andx_recv(subreq, &written);
1419 TALLOC_FREE(subreq);
1420 if (tevent_req_nterror(req, status)) {
1421 return;
1424 if (written > expected) {
1425 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
1426 return;
1429 if (written == 0) {
1430 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
1431 return;
1434 chunk->tmp_size += written;
1436 if (chunk->tmp_size == chunk->total_size) {
1437 chunk->done = true;
1438 } else {
1439 state->num_waiting++;
1442 cli_push_setup_chunks(req);
1445 NTSTATUS cli_push_recv(struct tevent_req *req)
1447 return tevent_req_simple_recv_ntstatus(req);
1450 NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
1451 off_t start_offset, size_t window_size,
1452 size_t (*source)(uint8_t *buf, size_t n, void *priv),
1453 void *priv)
1455 TALLOC_CTX *frame = talloc_stackframe();
1456 struct tevent_context *ev;
1457 struct tevent_req *req;
1458 NTSTATUS status = NT_STATUS_OK;
1460 if (smbXcli_conn_has_async_calls(cli->conn)) {
1462 * Can't use sync call while an async call is in flight
1464 status = NT_STATUS_INVALID_PARAMETER;
1465 goto fail;
1468 ev = samba_tevent_context_init(frame);
1469 if (ev == NULL) {
1470 status = NT_STATUS_NO_MEMORY;
1471 goto fail;
1474 req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
1475 window_size, source, priv);
1476 if (req == NULL) {
1477 status = NT_STATUS_NO_MEMORY;
1478 goto fail;
1481 if (!tevent_req_poll(req, ev)) {
1482 status = map_nt_error_from_unix(errno);
1483 goto fail;
1486 status = cli_push_recv(req);
1487 fail:
1488 TALLOC_FREE(frame);
1489 return status;
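/*
 * A minimal sketch of a "source" callback for cli_push() (the file
 * descriptor passed via "priv" is hypothetical).  Returning 0 signals EOF
 * and ends the push:
 *
 *	static size_t read_fd_source(uint8_t *buf, size_t n, void *priv)
 *	{
 *		int *fd = (int *)priv;
 *		ssize_t nread = read(*fd, buf, n);
 *		return (nread > 0) ? nread : 0;
 *	}
 *
 *	status = cli_push(cli, fnum, 0, 0, 0, read_fd_source, &fd);
 */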