source3/libsmb/clireadwrite.c
/*
   Unix SMB/CIFS implementation.
   client file read/write routines
   Copyright (C) Andrew Tridgell 1994-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"

/****************************************************************************
 Calculate the recommended read buffer size
****************************************************************************/
static size_t cli_read_max_bufsize(struct cli_state *cli)
{
	if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
	    && (cli->posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
		return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
	}

	if (cli->capabilities & CAP_LARGE_READX) {
		return cli->is_samba
			? CLI_SAMBA_MAX_LARGE_READX_SIZE
			: CLI_WINDOWS_MAX_LARGE_READX_SIZE;
	}

	return (cli->max_xmit - (smb_size+32)) & ~1023;
}

/****************************************************************************
 Calculate the recommended write buffer size
****************************************************************************/
static size_t cli_write_max_bufsize(struct cli_state *cli, uint16_t write_mode)
{
	if (write_mode == 0 &&
	    !client_is_signing_on(cli) &&
	    !cli_encryption_on(cli) &&
	    (cli->posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
	    (cli->capabilities & CAP_LARGE_FILES)) {
		/* Only do massive writes if we can do them direct
		 * with no signing or encrypting - not on a pipe. */
		return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
	}

	if (cli->is_samba) {
		return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
	}

	if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
	    || client_is_signing_on(cli)
	    || strequal(cli->dev, "LPT1:")) {

		/*
		 * Printer devices are restricted to max_xmit writesize in
		 * Vista and XPSP3 as are signing connections.
		 */

		return (cli->max_xmit - (smb_size+32)) & ~1023;
	}

	return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
}
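
/*
 * Worked example (illustrative only, not from the original source): the
 * fallback path above rounds the usable payload down to a multiple of 1024.
 * Assuming a hypothetical negotiated max_xmit of 16644 and smb_size of 39,
 * that gives (16644 - (39+32)) & ~1023 = 16573 & ~1023 = 16384 bytes per
 * request.
 */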

struct cli_read_andx_state {
	size_t size;
	uint16_t vwv[12];
	NTSTATUS status;
	size_t received;
	uint8_t *buf;
};

static void cli_read_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
					struct event_context *ev,
					struct cli_state *cli, uint16_t fnum,
					off_t offset, size_t size,
					struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_read_andx_state *state;
	bool bigoffset = False;
	uint8_t wct = 10;

	if (size > cli_read_max_bufsize(cli)) {
		DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
			  "size=%d\n", (int)size,
			  (int)cli_read_max_bufsize(cli)));
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
	if (req == NULL) {
		return NULL;
	}
	state->size = size;

	SCVAL(state->vwv + 0, 0, 0xFF);
	SCVAL(state->vwv + 0, 1, 0);
	SSVAL(state->vwv + 1, 0, 0);
	SSVAL(state->vwv + 2, 0, fnum);
	SIVAL(state->vwv + 3, 0, offset);
	SSVAL(state->vwv + 5, 0, size);
	SSVAL(state->vwv + 6, 0, size);
	SSVAL(state->vwv + 7, 0, (size >> 16));
	SSVAL(state->vwv + 8, 0, 0);
	SSVAL(state->vwv + 9, 0, 0);

	if ((uint64_t)offset >> 32) {
		bigoffset = true;
		SIVAL(state->vwv + 10, 0,
		      (((uint64_t)offset)>>32) & 0xffffffff);
		wct += 2;
	}

	subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
				    state->vwv, 0, NULL);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, cli_read_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
				      struct event_context *ev,
				      struct cli_state *cli, uint16_t fnum,
				      off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
				   &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_read_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	uint8_t *inbuf;
	uint8_t wct;
	uint16_t *vwv;
	uint32_t num_bytes;
	uint8_t *bytes;

	state->status = cli_smb_recv(subreq, 12, &wct, &vwv, &num_bytes,
				     &bytes);
	if (NT_STATUS_IS_ERR(state->status)) {
		tevent_req_nterror(req, state->status);
		return;
	}

	/* size is the number of bytes the server returned.
	 * Might be zero. */
	state->received = SVAL(vwv + 5, 0);
	state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);

	if (state->received > state->size) {
		DEBUG(5,("server returned more than we wanted!\n"));
		tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
		return;
	}

	/*
	 * bcc field must be valid for small reads, for large reads the 16-bit
	 * bcc field can't be correct.
	 */

	if ((state->received < 0xffff) && (state->received > num_bytes)) {
		DEBUG(5, ("server announced more bytes than sent\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	inbuf = cli_smb_inbuf(subreq);
	state->buf = (uint8_t *)smb_base(inbuf) + SVAL(vwv+6, 0);

	if (trans_oob(smb_len(inbuf), SVAL(vwv+6, 0), state->received)
	    || (state->buf < bytes)) {
		DEBUG(5, ("server returned invalid read&x data offset\n"));
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}
	tevent_req_done(req);
}

/*
 * Pull the data out of a finished async read_and_x request. rcvbuf is
 * talloced from the request, so better make sure that you copy it away before
 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
 * talloc_move it!
 */

NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
			    uint8_t **rcvbuf)
{
	struct cli_read_andx_state *state = tevent_req_data(
		req, struct cli_read_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->received;
	*rcvbuf = state->buf;
	return NT_STATUS_OK;
}
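
/*
 * Minimal usage sketch (illustrative only; "mybuf" is a hypothetical
 * caller-owned buffer, not part of this file). Because "rcvbuf" points into
 * memory owned by "req", copy the data out before freeing the request:
 *
 *	ssize_t received;
 *	uint8_t *rcvbuf;
 *	NTSTATUS status = cli_read_andx_recv(req, &received, &rcvbuf);
 *	if (NT_STATUS_IS_OK(status)) {
 *		memcpy(mybuf, rcvbuf, received);  // copy before TALLOC_FREE
 *	}
 *	TALLOC_FREE(req);  // rcvbuf is invalid from here on
 */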

struct cli_pull_subreq {
	struct tevent_req *req;
	ssize_t received;
	uint8_t *buf;
};

/*
 * Parallel read support.
 *
 * cli_pull sends as many read&x requests as the server would allow via
 * max_mux at a time. When replies flow back in, the data is written into
 * the callback function "sink" in the right order.
 */

struct cli_pull_state {
	struct tevent_req *req;

	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	off_t start_offset;
	SMB_OFF_T size;

	NTSTATUS (*sink)(char *buf, size_t n, void *priv);
	void *priv;

	size_t chunk_size;

	/*
	 * Outstanding requests
	 */
	int num_reqs;
	struct cli_pull_subreq *reqs;

	/*
	 * For how many bytes did we send requests already?
	 */
	SMB_OFF_T requested;

	/*
	 * Next request index to push into "sink". This walks around the "req"
	 * array, taking care that the requests are pushed to "sink" in the
	 * right order. If necessary (i.e. replies don't come in in the right
	 * order), replies are held back in "reqs".
	 */
	int top_req;

	/*
	 * How many bytes did we push into "sink"?
	 */
	SMB_OFF_T pushed;
};

static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	char *result;

	result = tevent_req_print(mem_ctx, req);
	if (result == NULL) {
		return NULL;
	}

	return talloc_asprintf_append_buffer(
		result, "num_reqs=%d, top_req=%d",
		state->num_reqs, state->top_req);
}

static void cli_pull_read_done(struct tevent_req *read_req);

/*
 * Prepare an async pull request
 */

struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
				 struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, off_t start_offset,
				 SMB_OFF_T size, size_t window_size,
				 NTSTATUS (*sink)(char *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_pull_state *state;
	int i;

	req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_print_fn(req, cli_pull_print);
	state->req = req;

	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->size = size;
	state->sink = sink;
	state->priv = priv;

	state->pushed = 0;
	state->top_req = 0;

	if (size == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	state->chunk_size = cli_read_max_bufsize(cli);

	state->num_reqs = MAX(window_size/state->chunk_size, 1);
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	state->requested = 0;

	for (i=0; i<state->num_reqs; i++) {
		struct cli_pull_subreq *subreq = &state->reqs[i];
		SMB_OFF_T size_left;
		size_t request_thistime;

		if (state->requested >= size) {
			state->num_reqs = i;
			break;
		}

		size_left = size - state->requested;
		request_thistime = MIN(size_left, state->chunk_size);

		subreq->req = cli_read_andx_send(
			state->reqs, ev, cli, fnum,
			state->start_offset + state->requested,
			request_thistime);

		if (subreq->req == NULL) {
			goto failed;
		}
		tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
		state->requested += request_thistime;
	}
	return req;

 failed:
	TALLOC_FREE(req);
	return NULL;
}

/*
 * Handle incoming read replies, push the data into sink and send out new
 * requests if necessary.
 */

static void cli_pull_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	struct cli_pull_subreq *pull_subreq = NULL;
	NTSTATUS status;
	int i;

	for (i = 0; i < state->num_reqs; i++) {
		pull_subreq = &state->reqs[i];
		if (subreq == pull_subreq->req) {
			break;
		}
	}
	if (i == state->num_reqs) {
		/* Huh -- received something we did not send?? */
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		return;
	}

	status = cli_read_andx_recv(subreq, &pull_subreq->received,
				    &pull_subreq->buf);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(state->req, status);
		return;
	}

	/*
	 * This loop is the one to take care of out-of-order replies. All
	 * pending requests are in state->reqs, state->reqs[top_req] is the
	 * one that is to be pushed next. If however a request later than
	 * top_req is replied to, then we can't push yet. If top_req is
	 * replied to at a later point then, we need to push all the finished
	 * requests.
	 */

	while (state->reqs[state->top_req].req != NULL) {
		struct cli_pull_subreq *top_subreq;

		DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
			   state->top_req));

		top_subreq = &state->reqs[state->top_req];

		if (tevent_req_is_in_progress(top_subreq->req)) {
			DEBUG(11, ("cli_pull_read_done: top request not yet "
				   "done\n"));
			return;
		}

		DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
			   "pushed\n", (int)top_subreq->received,
			   (int)state->pushed));

		status = state->sink((char *)top_subreq->buf,
				     top_subreq->received, state->priv);
		if (!NT_STATUS_IS_OK(status)) {
			tevent_req_nterror(state->req, status);
			return;
		}
		state->pushed += top_subreq->received;

		TALLOC_FREE(state->reqs[state->top_req].req);

		if (state->requested < state->size) {
			struct tevent_req *new_req;
			SMB_OFF_T size_left;
			size_t request_thistime;

			size_left = state->size - state->requested;
			request_thistime = MIN(size_left, state->chunk_size);

			DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
				   "at %d, position %d\n",
				   (int)request_thistime,
				   (int)(state->start_offset
					 + state->requested),
				   state->top_req));

			new_req = cli_read_andx_send(
				state->reqs, state->ev, state->cli,
				state->fnum,
				state->start_offset + state->requested,
				request_thistime);

			if (tevent_req_nomem(new_req, state->req)) {
				return;
			}
			tevent_req_set_callback(new_req, cli_pull_read_done,
						req);

			state->reqs[state->top_req].req = new_req;
			state->requested += request_thistime;
		}

		state->top_req = (state->top_req+1) % state->num_reqs;
	}

	tevent_req_done(req);
}

NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
{
	struct cli_pull_state *state = tevent_req_data(
		req, struct cli_pull_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*received = state->pushed;
	return NT_STATUS_OK;
}

NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
		  off_t start_offset, SMB_OFF_T size, size_t window_size,
		  NTSTATUS (*sink)(char *buf, size_t n, void *priv),
		  void *priv, SMB_OFF_T *received)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
			    window_size, sink, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_pull_recv(req, received);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}
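
/*
 * Usage sketch (illustrative only; write_to_fd_sink, "fd" and "file_size"
 * are hypothetical, not part of this file). A caller provides a "sink"
 * callback that consumes each chunk in file order, e.g. writing it to a
 * local file descriptor:
 *
 *	static NTSTATUS write_to_fd_sink(char *buf, size_t n, void *priv)
 *	{
 *		int fd = *(int *)priv;
 *		if (write(fd, buf, n) != (ssize_t)n) {
 *			return map_nt_error_from_unix(errno);
 *		}
 *		return NT_STATUS_OK;
 *	}
 *
 *	SMB_OFF_T received;
 *	status = cli_pull(cli, fnum, 0, file_size, 0x10000,
 *			  write_to_fd_sink, &fd, &received);
 */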

static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
{
	char **pbuf = (char **)priv;
	memcpy(*pbuf, buf, n);
	*pbuf += n;
	return NT_STATUS_OK;
}

ssize_t cli_read(struct cli_state *cli, uint16_t fnum, char *buf,
		 off_t offset, size_t size)
{
	NTSTATUS status;
	SMB_OFF_T ret;

	status = cli_pull(cli, fnum, offset, size, size,
			  cli_read_sink, &buf, &ret);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
		return -1;
	}
	return ret;
}

/****************************************************************************
 Issue a single SMBwrite and don't wait for a reply.
****************************************************************************/

static bool cli_issue_write(struct cli_state *cli,
			    uint16_t fnum,
			    off_t offset,
			    uint16 mode,
			    const char *buf,
			    size_t size,
			    int i)
{
	char *p;
	bool large_writex = false;
	/* We can only do direct writes if not signing and not encrypting. */
	bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);

	if (!direct_writes && size + 1 > cli->bufsize) {
		cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
		if (!cli->outbuf) {
			return False;
		}
		cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
		if (cli->inbuf == NULL) {
			SAFE_FREE(cli->outbuf);
			return False;
		}
		cli->bufsize = size + 1024;
	}

	memset(cli->outbuf,'\0',smb_size);
	memset(cli->inbuf,'\0',smb_size);

	if (cli->capabilities & CAP_LARGE_FILES) {
		large_writex = True;
	}

	if (large_writex) {
		cli_set_message(cli->outbuf,14,0,True);
	} else {
		cli_set_message(cli->outbuf,12,0,True);
	}

	SCVAL(cli->outbuf,smb_com,SMBwriteX);
	SSVAL(cli->outbuf,smb_tid,cli->cnum);
	cli_setup_packet(cli);

	SCVAL(cli->outbuf,smb_vwv0,0xFF);
	SSVAL(cli->outbuf,smb_vwv2,fnum);

	SIVAL(cli->outbuf,smb_vwv3,offset);
	SIVAL(cli->outbuf,smb_vwv5,0);
	SSVAL(cli->outbuf,smb_vwv7,mode);

	SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
	/*
	 * According to CIFS-TR-1p00, this following field should only
	 * be set if CAP_LARGE_WRITEX is set. We should check this
	 * locally. However, this check might already have been
	 * done by our callers.
	 */
	SSVAL(cli->outbuf,smb_vwv9,(size>>16));
	SSVAL(cli->outbuf,smb_vwv10,size);
	/* +1 is pad byte. */
	SSVAL(cli->outbuf,smb_vwv11,
	      smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);

	if (large_writex) {
		SIVAL(cli->outbuf,smb_vwv12,(((uint64_t)offset)>>32) & 0xffffffff);
	}

	p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
	*p++ = '\0'; /* pad byte. */
	if (!direct_writes) {
		memcpy(p, buf, size);
	}
	if (size > 0x1FFFF) {
		/* This is a POSIX 14 word large write. */
		set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
		_smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
	} else {
		cli_setup_bcc(cli, p+size);
	}

	SSVAL(cli->outbuf,smb_mid,cli->mid + i);

	show_msg(cli->outbuf);
	if (direct_writes) {
		/* For direct writes we now need to write the data
		 * directly out of buf. */
		return cli_send_smb_direct_writeX(cli, buf, size);
	} else {
		return cli_send_smb(cli);
	}
}

/****************************************************************************
  write to a file
  write_mode: 0x0001 disallow write caching
              0x0002 return bytes remaining
              0x0004 use raw named pipe protocol
              0x0008 start of message mode named pipe protocol
****************************************************************************/

ssize_t cli_write(struct cli_state *cli,
		  uint16_t fnum, uint16 write_mode,
		  const char *buf, off_t offset, size_t size)
{
	ssize_t bwritten = 0;
	unsigned int issued = 0;
	unsigned int received = 0;
	int mpx = 1;
	size_t writesize;
	int blocks;

	if(cli->max_mux > 1) {
		mpx = cli->max_mux-1;
	} else {
		mpx = 1;
	}

	writesize = cli_write_max_bufsize(cli, write_mode);

	blocks = (size + (writesize-1)) / writesize;

	while (received < blocks) {

		while ((issued - received < mpx) && (issued < blocks)) {
			ssize_t bsent = issued * writesize;
			ssize_t size1 = MIN(writesize, size - bsent);

			if (!cli_issue_write(cli, fnum, offset + bsent,
					write_mode,
					buf + bsent,
					size1, issued))
				return -1;
			issued++;
		}

		if (!cli_receive_smb(cli)) {
			return bwritten;
		}

		received++;

		if (cli_is_error(cli))
			break;

		bwritten += SVAL(cli->inbuf, smb_vwv2);
		if (writesize > 0xFFFF) {
			bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
		}
	}

	while (received < issued && cli_receive_smb(cli)) {
		received++;
	}

	return bwritten;
}
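
/*
 * Usage sketch (illustrative only; "data" and "data_len" are hypothetical).
 * A plain file write passes write_mode 0; the mode bits documented above are
 * only needed for named-pipe writes (e.g. 0x0008 for start of message mode):
 *
 *	ssize_t n = cli_write(cli, fnum, 0, data, 0, data_len);
 *	if (n == -1 || (size_t)n != data_len) {
 *		// handle error or short write
 *	}
 */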

/****************************************************************************
  write to a file using a SMBwrite and not bypassing 0 byte writes
****************************************************************************/

ssize_t cli_smbwrite(struct cli_state *cli,
		     uint16_t fnum, char *buf, off_t offset, size_t size1)
{
	char *p;
	ssize_t total = 0;

	do {
		size_t size = MIN(size1, cli->max_xmit - 48);

		memset(cli->outbuf,'\0',smb_size);
		memset(cli->inbuf,'\0',smb_size);

		cli_set_message(cli->outbuf,5, 0,True);

		SCVAL(cli->outbuf,smb_com,SMBwrite);
		SSVAL(cli->outbuf,smb_tid,cli->cnum);
		cli_setup_packet(cli);

		SSVAL(cli->outbuf,smb_vwv0,fnum);
		SSVAL(cli->outbuf,smb_vwv1,size);
		SIVAL(cli->outbuf,smb_vwv2,offset);
		SSVAL(cli->outbuf,smb_vwv4,0);

		p = smb_buf(cli->outbuf);
		*p++ = 1;
		SSVAL(p, 0, size); p += 2;
		memcpy(p, buf + total, size); p += size;

		cli_setup_bcc(cli, p);

		if (!cli_send_smb(cli))
			return -1;

		if (!cli_receive_smb(cli))
			return -1;

		if (cli_is_error(cli))
			return -1;

		size = SVAL(cli->inbuf,smb_vwv0);
		if (size == 0)
			break;

		size1 -= size;
		total += size;
		offset += size;

	} while (size1);

	return total;
}

/*
 * Send a write&x request
 */

struct cli_write_andx_state {
	size_t size;
	uint16_t vwv[14];
	size_t written;
	uint8_t pad;
	struct iovec iov[2];
};

static void cli_write_andx_done(struct tevent_req *subreq);

struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
					 struct event_context *ev,
					 struct cli_state *cli, uint16_t fnum,
					 uint16_t mode, const uint8_t *buf,
					 off_t offset, size_t size,
					 struct tevent_req **reqs_before,
					 int num_reqs_before,
					 struct tevent_req **psmbreq)
{
	struct tevent_req *req, *subreq;
	struct cli_write_andx_state *state;
	bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
	uint8_t wct = bigoffset ? 14 : 12;
	size_t max_write = cli_write_max_bufsize(cli, mode);
	uint16_t *vwv;

	req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
	if (req == NULL) {
		return NULL;
	}

	size = MIN(size, max_write);

	vwv = state->vwv;

	SCVAL(vwv+0, 0, 0xFF);
	SCVAL(vwv+0, 1, 0);
	SSVAL(vwv+1, 0, 0);
	SSVAL(vwv+2, 0, fnum);
	SIVAL(vwv+3, 0, offset);
	SIVAL(vwv+5, 0, 0);
	SSVAL(vwv+7, 0, mode);
	SSVAL(vwv+8, 0, 0);
	SSVAL(vwv+9, 0, (size>>16));
	SSVAL(vwv+10, 0, size);

	SSVAL(vwv+11, 0,
	      cli_smb_wct_ofs(reqs_before, num_reqs_before)
	      + 1		/* the wct field */
	      + wct * 2		/* vwv */
	      + 2		/* num_bytes field */
	      + 1		/* pad */);

	if (bigoffset) {
		SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
	}

	state->pad = 0;
	state->iov[0].iov_base = (void *)&state->pad;
	state->iov[0].iov_len = 1;
	state->iov[1].iov_base = CONST_DISCARD(void *, buf);
	state->iov[1].iov_len = size;

	subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
				    2, state->iov);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_write_andx_done, req);
	*psmbreq = subreq;
	return req;
}

struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
				       struct event_context *ev,
				       struct cli_state *cli, uint16_t fnum,
				       uint16_t mode, const uint8_t *buf,
				       off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	NTSTATUS status;

	req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
				    size, NULL, 0, &subreq);
	if (req == NULL) {
		return NULL;
	}

	status = cli_smb_req_send(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	return req;
}

static void cli_write_andx_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	uint8_t wct;
	uint16_t *vwv;
	NTSTATUS status;

	status = cli_smb_recv(subreq, 6, &wct, &vwv, NULL, NULL);
	if (NT_STATUS_IS_ERR(status)) {
		TALLOC_FREE(subreq);
		tevent_req_nterror(req, status);
		return;
	}
	state->written = SVAL(vwv+2, 0);
	state->written |= SVAL(vwv+4, 0)<<16;
	tevent_req_done(req);
}

NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
{
	struct cli_write_andx_state *state = tevent_req_data(
		req, struct cli_write_andx_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}

struct cli_writeall_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	const uint8_t *buf;
	off_t offset;
	size_t size;
	size_t written;
};

static void cli_writeall_written(struct tevent_req *req);

static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    struct cli_state *cli,
					    uint16_t fnum,
					    uint16_t mode,
					    const uint8_t *buf,
					    off_t offset, size_t size)
{
	struct tevent_req *req, *subreq;
	struct cli_writeall_state *state;

	req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->fnum = fnum;
	state->mode = mode;
	state->buf = buf;
	state->offset = offset;
	state->size = size;
	state->written = 0;

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode, state->buf, state->offset,
				     state->size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
	return req;
}

static void cli_writeall_written(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct cli_writeall_state *state = tevent_req_data(
		req, struct cli_writeall_state);
	NTSTATUS status;
	size_t written, to_write;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	state->written += written;

	if (state->written > state->size) {
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	to_write = state->size - state->written;

	if (to_write == 0) {
		tevent_req_done(req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
				     state->mode,
				     state->buf + state->written,
				     state->offset + state->written, to_write);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, cli_writeall_written, req);
}

static NTSTATUS cli_writeall_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

struct cli_push_write_state {
	struct tevent_req *req; /* This is the main request! Not the subreq */
	uint32_t idx;
	off_t ofs;
	uint8_t *buf;
	size_t size;
};

struct cli_push_state {
	struct event_context *ev;
	struct cli_state *cli;
	uint16_t fnum;
	uint16_t mode;
	off_t start_offset;
	size_t window_size;

	size_t (*source)(uint8_t *buf, size_t n, void *priv);
	void *priv;

	bool eof;

	size_t chunk_size;
	off_t next_offset;

	/*
	 * Outstanding requests
	 */
	uint32_t pending;
	uint32_t num_reqs;
	struct cli_push_write_state **reqs;
};

static void cli_push_written(struct tevent_req *req);

static bool cli_push_write_setup(struct tevent_req *req,
				 struct cli_push_state *state,
				 uint32_t idx)
{
	struct cli_push_write_state *substate;
	struct tevent_req *subreq;

	substate = talloc(state->reqs, struct cli_push_write_state);
	if (!substate) {
		return false;
	}
	substate->req = req;
	substate->idx = idx;
	substate->ofs = state->next_offset;
	substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
	if (!substate->buf) {
		talloc_free(substate);
		return false;
	}
	substate->size = state->source(substate->buf,
				       state->chunk_size,
				       state->priv);
	if (substate->size == 0) {
		state->eof = true;
		/* nothing to send */
		talloc_free(substate);
		return true;
	}

	subreq = cli_writeall_send(substate,
				   state->ev, state->cli,
				   state->fnum, state->mode,
				   substate->buf,
				   substate->ofs,
				   substate->size);
	if (!subreq) {
		talloc_free(substate);
		return false;
	}
	tevent_req_set_callback(subreq, cli_push_written, substate);

	state->reqs[idx] = substate;
	state->pending += 1;
	state->next_offset += substate->size;

	return true;
}

struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct cli_state *cli,
				 uint16_t fnum, uint16_t mode,
				 off_t start_offset, size_t window_size,
				 size_t (*source)(uint8_t *buf, size_t n,
						  void *priv),
				 void *priv)
{
	struct tevent_req *req;
	struct cli_push_state *state;
	uint32_t i;

	req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
	if (req == NULL) {
		return NULL;
	}
	state->cli = cli;
	state->ev = ev;
	state->fnum = fnum;
	state->start_offset = start_offset;
	state->mode = mode;
	state->source = source;
	state->priv = priv;
	state->eof = false;
	state->pending = 0;
	state->next_offset = start_offset;

	state->chunk_size = cli_write_max_bufsize(cli, mode);

	if (window_size == 0) {
		window_size = cli->max_mux * state->chunk_size;
	}
	state->num_reqs = window_size/state->chunk_size;
	if ((window_size % state->chunk_size) > 0) {
		state->num_reqs += 1;
	}
	state->num_reqs = MIN(state->num_reqs, cli->max_mux);
	state->num_reqs = MAX(state->num_reqs, 1);

	state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
					state->num_reqs);
	if (state->reqs == NULL) {
		goto failed;
	}

	for (i=0; i<state->num_reqs; i++) {
		if (!cli_push_write_setup(req, state, i)) {
			goto failed;
		}

		if (state->eof) {
			break;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	return req;

 failed:
	tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
	return tevent_req_post(req, ev);
}

static void cli_push_written(struct tevent_req *subreq)
{
	struct cli_push_write_state *substate = tevent_req_callback_data(
		subreq, struct cli_push_write_state);
	struct tevent_req *req = substate->req;
	struct cli_push_state *state = tevent_req_data(
		req, struct cli_push_state);
	NTSTATUS status;
	uint32_t idx = substate->idx;

	state->reqs[idx] = NULL;
	state->pending -= 1;

	status = cli_writeall_recv(subreq);
	TALLOC_FREE(subreq);
	TALLOC_FREE(substate);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (!state->eof) {
		if (!cli_push_write_setup(req, state, idx)) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
	}

	if (state->pending == 0) {
		tevent_req_done(req);
		return;
	}
}

NTSTATUS cli_push_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
		  off_t start_offset, size_t window_size,
		  size_t (*source)(uint8_t *buf, size_t n, void *priv),
		  void *priv)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	if (cli_has_async_calls(cli)) {
		/*
		 * Can't use sync call while an async call is in flight
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto fail;
	}

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
			    window_size, source, priv);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = cli_push_recv(req);
 fail:
	TALLOC_FREE(frame);
	if (!NT_STATUS_IS_OK(status)) {
		cli_set_error(cli, status);
	}
	return status;
}
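
/*
 * Usage sketch (illustrative only; read_fd_source and its state are
 * hypothetical, not part of this file). cli_push keeps calling "source" to
 * fill each chunk and finishes once the callback signals EOF by returning 0:
 *
 *	struct read_fd_state { int fd; };
 *
 *	static size_t read_fd_source(uint8_t *buf, size_t n, void *priv)
 *	{
 *		struct read_fd_state *s = (struct read_fd_state *)priv;
 *		ssize_t nread = read(s->fd, buf, n);
 *		return (nread > 0) ? nread : 0;
 *	}
 *
 *	struct read_fd_state st = { .fd = local_fd };
 *	status = cli_push(cli, fnum, 0, 0, 0, read_fd_source, &st);
 */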